# This code is written by Sunita Nayak at BigVision LLC. It is based on the OpenCV project. It is subject to the license terms in the LICENSE file found in this distribution and at http://opencv.org/license.html

# Usage example: python3 augmented_reality_with_aruco.py --image=test.jpg
#                python3 augmented_reality_with_aruco.py --video=test.mp4

import cv2 as cv
#from cv2 import aruco
import argparse
import sys
import os.path
import numpy as np

parser = argparse.ArgumentParser(description='Augmented Reality using Aruco markers in OpenCV')
parser.add_argument('--image', help='Path to image file.')
parser.add_argument('--video', help='Path to video file.')
args = parser.parse_args()

im_src = cv.imread("new_scenery.jpg")

outputFile = "ar_out_py.avi"
if args.image:
    # Open the image file
    if not os.path.isfile(args.image):
        print("Input image file ", args.image, " doesn't exist")
        sys.exit(1)
    cap = cv.VideoCapture(args.image)
    outputFile = args.image[:-4] + '_ar_out_py.jpg'
elif args.video:
    # Open the video file
    if not os.path.isfile(args.video):
        print("Input video file ", args.video, " doesn't exist")
        sys.exit(1)
    cap = cv.VideoCapture(args.video)
    outputFile = args.video[:-4] + '_ar_out_py.avi'
    print("Storing it as :", outputFile)
else:
    # Webcam input
    cap = cv.VideoCapture(0)

# Get the video writer initialized to save the output video
if not args.image:
    vid_writer = cv.VideoWriter(outputFile, cv.VideoWriter_fourcc('M', 'J', 'P', 'G'), 28,
                                (round(2 * cap.get(cv.CAP_PROP_FRAME_WIDTH)),
                                 round(cap.get(cv.CAP_PROP_FRAME_HEIGHT))))

winName = "Augmented Reality using Aruco markers in OpenCV"

while cv.waitKey(1) < 0:
    try:
        # get frame from the video
        hasFrame, frame = cap.read()

        # Stop the program if reached end of video
        if not hasFrame:
            print("Done processing !!!")
            print("Output file is stored as ", outputFile)
            cv.waitKey(3000)
            break

        # Load the dictionary that was used to generate the markers.
        dictionary = cv.aruco.Dictionary_get(cv.aruco.DICT_6X6_250)

        # Initialize the detector parameters using default values
        parameters = cv.aruco.DetectorParameters_create()

        # Detect the markers in the image
        markerCorners, markerIds, rejectedCandidates = cv.aruco.detectMarkers(frame, dictionary, parameters=parameters)

        index = np.squeeze(np.where(markerIds == 25))
        refPt1 = np.squeeze(markerCorners[index[0]])[1]

        index = np.squeeze(np.where(markerIds == 33))
        refPt2 = np.squeeze(markerCorners[index[0]])[2]

        distance = np.linalg.norm(refPt1 - refPt2)

        scalingFac = 0.02
        pts_dst = [[refPt1[0] - round(scalingFac * distance), refPt1[1] - round(scalingFac * distance)]]
        pts_dst = pts_dst + [[refPt2[0] + round(scalingFac * distance), refPt2[1] - round(scalingFac * distance)]]

        index = np.squeeze(np.where(markerIds == 30))
        refPt3 = np.squeeze(markerCorners[index[0]])[0]
        pts_dst = pts_dst + [[refPt3[0] + round(scalingFac * distance), refPt3[1] + round(scalingFac * distance)]]

        index = np.squeeze(np.where(markerIds == 23))
        refPt4 = np.squeeze(markerCorners[index[0]])[0]
        pts_dst = pts_dst + [[refPt4[0] - round(scalingFac * distance), refPt4[1] + round(scalingFac * distance)]]

        pts_src = [[0, 0], [im_src.shape[1], 0], [im_src.shape[1], im_src.shape[0]], [0, im_src.shape[0]]]

        pts_src_m = np.asarray(pts_src)
        pts_dst_m = np.asarray(pts_dst)

        # Calculate Homography
        h, status = cv.findHomography(pts_src_m, pts_dst_m)

        # Warp source image to destination based on homography
        warped_image = cv.warpPerspective(im_src, h, (frame.shape[1], frame.shape[0]))

        # Prepare a mask representing the region to copy from the warped image into the original frame.
        mask = np.zeros([frame.shape[0], frame.shape[1]], dtype=np.uint8)
        cv.fillConvexPoly(mask, np.int32([pts_dst_m]), (255, 255, 255), cv.LINE_AA)

        # Erode the mask to not copy the boundary effects from the warping
        element = cv.getStructuringElement(cv.MORPH_RECT, (3, 3))
        mask = cv.erode(mask, element, iterations=3)

        # Copy the mask into 3 channels.
        warped_image = warped_image.astype(float)
        mask3 = np.zeros_like(warped_image)
        for i in range(0, 3):
            mask3[:, :, i] = mask / 255

        # Copy the warped image into the original frame in the mask region.
        warped_image_masked = cv.multiply(warped_image, mask3)
        frame_masked = cv.multiply(frame.astype(float), 1 - mask3)
        im_out = cv.add(warped_image_masked, frame_masked)

        # Showing the original image and the new output image side by side
        concatenatedOutput = cv.hconcat([frame.astype(float), im_out])
        cv.imshow("AR using Aruco markers", concatenatedOutput.astype(np.uint8))

        # Write the frame with the detection boxes
        if args.image:
            cv.imwrite(outputFile, concatenatedOutput.astype(np.uint8))
        else:
            vid_writer.write(concatenatedOutput.astype(np.uint8))

    except Exception as inst:
        print(inst)

cv.destroyAllWindows()
if 'vid_writer' in locals():
    vid_writer.release()
    print('Video writer released..')
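
# --- Hedged addition, not part of the original script ---
# The overlay above assumes four printed ArUco markers with ids 25, 33, 30
# and 23 at the corners of the target surface. Assuming the same legacy
# cv2.aruco API used above (opencv-contrib builds before 4.7), the markers
# could be generated roughly like this:
def generate_markers(ids=(23, 25, 30, 33), side_pixels=200):
    dictionary = cv.aruco.Dictionary_get(cv.aruco.DICT_6X6_250)
    for marker_id in ids:
        # drawMarker renders the marker as a side_pixels x side_pixels image
        marker = cv.aruco.drawMarker(dictionary, marker_id, side_pixels)
        cv.imwrite("marker_{}.png".format(marker_id), marker)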
|
{"hexsha": "31da7a673b40ef9c5ff689595fa802b3eff8af2d", "size": 5489, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/playground/arucoAR.py", "max_stars_repo_name": "manavjain99/oscar_buggy", "max_stars_repo_head_hexsha": "b5dab0848f8667c9515bcfb078730cd0c4060000", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/playground/arucoAR.py", "max_issues_repo_name": "manavjain99/oscar_buggy", "max_issues_repo_head_hexsha": "b5dab0848f8667c9515bcfb078730cd0c4060000", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/playground/arucoAR.py", "max_forks_repo_name": "manavjain99/oscar_buggy", "max_forks_repo_head_hexsha": "b5dab0848f8667c9515bcfb078730cd0c4060000", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.6592592593, "max_line_length": 211, "alphanum_fraction": 0.6533066132, "include": true, "reason": "import numpy", "num_tokens": 1361}
|
/**
\file
\author Datta Ramadasan
//==============================================================================
// Copyright 2015 INSTITUT PASCAL UMR 6602 CNRS/Univ. Clermont II
//
// Distributed under the Boost Software License, Version 1.0.
// See accompanying file LICENSE.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//==============================================================================
*/
#ifndef __LMA_OPT2_TRAIT_USE_ESTIMATOR_HPP__
#define __LMA_OPT2_TRAIT_USE_ESTIMATOR_HPP__
#include <libv/lma/version.hpp>
#include <libv/lma/lm/container/container.hpp>
#include <boost/type_traits/is_convertible.hpp>
#include <boost/fusion/include/for_each.hpp>
#include <boost/mpl/bool.hpp>
#include <libv/lma/numeric/mediane.hpp>
#include <iostream>
namespace lma
{
  struct MEstimator_{};

  template<class Base> struct MEstimator : MEstimator_
  {
    Base const& cast() const { return static_cast<Base const &>(*this); }

    template<class Float, size_t N>
    Eigen::Matrix<Float,N,1> me(const std::array<Float,N>& res, Float C) const
    {
      Eigen::Matrix<Float,N,1> ret;
      for(size_t i = 0 ; i < N ; ++i)
        ret[i] = cast().weight(res[i],C);
      return ret;
    }

    template<class Float>
    Eigen::Matrix<Float,1,1> me(const Float& res, const Float& C) const
    {
      Eigen::Matrix<Float,1,1> ret;
      ret << cast().weight(res,C);
      return ret;
    }

    template<class Float, int N>
    Eigen::Matrix<Float,N,1> me(const Eigen::Matrix<Float,N,1>& res, Float C) const
    {
      Eigen::Matrix<Float,N,1> ret;
      for(int i = 0 ; i < N ; ++i)
        ret[i] = cast().weight(res[i],C);
      return ret;
    }
  };

  namespace detail
  {
    template<class F> struct IsMEstimator : boost::is_convertible<F*,MEstimator_*>::type {};

    template<class W> struct Weight_J
    {
      const W& weight;
      Weight_J(const W& w_):weight(w_){}

      template<class Pair> void operator()(Pair& pair) const
      {
        for(int i = 0 ; i < Rows<decltype(pair.second)>::value ; ++i)
          for(int j = 0 ; j < Cols<decltype(pair.second)>::value ; ++j)
            pair.second(i,j) = pair.second(i,j)*weight[i];
      }
    };

    template<class F, class Jacobs, class Erreurs, class Mad>
    void apply_mestimator(const F& f, Jacobs& jacobs, Erreurs& erreur, const Mad& mad, typename boost::enable_if<detail::IsMEstimator<F>>::type* =0)
    {
      auto weight = f.me(erreur,boost::fusion::at_key<F>(mad));
      // std::cout << " erreur " << erreur[0] << ", " << weight << std::endl;
      cwise_product(erreur,weight,erreur);
      boost::fusion::for_each(jacobs,Weight_J<decltype(weight)>(weight));
    }

    template<class F, class Jacobs, class Erreurs, class Mad> void apply_mestimator(const F&, Jacobs&, Erreurs&, const Mad&, typename boost::disable_if<detail::IsMEstimator<F>>::type* =0) { }

    template<class F, class Erreurs, class Mad>
    void apply_mestimator_erreur(const F& f, Erreurs& erreur, const Mad& mad, typename boost::enable_if<detail::IsMEstimator<F>>::type* =0)
    {
      auto weight = f.me(erreur,boost::fusion::at_key<F>(mad));
      cwise_product(erreur,weight,erreur);
    }

    template<class F, class Erreurs, class Mad> void apply_mestimator_erreur(const F&, Erreurs&, const Mad&, typename boost::disable_if<detail::IsMEstimator<F>>::type* =0) { }
  }

  template<class Float>
  struct GermanMcClure : MEstimator<GermanMcClure<Float>>
  {
    Float coeff_mad;
    GermanMcClure(Float a_):coeff_mad(a_){}

    Float weight(const Float& res, const Float& C) const
    {
      return (C != 0 ? (C / (res*res+C*C)) : 1.0);
    }

    Float compute(std::vector<Float> norms) const
    {
      if (norms.empty())
        return 0;
      Float med = mediane(norms);
      for(Float& m : norms)
        m = std::abs(m-med);
      Float MAD = mediane(norms);
      Float C = med + coeff_mad * MAD;
      // std::cout << " C : " << C << " = " << med << " + " << coeff_mad << " * " << MAD << std::endl;
      return C;
    }
  };
}
#endif
|
{"hexsha": "d5eaf23a5088be447e9619f8c9c025bef6655ada", "size": 4152, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/libv/lma/lm/trait/use_estimator.hpp", "max_stars_repo_name": "bezout/LMA", "max_stars_repo_head_hexsha": "9555e41eed5f44690c5f6e3ea2d22d520ff1a9d2", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 29.0, "max_stars_repo_stars_event_min_datetime": "2015-12-08T12:07:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-08T21:23:01.000Z", "max_issues_repo_path": "src/libv/lma/lm/trait/use_estimator.hpp", "max_issues_repo_name": "bezout/LMA", "max_issues_repo_head_hexsha": "9555e41eed5f44690c5f6e3ea2d22d520ff1a9d2", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 3.0, "max_issues_repo_issues_event_min_datetime": "2016-07-11T16:23:48.000Z", "max_issues_repo_issues_event_max_datetime": "2017-04-05T13:33:00.000Z", "max_forks_repo_path": "src/libv/lma/lm/trait/use_estimator.hpp", "max_forks_repo_name": "bezout/LMA", "max_forks_repo_head_hexsha": "9555e41eed5f44690c5f6e3ea2d22d520ff1a9d2", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 8.0, "max_forks_repo_forks_event_min_datetime": "2015-12-21T01:52:27.000Z", "max_forks_repo_forks_event_max_datetime": "2017-12-26T02:26:55.000Z", "avg_line_length": 31.2180451128, "max_line_length": 190, "alphanum_fraction": 0.58477842, "num_tokens": 1152}
|
import sys
sys.path.append("./models")
import numpy as np
import torch
from datasets.VNRiceDataset import VNRiceDataset
from models.TransformerEncoder import TransformerEncoder
from models.multi_scale_resnet import MSResNet
from models.TempCNN import TempCNN
from models.rnn import RNN
from datasets.ConcatDataset import ConcatDataset
from datasets.GAFDataset import GAFDataset
from datasets.BavarianCrops_Dataset import BavarianCropsDataset
import argparse
from utils.trainer import Trainer
from torch.utils.data.sampler import RandomSampler, SequentialSampler
from utils.texparser import parse_run
from utils.logger import Logger
from utils.visdomLogger import VisdomLogger
from utils.scheduled_optimizer import ScheduledOptim
import torch.optim as optim
from experiments import experiments
import os
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-b', '--batchsize', type=int, default=256, help='batch size')
    parser.add_argument(
        '-e', '--epochs', type=int, default=150, help='number of training epochs')
    parser.add_argument(
        '-w', '--workers', type=int, default=4, help='number of CPU workers to load the next batch')
    parser.add_argument('--overwrite', action='store_true',
                        help="Overwrite automatic snapshots if they exist")
    parser.add_argument(
        '--dataroot', type=str, default='../data', help='root to dataset. default ../data')
    parser.add_argument(
        '--classmapping', type=str, default=None, help='classmapping')
    parser.add_argument(
        '--hyperparameterfolder', type=str, default=None, help='hyperparameter folder')
    parser.add_argument(
        '-x', '--experiment', type=str, default="test", help='experiment prefix')
    parser.add_argument(
        '--store', type=str, default="/tmp", help='store run logger results')
    parser.add_argument(
        '--test_every_n_epochs', type=int, default=1, help='skip some test epochs for faster overall training')
    parser.add_argument(
        '--checkpoint_every_n_epochs', type=int, default=5, help='save checkpoints during training')
    parser.add_argument(
        '--seed', type=int, default=0, help='seed for batching and weight initialization')
    parser.add_argument(
        '--hparamset', type=int, default=0, help='rank of hyperparameter set. 0: best hyperparameter')
    parser.add_argument(
        '-i', '--show-n-samples', type=int, default=1, help='show n samples in visdom')
    args, _ = parser.parse_known_args()
    return args

def prepare_dataset(args):
    if args.dataset == "BavarianCrops":
        root = os.path.join(args.dataroot, "BavarianCrops")
        #ImbalancedDatasetSampler
        test_dataset_list = list()
        for region in args.testregions:
            test_dataset_list.append(
                BavarianCropsDataset(root=root, region=region, partition=args.test_on,
                                     classmapping=args.classmapping, samplet=args.samplet,
                                     scheme=args.scheme, mode=args.mode, seed=args.seed)
            )
        train_dataset_list = list()
        for region in args.trainregions:
            train_dataset_list.append(
                BavarianCropsDataset(root=root, region=region, partition=args.train_on,
                                     classmapping=args.classmapping, samplet=args.samplet,
                                     scheme=args.scheme, mode=args.mode, seed=args.seed)
            )
    if args.dataset == "VNRice":
        train_dataset_list = [VNRiceDataset(root=args.root, partition=args.train_on, samplet=args.samplet,
                                            mode=args.mode, seed=args.seed)]
        test_dataset_list = [VNRiceDataset(root=args.root, partition=args.test_on, samplet=args.samplet,
                                           mode=args.mode, seed=args.seed)]
    if args.dataset == "BreizhCrops":
        root = "/home/marc/projects/BreizhCrops/data"
        train_dataset_list = list()
        for region in args.trainregions:
            train_dataset_list.append(
                CropsDataset(root=root, region=region, samplet=args.samplet)
            )
        #ImbalancedDatasetSampler
        test_dataset_list = list()
        for region in args.testregions:
            test_dataset_list.append(
                CropsDataset(root=root, region=region, samplet=args.samplet)
            )
    elif args.dataset == "GAFv2":
        root = os.path.join(args.dataroot, "GAFdataset")
        #ImbalancedDatasetSampler
        test_dataset_list = list()
        for region in args.testregions:
            test_dataset_list.append(
                GAFDataset(root, region=region, partition="test", scheme=args.scheme, classmapping=args.classmapping, features=args.features)
            )
        train_dataset_list = list()
        for region in args.trainregions:
            train_dataset_list.append(
                GAFDataset(root, region=region, partition="train", scheme=args.scheme, classmapping=args.classmapping, features=args.features)
            )

    print("setting random seed to " + str(args.seed))
    np.random.seed(args.seed)
    if args.seed is not None:
        torch.random.manual_seed(args.seed)

    traindataset = ConcatDataset(train_dataset_list)
    traindataloader = torch.utils.data.DataLoader(dataset=traindataset, sampler=RandomSampler(traindataset),
                                                  batch_size=args.batchsize, num_workers=args.workers)
    testdataset = ConcatDataset(test_dataset_list)
    testdataloader = torch.utils.data.DataLoader(dataset=testdataset, sampler=SequentialSampler(testdataset),
                                                 batch_size=args.batchsize, num_workers=args.workers)
    return traindataloader, testdataloader

def train(args):
    classmapping = args.classmapping
    hyperparameterfolder = args.hyperparameterfolder

    # prepare dataset, model, hyperparameters for the respective experiments
    args = experiments(args)
    if classmapping is not None:
        print("overwriting classmapping with manual input")
        args.classmapping = classmapping
    if hyperparameterfolder is not None:
        print("overwriting hyperparameterfolder with manual input")
        args.hyperparameterfolder = hyperparameterfolder

    traindataloader, testdataloader = prepare_dataset(args)

    args.nclasses = traindataloader.dataset.nclasses
    classname = traindataloader.dataset.classname
    klassenname = traindataloader.dataset.klassenname
    args.seqlength = traindataloader.dataset.sequencelength
    #args.seqlength = args.samplet
    args.input_dims = traindataloader.dataset.ndims

    model = getModel(args)

    store = os.path.join(args.store, args.experiment)
    logger = Logger(columns=["accuracy"], modes=["train", "test"], rootpath=store)
    visdomenv = "{}_{}".format(args.experiment, args.dataset)
    visdomlogger = VisdomLogger(env=visdomenv)

    if args.model in ["transformer"]:
        optimizer = ScheduledOptim(
            optim.Adam(
                filter(lambda x: x.requires_grad, model.parameters()),
                betas=(0.9, 0.98), eps=1e-09, weight_decay=args.weight_decay),
            model.d_model, args.warmup)
    elif args.model in ["rnn", "msresnet", "tempcnn"]:
        optimizer = optim.Adam(
            filter(lambda x: x.requires_grad, model.parameters()),
            betas=(0.9, 0.999), eps=1e-08, weight_decay=args.weight_decay, lr=args.learning_rate)
    else:
        raise ValueError(args.model + " is not a valid model. Use one of 'rnn', 'msresnet', 'transformer', 'tempcnn'")

    config = dict(
        epochs=args.epochs,
        learning_rate=args.learning_rate,
        show_n_samples=args.show_n_samples,
        store=store,
        visdomlogger=visdomlogger,
        overwrite=args.overwrite,
        checkpoint_every_n_epochs=args.checkpoint_every_n_epochs,
        test_every_n_epochs=args.test_every_n_epochs,
        logger=logger,
        optimizer=optimizer
    )

    trainer = Trainer(model, traindataloader, testdataloader, **config)
    logger = trainer.fit()

    # stores all stored values in the rootpath of the logger
    logger.save()

    #pth = store+"/npy/confusion_matrix_{epoch}.npy".format(epoch = args.epochs)
    parse_run(store, args.classmapping, outdir=store)
    #confusionmatrix2table(pth,
    #                      classnames=klassenname,
    #                      outfile=store+"/npy/table.tex")
    #texconfmat(pth)
    #accuracy2table(store+"/npy/confusion_matrix_{epoch}.npy".format(epoch = args.epochs), classnames=klassenname)
    #stats = trainer.test_epoch(evaldataloader)

def getModel(args):
    if args.model == "rnn":
        model = RNN(input_dim=args.input_dims, nclasses=args.nclasses, hidden_dims=args.hidden_dims,
                    num_rnn_layers=args.num_layers, dropout=args.dropout, bidirectional=True)
    if args.model == "msresnet":
        model = MSResNet(input_channel=args.input_dims, layers=[1, 1, 1, 1], num_classes=args.nclasses, hidden_dims=args.hidden_dims)
    if args.model == "tempcnn":
        model = TempCNN(input_dim=args.input_dims, nclasses=args.nclasses, sequence_length=args.samplet, hidden_dims=args.hidden_dims, kernel_size=args.kernel_size)
    elif args.model == "transformer":
        hidden_dims = args.hidden_dims  # 256
        n_heads = args.n_heads  # 8
        n_layers = args.n_layers  # 6
        len_max_seq = args.samplet
        dropout = args.dropout
        d_inner = hidden_dims*4

        model = TransformerEncoder(in_channels=args.input_dims, len_max_seq=len_max_seq,
                                   d_word_vec=hidden_dims, d_model=hidden_dims, d_inner=d_inner,
                                   n_layers=n_layers, n_head=n_heads, d_k=hidden_dims//n_heads, d_v=hidden_dims//n_heads,
                                   dropout=dropout, nclasses=args.nclasses)

    if torch.cuda.is_available():
        model = model.cuda()

    pytorch_total_params = sum(p.numel() for p in model.parameters())
    print("initialized {} model ({} parameters)".format(args.model, pytorch_total_params))

    return model

if __name__ == "__main__":
    args = parse_args()
    train(args)
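
# --- Hedged usage sketch, not part of the original file ---
# Example invocation based on the flags defined in parse_args(); the
# experiment preset name is a placeholder that must exist in experiments.py:
#
#   python train.py -x <experiment_preset> --dataroot ../data --store /tmp/runs -b 128 -e 60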
|
{"hexsha": "698bba5b7804837ae48b6714c357825e331c53a6", "size": 10216, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/train.py", "max_stars_repo_name": "Pratyush1991/crop-type-mapping", "max_stars_repo_head_hexsha": "d9d99ec92c3a090ec5576f9e46c89dfcc6f50cf3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 94, "max_stars_repo_stars_event_min_datetime": "2019-11-11T10:26:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T14:18:51.000Z", "max_issues_repo_path": "src/train.py", "max_issues_repo_name": "Pratyush1991/crop-type-mapping", "max_issues_repo_head_hexsha": "d9d99ec92c3a090ec5576f9e46c89dfcc6f50cf3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-03-29T18:28:59.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-04T14:59:51.000Z", "max_forks_repo_path": "src/train.py", "max_forks_repo_name": "Pratyush1991/crop-type-mapping", "max_forks_repo_head_hexsha": "d9d99ec92c3a090ec5576f9e46c89dfcc6f50cf3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 27, "max_forks_repo_forks_event_min_datetime": "2019-11-22T10:29:50.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-28T12:02:58.000Z", "avg_line_length": 40.062745098, "max_line_length": 164, "alphanum_fraction": 0.670614722, "include": true, "reason": "import numpy", "num_tokens": 2223}
|
% test for the translation-invariant (TI) quincunx wavelet transform
name = 'turbulence';
name = 'lena';
n = 256;
M = load_image(name);
M = rescale( crop(M,n) );
Jmax = log2(n)-1;
Jmin = Jmax-5;
% boundary conditions
options.bound = 'per';
options.bound = 'sym';
% vanishing moments
vm = 6;
options.primal_vm = vm;
options.dual_vm = vm;
% transform
disp('Computing forward transform');
MW = perform_quicunx_wavelet_transform_ti(M,Jmin,options);
disp('Computing backward transform');
M1 = perform_quicunx_wavelet_transform_ti(MW,Jmin,options);
disp(['Error=' num2str(norm(M-M1,'fro')/norm(M,'fro')) ' (should be 0).']);
rep = 'results/quincunx/';
if not(exist(rep))
    mkdir(rep);
end
% display dual wavelets
clf;
for j=7:12
    MW = MW*0;
    MW(end/2,end/2,j) = 1;
    M1 = perform_quicunx_wavelet_transform_ti(MW,Jmin,options);
    imageplot(M1, '', 4,3,j);
    % save
    if j>6
        warning off;
        % imwrite(rescale(M1), [rep 'quincunx-wavelet-' num2string_fixeddigit(j,2) '.png'], 'png');
        warning on;
        clf;
        surf(M1);
        shading interp; colormap jet(256);
        view(-20,50); axis tight; axis off;
        camlight;
        saveas(gcf, [rep 'quincunx-wavelet-vm' num2str(vm) '-' num2string_fixeddigit(j,2) '.png'], 'png');
    end
end
|
{"author": "gpeyre", "repo": "matlab-toolboxes", "sha": "0cd622c988cda6f63f64d35cd7bd096fa578e5c6", "save_path": "github-repos/MATLAB/gpeyre-matlab-toolboxes", "path": "github-repos/MATLAB/gpeyre-matlab-toolboxes/matlab-toolboxes-0cd622c988cda6f63f64d35cd7bd096fa578e5c6/toolbox_wavelets/tests/test_quincunx_ti.m"}
|
import numpy as np
from PIL import Image
import glob
import torch
from torch.utils.data.dataset import Dataset
import torchvision.transforms as transforms
class FairFaceDataset(Dataset):
    def __init__(self, folder_path, dimensions):
        """
        Args:
            folder_path (string): path to image folder
            dimensions (tuple): a tuple of (width, height)
        """
        # Get image list
        self.image_list = glob.glob(folder_path + '*')
        # Calculate len
        self.data_len = len(self.image_list)
        # Transforms
        self.convert_image = transforms.Compose([
            transforms.Resize(dimensions),
            transforms.ToTensor(),
            # Comment the Normalize line out if you don't need it
            transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))
        ])

    def __getitem__(self, index):
        # Get image path from the image list
        single_image_path = self.image_list[index]
        # Open image
        im_as_im = Image.open(single_image_path)
        # Resize, convert to tensor, and normalize
        im_as_ten = self.convert_image(im_as_im)
        return im_as_ten

    def __len__(self):
        return self.data_len
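
# --- Hedged usage sketch, not part of the original file; the folder path and
# image size below are placeholders ---
if __name__ == '__main__':
    from torch.utils.data import DataLoader
    dataset = FairFaceDataset('data/fairface/', (89, 89))
    loader = DataLoader(dataset, batch_size=32, shuffle=True)
    batch = next(iter(loader))  # tensor of shape (32, 3, 89, 89) for RGB inputs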
|
{"hexsha": "10eac2687199ba2a90d991bacd3cd91e3f864cf0", "size": 1240, "ext": "py", "lang": "Python", "max_stars_repo_path": "dataset/fairface_dataset.py", "max_stars_repo_name": "Asap7772/DeepCriminalize", "max_stars_repo_head_hexsha": "c171c6ce6e87e126e6e2b0ed1d9709ee7d0ce667", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-10-28T02:40:07.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-28T02:40:07.000Z", "max_issues_repo_path": "dataset/fairface_dataset.py", "max_issues_repo_name": "Asap7772/DeepCriminalize", "max_issues_repo_head_hexsha": "c171c6ce6e87e126e6e2b0ed1d9709ee7d0ce667", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 16, "max_issues_repo_issues_event_min_datetime": "2020-01-28T23:05:09.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-27T03:02:38.000Z", "max_forks_repo_path": "dataset/fairface_dataset.py", "max_forks_repo_name": "Asap7772/DeepCriminalize", "max_forks_repo_head_hexsha": "c171c6ce6e87e126e6e2b0ed1d9709ee7d0ce667", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.0, "max_line_length": 75, "alphanum_fraction": 0.6217741935, "include": true, "reason": "import numpy", "num_tokens": 286}
|
[STATEMENT]
lemma higher_pderiv_0 [simp]: "(pderiv ^^ n) 0 = 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (pderiv ^^ n) 0 = 0
[PROOF STEP]
by (induction n) simp_all
|
{"llama_tokens": 85, "file": "E_Transcendental_E_Transcendental", "length": 1}
|
struct FluidParams
    eta2d :: Float64  # length scale introduced by the difference in viscosity of the surface fluid and the external fluids
end
abstract type Object end
|
{"hexsha": "f9c764dba0802372ff882b309938cb62cda95c9f", "size": 163, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "physics/fluid.jl", "max_stars_repo_name": "sarthakbagaria/curved-membrane-fluid-dynamics", "max_stars_repo_head_hexsha": "6b6223c89011a179595a39656a4f866816dffc8c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "physics/fluid.jl", "max_issues_repo_name": "sarthakbagaria/curved-membrane-fluid-dynamics", "max_issues_repo_head_hexsha": "6b6223c89011a179595a39656a4f866816dffc8c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "physics/fluid.jl", "max_forks_repo_name": "sarthakbagaria/curved-membrane-fluid-dynamics", "max_forks_repo_head_hexsha": "6b6223c89011a179595a39656a4f866816dffc8c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.1666666667, "max_line_length": 113, "alphanum_fraction": 0.7975460123, "num_tokens": 34}
|
import argparse, time, os, pickle
import numpy as np
import dgl
import torch
import torch.optim as optim
from models import LANDER
from dataset import LanderDataset
from utils import evaluation, decode, build_next_level, stop_iterating
###########
# ArgParser
parser = argparse.ArgumentParser()
# Dataset
parser.add_argument('--data_path', type=str, required=True)
parser.add_argument('--model_filename', type=str, default='lander.pth')
parser.add_argument('--faiss_gpu', action='store_true')
parser.add_argument('--early_stop', action='store_true')
# HyperParam
parser.add_argument('--knn_k', type=int, default=10)
parser.add_argument('--levels', type=int, default=1)
parser.add_argument('--tau', type=float, default=0.5)
parser.add_argument('--threshold', type=str, default='prob')
parser.add_argument('--metrics', type=str, default='pairwise,bcubed,nmi')
# Model
parser.add_argument('--hidden', type=int, default=512)
parser.add_argument('--num_conv', type=int, default=4)
parser.add_argument('--dropout', type=float, default=0.)
parser.add_argument('--gat', action='store_true')
parser.add_argument('--gat_k', type=int, default=1)
parser.add_argument('--balance', action='store_true')
parser.add_argument('--use_cluster_feat', action='store_true')
parser.add_argument('--use_focal_loss', action='store_true')
parser.add_argument('--use_gt', action='store_true')
args = parser.parse_args()
###########################
# Environment Configuration
if torch.cuda.is_available():
    device = torch.device('cuda')
else:
    device = torch.device('cpu')
##################
# Data Preparation
with open(args.data_path, 'rb') as f:
    features, labels = pickle.load(f)
global_features = features.copy()
dataset = LanderDataset(features=features, labels=labels, k=args.knn_k,
                        levels=1, faiss_gpu=args.faiss_gpu)
g = dataset.gs[0].to(device)
global_labels = labels.copy()
ids = np.arange(g.number_of_nodes())
global_edges = ([], [])
global_edges_len = len(global_edges[0])
global_num_nodes = g.number_of_nodes()
##################
# Model Definition
if not args.use_gt:
    feature_dim = g.ndata['features'].shape[1]
    model = LANDER(feature_dim=feature_dim, nhid=args.hidden,
                   num_conv=args.num_conv, dropout=args.dropout,
                   use_GAT=args.gat, K=args.gat_k,
                   balance=args.balance,
                   use_cluster_feat=args.use_cluster_feat,
                   use_focal_loss=args.use_focal_loss)
    model.load_state_dict(torch.load(args.model_filename))
    model = model.to(device)
    model.eval()
# number of edges added is the indicator for early stopping
num_edges_add_last_level = np.Inf
##################################
# Predict connectivity and density
for level in range(args.levels):
    if not args.use_gt:
        with torch.no_grad():
            g = model(g)
    new_pred_labels, peaks, \
    global_edges, global_pred_labels, global_peaks = decode(g, args.tau, args.threshold, args.use_gt,
                                                            ids, global_edges, global_num_nodes)
    ids = ids[peaks]
    new_global_edges_len = len(global_edges[0])
    num_edges_add_this_level = new_global_edges_len - global_edges_len
    if stop_iterating(level, args.levels, args.early_stop, num_edges_add_this_level, num_edges_add_last_level, args.knn_k):
        break
    global_edges_len = new_global_edges_len
    num_edges_add_last_level = num_edges_add_this_level

    # build new dataset
    features, labels, cluster_features = build_next_level(features, labels, peaks,
                                                          global_features, global_pred_labels, global_peaks)
    # After the first level, the number of nodes reduces a lot. Using cpu faiss is faster.
    dataset = LanderDataset(features=features, labels=labels, k=args.knn_k,
                            levels=1, faiss_gpu=False, cluster_features=cluster_features)
    if len(dataset.gs) == 0:
        break
    g = dataset.gs[0].to(device)

evaluation(global_pred_labels, global_labels, args.metrics)
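
# --- Hedged usage sketch, not part of the original file; the file names are
# placeholders ---
#
#   python test.py --data_path features.pkl --model_filename lander.pth \
#       --knn_k 10 --levels 2 --tau 0.5 --early_stop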
|
{"hexsha": "149d58336f468e4f5ce292bab15d1f7cb28f5a23", "size": 4086, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/pytorch/hilander/test.py", "max_stars_repo_name": "ketyi/dgl", "max_stars_repo_head_hexsha": "a1b859c29b63a673c148d13231a49504740e0e01", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 9516, "max_stars_repo_stars_event_min_datetime": "2018-12-08T22:11:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T13:04:33.000Z", "max_issues_repo_path": "examples/pytorch/hilander/test.py", "max_issues_repo_name": "ketyi/dgl", "max_issues_repo_head_hexsha": "a1b859c29b63a673c148d13231a49504740e0e01", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2494, "max_issues_repo_issues_event_min_datetime": "2018-12-08T22:43:00.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T21:16:27.000Z", "max_forks_repo_path": "examples/pytorch/hilander/test.py", "max_forks_repo_name": "ketyi/dgl", "max_forks_repo_head_hexsha": "a1b859c29b63a673c148d13231a49504740e0e01", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2529, "max_forks_repo_forks_event_min_datetime": "2018-12-08T22:56:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T13:07:41.000Z", "avg_line_length": 38.5471698113, "max_line_length": 123, "alphanum_fraction": 0.684287812, "include": true, "reason": "import numpy", "num_tokens": 907}
|
from typing import Tuple
import tensorflow as tf
import tensorflow.keras as keras
import numpy as np
# Build the actual model to run
class ActorCritic(keras.Model):
    def __init__(self, num_actions, num_hidden_units):
        """
        Builds an actor critic network

        Args:
            num_actions: Number of possible actions the model can take
            num_hidden_units: Number of units in the common hidden layer of the neural network
        """
        super().__init__()
        # Build layers
        self.common = keras.layers.Dense(num_hidden_units, activation='relu')
        self.actor = keras.layers.Dense(num_actions)
        self.critic = keras.layers.Dense(1)

    def call(self, inputs: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
        """
        Runs the actor critic network

        Args:
            inputs: The observations from the environment to make an action choice on and calculate the reward for.
        """
        x = self.common(inputs)
        return self.actor(x), self.critic(x)

# Allow model to step through simulation
def env_step_wrapper(env):
    """
    Creates the function that allows the model to interact with a certain environment.

    Args:
        env: The environment the model will interact with
    """
    def env_step(action: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """
        Returns the environment's state, reward, and doneness after an action is taken

        Args:
            action: The action performed on the environment
        """
        state, reward, done, _ = env.step(action)
        return state.astype(np.float32), np.array(reward, np.int32), np.array(done, np.int32)
    return env_step
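
# --- Hedged addition, not part of the original file ---
# To call env_step from inside a tf.function-compiled training loop, the usual
# pattern (as in the TensorFlow actor-critic tutorial) is to wrap it with
# tf.numpy_function. `env` is assumed to be a Gym-style environment.
def tf_env_step_wrapper(env):
    env_step = env_step_wrapper(env)
    def tf_env_step(action: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
        # tf.numpy_function executes the Python callable eagerly inside the graph
        return tf.numpy_function(env_step, [action], [tf.float32, tf.int32, tf.int32])
    return tf_env_step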
|
{"hexsha": "5cc1f0cffff7b19b7e088d33b8db56f980b41908", "size": 1718, "ext": "py", "lang": "Python", "max_stars_repo_path": "ActorCriticNetwork/ActorCritic.py", "max_stars_repo_name": "esslushy/actor-critic-ml-practice", "max_stars_repo_head_hexsha": "1a49b8a3ea5cc92f26a290fa7b032c126e00a3f0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ActorCriticNetwork/ActorCritic.py", "max_issues_repo_name": "esslushy/actor-critic-ml-practice", "max_issues_repo_head_hexsha": "1a49b8a3ea5cc92f26a290fa7b032c126e00a3f0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ActorCriticNetwork/ActorCritic.py", "max_forks_repo_name": "esslushy/actor-critic-ml-practice", "max_forks_repo_head_hexsha": "1a49b8a3ea5cc92f26a290fa7b032c126e00a3f0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.0612244898, "max_line_length": 115, "alphanum_fraction": 0.6478463329, "include": true, "reason": "import numpy", "num_tokens": 359}
|
line5
help! world
order
line 4
help! world
order
line 3
help! world
order
line 2
help! world
order
line 1
help! world
order
|
{"hexsha": "578a44b6b21c0200af43fa88a0d6d22c4984bef6", "size": 124, "ext": "r", "lang": "R", "max_stars_repo_path": "bin/ed/test/g1.r", "max_stars_repo_name": "lambdaxymox/DragonFlyBSD", "max_stars_repo_head_hexsha": "6379cf2998a4a073c65b12d99e62988a375b4598", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 700, "max_stars_repo_stars_event_min_datetime": "2017-12-07T02:08:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T06:42:56.000Z", "max_issues_repo_path": "text_cmds/ed/test/g1.r", "max_issues_repo_name": "yury/ios_system", "max_issues_repo_head_hexsha": "a56b915c54f42dfb00089a1d5d1217f95b8051aa", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 114, "max_issues_repo_issues_event_min_datetime": "2018-01-10T20:12:21.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T06:55:35.000Z", "max_forks_repo_path": "text_cmds/ed/test/g1.r", "max_forks_repo_name": "yury/ios_system", "max_forks_repo_head_hexsha": "a56b915c54f42dfb00089a1d5d1217f95b8051aa", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 134, "max_forks_repo_forks_event_min_datetime": "2018-01-08T08:33:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T11:51:27.000Z", "avg_line_length": 7.75, "max_line_length": 11, "alphanum_fraction": 0.7661290323, "num_tokens": 49}
|
open methanol.xyz
bond 1.55
%hybridize 0 SP2
model GAFF
thermostat ANDERSEN
energy
minimize 0.0001 0.00001 40000 1000 min.xyz
energy
heat 300 50 400 0.01 0.01 0.0001 0.00001 0.3 heat.xyz
temperature
energy
prod 0.0001 0.0001 300 .3 500000 1000 prod.xyz
|
{"hexsha": "efca66b77e0904f3871f6202d7c29bbf3f6072b1", "size": 254, "ext": "r", "lang": "R", "max_stars_repo_path": "examples/methanol/script.r", "max_stars_repo_name": "ThatPerson/MolecularDynamics", "max_stars_repo_head_hexsha": "967a9db20528f1c4806b46445df17464525f4a2e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-03T16:03:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-03T16:03:25.000Z", "max_issues_repo_path": "examples/methanol/script.r", "max_issues_repo_name": "ThatPerson/MolecularDynamics", "max_issues_repo_head_hexsha": "967a9db20528f1c4806b46445df17464525f4a2e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/methanol/script.r", "max_forks_repo_name": "ThatPerson/MolecularDynamics", "max_forks_repo_head_hexsha": "967a9db20528f1c4806b46445df17464525f4a2e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.5384615385, "max_line_length": 53, "alphanum_fraction": 0.7795275591, "num_tokens": 115}
|
import time
import edgeiq
import cv2
import numpy as np
"""
detect objects edges based on thermal detection
"""
def main():
fps = edgeiq.FPS()
try:
with edgeiq.WebcamVideoStream(cam=1) as video_stream, \
edgeiq.Streamer() as streamer:
# Allow Webcam to warm up
time.sleep(2.0)
fps.start()
# loop detection
while True:
frame = video_stream.read()
# HSV
frame_hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
frame_value = frame_hsv[:, :, 2]
# bilateral filter - edge-preserving image smoothing method
blurredBrightness = cv2.bilateralFilter(frame_value, 9, 150, 150)
# Canny edge detector
thresh = 50
edges = cv2.Canny(blurredBrightness, thresh, thresh*2, L2gradient=True)
# Generate text to display on streamer
text = "Thermal Edge Detector"
streamer.send_data(edges, text)
fps.update()
if streamer.check_exit():
break
finally:
fps.stop()
print("elapsed time: {:.2f}".format(fps.get_elapsed_seconds()))
print("approx. FPS: {:.2f}".format(fps.compute_fps()))
print("Program Ending")
if __name__ == "__main__":
main()
|
{"hexsha": "680fe6a1ae7bd6b37aaf783f6742fa8dc7b80b25", "size": 1404, "ext": "py", "lang": "Python", "max_stars_repo_path": "thermal-edges/app.py", "max_stars_repo_name": "alwaysai/thermal-imaging", "max_stars_repo_head_hexsha": "70a2d0b337c29b762a4dbce19309ab0dfae77a38", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-10-22T21:24:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-03T07:58:41.000Z", "max_issues_repo_path": "thermal-edges/app.py", "max_issues_repo_name": "alwaysai/thermal-imaging", "max_issues_repo_head_hexsha": "70a2d0b337c29b762a4dbce19309ab0dfae77a38", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-07-14T11:07:24.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-15T03:06:38.000Z", "max_forks_repo_path": "thermal-edges/app.py", "max_forks_repo_name": "alwaysai/thermal-imaging", "max_forks_repo_head_hexsha": "70a2d0b337c29b762a4dbce19309ab0dfae77a38", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-11-28T07:34:28.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-08T00:11:11.000Z", "avg_line_length": 25.0714285714, "max_line_length": 87, "alphanum_fraction": 0.5398860399, "include": true, "reason": "import numpy", "num_tokens": 304}
|
import subprocess
import sys
def install(package):
    subprocess.check_call([sys.executable, "-m", "pip", "install", package])
#source activate py36-udify-direct
#python biasCDA/biasCDA/e2e-scripts/step1-stanza-conllu.py
##subprocess.call(["source activate", "py36-udify-direct"])
##install('stanza')
import stanza
#stanza.download('en')
#install('spacy_conll')
from spacy_conll import init_parser
# Initialise English parser, already including the ConllFormatter as a pipeline component.
# Indicate that we want to get the CoNLL headers in the string output.
# `use_gpu` and `verbose` are specific to stanza (and stanfordnlp). These keywords arguments
# are passed onto their Pipeline() initialisation
nlp = init_parser("stanza", "en", parser_opts={"use_gpu": True, "verbose": False}, include_headers=True)
def getCampherConlluStr(line):
    """
    conda activate CampherNlp
    wget https://github.com/PKSHATechnology-Research/camphr_models/releases/download/0.7.0/en_udify-0.7.tar.gz
    pip install en_udify-0.7.tar.gz
    pip install en_udify-0.7.tar.gz -b /home/nlpsrv/tempdir
    pip install camphr-allennlp
    pip install spacy_conll
    python
    import spacy
    from spacy_conll import init_parser
    nlp = spacy.load("en_udify")
    doc = nlp("The doctor is going home as she is tired.")
    print(doc._.conll_str)
    """
def getConlluStr(line):
    #print("Line{}: {}".format(count, line.strip()))
    myStr = line.strip()
    try:
        if myStr:
            # Parse a given string
            doc = nlp(myStr)
            # Get the CoNLL representation of the whole document, including headers
            return doc._.conll_str
            # print(conll)
            # outputFile.write(conll)
    except Exception as ex:
        print('*' + myStr + '*')
        print(ex)
        raise ex
    return ''

def getConlluStr2(line):
    return 'getConlluStr ' + str(line)
import multiprocessing
from joblib import Parallel, delayed
num_cores = multiprocessing.cpu_count() - 1
language = 'en'
filePath = '/home/nlpsrv/biasCDA/training-data/en_fr/wmt15/news-commentary-v10.fr-en.en'
print(num_cores)
from tqdm import tqdm
# from math import sqrt
# import numpy as np
# myIntList = np.arange(11, 17, 0.5).tolist()
# print(type(myIntList))
# outputLines2 = Parallel(n_jobs=num_cores)(delayed(getConlluStr2)(myIntList[i]) for i in tqdm(range(len(myIntList))))
# print(outputLines2)
# Using readlines()
filePtr = open(filePath, 'r')
Lines = filePtr.readlines()
print(type(Lines))
#inputs = Lines
inputs = Lines[1:5]
inputsLength = len(inputs)
outputLines = []
##outputLines = Parallel(n_jobs=9)(delayed(getConlluStr)(inputs[i]) for i in tqdm(range(inputsLength)))
for inputLine in inputs:
    outputLines.append(getConlluStr(inputLine))

outputFileName = filePath + ".conllu-input.txt"
outputFile = open(outputFileName, "w")
outputFile.writelines("%s\n" % conllu for conllu in outputLines)
outputFile.close()
print(f'done processing. output filename={outputFileName}')
# When running with max parallelism
# /home/nlpsrv/.local/lib/python3.6/site-packages/torch/nn/modules/rnn.py:585: UserWarning: RNN module weights are not part of single contiguous chunk of memory. This means they need to be compacted at every call, possibly greatly increasing memory usage. To compact weights again call flatten_parameters(). (Triggered internally at /pytorch/aten/src/ATen/native/cudnn/RNN.cpp:775.)
# self.num_layers, self.dropout, self.training, self.bidirectional)
# joblib.externals.loky.process_executor._RemoteTraceback:
# """
# Traceback (most recent call last):
# File "/home/nlpsrv/anaconda3/envs/py36-udify-direct/lib/python3.6/site-packages/joblib/externals/loky/process_executor.py", line 404, in _process_worker
# call_item = call_queue.get(block=True, timeout=timeout)
# File "/home/nlpsrv/anaconda3/envs/py36-udify-direct/lib/python3.6/multiprocessing/queues.py", line 113, in get
# return _ForkingPickler.loads(res)
# File "/home/nlpsrv/.local/lib/python3.6/site-packages/torch/storage.py", line 141, in _load_from_bytes
# return torch.load(io.BytesIO(b))
# File "/home/nlpsrv/.local/lib/python3.6/site-packages/torch/serialization.py", line 595, in load
# return _legacy_load(opened_file, map_location, pickle_module, **pickle_load_args)
# File "/home/nlpsrv/.local/lib/python3.6/site-packages/torch/serialization.py", line 774, in _legacy_load
# result = unpickler.load()
# File "/home/nlpsrv/.local/lib/python3.6/site-packages/torch/serialization.py", line 730, in persistent_load
# deserialized_objects[root_key] = restore_location(obj, location)
# File "/home/nlpsrv/.local/lib/python3.6/site-packages/torch/serialization.py", line 175, in default_restore_location
# result = fn(storage, location)
# File "/home/nlpsrv/.local/lib/python3.6/site-packages/torch/serialization.py", line 155, in _cuda_deserialize
# return storage_type(obj.size())
# File "/home/nlpsrv/.local/lib/python3.6/site-packages/torch/cuda/__init__.py", line 462, in _lazy_new
# return super(_CudaBase, cls).__new__(cls, *args, **kwargs)
# RuntimeError: CUDA error: out of memory
# """
# The above exception was the direct cause of the following exception:
# Traceback (most recent call last):
# File "biasCDA/e2e-scripts/step1-stanza-conllu.py", line 73, in <module>
# outputLines = Parallel(n_jobs=num_cores)(delayed(getConlluStr)(inputs[i]) for i in tqdm(range(inputsLength)))
# File "/home/nlpsrv/anaconda3/envs/py36-udify-direct/lib/python3.6/site-packages/joblib/parallel.py", line 1061, in __call__
# self.retrieve()
# File "/home/nlpsrv/anaconda3/envs/py36-udify-direct/lib/python3.6/site-packages/joblib/parallel.py", line 940, in retrieve
# self._output.extend(job.get(timeout=self.timeout))
# File "/home/nlpsrv/anaconda3/envs/py36-udify-direct/lib/python3.6/site-packages/joblib/_parallel_backends.py", line 542, in wrap_future_result
# return future.result(timeout=timeout)
# File "/home/nlpsrv/anaconda3/envs/py36-udify-direct/lib/python3.6/concurrent/futures/_base.py", line 432, in result
# return self.__get_result()
# File "/home/nlpsrv/anaconda3/envs/py36-udify-direct/lib/python3.6/concurrent/futures/_base.py", line 384, in __get_result
# raise self._exception
# joblib.externals.loky.process_executor.BrokenProcessPool: A task has failed to un-serialize. Please ensure that the arguments of the function are all picklable.
################ download file ############################
# '''
# Python 3.6.12 |Anaconda, Inc.| (default, Sep 8 2020, 23:10:56)
# [GCC 7.3.0] on linux
# Type "help", "copyright", "credits" or "license" for more information.
# >>> import stanza
# >>> stanza.download('en')
# Downloading https://raw.githubusercontent.com/stanfordnlp/stanza-resources/master/resources_1.1.0.json: 122kB [00:00, 29.8MB/s]
# 2021-01-12 14:10:41 INFO: Downloading default packages for language: en (English)...
# 2021-01-12 14:10:42 INFO: File exists: /home/nlpsrv/stanza_resources/en/default.zip.
# '''
|
{"hexsha": "72b7c240e8787f14ab68869af19a84f1b52ed3a5", "size": 7076, "ext": "py", "lang": "Python", "max_stars_repo_path": "e2e-scripts/step1-stanza-conllu.py", "max_stars_repo_name": "talktovishal/biasCDA", "max_stars_repo_head_hexsha": "270e4bdda72b12018f9a803b3e5c9e4476990011", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "e2e-scripts/step1-stanza-conllu.py", "max_issues_repo_name": "talktovishal/biasCDA", "max_issues_repo_head_hexsha": "270e4bdda72b12018f9a803b3e5c9e4476990011", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "e2e-scripts/step1-stanza-conllu.py", "max_forks_repo_name": "talktovishal/biasCDA", "max_forks_repo_head_hexsha": "270e4bdda72b12018f9a803b3e5c9e4476990011", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.7848101266, "max_line_length": 383, "alphanum_fraction": 0.7220180893, "include": true, "reason": "import numpy", "num_tokens": 1955}
|
# Copyright 2019 Pascal Audet
#
# This file is part of RfPy.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Module containing the main utility functions used in the `RfPy` scripts
that accompany this package.
"""
# -*- coding: utf-8 -*-
from obspy import UTCDateTime
from numpy import nan, isnan
from obspy.core import Stream, read
def get_orient_options():
    """
    Get Options from :class:`~optparse.OptionParser` objects.

    This function is used for data processing on-the-fly (requires web connection)
    """
    from optparse import OptionParser, OptionGroup
    from os.path import exists as exist
    from obspy import UTCDateTime
    from numpy import nan

    parser = OptionParser(
        usage="Usage: %prog [options] <station database>",
        description="Script used to find orientation of station from " +
        "receiver function data")

    # General Settings
    parser.add_option(
        "--keys",
        action="store",
        type=str,
        dest="stkeys",
        default="",
        help="Specify a comma separated list of station keys for " +
        "which to perform the analysis. These must be " +
        "contained within the station database. Partial keys will " +
        "be used to match against those in the dictionary. For " +
        "instance, providing IU will match with all stations in " +
        "the IU network [Default processes all stations in the database]")
    parser.add_option(
        "-v", "-V", "--verbose",
        action="store_true",
        dest="verb",
        default=False,
        help="Specify to increase verbosity.")
    parser.add_option(
        "-O", "--overwrite",
        action="store_true",
        dest="ovr",
        default=False,
        help="Force the overwriting of pre-existing figures. " +
        "[Default False]")

    PreGroup = OptionGroup(
        parser,
        title='Pre-processing Settings',
        description="Options for pre-processing of receiver function " +
        "data before plotting")
    PreGroup.add_option(
        "--snr",
        action="store",
        type=float,
        dest="snr",
        default=-9999.,
        help="Specify the SNR threshold for extracting receiver functions. " +
        "[Default None]")
    PreGroup.add_option(
        "--snrh",
        action="store",
        type=float,
        dest="snrh",
        default=-9999.,
        help="Specify the horizontal component SNR threshold for extracting receiver functions. " +
        "[Default None]")
    PreGroup.add_option(
        "--cc",
        action="store",
        type=float,
        dest="cc",
        default=-1.,
        help="Specify the CC threshold for extracting receiver functions. " +
        "[Default None]")
    PreGroup.add_option(
        "--no-outlier",
        action="store_true",
        dest="no_outl",
        default=False,
        help="Set this option to delete outliers based on the MAD " +
        "on the variance. [Default False]")
    PreGroup.add_option(
        "--bp",
        action="store",
        type=str,
        dest="bp",
        default=None,
        help="Specify the corner frequencies for the bandpass filter. " +
        "[Default no filtering]")
    PreGroup.add_option(
        "--pws",
        action="store_true",
        dest="pws",
        default=False,
        help="Set this option to use phase-weighted stacking during binning " +
        "[Default False]")
    PreGroup.add_option(
        "--nbaz",
        action="store",
        dest="nbaz",
        type=int,
        default=72,
        help="Specify integer number of back-azimuth bins to consider " +
        "(typically 36 or 72). If not None, the plot will show receiver " +
        "functions sorted by back-azimuth values. [Default 72]")
    PreGroup.add_option(
        "--trange",
        action="store",
        default=None,
        type=str,
        dest="trange",
        help="Specify the time range for decomposition (sec). Negative times " +
        "are allowed [Default -1., 1.]")
    PreGroup.add_option(
        "--boot",
        action="store_true",
        dest="boot",
        default=False,
        help="Set this option to calculate bootstrap statistics " +
        "[Default False]")
    PreGroup.add_option(
        "--plot-f",
        action="store_true",
        dest="plot_f",
        default=False,
        help="Set this option to plot the function f(phi) " +
        "[Default False]")
    PreGroup.add_option(
        "--plot-comps",
        action="store_true",
        dest="plot_comps",
        default=False,
        help="Set this option to plot the misoriented and rotated harmonic " +
        "components [Default False]")
    parser.add_option_group(PreGroup)

    (opts, args) = parser.parse_args()

    # Check inputs
    if len(args) != 1:
        parser.error("Need station database file")
    indb = args[0]
    if not exist(indb):
        parser.error("Input file " + indb + " does not exist")

    # create station key list
    if len(opts.stkeys) > 0:
        opts.stkeys = opts.stkeys.split(',')

    if opts.bp is not None:
        opts.bp = [float(val) for val in opts.bp.split(',')]
        opts.bp = sorted(opts.bp)
        if (len(opts.bp)) != 2:
            parser.error(
                "Error: --bp should contain 2 " +
                "comma-separated floats")

    if opts.trange is None:
        opts.tmin = -1.
        opts.tmax = 1.
    if opts.trange is not None:
        opts.trange = [float(val) for val in opts.trange.split(',')]
        opts.trange = sorted(opts.trange)
        if (len(opts.trange)) != 2:
            parser.error(
                "Error: --trange should contain 2 " +
                "comma-separated floats")

    return (opts, indb)
|
{"hexsha": "44fedf54f7fa7cd9a00d0778620363c6afebb669", "size": 6760, "ext": "py", "lang": "Python", "max_stars_repo_path": "obstools/orient/options.py", "max_stars_repo_name": "wbythewood/OBStools", "max_stars_repo_head_hexsha": "1ce1e39aeb4200bb3120735bef27b77c3d52c956", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "obstools/orient/options.py", "max_issues_repo_name": "wbythewood/OBStools", "max_issues_repo_head_hexsha": "1ce1e39aeb4200bb3120735bef27b77c3d52c956", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "obstools/orient/options.py", "max_forks_repo_name": "wbythewood/OBStools", "max_forks_repo_head_hexsha": "1ce1e39aeb4200bb3120735bef27b77c3d52c956", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.3444976077, "max_line_length": 99, "alphanum_fraction": 0.6119822485, "include": true, "reason": "from numpy", "num_tokens": 1502}
|
! @Copyright 2007 Kristjan Haule
module RealBubble
  ! ###############################
  ! #  Computing real axis Bubble #
  ! ###############################
  IMPLICIT NONE
  REAL*8, allocatable :: Ome(:)
  REAL*8, allocatable :: chi0r0(:,:,:)
  INTEGER :: norb, nOme
CONTAINS

  SUBROUTINE RealBubble__Init__()
    use LinLogMesh
    use Qlist
    use greenk
    IMPLICIT NONE
    nOme = nom-zero_ind+1
    allocate( Ome(nOme) )
    Ome(:) = om(zero_ind:)
    norb = cixdm
    ! This is the zero frequency value
    allocate( chi0r0(nQ,norb,norb) )
  END SUBROUTINE RealBubble__Init__

  INTEGER FUNCTION find_index(list, element)
    INTEGER, dimension(:) :: list
    INTEGER, intent(in)   :: element
    !
    INTEGER :: i
    do i=1,size(list)
       if (list(i).EQ.element) EXIT
    enddo
    find_index = i
    return
  END FUNCTION find_index
SUBROUTINE CmpChiQ0_real(fileout)
! This routine actaually computes Bubble on real axis using formula
! P''(Om)_{q,ab} = \sum_k \int_0^{Om} G''(x)_{k,ab} G''(x-Om)_{k-q,ba} /pi
! here G'' = (G - G^+)/(2*i) and is a complex function for off-diagonal components
! We compute P'(Om) by generalized Kramar's-Kronig relation, i.e.,
!
! 1/2[chi(om+i*delta)+chi(om-i*delta)] =-1/pi Principal \int_{-\infty}^{\infty} [chi(x+i*delta)-chi(x-i*delta)]/(2*i)/(om-x)
!
use LinLogMesh
use Qlist
use Klist
use greenk
IMPLICIT NONE
CHARACTER*200, intent(in) :: fileout
!
CHARACTER*10 :: skii
CHARACTER*210:: FNAME
INTEGER,PARAMETER :: CHUNK= 4
INTEGER,PARAMETER :: fh_out= 99
INTEGER :: Q(3)
INTEGER :: iQ, Qi, ik, ikq, iOm, im, iom1, iom2, izero, dOm, level, iomfin, iorb, jorb
REAL*8 :: PI, t1c, t1w, t2c, t2w, time_Q_c, time_Q_w, rf, dsum, dsum2
COMPLEX*16 :: csum, irhok, irhoq, cf
COMPLEX*16 :: ImBubO(norb,norb)
INTEGER, allocatable:: om_idx(:)
COMPLEX*16, allocatable :: ImBub(:,:,:), Bub(:,:,:)
REAL*8, allocatable :: dx(:), Imf(:), Ref(:), OmKK(:)
COMPLEX*16, allocatable :: Imc(:), Rec(:)
COMPLEX*16, PARAMETER :: IMAG = (0.0D0,1.0D0)
PI=ACOS(-1.0D0)
allocate( ImBub(norb,norb,nOme), Bub(norb,norb,nOme) )
allocate( om_idx(nom), dx(nom) )
! Arrays for Kramars-Kronig
allocate( Imf(2*nOme-1), Ref(nOme) )
allocate( Imc(2*nOme-1), Rec(nOme) )
allocate( OmKK(2*nOme-1) )
! Mesh for Kramars-Kronig
do iOm=1,nOme
OmKK(nOme+iOm-1) = Ome(iOm)
OmKK(nOme-iOm+1) = -Ome(iOm)
enddo
time_Q_c=0
time_Q_w=0
do iQ=1,nQ
Q = Qp(:3,iQ)
Qi = k_index(Q)
!print *, 'Qi=', Qi, Q
call cputim(t1c)
call walltim(t1w)
ImBub(:,:,1) = 0
do iOm=2,nOme
level = (iOm-2)/Nd+1 !# which linear mesh should we use?
om_idx(:) = idx(level,:) !# index for the linear mesh on this level
izero= nidx(level)/2+1 !# zero_ind on this level
iomfin = find_index(om_idx(:nidx(level)), zero_ind+iOm-1) ! finds index such that om(om_idx(iomfin))==Ome(iOm)
dOm = iomfin - izero !# om-Om in integer notation is im-dOm
! dx for trapezoid integration
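    ! Endpoint weights are half the adjacent spacing; interior weights are half
    ! the distance between the two neighbouring mesh points, which reproduces
    ! the standard trapezoidal rule on a non-uniform mesh.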
dx(izero)=0.5*(om(om_idx(izero+1))-om(om_idx(izero)))
do im=izero+1,iomfin-1
dx(im) = 0.5*(om(om_idx(im+1))-om(om_idx(im-1)))
enddo
dx(iomfin) = 0.5*(om(om_idx(iomfin))-om(om_idx(iomfin-1)))
ImBubO=0.0
!$OMP PARALLEL DO SHARED(gk,nkp,norb,k_m_q,Qi,om_idx,dx)&
!$OMP& PRIVATE(ik,iorb,jorb,im,ikq,iom1,iom2,csum,irhok,irhoq)&
!$OMP& SCHEDULE(STATIC,CHUNK)&
!$OMP& REDUCTION(+:ImBubO)
do ik=1,nkp
ikq = k_m_q(ik,Qi)
do iorb=1,norb
do jorb=1,norb
csum=0.0
do im=izero,iomfin
iom1 = om_idx(im)
iom2 = om_idx(im-dOm)
irhok=(gk(iom1,iorb,jorb,ik) -conjg(gk(iom1,jorb,iorb,ik)))/2.0
irhoq=(gk(iom2,jorb,iorb,ikq)-conjg(gk(iom2,iorb,jorb,ikq)))/2.0
csum = csum - irhok*irhoq*dx(im) ! ImG_k * ImG_{k+q}
enddo
ImBubO(iorb,jorb) = ImBubO(iorb,jorb) + csum/(nkp*PI)
enddo
enddo
enddo
!$OMP END PARALLEL DO
ImBub(:,:,iOm)=ImBubO(:,:)
enddo
do iorb=1,norb
do jorb=1,norb
dsum=0.
do iOm=1,nOme
dsum = dsum + abs(dimag(ImBub(iorb,jorb,iOm)))
enddo
    ! Check if we need the complex Kramers-Kronig
if (dsum.lt.1e-4) then
! creating a function which is defined for both positive and negative frequencies
Imf(nOme)=0.0
do iOm=2,nOme
Imf(nOme+iOm-1) = dreal(ImBub(iorb,jorb,iOm))
Imf(nOme-iOm+1) = -dreal(ImBub(iorb,jorb,iOm))
enddo
    ! Perform Kramers-Kronig for the real part
!$OMP PARALLEL DO SHARED(Imf,nOme,OmKK,Ref) PRIVATE(rf,iOm)
do iOm=2,nOme
CALL kramarskronig(rf, Imf, OmKK, nOme+iOm-1, 2*nOme-1)
Ref(iOm) = rf
enddo
!$OMP END PARALLEL DO
!
    ! the zero-frequency value is computed here, because it cannot be obtained by the subroutine
do iOm=1,nOme-1
Imf(iOm) = Imf(iOm)/OmKK(iOm)
enddo
do iOm=nOme+1,2*nOme-1
Imf(iOm) = Imf(iOm)/OmKK(iOm)
enddo
Imf(nOme)=0.5*(Imf(nOme-1)+Imf(nOme+1))
CALL integrate_trapz(rf, Imf, OmKK, 2*nOme-1)
Ref(1) = rf/PI
! Saving the complex function result into Bub
do iOm=1,nOme
Bub(iorb,jorb,iOm) = Ref(iOm) + IMAG*ImBub(iorb,jorb,iOm)
enddo
else
    print *, 'Imaginary Kramers-Kronig', iorb, jorb
Imc(nOme)=0.0
do iOm=2,nOme
Imc(nOme+iOm-1) = ImBub(iorb,jorb,iOm)
Imc(nOme-iOm+1) = -ImBub(iorb,jorb,iOm)
enddo
    ! Perform Kramers-Kronig for the real part
!$OMP PARALLEL DO SHARED(Imc,nOme,OmKK,Rec) PRIVATE(cf,iOm)
do iOm=2,nOme
CALL complex_kramarskronig(cf, Imc, OmKK, nOme+iOm-1, 2*nOme-1)
Rec(iOm) = cf
enddo
!$OMP END PARALLEL DO
!
    ! the zero-frequency value is computed here, because it cannot be obtained by the subroutine
do iOm=1,nOme-1
Imc(iOm) = Imc(iOm)/OmKK(iOm)
enddo
do iOm=nOme+1,2*nOme-1
Imc(iOm) = Imc(iOm)/OmKK(iOm)
enddo
Imc(nOme)=0.5*(Imc(nOme-1)+Imc(nOme+1))
CALL complex_integrate_trapz(cf, Imc, OmKK, 2*nOme-1)
Rec(1) = cf/PI
! Saving the result
do iOm=1,nOme
Bub(iorb,jorb,iOm) = Rec(iOm) + IMAG*ImBub(iorb,jorb,iOm)
enddo
endif
enddo
enddo
! Writing out the result
WRITE(skii,fmt='(I5)') (iQ-1)
FNAME=TRIM(fileout)//ADJUSTL(TRIM(skii))
open(fh_out, FILE=FNAME, STATUS='unknown')
do iOm=1,nOme
WRITE(fh_out,'(F12.6)',advance='no') Ome(iOm)
do iorb=1,norb
do jorb=1,norb
WRITE(fh_out,'(F14.6,1x,F14.6,3x)',advance='no') dreal(Bub(iorb,jorb,iOm)),dimag(Bub(iorb,jorb,iOm))
enddo
enddo
WRITE(fh_out,*)
enddo
close(fh_out)
! Timings
call cputim(t2c)
call walltim(t2w)
time_Q_c=time_Q_c+t2c-t1c
time_Q_w=time_Q_w+t2w-t1w
WRITE(*,'(A,I4,A,f10.4,A,f10.4)') 'Q=', iQ, ' time[s]=', t2c-t1c, ' walltime[s]', t2w-t1w
enddo
deallocate( OmKK )
deallocate( Imf, Ref )
deallocate( Imc, Rec )
deallocate( om_idx, dx )
deallocate( ImBub, Bub )
WRITE(*,'(A,f15.4,A,f15.4)') 'Total-time [in min] to calculate Bubble=', time_Q_c/60., ' Total-walltime', time_Q_w/60.
end SUBROUTINE CmpChiQ0_real
SUBROUTINE RealBubble__Destruct__
DEALLOCATE( Ome , chi0r0 )
END SUBROUTINE RealBubble__Destruct__
end module RealBubble
program RealAxisBubble
use LinLogMesh
use Qlist
use Klist
use greenk
use RealBubble
IMPLICIT NONE
CHARACTER*200 :: filemesh, filegkr, fbubbleReal, fileQlist, fileKlist, fileout
INTEGER :: nargs, j, iom
INTEGER,PARAMETER :: fhi = 995
CHARACTER*100, allocatable :: argv(:) ! Command-line arguments
nargs = iargc()
if (nargs.LT.1) then
    print *, 'Missing the input file. Create an input file with the following lines:'
print *, 'Ba122.klist # filename with k-list'
print *, 'Qlist.dat # filename with Qlist'
print *, 'rmesh.dat # real axis mesh'
    print *, 'G_k1r_ # file with real axis k-dependent Green''s function'
    print *, 'G_local1r_ # file with real axis local Green''s function'
print *, 'chi0_real. # name of the output Bubble on real axis'
call exit(1)
endif
    ALLOCATE (argv(nargs)) ! holds the raw command-line arguments
WRITE(*,'(A,I2)') 'nargs=', nargs
DO j=1,nargs
CALL getarg(j, argv(j))
WRITE(*,'(A,A)') 'argi=', TRIM(argv(j))
ENDDO
open(fhi, FILE=argv(1),status='old', ERR=900, form='formatted')
READ(fhi,*) fileKlist ! k-list filename, Ba122.klist
READ(fhi,*) fileQlist ! Q-list
READ(fhi,*) filemesh ! real axis mesh
READ(fhi,*) filegkr ! Green's function G_k on real axis
READ(fhi,*)
READ(fhi,*) fileout ! output filename, for example 'fchiQ.'
close(fhi)
! Reading real axis mesh
CALL LinLogMesh__Init__(filemesh)
! Generates real axis mesh
CALL LinLogMesh_Generate()
! Reading Q list
CALL Qlist__Init__(fileQlist)
CALL Klist__Init__(fileKlist)
CALL greenk__Init__(filegkr)
if ( sum(abs(om-oml)).gt.1e-5 ) then
print *, 'Mesh in greens function and real-mesh are not compatible. Diff=', sum(abs(om-oml))
do iom=1,noml
print *, iom, om(iom), oml(iom)
enddo
endif
CALL RealBubble__Init__()
CALL CmpChiQ0_real(fileout)
CALL RealBubble__Destruct__()
CALL greenk__Destruct__()
CALL LinLogMesh__Destruct__()
CALL Qlist__Destruct__()
CALL Klist__Destruct__()
deallocate( argv )
STOP
900 print *, 'ERROR opening ',TRIM(argv(1)),' file'
end program RealAxisBubble
|
{"hexsha": "1747b03d5fff52f729fe7729b468684b472c2272", "size": 10881, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/magnetSusc/dmft_bubble/dmft_real_bubble.f90", "max_stars_repo_name": "dmft-wien2k/dmft-wien2k-v2", "max_stars_repo_head_hexsha": "83481be27e8a9ff14b9635d6cc1cd9d96f053487", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-05-13T13:04:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-18T10:08:09.000Z", "max_issues_repo_path": "src/magnetSusc/dmft_bubble/dmft_real_bubble.f90", "max_issues_repo_name": "dmft-wien2k/dmft-wien2k-v2", "max_issues_repo_head_hexsha": "83481be27e8a9ff14b9635d6cc1cd9d96f053487", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2016-07-12T21:37:53.000Z", "max_issues_repo_issues_event_max_datetime": "2016-07-12T21:42:01.000Z", "max_forks_repo_path": "src/magnetSusc/dmft_bubble/dmft_real_bubble.f90", "max_forks_repo_name": "dmft-wien2k/dmft-wien2k", "max_forks_repo_head_hexsha": "83481be27e8a9ff14b9635d6cc1cd9d96f053487", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2016-07-22T15:46:56.000Z", "max_forks_repo_forks_event_max_datetime": "2016-08-02T15:05:12.000Z", "avg_line_length": 34.9871382637, "max_line_length": 158, "alphanum_fraction": 0.5395643783, "num_tokens": 3617}
|
import os
import random
import numpy as np
import torch
from einops import repeat
def expand_to_batch(tensor, desired_size):
tile = desired_size // tensor.shape[0]
return repeat(tensor, 'b ... -> (b tile) ...', tile=tile)
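# Example (a quick sanity check, not part of the original module):
# expand_to_batch(torch.zeros(2, 3), desired_size=6).shape == torch.Size([6, 3])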
def init_random_seed(seed, gpu=False):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
if gpu:
torch.backends.cudnn.deterministic = True
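        # Note: for fully reproducible cuDNN behaviour this is usually paired
        # with torch.backends.cudnn.benchmark = False.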
|
{"hexsha": "ff294275fdb68b6eba02e56a43280ed7fefa1da6", "size": 526, "ext": "py", "lang": "Python", "max_stars_repo_path": "self_attention_cv/common.py", "max_stars_repo_name": "MooseMouse/self-attention-cv", "max_stars_repo_head_hexsha": "867e7b1f08bf838c29770e43b7746b2a945d75e3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-30T11:07:47.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-30T11:07:47.000Z", "max_issues_repo_path": "self_attention_cv/common.py", "max_issues_repo_name": "MooseMouse/self-attention-cv", "max_issues_repo_head_hexsha": "867e7b1f08bf838c29770e43b7746b2a945d75e3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "self_attention_cv/common.py", "max_forks_repo_name": "MooseMouse/self-attention-cv", "max_forks_repo_head_hexsha": "867e7b1f08bf838c29770e43b7746b2a945d75e3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.8695652174, "max_line_length": 61, "alphanum_fraction": 0.6977186312, "include": true, "reason": "import numpy", "num_tokens": 129}
|
module libnpcf
use iso_c_binding
private
public :: npcf
include "ganpcf_cdef.f90"
type npcf
private
type(c_ptr) :: ptr
contains
#ifdef __GNUC__
procedure :: delete => delete_npcf_polymorph
#else
final :: delete_npcf
#endif
procedure :: getShells => get_shells
procedure :: getNumTriangles => get_num_triangles
procedure :: getTriangles => get_triangles
procedure :: setNumParticles => set_num_particles
procedure :: get2ptSize => get_2pt_size
procedure :: get3ptSize => get_3pt_size
procedure :: calculateCorrelations => calculate_correlations
procedure :: calculate2pt => calculate_2pt
procedure :: get2pt => get_2pt
procedure :: get3pt => get_3pt
end type
interface npcf
procedure create_npcf
end interface
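! Example usage (a minimal sketch; the numeric arguments below are hypothetical):
!   type(npcf) :: corr
!   integer :: status
!   corr = npcf(timesRans=16, numShells=32, volBox=1.0d6, rMin=0.1d0, rMax=20.0d0)
!   status = corr%setNumParticles(100000)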
contains
function create_npcf(timesRans, numShells, volBox, rMin, rMax)
implicit none
type(npcf) :: create_npcf
integer, intent(in) :: timesRans
integer, intent(in) :: numShells
double precision, intent(in) :: volBox
double precision, intent(in) :: rMin
double precision, intent(in) :: rMax
create_npcf%ptr = create_npcf_c(timesRans, numShells, volBox, rMin, rMax)
end function
subroutine delete_npcf(this)
implicit none
type(npcf) :: this
call delete_npcf_c(this%ptr)
end subroutine
subroutine delete_npcf_polymorph(this)
implicit none
class(npcf) :: this
call delete_npcf_c(this%ptr)
end subroutine
integer function get_shells(this, shells)
implicit none
class(npcf) :: this
double precision, dimension(:) :: shells
get_shells = get_shells_c(this%ptr, shells)
end function
integer function get_num_triangles(this)
implicit none
class(npcf) :: this
get_num_triangles = get_num_triangles_c(this%ptr)
end function
integer function get_triangles(this, tris)
implicit none
class(npcf) :: this
type(float3), dimension(:) :: tris
get_triangles = get_triangles_c(this%ptr, tris)
end function
integer function set_num_particles(this, numParts)
implicit none
class(npcf) :: this
integer, intent(in) :: numParts
set_num_particles = set_num_particles_c(this%ptr, numParts)
end function
integer function get_2pt_size(this)
implicit none
class(npcf) :: this
get_2pt_size = get_2pt_size_c(this%ptr)
end function
integer function get_3pt_size(this)
implicit none
class(npcf) :: this
get_3pt_size = get_3pt_size_c(this%ptr)
end function
integer function calculate_correlations(this, galaxies)
implicit none
class(npcf) :: this
type(float3), dimension(:) :: galaxies
calculate_correlations = calculate_correlations_c(this%ptr, galaxies)
end function
integer function calculate_2pt(this, galaxies)
implicit none
class(npcf) :: this
type(float3), dimension(:) :: galaxies
calculate_2pt = calculate_2pt_c(this%ptr, galaxies)
end function
integer function get_2pt(this, twoPoint)
implicit none
class(npcf) :: this
double precision, dimension(:) :: twoPoint
get_2pt = get_2pt_c(this%ptr, twoPoint)
end function
integer function get_3pt(this, threePoint)
implicit none
class(npcf) :: this
double precision, dimension(:) :: threePoint
get_3pt = get_3pt_c(this%ptr, threePoint)
end function
end module
|
{"hexsha": "9620e454741d61c0a4bb71b6799ff8904a807663", "size": 4131, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "source/ganpcf_mod.f90", "max_stars_repo_name": "dpearson1983/ganpcf", "max_stars_repo_head_hexsha": "d75fddfb094045a81916ffed10fec19d96b6d52e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-11-04T06:19:23.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-04T06:19:23.000Z", "max_issues_repo_path": "source/ganpcf_mod.f90", "max_issues_repo_name": "dpearson1983/ganpcf", "max_issues_repo_head_hexsha": "d75fddfb094045a81916ffed10fec19d96b6d52e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "source/ganpcf_mod.f90", "max_forks_repo_name": "dpearson1983/ganpcf", "max_forks_repo_head_hexsha": "d75fddfb094045a81916ffed10fec19d96b6d52e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-10-30T22:45:42.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-05T17:36:58.000Z", "avg_line_length": 32.2734375, "max_line_length": 85, "alphanum_fraction": 0.5754054708, "num_tokens": 919}
|
/*
* ping_pong_fiber_test.cpp
*
* Created on: Mar 25, 2017
* Author: zmij
*/
#ifndef WITH_BOOST_FIBERS
#define WITH_BOOST_FIBERS
#endif
#include <gtest/gtest.h>
#include <test/ping_pong.hpp>
#include <wire/core/connector.hpp>
#include <wire/core/connection.hpp>
#include "sparring/sparring_test.hpp"
#include <boost/fiber/all.hpp>
#include <pushkin/asio/fiber/shared_work.hpp>
#include <thread>
#include <vector>
namespace wire {
namespace test {
namespace {
const char* const alpha = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
class thread_catalogue {
private:
std::map<std::thread::id, std::string> names_{};
const char* next_{ alpha };
std::mutex mtx_{};
public:
thread_catalogue() = default;
std::string lookup() {
std::unique_lock<std::mutex> lk( mtx_);
auto this_id( std::this_thread::get_id() );
auto found = names_.find( this_id );
if ( found != names_.end() ) {
return found->second;
}
BOOST_ASSERT( *next_);
std::string name(1, *next_++ );
names_[ this_id ] = name;
return name;
}
};
thread_catalogue thread_names;
class fiber_catalogue {
private:
std::map<boost::fibers::fiber::id, std::string> names_{};
unsigned next_{ 0 };
boost::fibers::mutex mtx_{};
public:
fiber_catalogue() = default;
std::string lookup() {
std::unique_lock<boost::fibers::mutex> lk( mtx_);
auto this_id( boost::this_fiber::get_id() );
auto found = names_.find( this_id );
if ( found != names_.end() ) {
return found->second;
}
std::ostringstream out;
// Bake into the fiber's name the thread name on which we first
// lookup() its ID, to be able to spot when a fiber hops between
// threads.
out << thread_names.lookup() << next_++;
std::string name( out.str() );
names_[ this_id ] = name;
return name;
}
};
fiber_catalogue fiber_names;
::std::ostream&
tag(::std::ostream& out)
{
using ::boost::fibers::context;
using context_type = ::boost::fibers::type;
::std::ostream::sentry s{out};
if (s) {
out << thread_names.lookup() << ":"
<< ::std::setw(4) << fiber_names.lookup() << " "
<< ::std::this_thread::get_id() << " ";
// if (context::active()->is_context(context_type::dispatcher_context)) {
// out << "disp ";
// } else if (context::active()->is_context(context_type::main_context)) {
// out << "main ";
// } else {
// out << "work ";
// }
}
return out;
}
} /* namespace */
namespace this_fiber = ::boost::this_fiber;
class FiberPingPong : public wire::test::sparring::SparringTest {
protected:
void
SetUp() override
{
connector_ = core::connector::create_connector(io_svc);
runner_ = ::psst::asio::fiber::use_shared_work_algorithm( io_svc );
StartPartner();
}
void
SetupArgs(args_type& args) override
{
//args.push_back("--log=ping-pong-test.log");
}
void
ReadSparringOutput(::std::istream& is) override
{
::std::string proxy_str;
::std::getline(is, proxy_str);
prx_ = connector_->string_to_proxy(proxy_str);
::std::cerr << "Sparring proxy object is " << *prx_ << "\n";
}
core::connector_ptr connector_;
core::object_prx prx_;
::psst::asio::fiber::runner_ptr runner_;
};
void
ping_fiber(core::object_prx prx)
{
::std::cerr << "ping_fiber start.\n";
EXPECT_NO_THROW(prx->wire_ping());
::std::cerr << "ping_fiber done.\n";
}
void
checked_cast_fiber(core::object_prx prx)
{
::test::ping_pong_prx pp_prx;
EXPECT_NO_THROW( pp_prx = core::checked_cast< ::test::ping_pong_proxy >(prx) );
EXPECT_TRUE(pp_prx.get());
::std::cerr << "checked cast fiber done.\n";
}
TEST_F(FiberPingPong, SyncPing)
{
using ::boost::fibers::fiber;
ASSERT_NE(0, child_.pid);
ASSERT_TRUE(connector_.get());
ASSERT_TRUE(prx_.get());
const auto fiber_cnt = 1;
const auto thread_cnt = 2;
::std::atomic<int> finish_cnt{0};
auto test_f = [&](::boost::fibers::barrier& barrier){
try {
for (auto i = 0; i < 1000; ++i) {
tag(::std::cerr) << " Fiber start " << ::boost::this_fiber::get_id() << "\n";
tag(::std::cerr) << "Start sync get connection\n";
auto conn = prx_->wire_get_connection();
tag(::std::cerr) << "End sync get connection\n";
EXPECT_TRUE(conn.get());
}
tag(::std::cerr) << "Start ping the proxy\n";
EXPECT_NO_THROW(prx_->wire_ping());
tag(::std::cerr) << "End ping the proxy\n";
::test::ping_pong_prx pp_prx;
tag(::std::cerr) << "Start checked cast\n";
EXPECT_NO_THROW( pp_prx = core::checked_cast< ::test::ping_pong_proxy >(prx_) );
tag(::std::cerr) << "End checked cast\n";
EXPECT_TRUE(pp_prx.get());
tag(::std::cerr) << "Start test int\n";
EXPECT_EQ(42, pp_prx->test_int(42));
tag(::std::cerr) << "End test int\n";
} catch (::std::exception const& e) {
tag(std::cerr) << "Exception while running test " << e.what() << "\n";
}
tag(::std::cerr) << "Wait barrier " << ++finish_cnt << "\n";
if (barrier.wait()) {
// tag(::std::cerr) << "Sleep for a while\n";
// ::boost::this_fiber::sleep_for(::std::chrono::milliseconds{5});
tag(::std::cerr) << "Stop the io service\n";
io_svc->stop();
}
tag(::std::cerr) << " Fiber exit " << ::boost::this_fiber::get_id() << "\n";
};
::std::vector<::std::thread> threads;
threads.reserve(thread_cnt);
boost::fibers::barrier b(fiber_cnt * thread_cnt);
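    // One barrier slot per fiber across all threads; barrier::wait() returns
    // true for exactly one waiter, which is the fiber that stops the io service.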
for (auto i = 0; i < thread_cnt; ++i) {
threads.emplace_back(
[&](){
tag(::std::cerr) << " Thread start\n";
auto runner = ::psst::asio::fiber::use_shared_work_algorithm( io_svc );
for (auto i = 0; i < fiber_cnt; ++i) {
fiber{ test_f, ::std::ref(b) }.detach();
}
runner->run();
tag(::std::cerr) << " Thread exit\n";
});
}
// ::std::cerr << "Run the io svc\n";
// io_svc->run();
for (auto& t : threads) {
t.join();
}
}
//TEST_F(FiberPingPong, CheckedCast)
//{
// using boost::fibers::fiber;
// ASSERT_NE(0, child_.pid);
// ASSERT_TRUE(connector_.get());
// ASSERT_TRUE(prx_.get());
//
// fiber f1{ checked_cast_fiber, prx_ };
// fiber f2{ checked_cast_fiber, prx_ };
// f1.join();
// f2.join();
//}
} /* namespace test */
} /* namespace wire */
|
{"hexsha": "371f9a9f07ebf1d224a6a73a464b17c7a5b118f9", "size": 6868, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/connector/ping_pong_fiber_test.cpp", "max_stars_repo_name": "zmij/wire", "max_stars_repo_head_hexsha": "9981eb9ea182fc49ef7243eed26b9d37be70a395", "max_stars_repo_licenses": ["Artistic-2.0"], "max_stars_count": 5.0, "max_stars_repo_stars_event_min_datetime": "2016-04-07T19:49:39.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-03T05:24:11.000Z", "max_issues_repo_path": "test/connector/ping_pong_fiber_test.cpp", "max_issues_repo_name": "zmij/wire", "max_issues_repo_head_hexsha": "9981eb9ea182fc49ef7243eed26b9d37be70a395", "max_issues_repo_licenses": ["Artistic-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/connector/ping_pong_fiber_test.cpp", "max_forks_repo_name": "zmij/wire", "max_forks_repo_head_hexsha": "9981eb9ea182fc49ef7243eed26b9d37be70a395", "max_forks_repo_licenses": ["Artistic-2.0"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2020-12-27T11:47:31.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-27T11:47:31.000Z", "avg_line_length": 27.253968254, "max_line_length": 93, "alphanum_fraction": 0.5547466511, "num_tokens": 1820}
|
import tensorflow as tf
import numpy as np
import os
from PIL import Image
filename = 'model.pb'
labels_filename = 'labels.txt'
graph_def = tf.GraphDef()
labels = []
# Import the TF graph
with tf.gfile.FastGFile(filename, 'rb') as f:
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
# Create a list of labels.
with open(labels_filename, 'rt') as lf:
for l in lf:
labels.append(l.strip())
# Load from a file
imageFile = "./images/Test/test_image.jpg"
image = Image.open(imageFile)
# Resize
image = image.resize((224, 224), resample=Image.BILINEAR)
# Convert to numpy array - tensor
image_tensor = np.asarray(image)
# Convert RGB -> BGR
r,g,b = np.array(image_tensor).T
image_tensor = np.array([b,g,r]).transpose()
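# (.T splits the HxWx3 image into three WxH channel planes; restacking them as
# [b, g, r] and transposing back restores HxWx3 with the channels swapped.)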
print("Numpy array mode=BGR shape={}".format(image_tensor.shape))
# These names are part of the model and cannot be changed.
output_layer = 'loss:0'
input_node = 'Placeholder:0'
with tf.Session() as sess:
prob_tensor = sess.graph.get_tensor_by_name(output_layer)
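    # the model returns shape (1, num_classes) for this single-image batch;
    # the trailing comma unpacks that one row into a 1-D probability vector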
predictions, = sess.run(prob_tensor, {input_node: [image_tensor] })
print(predictions)
# Print the highest probability label
highest_probability_index = np.argmax(predictions)
print('Classified as: ' + labels[highest_probability_index])
# Or you can print out all of the results mapping labels to probabilities.
label_index = 0
for p in predictions:
    truncated_probability = np.float64(np.round(p,4))
    print(labels[label_index], truncated_probability)
label_index += 1
|
{"hexsha": "e0e42e26904a9aacb1ec0b0994ba0b2d7c2222fd", "size": 1527, "ext": "py", "lang": "Python", "max_stars_repo_path": "Lab-02/source/inference.py", "max_stars_repo_name": "alexandergg/Cognitive-Services-AI-Labs", "max_stars_repo_head_hexsha": "e1e0aef5e9a4ad43144f2ba8003d29d587f6bf4c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Lab-02/source/inference.py", "max_issues_repo_name": "alexandergg/Cognitive-Services-AI-Labs", "max_issues_repo_head_hexsha": "e1e0aef5e9a4ad43144f2ba8003d29d587f6bf4c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2020-11-13T18:38:28.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:34:12.000Z", "max_forks_repo_path": "Lab-02/source/inference.py", "max_forks_repo_name": "alexandergg/Cognitive-Services-AI-Labs", "max_forks_repo_head_hexsha": "e1e0aef5e9a4ad43144f2ba8003d29d587f6bf4c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.3275862069, "max_line_length": 74, "alphanum_fraction": 0.7308447937, "include": true, "reason": "import numpy", "num_tokens": 375}
|
__author__ = "Christian Kongsgaard"
__license__ = 'MIT'
import numpy as np
import typing
def algae(relative_humidity: typing.List[float], temperature: typing.List[float], material_name, porosity, roughness,
total_pore_area):
"""
UNIVPM Algae Model
Currently a dummy function!
:param relative_humidity
:param temperature
:param material_name: dictionary with relevant properties and their values
:return growth: list with eval. values
"""
def extract_material_type(material: str):
materials = {
"BrickBernhard": "brick",
"BrickJoens": "brick",
"HistoricalBrickClusterEdge": "brick",
"HistoricalBrickCluster4DD": "brick",
"HistoricalBrickCluster": "brick",
"WienerbergerNormalBrick": "brick",
"AltbauziegelDresdenZQ": "brick",
"AltbauziegelDresdenZA": "brick",
"AltbauziegelDresdenZC": "brick",
"AltbauziegelDresdenZD": "brick",
"AltbauziegelDresdenZE": "brick",
"AltbauziegelDresdenZF": "brick",
"AltbauziegelDresdenZG": "brick",
"AltbauziegelDresdenZH": "brick",
"AltbauziegelDresdenZI": "brick",
"AltbauziegelDresdenZJ": "brick",
"AltbauziegelDresdenZK": "brick",
"AltbauziegelDresdenZL": "brick",
"AltbauziegelDresdenZM": "brick",
"AltbauziegelDresdenZN": "brick",
"AltbauziegelDresdenZO": "brick",
"AltbauziegelElbphilharmonie": "brick",
"WienerbergerHochlochBrick": "brick",
"BrickWienerberger": "brick",
"CeramicBrick": "brick",
"AltbauklinkerHamburgHolstenkamp": "brick",
"AltbauziegelAmWeinbergBerlin": "brick",
"AltbauziegelAmWeinbergBerlininside": "brick",
"AltbauziegelAussenziegelII": "brick",
"AltbauziegelBolonga3enCult": "brick",
"AltbauziegelDresdenZb": "brick",
"AltbauziegelPersiusspeicher": "brick",
"AltbauziegelReithallePotsdamAussenziegel1": "brick",
"AltbauziegelReithallePotsdamAussenziegel2": "brick",
"AltbauziegelReithallePotsdamAussenziegel3": "brick",
"AltbauziegelRoteKasernePotsdamAussenziegel1": "brick",
"AltbauziegelRoteKasernePotsdamAussenziegel2": "brick",
"AltbauziegelRoteKasernePotsdamInnenziegel1": "brick",
"AltbauziegelRoteKasernePotsdamInnenziegel2": "brick",
"AltbauziegelSchlossGueterfeldeEGAussenwand1": "brick",
"AltbauziegelSchlossGueterfeldeEGAussenwand2": "brick",
"AltbauziegelTivoliBerlinAussenziegel1": "brick",
"AltbauziegelTivoliBerlinAussenziegel2": "brick",
"AltbauziegelTivoliBerlinInnenziegel": "brick",
"AltbauziegelUSHauptquartierBerlin": "brick",
"ZiegelSchlagmannVollziegel": "brick",
"ZiegelSchlagmannWDZZiegelhuelle": "brick",
"Brick": "brick",
"LehmbausteinUngebrannt": "brick",
"DTUBrick": "brick",
"LimeSandBrickIndustrial": "brick",
"LimeSandBrickTraditional": "brick",
"SandstoneCotta": "sandstone",
"SandstonePosta": "sandstone",
"SandstoneReinhardsdorf": "sandstone",
"WeatheredGranite": "sandstone",
"BundsandsteinrotHessen": "sandstone",
"CarraraMamor": "sandstone",
"KrensheimerMuschelkalk": "sandstone",
"SandsteinBadBentheim": "sandstone",
"SandsteinHildesheim": "sandstone",
"SandstoneIndiaNewSaInN": "sandstone",
"SandsteinMuehlleiteeisenhaltigeBank": "sandstone",
"SandsteinRuethen": "sandstone",
"SandsteinVelbke": "sandstone",
"Tuffstein": "other",
"TuffsteinJapan": "other",
"limesandstone": "sandstone",
"LimeSandBrick": "limestone",
"XellaKalksandstein": "sandstone",
"KalksandsteinXellaYtong2002": "other",
"KalksandsteinXellaYtong2004": "other",
"BundsandsteinIndienHumayunVerwittert": "sandstone",
"CarraraMamorSkluptur": "sandstone",
"SandstoneArholzen": "sandstone",
"SandstoneKarlshafener": "sandstone",
"SandstoneKrenzheimer": "sandstone",
"SandstoneMonteMerlo": "sandstone",
"SandstoneOberkirchner": "sandstone",
"SandstoneSander": "sandstone",
"SandstoneSchleerither": "sandstone",
"LimeSandbrick": "limestone",
"Lime Cement Plaster Light": "limestone",
"Lime Cement Mortar(High Cement Ratio)": "sandstone",
"Lime Cement Mortar(Low Cement Ratio)": "limestone",
"LimeCementMortar": "limestone",
"DTUMortar": "sandstone",
"LimePlasterHist": "limestone"
}
return materials[material]
def material_parameters(material_name):
material_type = extract_material_type(material_name)
default_parameters = {"alfa": 1, "beta": 1, "gamma": 1, "deltaA": 1, "etaA": 1, "lambdaA": 1, "muA": 1,
"deltaK": 1, "etaK": 1, "lambdaK": 1, "muK": 1}
if material_type == 'sandstone':
default_parameters.update({'alfa': 2, "beta": 1.724, "gamma": 0.2})
elif material_type == 'limestone':
default_parameters.update({'alfa': 100, "beta": 6.897, "gamma": 1.6})
return default_parameters
def create_a_parameters(porosity, roughness, material_parameters):
A1 = 3.8447E-4
A2 = -4.0800E-6
A3 = -2.1164E-4
B1 = -2.7874E-2
B2 = 2.95905E-4
B3 = 1.1856E-2
C1 = 5.5270E-1
C2 = -5.8670E-3
C3 = -1.4727E-1
D1 = -2.1146
D2 = 2.2450E-2
D3 = 4.7041E-1
ra = material_parameters['deltaA'] * (A1 * porosity + A2 * roughness + A3)
sa = material_parameters['etaA'] * (B1 * porosity + B2 * roughness + B3)
ua = material_parameters['lambdaA'] * (C1 * porosity + C2 * roughness + C3)
va = material_parameters['muA'] * (D1 * porosity + D2 * roughness + D3)
return ra, sa, ua, va
def create_k_parameters(porosity, roughness, material_parameters):
E1 = 8.3270E-5
E2 = 6.7E-7
E3 = -1.8459E-4
F1 = -6.0378E-3
F2 = -4.88E-5
F3 = 9.877E-3
G1 = 1.1971E-1
G2 = 9.69E-4
G3 = -1.0759E-1
H1 = -4.5803E-1
H2 = -3.71E-3
H3 = 3.1809E-1
rk = material_parameters['deltaK'] * (E1 * porosity + E2 * roughness + E3)
sk = material_parameters['etaK'] * (F1 * porosity + F2 * roughness + F3)
uk = material_parameters['lambdaK'] * (G1 * porosity + G2 * roughness + G3)
vk = material_parameters['muK'] * (H1 * porosity + H2 * roughness + H3)
return rk, sk, uk, vk
def initial_t(roughness, gamma):
if roughness == 5.02:
return 30
else:
return 24 * gamma * (5 / ((roughness - 5.02) ** 2))
def create_ac_at(alfa, porosity, roughness):
ac_at = (1 - np.exp(-alfa * (2.48 * porosity + 0.126 * roughness) ** 4))
if ac_at < 0:
ac_at = 0
elif ac_at > 1:
ac_at = 1
return ac_at
def create_k_rate_coefficient(beta, porosity, total_pore_area):
k_rate_coefficient = (1 - np.exp(-beta * ((4.49e-3 * (porosity * total_pore_area) - 5.79e-3) / 2.09) ** 2))
k_rate_coefficient = np.max([0.0, k_rate_coefficient])
return k_rate_coefficient
def tau_a_func(temp, ra, sa, ua, va):
tau_a = ra * temp ** 3 + sa * temp ** 2 + ua * temp + va
return float(np.clip(tau_a, 0, 1))
def tau_k_func(temp, rk, sk, uk, vk):
tau_k = rk * temp ** 3 + sk * temp ** 2 + uk * temp + vk
return float(np.clip(tau_k, 0, 1))
def favourable_growth_conditions(rh, temp, time, t1):
if rh >= 0.98 and 5 < temp < 40 and time > t1:
return True
else:
return False
material = material_parameters(material_name)
rk, sk, uk, vk = create_k_parameters(porosity, roughness, material)
ra, sa, ua, va = create_a_parameters(porosity, roughness, material)
t1 = initial_t(roughness, material['gamma'])
ac_at = create_ac_at(material['alfa'], porosity, roughness)
k_rate_coefficient = create_k_rate_coefficient(material['beta'], porosity, total_pore_area)
covered_area = [0, ]
for time in range(len(temperature)):
temp = temperature[time]
try:
rh = relative_humidity[time]
except IndexError:
break
if favourable_growth_conditions(rh, temp, time, t1):
tau_a = tau_a_func(temp, ra, sa, ua, va)
tau_k = tau_k_func(temp, rk, sk, uk, vk)
if covered_area[-1] < tau_a * ac_at:
delta_t = (-(1 / (tau_k * k_rate_coefficient)) * np.log(1 - (covered_area[-1] / (tau_a * ac_at)))) ** (
1 / 4) - (time - 1 - t1)
covered_area.append(
tau_a * ac_at * (1 - np.exp(-tau_k * k_rate_coefficient * (time + delta_t - t1) ** 4)))
else:
covered_area.append(covered_area[-1])
else:
covered_area.append(covered_area[-1])
return covered_area
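# Minimal usage sketch (illustrative values only; realistic porosity, roughness
# and pore-area inputs depend on the material data set being used):
if __name__ == '__main__':
    rh_series = [0.99] * 200       # hourly relative humidity
    temp_series = [20.0] * 200     # hourly temperature in C
    coverage = algae(rh_series, temp_series, 'SandstoneCotta',
                     porosity=25.0, roughness=6.0, total_pore_area=1.5)
    print('final covered area:', coverage[-1])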
|
{"hexsha": "593a19f940fc0de9ce0bf0c512b23cfc156370cd", "size": 9472, "ext": "py", "lang": "Python", "max_stars_repo_path": "data_process/algae_script/algae_model.py", "max_stars_repo_name": "ribuild/delphin_6_automation", "max_stars_repo_head_hexsha": "12024381fc1042b46314c55d88b6349229ea33b7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2017-11-08T18:37:36.000Z", "max_stars_repo_stars_event_max_datetime": "2018-01-09T12:10:58.000Z", "max_issues_repo_path": "data_process/algae_script/algae_model.py", "max_issues_repo_name": "ribuild/delphin_6_automation", "max_issues_repo_head_hexsha": "12024381fc1042b46314c55d88b6349229ea33b7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 111, "max_issues_repo_issues_event_min_datetime": "2018-02-26T08:25:44.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-31T19:17:19.000Z", "max_forks_repo_path": "data_process/algae_script/algae_model.py", "max_forks_repo_name": "thp44/delphin_6_automation", "max_forks_repo_head_hexsha": "12024381fc1042b46314c55d88b6349229ea33b7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2017-11-06T10:01:25.000Z", "max_forks_repo_forks_event_max_datetime": "2018-02-14T09:45:28.000Z", "avg_line_length": 39.9662447257, "max_line_length": 119, "alphanum_fraction": 0.5812922297, "include": true, "reason": "import numpy", "num_tokens": 2885}
|
// (C) Copyright 2008 CodeRage, LLC (turkanis at coderage dot com)
// (C) Copyright 2004-2007 Jonathan Turkanis
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt.)
// See http://www.boost.org/libs/iostreams for documentation.
#ifndef BOOST_IOSTREAMS_ARRAY_HPP_INCLUDED
#define BOOST_IOSTREAMS_ARRAY_HPP_INCLUDED
#if defined(_MSC_VER)
# pragma once
#endif
#include <boost/config.hpp> // BOOST_MSVC, make sure size_t is in std.
#include <boost/detail/workaround.hpp>
#include <cstddef> // std::size_t.
#include <utility> // pair.
#include <boost/iostreams/categories.hpp>
#include <boost/preprocessor/cat.hpp>
#include <boost/static_assert.hpp>
#include <boost/type_traits/is_convertible.hpp>
#include <boost/type_traits/is_same.hpp>
namespace boost { namespace iostreams {
namespace detail {
template<typename Mode, typename Ch>
class array_adapter {
public:
typedef Ch char_type;
typedef std::pair<char_type*, char_type*> pair_type;
struct category
: public Mode,
public device_tag,
public direct_tag
{ };
array_adapter(char_type* begin, char_type* end);
array_adapter(char_type* begin, std::size_t length);
array_adapter(const char_type* begin, const char_type* end);
array_adapter(const char_type* begin, std::size_t length);
template<int N>
array_adapter(char_type (&ar)[N])
: begin_(ar), end_(ar + N)
{ }
pair_type input_sequence();
pair_type output_sequence();
private:
char_type* begin_;
char_type* end_;
};
} // End namespace detail.
#define BOOST_IOSTREAMS_ARRAY(name, mode) \
template<typename Ch> \
struct BOOST_PP_CAT(basic_, name) : detail::array_adapter<mode, Ch> { \
private: \
typedef detail::array_adapter<mode, Ch> base_type; \
public: \
typedef typename base_type::char_type char_type; \
typedef typename base_type::category category; \
BOOST_PP_CAT(basic_, name)(char_type* begin, char_type* end) \
: base_type(begin, end) { } \
BOOST_PP_CAT(basic_, name)(char_type* begin, std::size_t length) \
: base_type(begin, length) { } \
BOOST_PP_CAT(basic_, name)(const char_type* begin, const char_type* end) \
: base_type(begin, end) { } \
BOOST_PP_CAT(basic_, name)(const char_type* begin, std::size_t length) \
: base_type(begin, length) { } \
template<int N> \
BOOST_PP_CAT(basic_, name)(Ch (&ar)[N]) \
: base_type(ar) { } \
}; \
typedef BOOST_PP_CAT(basic_, name)<char> name; \
typedef BOOST_PP_CAT(basic_, name)<wchar_t> BOOST_PP_CAT(w, name); \
/**/
BOOST_IOSTREAMS_ARRAY(array_source, input_seekable)
BOOST_IOSTREAMS_ARRAY(array_sink, output_seekable)
BOOST_IOSTREAMS_ARRAY(array, seekable)
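// Example usage (a minimal sketch; requires <boost/iostreams/stream.hpp>,
// which this header does not itself include):
//   char buffer[32];
//   boost::iostreams::stream<boost::iostreams::array_sink> out(buffer);
//   out << "hello";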
#undef BOOST_IOSTREAMS_ARRAY
//------------------Implementation of array_adapter---------------------------//
namespace detail {
template<typename Mode, typename Ch>
array_adapter<Mode, Ch>::array_adapter
(char_type* begin, char_type* end)
: begin_(begin), end_(end)
{ }
template<typename Mode, typename Ch>
array_adapter<Mode, Ch>::array_adapter
(char_type* begin, std::size_t length)
: begin_(begin), end_(begin + length)
{ }
template<typename Mode, typename Ch>
array_adapter<Mode, Ch>::array_adapter
(const char_type* begin, const char_type* end)
: begin_(const_cast<char_type*>(begin)), // Treated as read-only.
end_(const_cast<char_type*>(end)) // Treated as read-only.
{ BOOST_STATIC_ASSERT((!is_convertible<Mode, output>::value)); }
template<typename Mode, typename Ch>
array_adapter<Mode, Ch>::array_adapter
(const char_type* begin, std::size_t length)
: begin_(const_cast<char_type*>(begin)), // Treated as read-only.
end_(const_cast<char_type*>(begin) + length) // Treated as read-only.
{ BOOST_STATIC_ASSERT((!is_convertible<Mode, output>::value)); }
template<typename Mode, typename Ch>
typename array_adapter<Mode, Ch>::pair_type
array_adapter<Mode, Ch>::input_sequence()
{ BOOST_STATIC_ASSERT((is_convertible<Mode, input>::value));
return pair_type(begin_, end_); }
template<typename Mode, typename Ch>
typename array_adapter<Mode, Ch>::pair_type
array_adapter<Mode, Ch>::output_sequence()
{ BOOST_STATIC_ASSERT((is_convertible<Mode, output>::value));
return pair_type(begin_, end_); }
} // End namespace detail.
//----------------------------------------------------------------------------//
} } // End namespaces iostreams, boost.
#endif // #ifndef BOOST_IOSTREAMS_ARRAY_HPP_INCLUDED
|
{"hexsha": "ad4ae05d7cddbf471e20e839d8f3119185f77ae0", "size": 4902, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "ios/Pods/boost-for-react-native/boost/iostreams/device/array.hpp", "max_stars_repo_name": "rudylee/expo", "max_stars_repo_head_hexsha": "b3e65a7a5b205f14a3eb6cd6fa8d13c8d663b1cc", "max_stars_repo_licenses": ["Apache-2.0", "MIT"], "max_stars_count": 8805.0, "max_stars_repo_stars_event_min_datetime": "2015-11-03T00:52:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T22:30:03.000Z", "max_issues_repo_path": "ios/Pods/boost-for-react-native/boost/iostreams/device/array.hpp", "max_issues_repo_name": "rudylee/expo", "max_issues_repo_head_hexsha": "b3e65a7a5b205f14a3eb6cd6fa8d13c8d663b1cc", "max_issues_repo_licenses": ["Apache-2.0", "MIT"], "max_issues_count": 14694.0, "max_issues_repo_issues_event_min_datetime": "2015-02-24T15:13:42.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T13:16:45.000Z", "max_forks_repo_path": "ios/Pods/boost-for-react-native/boost/iostreams/device/array.hpp", "max_forks_repo_name": "rudylee/expo", "max_forks_repo_head_hexsha": "b3e65a7a5b205f14a3eb6cd6fa8d13c8d663b1cc", "max_forks_repo_licenses": ["Apache-2.0", "MIT"], "max_forks_count": 1329.0, "max_forks_repo_forks_event_min_datetime": "2015-11-03T20:25:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T18:10:38.000Z", "avg_line_length": 36.5820895522, "max_line_length": 83, "alphanum_fraction": 0.6517747858, "num_tokens": 1151}
|
subroutine foo(d)
real(kind=8) :: d
print *, d
end subroutine foo
program test
call foo(0.01d0)
end program
|
{"hexsha": "92bc813c9a7f9847c965be852777a8d34862479e", "size": 115, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "test/mlir_out_tests/const_arg.f90", "max_stars_repo_name": "clementval/fc", "max_stars_repo_head_hexsha": "a5b444963c1b46e4eb34d938d992836d718010f7", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/mlir_out_tests/const_arg.f90", "max_issues_repo_name": "clementval/fc", "max_issues_repo_head_hexsha": "a5b444963c1b46e4eb34d938d992836d718010f7", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/mlir_out_tests/const_arg.f90", "max_forks_repo_name": "clementval/fc", "max_forks_repo_head_hexsha": "a5b444963c1b46e4eb34d938d992836d718010f7", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 12.7777777778, "max_line_length": 19, "alphanum_fraction": 0.6869565217, "num_tokens": 39}
|
# https://www.kaggle.com/maniyar2jaimin/interactive-plotly-guide-to-pca-lda-t-sne
# PCA (Principal Component Analysis),
# LDA ( Linear Discriminant Analysis) and
# TSNE ( T-Distributed Stochastic Neighbour Embedding)
import numpy as np
import pandas as pd
df = pd.read_csv('https://covid.ourworldindata.org/data/owid-covid-data.csv')
# latest data
exclude_countries = ['OWID_AFR', 'OWID_ASI', 'OWID_EUN', 'OWID_INT', 'OWID_EUR',
'OWID_KOS', 'OWID_NAM','OWID_CYN', 'OWID_OCE', 'OWID_SAM', 'OWID_WRL']
features=['iso_code', 'continent', 'location', 'population_density',
'median_age', 'gdp_per_capita', 'cardiovasc_death_rate',
'diabetes_prevalence', 'life_expectancy', 'human_development_index'
,'total_cases_per_million']
chosen_features = ['gdp_per_capita', 'cardiovasc_death_rate',
'diabetes_prevalence', 'life_expectancy', 'human_development_index']
df2 = df[df.date==df.date.max()][~df.iso_code.isin(exclude_countries)][features].dropna().reset_index(drop=True)
'''
Annotation function
'''
def header_and_footer(header, offset_top, offset_bottom):
# Title
fig.add_annotation(dict(xref='paper',yref='paper',x=0.5,y=offset_top,xanchor='center',yanchor='top',
font=dict(family='Arial',size=20,color='grey'),showarrow=False,
text=header))
fig.add_annotation(dict(xref='paper',yref='paper',x=0.5,y=(offset_top-0.07),xanchor='center',yanchor='top',
font=dict(family='Arial',size=16,color='grey'),showarrow=False,
text="Each point represents a country"))
# Footer
fig.add_annotation(dict(xref='paper',yref='paper',x=0.5,y=-(offset_bottom),xanchor='center',yanchor='top',
font=dict(family='Arial', size=12, color='grey'),showarrow=False,
text='#30DayChartChallenge - multivariate - 2021/04/15 | Data: OWID | twitter.com/vivekparasharr'))
fig.add_annotation(dict(xref='paper',yref='paper',x=0.5,y=-(offset_bottom+0.05),xanchor='center',yanchor='top',
font=dict(family='Arial', size=11, color='grey'),showarrow=False,
text='*note: gdp per capita, cardiovasc death rate, diabetes prevalence, life expectancy, human development index'))
# PRINCIPAL COMPONENT ANALYSIS
# Using SKLEARN
# feature scaling
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
x_scaled=sc.fit_transform(df2[chosen_features])
# pca
from sklearn.decomposition import PCA
pca = PCA(n_components=None)
x_pca = pca.fit_transform(x_scaled)
# variance explained by principal components
pca.explained_variance_ratio_
# Plotting explained variance
exp_var_cumul=pca.explained_variance_ratio_.cumsum()
import plotly.express as px
px.area(
x=range(1, exp_var_cumul.shape[0] + 1),
y=exp_var_cumul,
labels={"x": "# Components", "y": "Explained Variance"}
)
# VISUALIZATION
# Using PLOTLY
# https://plotly.com/python/pca-visualization/
# Correlation plot of original features
import plotly.express as px
fig = px.scatter_matrix(df2, dimensions=chosen_features, color="continent",
labels={"gdp_per_capita": "gdp*",
"cardiovasc_death_rate": "heart*",
"diabetes_prevalence": "diabetes*",
"life_expectancy": "life*",
"human_development_index": "hdi*"})
fig.update_traces(diagonal_visible=False, showupperhalf=False,)
fig.update_layout(template="plotly_dark")
header_and_footer("Scatter Matrix showing correlation between multiple variables", 1.2, 0.15)
fig.show()  # renderer="browser"
# Visualize all principal components
import plotly.express as px
labels = {
str(i): f"PC {i+1} ({var:.1f}%)"
for i, var in enumerate(pca.explained_variance_ratio_ * 100)
}
fig = px.scatter_matrix(x_pca,labels=labels,dimensions=range(4),
color=df2["continent"])
fig.update_traces(diagonal_visible=False)
fig.update_layout(template="plotly_dark")
fig.show()  # renderer="browser"
# 2D PCA Scatter Plot
import plotly.express as px
fig = px.scatter(x_pca, x=0, y=1, color=df2['continent'],
labels={'0': 'PC 1 (59.2%)', '1': 'PC 2 (21.4%)',})
fig.update_layout(template="plotly_dark")
header_and_footer("Principal Component Analysis: 80.6pct of variance explained", 1.2, 0.15)
fig.show()
# Visualize Loadings
# indicate which feature a certain loading original belong to
# loadings = eigenvectors * sqrt(eigenvalues)
import plotly.express as px
fig = px.scatter(x_pca, x=0, y=1, color=df2['continent'])
# additional code for loading
loadings = pca.components_.T * np.sqrt(pca.explained_variance_)
for i, feature in enumerate(chosen_features):
fig.add_shape(
type='line',
x0=0, y0=0,
x1=loadings[i, 0],
y1=loadings[i, 1]
)
fig.add_annotation(
x=loadings[i, 0],
y=loadings[i, 1],
ax=0, ay=0,
xanchor="center",
yanchor="bottom",
text=feature,
)
fig.show(renderer='browser')
# LDA - LINEAR DISCRIMINANT ANALYSIS
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
lda = LDA(n_components=None)
# Taking in as second argument the Target as labels
x_lda = lda.fit_transform(df2[chosen_features], df2.continent.values)
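# Unlike PCA, LDA is supervised: the continent labels above drive the
# directions that best separate the classes.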
# variance explained by linear discriminants
lda.explained_variance_ratio_
# Plotting explained variance
exp_var_cumul=lda.explained_variance_ratio_.cumsum()
import plotly.express as px
px.area(
x=range(1, exp_var_cumul.shape[0] + 1),
y=exp_var_cumul,
labels={"x": "# Discriminants", "y": "Explained Variance"}
)
# Visualize all linear discriminants
import plotly.express as px
labels = {
str(i): f"LD {i+1} ({var:.1f}%)"
for i, var in enumerate(lda.explained_variance_ratio_ * 100)
}
fig = px.scatter_matrix(x_lda,labels=labels,dimensions=range(4),
color=df2["continent"])
fig.update_traces(diagonal_visible=False)
fig.update_layout(template="plotly_dark")
fig.show(renderer="browser")
# 2D LDA Scatter Plot
import plotly.express as px
fig = px.scatter(x_lda, x=0, y=1, color=df2['continent'],
labels={'0': 'LD 1 (74.1%)', '1': 'LD 2 (16.0%)'})
fig.update_layout(template="plotly_dark")
header_and_footer("Linear Discriminant Analysis: 90.1pct of variance explained", 1.2, 0.15)
fig.show()
# TSNE (T-Distributed Stochastic Neighbour Embedding)
from sklearn.manifold import TSNE
tsne = TSNE()
# Taking in as second argument the Target as labels
x_tsne = tsne.fit_transform(df2[chosen_features])
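# Note: t-SNE is non-parametric (no transform for unseen data) and only
# preserves local neighbourhood structure, so axis values have no global meaning.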
# 2D Scatter Plot
import plotly.express as px
fig = px.scatter(x_tsne, x=0, y=1, color=df2['continent'])
fig.update_layout(template="plotly_dark")
header_and_footer("TSNE (T-Distributed Stochastic Neighbour Embedding", 1.2, 0.15)
fig.show()
|
{"hexsha": "ccb28f561b826bfeedc96a195b2de1cee7593e71", "size": 6523, "ext": "py", "lang": "Python", "max_stars_repo_path": "30DayChartChallenge/20210415-multivariate.py", "max_stars_repo_name": "vivekparasharr/Challenges-and-Competitions", "max_stars_repo_head_hexsha": "c99d67838a0bb14762d5f4be4993dbcce6fe0c5a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-01-11T20:12:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-15T04:53:45.000Z", "max_issues_repo_path": "30DayChartChallenge/20210415-multivariate.py", "max_issues_repo_name": "vivekparasharr/Challenges-and-Competitions", "max_issues_repo_head_hexsha": "c99d67838a0bb14762d5f4be4993dbcce6fe0c5a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "30DayChartChallenge/20210415-multivariate.py", "max_forks_repo_name": "vivekparasharr/Challenges-and-Competitions", "max_forks_repo_head_hexsha": "c99d67838a0bb14762d5f4be4993dbcce6fe0c5a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-30T19:15:46.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-30T19:15:46.000Z", "avg_line_length": 33.6237113402, "max_line_length": 120, "alphanum_fraction": 0.7269661199, "include": true, "reason": "import numpy", "num_tokens": 1895}
|
import numpy as np
import torch
from torch import nn
from torch import optim
import matplotlib.pyplot as plt
from torchvision import datasets, transforms, models
import torch.nn.functional as F
from collections import OrderedDict
import json
import argparse
import os
from image_processing import transform_image
from model import create_model
def get_args():
"""
Get arguments from command line
"""
parser = argparse.ArgumentParser()
parser.add_argument("data_directory", action="store", type=str, default="/home/workspace/aipnd-project/flowers", help="data directory containing training and testing data")
parser.add_argument("model_arch", type=str, default='Densenet121', help="Choose a model: VGG19 or Densenet121")
parser.add_argument("Learning_rate", type=float, default = 0.001)
parser.add_argument("hidden_units", type = int, default = 500)
parser.add_argument("epochs", type=int, default=3)
parser.add_argument("device", type=str, default = "GPU", help="Use CPU or GPU")
args = parser.parse_args()
return args
def main():
    in_args = get_args()  # avoid shadowing the built-in input()
    data_dir = in_args.data_directory
    trainloader, testloader, validloader, traindata = transform_image(data_dir)
    if in_args.device == "GPU":
        device = "cuda"
    else:
        device = "cpu"
    model, criterion, optimizer, classifier = create_model(in_args.model_arch, device, in_args.Learning_rate, in_args.hidden_units)
    train_model = do_deep_learning(model, trainloader, validloader, in_args.epochs, 40, criterion, optimizer, device)
validate(train_model, testloader, device, criterion)
save_checkpoint(train_model, classifier, optimizer, traindata)
print('Completed!')
def do_deep_learning(model, trainloader, validloader, epochs, print_every, criterion, optimizer, device='cpu'):
epochs = epochs
print_every = print_every
steps = 0
train_losses, valid_losses = [], []
# change to cuda
model.to(device)
for e in range(epochs):
running_loss = 0
for images, labels in iter(trainloader):
steps += 1
images, labels = images.to(device), labels.to(device)
optimizer.zero_grad()
# Forward and backward passes
outputs = model.forward(images)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if steps % print_every == 0:
model.eval()
valid_loss = 0
correct = 0
with torch.no_grad():
for inputs, labels in validloader:
inputs, labels = inputs.to(device), labels.to(device)
outputs = model.forward(inputs)
batch_loss = criterion(outputs, labels)
valid_loss += batch_loss.item()
prediction = torch.exp(outputs)
top_p, top_class = prediction.topk(1, dim=1)
equals = top_class == labels.view(*top_class.shape)
correct += torch.mean(equals.type(torch.FloatTensor)).item()
train_losses.append(running_loss/len(trainloader))
valid_losses.append(valid_loss/len(validloader))
print("Epoch: {}/{}... ".format(e+1, epochs),
"Train loss: {:.4f}... ".format(running_loss/print_every),
"Validation loss: {:4f}...".format(valid_loss/len(validloader)),
"Validation accuracy: {:.4f}".format(correct/len(validloader)) )
running_loss = 0
model.train()
return model
def validate(model, testloader, device, criterion):
model.eval()
correct = 0
test_loss = 0
model.to(device)
with torch.no_grad():
for inputs, labels in testloader:
inputs, labels = inputs.to(device), labels.to(device)
outputs = model.forward(inputs)
batch_loss = criterion(outputs, labels)
test_loss += batch_loss.item()
prediction = torch.exp(outputs)
top_p, top_class = prediction.topk(1, dim=1)
equals = top_class == labels.view(*top_class.shape)
correct += torch.mean(equals.type(torch.FloatTensor)).item()
print("Testing Loss: {:.3f}".format(test_loss/len(testloader)),"Testing Accuracy: {:.3f}".format(correct/len(testloader)))
def save_checkpoint(model, classifier, optimizer, train_data):
    checkpoint = {'arch': 'densenet121',
                  'classifier': classifier,
                  'state_dict': model.state_dict(),  # without the weights the checkpoint cannot be restored
                  'optimizer': optimizer.state_dict(),
                  'mapping': train_data.class_to_idx}
torch.save(checkpoint, '/home/workspace/aipnd-project/checkpoint.pth')
print("Saved")
if __name__ == '__main__':
main()
|
{"hexsha": "a5ad751eec191766b5151790886e0d93113e6f97", "size": 5073, "ext": "py", "lang": "Python", "max_stars_repo_path": "train.py", "max_stars_repo_name": "ryahollands99/Udacity-AI-Programming-Final-Project", "max_stars_repo_head_hexsha": "936dcddf72385c15e37a360a713f2c59e7b183de", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "train.py", "max_issues_repo_name": "ryahollands99/Udacity-AI-Programming-Final-Project", "max_issues_repo_head_hexsha": "936dcddf72385c15e37a360a713f2c59e7b183de", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train.py", "max_forks_repo_name": "ryahollands99/Udacity-AI-Programming-Final-Project", "max_forks_repo_head_hexsha": "936dcddf72385c15e37a360a713f2c59e7b183de", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.9862068966, "max_line_length": 176, "alphanum_fraction": 0.6035876207, "include": true, "reason": "import numpy", "num_tokens": 1041}
|
"""This module tests the PauliGate class."""
from __future__ import annotations
import numpy as np
import pytest
from hypothesis import given
from hypothesis.strategies import floats
from hypothesis.strategies import integers
from bqskit.ir.gates import IdentityGate
from bqskit.ir.gates import PauliGate
from bqskit.ir.gates import RXGate
from bqskit.ir.gates import RXXGate
from bqskit.ir.gates import RYGate
from bqskit.ir.gates import RYYGate
from bqskit.ir.gates import RZGate
from bqskit.ir.gates import RZZGate
from bqskit.qis.unitary.unitarymatrix import UnitaryMatrix
from bqskit.utils.test.strategies import num_qudits
from bqskit.utils.test.strategies import unitaries
class TestInit:
@given(num_qudits(4))
def test_valid(self, num_qudits: int) -> None:
g = PauliGate(num_qudits)
assert g.num_qudits == num_qudits
assert g.num_params == 4 ** num_qudits
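        # one parameter per Pauli string: there are 4 ** n strings on n qudits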
identity = np.identity(2 ** num_qudits)
assert g.get_unitary([0] * 4 ** num_qudits) == identity
@given(integers(max_value=0))
def test_invalid(self, num_qudits: int) -> None:
with pytest.raises(ValueError):
PauliGate(num_qudits)
class TestGetUnitary:
@given(floats(allow_nan=False, allow_infinity=False, width=16))
def test_i(self, angle: float) -> None:
g = PauliGate(1)
i = IdentityGate(1).get_unitary()
dist = g.get_unitary([angle, 0, 0, 0]).get_distance_from(i)
assert dist < 1e-7
@given(floats(allow_nan=False, allow_infinity=False, width=16))
def test_x(self, angle: float) -> None:
g = PauliGate(1)
x = RXGate()
assert g.get_unitary([0, angle, 0, 0]) == x.get_unitary([angle])
@given(floats(allow_nan=False, allow_infinity=False, width=16))
def test_y(self, angle: float) -> None:
g = PauliGate(1)
y = RYGate()
assert g.get_unitary([0, 0, angle, 0]) == y.get_unitary([angle])
@given(floats(allow_nan=False, allow_infinity=False, width=16))
def test_z(self, angle: float) -> None:
g = PauliGate(1)
z = RZGate()
assert g.get_unitary([0, 0, 0, angle]) == z.get_unitary([angle])
@given(floats(allow_nan=False, allow_infinity=False, width=16))
def test_xx(self, angle: float) -> None:
g = PauliGate(2)
xx = RXXGate()
params = [0.0] * 16
params[5] = angle
assert g.get_unitary(params) == xx.get_unitary([angle])
@given(floats(allow_nan=False, allow_infinity=False, width=16))
def test_yy(self, angle: float) -> None:
g = PauliGate(2)
yy = RYYGate()
params = [0.0] * 16
params[10] = angle
assert g.get_unitary(params) == yy.get_unitary([angle])
@given(floats(allow_nan=False, allow_infinity=False, width=16))
def test_zz(self, angle: float) -> None:
g = PauliGate(2)
zz = RZZGate()
params = [0.0] * 16
params[15] = angle
assert g.get_unitary(params) == zz.get_unitary([angle])
@given(unitaries(1, (2,)))
def test_optimize(utry: UnitaryMatrix) -> None:
g = PauliGate(1)
params = g.optimize(np.array(utry))
assert g.get_unitary(params).get_distance_from(utry.conj().T) < 1e-7
|
{"hexsha": "3c98b4d92c6fde1af6165b51067ec1f98324512a", "size": 3221, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/ir/gates/parameterized/test_pauli.py", "max_stars_repo_name": "jkalloor3/bqskit", "max_stars_repo_head_hexsha": "ad34a6eae3c0e62d2bd960cd4cd841ba8e845811", "max_stars_repo_licenses": ["BSD-3-Clause-LBNL"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2021-05-26T21:32:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T17:48:10.000Z", "max_issues_repo_path": "tests/ir/gates/parameterized/test_pauli.py", "max_issues_repo_name": "jkalloor3/bqskit", "max_issues_repo_head_hexsha": "ad34a6eae3c0e62d2bd960cd4cd841ba8e845811", "max_issues_repo_licenses": ["BSD-3-Clause-LBNL"], "max_issues_count": 20, "max_issues_repo_issues_event_min_datetime": "2021-05-26T20:17:15.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-27T20:04:10.000Z", "max_forks_repo_path": "tests/ir/gates/parameterized/test_pauli.py", "max_forks_repo_name": "jkalloor3/bqskit", "max_forks_repo_head_hexsha": "ad34a6eae3c0e62d2bd960cd4cd841ba8e845811", "max_forks_repo_licenses": ["BSD-3-Clause-LBNL"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-10-05T16:00:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-08T01:30:06.000Z", "avg_line_length": 34.2659574468, "max_line_length": 72, "alphanum_fraction": 0.6588016144, "include": true, "reason": "import numpy", "num_tokens": 960}
|
import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.autograd import Variable
from torch.optim import lr_scheduler
import numpy as np
import h5py
from .EDSR_models.rcan import RCAN
from .RegreClass import *
from .weights_init import *
def get_norm_layer(norm_type='instance'):
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
def get_scheduler(optimizer, opt):
if opt.lr_policy == 'lambda':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.niter) / float(opt.niter_decay)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
else:
raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
return scheduler
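# The 'lambda' policy above keeps the learning rate constant for the first
# opt.niter epochs and then decays it linearly to zero over opt.niter_decay
# epochs. A runnable sketch with hypothetical option values (illustrative
# only, not part of the original options API):
def _demo_lambda_rule(epoch, epoch_count=1, niter=100, niter_decay=100):
    """Mirrors lambda_rule with explicit arguments, for illustration."""
    return 1.0 - max(0, epoch + epoch_count - niter) / float(niter_decay)
# e.g. _demo_lambda_rule(0) == 1.0, _demo_lambda_rule(149) == 0.5,
# _demo_lambda_rule(199) == 0.0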
def define_G(opt, input_nc, output_nc, ngf, which_model_netG, norm='batch', use_dropout=False, gpu_ids=[]):
netG = None
use_gpu = len(gpu_ids) > 0
norm_layer = get_norm_layer(norm_type=norm)
if opt.magnitude:
input_nc = int(opt.input_nc/2)
if use_gpu:
assert(torch.cuda.is_available())
if which_model_netG == 'simple_conv':
netG = SimpleCNN(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'simple_conv_large':
netG = SimpleCNN_large(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'simple_conv_larger':
netG = SimpleCNN_larger(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'simple_conv_small':
netG = SimpleCNN_small(opt, input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'simple_conv_small_PCA':
netG = SimpleCNN_small_PCA(opt, input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'simple_conv_small_at':
netG = SimpleCNN_small_at(opt, input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'nn':
netG = SimpleNN(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'nn_large':
netG = SimpleNN_large(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'Unet':
netG = Unet(opt, input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'Unet_PCA':
netG = Unet_PCA(opt, input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'Unet_PCA_small':
netG = Unet_PCA_small(opt, input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'Unet_PCA_convconn':
netG = Unet_PCA_convconn(opt, input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'Unet_PCA_multiloss':
netG = Unet_PCA_multiloss(opt, input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'Unet_small':
netG = Unet_small(opt, input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'Unet_PCA_deconv':
netG = Unet_PCA_deconv(opt, input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'Unet_T1T2':
netG = Unet_T1T2(opt, input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'Unet_MultiTask':
netG = Unet_MultiTask(opt, input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'Lian':
netG = Lian(opt, input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'UniNet':
netG = UniNet(opt, input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'Hoppe':
netG = hoppe_struc(opt, input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'Cohen':
netG = Cohen_struc(opt, input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'Unet_T1T2_3ds':
netG = Unet_T1T2_3ds(opt, input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'UniNet_init':
netG = UniNet_init(opt, input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'RCAN':
netG = RCAN(opt, opt.input_nc)
elif which_model_netG == 'UniNet_residue':
netG = UniNet_residue(opt, input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'UniNet_residue_multiOut':
netG = UniNet_residue_multiOut(opt, input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'UniNet_RegreClass':
netG = UniNet_RegreClass(opt, input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'RegreClass':
netG = RegreClass(input_nc)
elif which_model_netG == 'Unet_3ds_subpixel':
netG = Unet_3ds_subpixel(opt, input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'hoppe_ISMRM2018':
netG = hoppe_ISMRM2018(opt, input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'Cohen_struc':
netG = Cohen_struc(opt, input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'FNN':
netG = FNN(opt, input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
elif which_model_netG == 'Unet_double':
netG = Unet_double(opt, input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, gpu_ids=gpu_ids)
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % which_model_netG)
if len(gpu_ids) > 0:
# netG.cuda(device=gpu_ids[0])
print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Multiple GPUs!!!!')
netG = nn.DataParallel(netG, device_ids=gpu_ids).cuda()
# if which_model_netG != 'UniNet':
# netG.apply(weights_init)
if opt.PCA:
weights_init_PCA(netG, opt)
# if which_model_netG == 'UniNet':
# weights_init_Pre(netG, opt)
return netG
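# A minimal usage sketch (hypothetical opt fields; illustrative only):
#
#   netG = define_G(opt, input_nc=2, output_nc=1, ngf=64,
#                   which_model_netG='Unet', norm='instance',
#                   use_dropout=False, gpu_ids=[0])
#   print_network(netG)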
def define_D(input_nc, ndf=64, which_model_netD='n_layers',
n_layers_D=3, norm='batch', use_sigmoid=False, gpu_ids=[]):
netD = None
use_gpu = len(gpu_ids) > 0
norm_layer = get_norm_layer(norm_type=norm)
if use_gpu:
assert(torch.cuda.is_available())
if which_model_netD == 'basic':
netD = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
elif which_model_netD == 'n_layers':
netD = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer, use_sigmoid=use_sigmoid, gpu_ids=gpu_ids)
else:
raise NotImplementedError('Discriminator model name [%s] is not recognized' %
which_model_netD)
if use_gpu:
netD.cuda(device=gpu_ids[0])
# netD.apply(weights_init)
return netD
def print_network(net):
num_params = 0
for param in net.parameters():
num_params += param.numel()
print(net)
print('Total number of parameters: %d' % num_params)
#pragma once
#include <vector>
#include <Eigen/Dense>
#include "Utils/IO/IOUtilities.hpp"
class TVRParameter {
public:
double swing_time;
Eigen::Vector2d des_loc;
Eigen::Vector3d stance_foot_loc;
bool b_positive_sidestep;
double yaw_angle;
};
class TVROutput {
public:
double time_modification;
double switching_state[4];
};
class TVRPlanner {
public:
TVRPlanner();
virtual ~TVRPlanner();
void PlannerInitialization(const YAML::Node& node);
void getNextFootLocation(const Eigen::Vector3d& com_pos,
const Eigen::Vector3d& com_vel,
Eigen::Vector3d& target_loc,
const void* additional_input = NULL,
void* additional_output = NULL);
// Set Functions
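// omega_ is the natural frequency of the linear inverted pendulum model:
// omega = sqrt(g / z_com), with g = 9.81 m/s^2 and a constant CoM height.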
void setOmega(double com_height) {
b_set_omega_ = true;
omega_ = sqrt(9.81 / com_height);
}
void CheckEigenValues(double swing_time);
protected:
// current com state: (x, y, xdot, ydot) : 4
// switching com state: (x, y, xdot, ydot) : 4
// target foot: (x, y) : 2
// swing time: t : 1
Eigen::VectorXd planner_save_data_;
std::vector<double> t_prime_;
std::vector<double> kappa_;
std::vector<double> x_step_length_limit_;
std::vector<double> y_step_length_limit_;
std::vector<double> com_vel_limit_;
Eigen::MatrixXd R_w_t_;
double omega_;
bool b_set_omega_;
void _computeSwitchingState(double swing_time,
const Eigen::Vector3d& com_pos,
const Eigen::Vector3d& com_vel,
const Eigen::Vector3d& stance_foot_loc,
std::vector<Eigen::Vector2d>& switching_state);
void _StepLengthCheck(Eigen::Vector3d& target_loc,
const std::vector<Eigen::Vector2d>& switching_state);
void _StepLengthCheck(Eigen::Vector3d& target_loc, bool b_positive_sidestep,
const Eigen::Vector3d& stance_foot);
void _StepLengthCheckConsideringRotation(
Eigen::Vector3d& target_loc, bool b_positive_sidestep,
const Eigen::Vector3d& stance_foot);
void _UpdateRotation(double yaw_angle);
int _check_switch_velocity(
const std::vector<Eigen::Vector2d>& switch_state);
int _check_switch_velocity_considering_rotation(
const std::vector<Eigen::Vector2d>& switch_state);
};
import os
import random
from glob import glob
import numpy as np
import cv2
import matplotlib.pyplot as plt
from Udacity_self_driving_car_challenge_4.image_processing.calibration import camera_cal, found_chessboard, read_camera_cal_file
from Udacity_self_driving_car_challenge_4.image_processing.edge_detection import combing_color_thresh
from Udacity_self_driving_car_challenge_4.image_processing.find_lines import histogram_search, histogram_search2
from Udacity_self_driving_car_challenge_4.image_processing.line_fit_fix import Line
ROOT_PATH = os.getcwd()
IMAGE_TEST_DIR = os.path.join(ROOT_PATH, 'test_images')
IMAGE_OUTPUT_DIR = os.path.join(ROOT_PATH, 'output_images')
VIDEO_OUTPUT_DIR = os.path.join(ROOT_PATH, 'output_video')
IMAGE_PROCESSING_PATH = os.path.join(ROOT_PATH, 'image_processing')
WIDE_DIST_FILE = os.path.join(IMAGE_PROCESSING_PATH, 'wide_dist_pickle.p')
IMAGES_PATH = glob(IMAGE_TEST_DIR + '/*.jpg')
# Load cameraMatrix and distCoeffs parameter
if not os.path.exists(WIDE_DIST_FILE):
objpoints, imgpoints = found_chessboard()
mtx, dist = camera_cal(objpoints, imgpoints)
else:
print('Get parameter from pickle file')
mtx, dist = read_camera_cal_file(WIDE_DIST_FILE)
# Get Perspective Transform Parameter
offset = 1280 / 2
src = np.float32([(596, 447), (683, 447), (1120, 720), (193, 720)]) # Longer line
# src = np.float32([(578, 460), (704, 460), (1120, 720), (193, 720)]) # shorter line
dst = np.float32([(offset-300, 0), (offset+300, 0), (offset+300, 720), (offset-300, 720)])
perspective_M = cv2.getPerspectiveTransform(src, dst)
inver_perspective_M = cv2.getPerspectiveTransform(dst, src)
left_line = Line()
right_line = Line()
count_h1 = 0
count_h2 = 0
def process_image(image, show_birdview=False):
global count_h1, count_h2
# Apply a distortion correction to raw images.
image = cv2.undistort(image, mtx, dist, None, None)
# Use color transforms, gradients to find the object edge and change into binary image
image_binary = combing_color_thresh(image)
# Transform image to bird view
image_bird_view = cv2.warpPerspective(image_binary, perspective_M, image.shape[1::-1], flags=cv2.INTER_LINEAR)
# find the road lines, curvature and distance between car_center and road_center
color_warp, curv, center, left_or_right, left_line.new_fit, right_line.new_fit, left_line.allx, right_line.allx = histogram_search(image_bird_view)
# Warp the blank back to original image space using inverse perspective matrix (Minv)
warp_back = cv2.warpPerspective(color_warp, inver_perspective_M, (image.shape[1], image.shape[0]))
# Combine the result with the original image
img_out = cv2.addWeighted(image, 1, warp_back, 0.3, 0)
# Add description on images
text1 = "Radius of Curature = {:.2f}(m)".format(curv)
text2 = "Vehicle is {:.3f}m {} of center".format(abs(center), left_or_right)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img_out, text1, (50, 50), font, 1.5, color=(255, 255, 255), thickness=3)
cv2.putText(img_out, text2, (50, 100), font, 1.5, color=(255, 255, 255), thickness=3)
if show_birdview:
show_image_bird_view = cv2.resize(image_bird_view, (360, 360))
show_image_bird_view = cv2.cvtColor(show_image_bird_view, cv2.COLOR_GRAY2RGB)
show_color_warp = cv2.resize(color_warp, (360, 360))
show_color_warp = cv2.addWeighted(show_image_bird_view, 1, show_color_warp, 0.5, 0)
return img_out, show_image_bird_view, show_color_warp
return img_out
def process_video(image, show_birdview=False):
global count_h1, count_h2
# Apply a distortion correction to raw images.
image = cv2.undistort(image, mtx, dist, None, None)
# Use color transforms, gradients to find the object edge and change into binary image
image_binary = combing_color_thresh(image)
# Transform image to bird view
image_bird_view = cv2.warpPerspective(image_binary, perspective_M, image.shape[1::-1], flags=cv2.INTER_LINEAR)
# find the road lines, curvature and distance between car_center and road_center
if not left_line.detected or not right_line.detected:
color_warp, curv, center, left_or_right, left_line.new_fit, right_line.new_fit, \
left_line.allx, right_line.allx = histogram_search(image_bird_view)
count_h1 += 1
else:
color_warp, curv, center, left_or_right, left_line.new_fit, right_line.new_fit, \
left_line.allx, right_line.allx = histogram_search2(image_bird_view, left_line.best_fit, right_line.best_fit)
count_h2 += 1
# Check the lines health
left_line.fit_fix()
right_line.fit_fix()
# Warp the blank back to original image space using inverse perspective matrix (Minv)
warp_back = cv2.warpPerspective(color_warp, inver_perspective_M, (image.shape[1], image.shape[0]))
# Combine the result with the original image
img_out = cv2.addWeighted(image, 1, warp_back, 0.3, 0)
# Add description on images
text1 = "Radius of Curature = {:.2f}(m)".format(curv)
text2 = "Vehicle is {:.3f}m {} of center".format(abs(center), left_or_right)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img_out, text1, (50, 50), font, 1.5, color=(255, 255, 255), thickness=3)
cv2.putText(img_out, text2, (50, 100), font, 1.5, color=(255, 255, 255), thickness=3)
if show_birdview:
show_image_bird_view = cv2.resize(image_bird_view, (360, 360))
show_image_bird_view = cv2.cvtColor(show_image_bird_view, cv2.COLOR_GRAY2RGB)
show_color_warp = cv2.resize(color_warp, (360, 360))
show_color_warp = cv2.addWeighted(show_image_bird_view, 1, show_color_warp, 0.5, 0)
return img_out, show_image_bird_view, show_color_warp
return img_out
def test_image():
# randomly choose an image to test
random_chose = random.randint(0, len(IMAGES_PATH)-1)
img_test = IMAGES_PATH[random_chose]
print(img_test)
img = cv2.imread(img_test)
img_out = process_image(img)
# Converter BGR -> RGB for plt show
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img_out = cv2.cvtColor(img_out, cv2.COLOR_BGR2RGB)
plt.figure(figsize=(10, 8))
plt.subplot(2, 1, 1)
plt.title('Original Image')
plt.imshow(img)
plt.subplot(2, 1, 2)
plt.title('Output Image')
plt.imshow(img_out, cmap='gray')
plt.show()
def test_images():
for path in IMAGES_PATH:
img = cv2.imread(path)
img_out, show_image_bird_view, show_color_warp = process_image(img, show_birdview=True)
add_image = np.vstack((show_image_bird_view, show_color_warp))
img_out = np.hstack((img_out, add_image))
img_out_path = os.path.join(IMAGE_OUTPUT_DIR, os.path.split(path)[-1].split('.')[0] + '.png')
cv2.imwrite(img_out_path, img_out)
def test_video():
video_file = 'project_video.mp4'
video_output_file = os.path.join(VIDEO_OUTPUT_DIR, video_file.split('.')[0] + '.avi')
# Video save
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(video_output_file, fourcc, 20, (1640, 720))
# Video read
cap = cv2.VideoCapture(video_file)
while cap.isOpened():
ret, frame = cap.read()
if ret:
img_out, show_image_bird_view, show_color_warp = process_video(frame, show_birdview=True)
add_image = np.vstack((show_image_bird_view, show_color_warp))
img_out = np.hstack((img_out, add_image))
out.write(img_out)
cv2.imshow('frame', img_out)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
print("h1: {}\th2: {}".format(count_h1, count_h2))
cap.release()
out.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
test_video()
#
# File: gofALAAM.py
# Author: Alex Stivala
# Created: May 2020
#
"""ALAAM goodness-of-fit by simulating from estimated parameters, and
comparing observed statistics to statistics of simulated outcome vectors,
including statistics not included in the estimated model.
The ALAAM is described in:
G. Daraganova and G. Robins. Autologistic actor attribute models. In
D. Lusher, J. Koskinen, and G. Robins, editors, Exponential Random
Graph Models for Social Networks, chapter 9, pages 102-114. Cambridge
University Press, New York, 2013.
G. Robins, P. Pattison, and P. Elliott. Network models for social
influence processes. Psychometrika, 66(2):161-189, 2001.
"""
import math
import numpy as np # used for matrix & vector data types and functions
from Graph import Graph,NA_VALUE,int_or_na
from changeStatisticsALAAM import *
from simulateALAAM import simulateALAAM
from computeObservedStatistics import computeObservedStatistics
from basicALAAMsampler import basicALAAMsampler
def gof(G, Aobs, changestats_func_list, theta, numSamples = 1000,
sampler_func = basicALAAMsampler, Ainitial = None):
"""
ALAAM goodness-of-fit by simulating from estimated parameters, and
comparing observed statistics to statistics of simulated outcome vectors,
including statistics not included in the estimated model.
Parameters:
G - Graph object of observed network
Aobs - vector of 0/1 observed outcome variables for ALAAM
changestats_func_list-list of change statistics functions
theta - corresponding vector of estimated theta values
(0 for those not included in estimated model)
numSamples - number of simulations, default 1000
sampler_func - ALAAM sampler function with signature
(G, A, changestats_func_list, theta, performMove,
sampler_m); see basicALAAMsampler.py
default basicALAAMsampler
Ainitial - vector of 0/1 outcome variables to initialize
the outcome vector to before simulation process,
rather than starting from all 0 or random.
Default None, for random initialization.
Return value:
vector of t-ratios
"""
n = len(changestats_func_list)
assert len(theta) == n
iterationInStep = 1000 # number of MCMC steps between each sample
burnIn = 10000 # number of iterations to discard at start
print('Gof numSamples =', numSamples, 'iterationInStep =', iterationInStep, 'burnIn = ', burnIn)
# Calculate observed statistics by summing change stats for each 1 variable
Zobs = computeObservedStatistics(G, Aobs, changestats_func_list)
# Compute simulated outcome vector statistics from MCMC
sim_results = simulateALAAM(G, changestats_func_list, theta,
numSamples, iterationInStep, burnIn,
sampler_func, Ainitial)
# simulateALAAM() returns a list of tuples (simvec, stats, acceptance_rate, t)
# convert to matrix where each row is sample, each column is statistic
Zmatrix = np.stack([r[1] for r in sim_results])
assert(np.shape(Zmatrix) == (numSamples, n))
Zmean = np.mean(Zmatrix, axis=0)
Zsd = np.std(Zmatrix, axis=0)
print('Zmatrix = ',Zmatrix) #XXX
print('obs stats =', Zobs)
print('mean stats =', Zmean)
print('sd stats =', Zsd)
# compute t-statistics
tratio = (Zmean - Zobs) / Zsd
return tratio
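# A minimal usage sketch (hypothetical data; the change statistics named
# here are assumed to come from changeStatisticsALAAM):
#
#   changestats = [changeDensity, changeActivity, changeContagion]
#   theta = np.array([-1.2, 0.05, 0.7])  # illustrative estimates only
#   tratios = gof(G, Aobs, changestats, theta, numSamples=100)
#
# Small t-ratios indicate that the statistics of the simulated outcome
# vectors reproduce the observed statistics.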
#!/usr/bin/env python
""" Abstract class representing a HIAS AI OpenVINO Model.
Represents a HIAS AI OpenVINO Model. HIAS AI OpenVINO
Models are used by AI Agents to process incoming data.
MIT License
Copyright (c) 2021 Asociación de Investigacion en Inteligencia Artificial
Para la Leucemia Peter Moss
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files(the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Contributors:
- Adam Milton-Barker
"""
import cv2
import os
import time
import numpy as np
from modules.AbstractOpenVINO import AbstractOpenVINO
class model_openvino(AbstractOpenVINO):
""" Class representing a HIAS AI OpenVINO Model.
This object represents a HIAS AI OpenVINO Model.
HIAS AI OpenVINO Models are used by AI Agents
to process incoming data.
"""
def load(self):
""" Loads the model """
mxml = self.helpers.confs["rpi4"]["ir"]
mbin = os.path.splitext(mxml)[0] + ".bin"
self.net = cv2.dnn.readNet(mxml, mbin)
self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)
self.helpers.logger.info("OpenVINO loaded.")
def setBlob(self, frame):
""" Gets a blob from the color frame """
blob = cv2.dnn.blobFromImage(
frame, self.helpers.confs["rpi4"]["inScaleFactor"],
size=(self.imsize, self.imsize),
mean=(self.helpers.confs["rpi4"]["meanVal"],
self.helpers.confs["rpi4"]["meanVal"],
self.helpers.confs["rpi4"]["meanVal"]),
swapRB=True, crop=False)
self.net.setInput(blob)
def forwardPass(self):
""" Gets a blob from the color frame """
out = self.net.forward()
return out
def predict(self):
""" Gets a prediction for an image. """
predictions = self.forwardPass()
predictions = predictions[0]
idx = np.argsort(predictions)[::-1][0]
prediction = self.helpers.confs["data"]["labels"][idx]
return prediction
def test(self):
""" Test mode
Loops through the test directory and classifies the images.
"""
files = 0
tp = 0
fp = 0
tn = 0
fn = 0
totaltime = 0
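# Ground truth is encoded in the test file names: "_1." marks a
# positive (ALL) sample and "_0." a negative sample.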
for testFile in os.listdir(self.testing_dir):
if os.path.splitext(testFile)[1] in self.valid:
files += 1
fileName = self.testing_dir + "/" + testFile
img = cv2.imread(fileName)
self.helpers.logger.info(
"Loaded test image " + fileName)
self.setBlob(self.resize(img))
start = time.time()
prediction = self.predict()
end = time.time()
benchmark = end - start
totaltime += benchmark
msg = ""
if prediction == 1 and "_1." in testFile:
tp += 1
msg = "Acute Lymphoblastic Leukemia correctly detected (True Positive) in " \
+ str(benchmark) + " seconds."
elif prediction == 1 and "_0." in testFile:
fp += 1
msg = "Acute Lymphoblastic Leukemia incorrectly detected (False Positive) in " \
+ str(benchmark) + " seconds."
elif prediction == 0 and "_0." in testFile:
tn += 1
msg = "Acute Lymphoblastic Leukemia correctly not detected (True Negative) in " \
+ str(benchmark) + " seconds."
elif prediction == 0 and "_1." in testFile:
fn += 1
msg = "Acute Lymphoblastic Leukemia incorrectly not detected (False Negative) in " \
+ str(benchmark) + " seconds."
self.helpers.logger.info(msg)
self.helpers.logger.info(
"Images Classifier: " + str(files))
self.helpers.logger.info(
"True Positives: " + str(tp))
self.helpers.logger.info(
"False Positives: " + str(fp))
self.helpers.logger.info(
"True Negatives: " + str(tn))
self.helpers.logger.info(
"False Negatives: " + str(fn))
self.helpers.logger.info(
"Total Time Taken: " + str(totaltime))
def test_http(self):
""" HTTP test mode
Loops through the test directory and classifies the images
by sending data to the classifier using HTTP requests.
"""
totaltime = 0
files = 0
tp = 0
fp = 0
tn = 0
fn = 0
self.addr = "http://" + self.helpers.get_ip_addr() + \
':'+str(self.helpers.credentials["server"]["port"]) + '/Inference'
self.headers = {'content-type': 'image/jpeg'}
for testFile in os.listdir(self.testing_dir):
if os.path.splitext(testFile)[1] in self.valid:
start = time.time()
prediction = self.http_request(
self.testing_dir + "/" + testFile)
end = time.time()
benchmark = end - start
totaltime += benchmark
msg = ""
status = ""
outcome = ""
if prediction["Diagnosis"] == "Positive" and "_1." in testFile:
tp += 1
status = "correctly"
outcome = "(True Positive)"
elif prediction["Diagnosis"] == "Positive" and "_0." in testFile:
fp += 1
status = "incorrectly"
outcome = "(False Positive)"
elif prediction["Diagnosis"] == "Negative" and "_0." in testFile:
tn += 1
status = "correctly"
outcome = "(True Negative)"
elif prediction["Diagnosis"] == "Negative" and "_1." in testFile:
fn += 1
status = "incorrectly"
outcome = "(False Negative)"
files += 1
self.helpers.logger.info(
"Acute Lymphoblastic Leukemia " + status +
" detected " + outcome + " in " + str(benchmark) + " seconds.")
self.helpers.logger.info(
"Images Classified: " + str(files))
self.helpers.logger.info(
"True Positives: " + str(tp))
self.helpers.logger.info(
"False Positives: " + str(fp))
self.helpers.logger.info(
"True Negatives: " + str(tn))
self.helpers.logger.info(
"False Negatives: " + str(fn))
self.helpers.logger.info(
"Total Time Taken: " + str(totaltime))
def resize(self, img):
""" Reshapes an image. """
img = cv2.resize(img, (self.helpers.confs["data"]["dim"],
self.helpers.confs["data"]["dim"]))
return img
@testset "test BasisMatrices.lookup" begin
table1 = [1.0, 4.0]
table2 = [1.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, 4.0]
x = [0.5, 1.0, 1.5, 4.0, 5.5]
x2 = [0.5, 2.0]
@test BasisMatrices.lookup(table1, x, 0) == [0, 1, 1, 2, 2]
@test BasisMatrices.lookup(table1, x, 1) == [1, 1, 1, 2, 2]
@test BasisMatrices.lookup(table1, x, 2) == [0, 1, 1, 1, 1]
@test BasisMatrices.lookup(table1, x, 3) == [1, 1, 1, 1, 1]
@test BasisMatrices.lookup(table2, x, 0) == [0, 3, 3, 12, 12]
@test BasisMatrices.lookup(table2, x, 1) == [3, 3, 3, 12, 12]
@test BasisMatrices.lookup(table2, x, 2) == [0, 3, 3, 8, 8]
@test BasisMatrices.lookup(table2, x, 3) == [3, 3, 3, 8, 8]
@test BasisMatrices.lookup([1.0], x2, 0) == [0, 1]
@test BasisMatrices.lookup([1.0], x2, 1) == [1, 1]
@test BasisMatrices.lookup([1.0], x2, 2) == [0, 1]
@test BasisMatrices.lookup([1.0], x2, 3) == [1, 1]
# test scalar version of lookup
x2 = collect(range(-2.0, stop=4.0, length=10))
@test [BasisMatrices.lookup(x2, -3.0, i) for i=0:3] == [0, 1, 0, 1]
@test [BasisMatrices.lookup(x2, 5.0, i) for i=0:3] == [10, 10, 9, 9]
@test [BasisMatrices.lookup(x2, i, 0) for i=x2] == collect(0:length(x2)-1)
@test [BasisMatrices.lookup(x2, i, 1) for i=x2] == [1; 1:length(x2)-1]
end
@testset "test BasisMatrices._check_order" begin
# check (::Int, ::Int) method
@test BasisMatrices._check_order(10, 0) == fill(0, 1, 10)
@test BasisMatrices._check_order(1, 0) == fill(0, 1, 1)
# check (::Int, ::Vector) method
ov = [0, 0]
@test BasisMatrices._check_order(2, ov) == reshape(ov, 1, 2)
@test_throws DimensionMismatch BasisMatrices._check_order(3, ov)
# check (::Int, ::Matrix) method
om = [0 0]
@test BasisMatrices._check_order(2, om) == om
@test BasisMatrices._check_order(1, om) == om'
@test_throws DimensionMismatch BasisMatrices._check_order(3, ov)
end
@testset "test BasisMatrices.ckronx" begin
# will test by constructing an interpoland, then evaluating at the nodes
# and verifying that we get back our original function
basis = Basis(Basis(Spline(), 13, -1.0, 1.0, 3),
Basis(Spline(), 18, -5.0, 3.0, 3))
X, x12 = nodes(basis);
# make up a function and evaluate at the nodes
f(x1, x2) = cos.(x1) ./ exp.(x2)
f(X::Matrix) = f(X[:, 1], X[:, 2])
y = f(X)
# fit the interpoland in Tensor form (tensor b/c using x12)
c, bs = funfitxy(basis, x12, y);
# verify that we are actually interpolating -- all heavy lifting in funeval
# is done by ckronx so this is effectively testing that we wrote that
# function properly
@test maximum(abs, funeval(c, bs, [0 0]) - y) <= 1e-13
end
@testset "test row_kron" begin
h = ["a" "b"; "c" "d"]
z = ["1" "2" "3"; "4" "5" "6"]
want = ["a1" "a2" "a3" "b1" "b2" "b3"; "c4" "c5" "c6" "d4" "d5" "d6"]
@test row_kron(h, z) == want
# now test on some bigger matrices
a = randn(400, 3)
b = randn(400, 5)
out = row_kron(a, b)
@test size(out) == (400, 15)
rows_good = true
for row=1:400
rows_good &= out[row, :] == kron(a[row, :], b[row, :])
end
@test rows_good == true
for i in 1:100
A = SparseArrays.sprandn(30, 5, 0.3)
B = SparseArrays.sprandn(30, 3, 0.3)
want = row_kron(A, B)
@test maximum(abs, want - row_kron(Array(A), B)) == 0
@test maximum(abs, want - row_kron(A, Array(B))) == 0
@test maximum(abs, want - row_kron(Array(A), Array(B))) == 0
end
end
@testset "RowKron" begin
# throws when trying to pass a non-matrix
@test_throws MethodError RowKron((rand(2, 2), "foo"))
rk = RowKron(Matrix(1.0I, 3, 3), Matrix(1.0I, 3, 3), Matrix(1.0I, 3, 100))
@test size(rk, 1) == 3
@test size(rk, 2) == 900
@test size(rk) == (3, 900)
@test BasisMatrices.sizes(rk, 1) == [3, 3, 3]
@test BasisMatrices.sizes(rk, 2) == [3, 3, 100]
for i in 3:10
@test BasisMatrices.sizes(rk, i) == [1, 1, 1]
end
bs = AbstractMatrix[SparseArrays.sprandn(5, rand(5:13), 0.8) for _ in 1:3]
rk_last_sparse = RowKron(bs...)
big_last_sparse = reduce(row_kron, bs)
push!(bs, Matrix(1.0I, 5, 5))
rk_last_full = RowKron(bs...)
big_last_full = reduce(row_kron, bs)
for (rk, _big) in [(rk_last_sparse, big_last_sparse), (rk_last_full, big_last_full)]
c = rand(size(rk, 2))
c2 = rand(size(rk, 1))
# non-mutating
@test rk * c == _big * c
@test rk * [c c] == _big * [c c]
# mutating
mul!(c2, rk, c)
@test c2 == _big*c
# mutating matrix
c2mat = [c2 c2]
mul!(c2mat, rk, [c c])
@test c2mat == _big * [c c]
## transpose op
# reuse c and c2 for this...
mul!(c, Transpose(rk), c2)
@test c == _big'*c2
# vector and matrix versions
@test *(Transpose(rk), c2) == c
@test *(Transpose(rk), [c2 c2]) == [c c]
# mutating matrix
c1mat = [c c]
mul!(c1mat, Transpose(rk), [c2 c2])
@test c1mat == _big'*[c2 c2]
end
end
@testset "cdprodx" begin
for nrow in 3:3:100
for nb in 2:5
b = [SparseArrays.sprand(nrow, rand(5:13), 0.3) for _ in 1:nb]
c = rand(prod([size(A, 2) for A in b]))
full_b = reduce(row_kron, b)
want = full_b * c
have = BasisMatrices.cdprodx(b, c)
@test maximum(abs, want - have) < 1e-12
# test RowKron object
rk = RowKron(b...)
@test maximum(abs, want - rk*c) < 1e-12
# now test transpose
c2 = rand(size(rk, 1))
want2 = full_b'c2
@test maximum(abs, want2 - *(Transpose(rk), c2)) < 1e-12
end
end
x = rand(10)
@test BasisMatrices.cdprodx(Matrix(1.0I, 10, 10), x) == x
end
@testset "nodeunif" begin
X, x12 = BasisMatrices.nodeunif([5, 5], [0, 1], [3, 3])
@test x12[1] == range(0, stop=3, length=5)
@test x12[2] == range(1, stop=3, length=5)
@test size(X) == (25, 2)
@test X[:, 1] == repeat(x12[1], 5)
@test X[:, 2] == repeat(x12[2], inner=[5])
x, x1 = BasisMatrices.nodeunif(5, 0, 3)
@test x == range(0, stop=3, length=5)
@test x1 == range(0, stop=3, length=5)
end
Load LFindLoad.
From lfind Require Import LFind.
Unset Printing Notations.
Set Printing Implicit.
From QuickChick Require Import QuickChick.
Inductive natural : Type := Succ : natural -> natural | Zero : natural.
Derive Show for natural. Derive Arbitrary for natural. Instance Dec_Eq_natural : Dec_Eq natural. Proof. dec_eq. Qed.
Inductive lst : Type := Cons : natural -> lst -> lst | Nil : lst.
Inductive tree : Type := Node : natural -> tree -> tree -> tree | Leaf : tree.
Inductive Pair : Type := mkpair : natural -> natural -> Pair
with Zlst : Type := zcons : Pair -> Zlst -> Zlst | znil : Zlst.
Fixpoint append (append_arg0 : lst) (append_arg1 : lst) : lst
:= match append_arg0, append_arg1 with
| Nil, x => x
| Cons x y, z => Cons x (append y z)
end.
Fixpoint rev (rev_arg0 : lst) : lst
:= match rev_arg0 with
| Nil => Nil
| Cons x y => append (rev y) (Cons x Nil)
end.
Theorem append_Nil: forall (l: lst), append l Nil = l.
Proof.
induction l.
{ simpl. f_equal. assumption. }
{ simpl. reflexivity. }
Qed.
Theorem append_assoc:
forall (l1 l2 l3: lst), append l1 (append l2 l3) = append (append l1 l2) l3.
Proof.
induction l1; induction l2; induction l3; try (simpl; reflexivity).
- simpl. rewrite <- IHl1. f_equal.
- simpl. rewrite 2 append_Nil. reflexivity.
- simpl. rewrite append_Nil. reflexivity.
- simpl. rewrite 2 append_Nil. reflexivity.
Qed.
Theorem append_rev_Cons:
forall (l1 l2: lst) (x: natural),
rev (append l1 (Cons x l2)) = append (rev l2) (Cons x (rev l1)).
Proof.
induction l1; induction l2; try (simpl; reflexivity).
{ intro. simpl. rewrite IHl1. simpl. rewrite <- append_assoc.
f_equal. }
{ intro. simpl. rewrite IHl1. simpl. reflexivity. }
Qed.
Theorem rev_append: forall (l1 l2: lst), rev (append l1 l2) = append (rev l2) (rev l1).
Proof.
induction l1.
{ induction l2.
{ simpl. lfind. Admitted.
import numpy as np
import scipy.sparse as ss
import logging
import time
import warnings
from .feature_selection import get_significant_genes
from .feature_selection import calculate_minmax
warnings.simplefilter("ignore")
logging.basicConfig(format='%(process)d - %(levelname)s : %(asctime)s - %(message)s', level=logging.DEBUG)
logger = logging.getLogger(__name__)
def run_CDR_analysis(data, phenotype, capvar = 0.95, pernum = 2000, thres = 0.05):
"""Main CDR-g analysis function
The key step in CDR-g is an SVD-decomposition on gene co-expression matrices.
Depending on the sequencing platform, this SVD step can produce thousands of
factor loadings. By default, CDR-g selects number of factor loadings which
captures 95% of variance in the dataset.
Args:
data (anndata): anndata object of interest
phenotype (str): condition of interest
capvar (float, optional): specifies the number of factor loadings to examine. Defaults to 0.95.
pernum (int, optional): number of permutations to determine importance score. Defaults to 2000.
thres (float, optional): cut-off for permutation importance to select genes. Defaults to 0.05.
"""
start = time.time()
cell_num = data.X.shape[0]
gene_num = data.X.shape[1]
logger.info('processing dataset of %s genes X %s cells', gene_num, cell_num)
logger.info('target class label:: %s', phenotype)
logger.info("SVD and threshold selection")
res = pvalgenerator(data, phenotype, capvar)
logger.info("completed SVD and varimax")
logger.info("permutation testing for gene sets:: perms:: %s threshold :: %s", pernum, thres)
npheno= data.uns["n_pheno"]
#get_significant_genes_perms(data, npheno, permnum = pernum, thres = thres)
get_significant_genes(data, npheno, permnum = pernum, thres = thres)
logger.info("computed thresholds for gene selection")
end = time.time()
timediff = end - start
numfact = data.uns["selected_loading"]
logger.info('N factor loadings:: %s', numfact)
logger.info('wall clock time in seconds:: %s', timediff)
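# A minimal usage sketch (assumes an AnnData object with cells as rows
# and a condition column in .obs; names are illustrative only):
#
#   import scanpy as sc
#   adata = sc.read_h5ad("dataset.h5ad")
#   run_CDR_analysis(adata, "condition")
#   adata.uns["Fs"]       # varimax-rotated factor loadings
#   adata.uns["Fs_diff"]  # min-max loading differences across conditions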
def dask_ver(matrixlist, capvar):
"""provides svd and concatenation with dask"""
import dask.array as da
from dask_ml.decomposition import TruncatedSVD
if ss.issparse(matrixlist[0]):
list_of_mats_as_dask_arrays = [da.from_array(np.array(d.todense())) for d in matrixlist]
else:
list_of_mats_as_dask_arrays = [da.from_array(d) for d in matrixlist]
list_of_corr_mats = [da.corrcoef(d) for d in list_of_mats_as_dask_arrays]
X = da.concatenate(list_of_corr_mats, axis=1)
X[da.isnan(X)] = 0.0
_, y, Ek, Ss = get_optimal_threshold(X, capvar)
#Ek = svd.components_
#Ss = svd.singular_values_
return Ek, Ss, X, y
def process_svd_to_factors(Ek, Ss, N_k):
"""function for rotation and flips"""
Ek = Ek.T
ind = np.argsort(Ss)[::-1]
Ss = Ss[ind]
Ek = Ek[:, ind]
Lk = Ss**2 # singular values to eigenvalues
Fk = (Lk[:N_k]**0.5)*Ek[:,:N_k] # factor loadings
# Varimax rotation of the factor loadings
ROT = classic_orthomax(Fk, gamma=1) # find the rotation (gamma=1 implies the CLASSIC varimax)
Fs = np.dot(Fk,ROT) # rotated factor loadings
Ls = np.diag(ROT.T@np.diag(Lk[:N_k])@ROT) # rotated eigenvalues
ind = np.argsort(Ls)[::-1]
Ls = Ls[ind]
Fs = Fs[:, ind]
Fs = flip_Ek(Fs)
return Fs, Ls, Fk, Lk
### aux functions for matrix extraction
def get_numbers_of_pheno(ad, pheno):
"""return list of nums"""
vals = ad.obs[pheno].value_counts().tolist()
return vals
def get_bools_of_pheno(ad, pheno):
"""return list of booleans"""
phenotypes = ad.obs[pheno].unique()
bool_list = [ad.obs[pheno] == i for i in phenotypes]
return bool_list
def extract_matrix_from_anndata(ad, pheno_column):
ind = get_bools_of_pheno(ad, pheno_column)
rands = [ad[i,:].X.T for i in ind]
return rands, len(rands)
#### functions for generating pvals and integrating whole varimax
def _full_Fs(ad, pheno, capvar):
matlist, numpheno = extract_matrix_from_anndata(ad, pheno)
Ee, Ss, _, N = dask_ver(matlist, capvar) # specify algorithm
Fs, Ls, Fk, Lk = process_svd_to_factors(Ee, Ss, N)
ad.uns["selected_loading"] = N
ad.uns["Fs"] = Fs
ad.uns["Ls"] = Ls
ad.uns["Fk"] = Fk
ad.uns["Lk"] = Lk
ad.uns["n_pheno"] = numpheno
Fs_diff = calculate_minmax(Fs, numpheno)
return Fs_diff
def pvalgenerator(ad, pheno, capvar):
Fs_diff = _full_Fs(ad, pheno, capvar)
ad.uns["Fs_diff"] = Fs_diff
return Fs_diff
# leos' aux functions
def classic_orthomax(Phi, gamma = 1, q = 20, tol = 1e-6):
"""Returns the orthomax rotation"""
from numpy import eye, asarray, dot, sum, diag
from numpy.linalg import svd
p,k = Phi.shape
R = eye(k)
d=0
for i in range(q):
d_old = d
Lambda = dot(Phi, R)
u,s,vh = svd(dot(Phi.T,asarray(Lambda)**3 - (gamma/p) * dot(Lambda, diag(diag(dot(Lambda.T,Lambda))))))
R = dot(u,vh)
d = sum(s)
if d_old!=0 and d/d_old < 1 + tol: break
return R
def flip_Ek(Ek):
"""That functions guaranties that the eigenvectors will "point up".
"""
n, m = Ek.shape
e_k_to_flip = abs(Ek.min(axis=0)) > Ek.max(axis=0)
flip = np.ones(m)
flip[e_k_to_flip] *= -1
Ek *= flip
return Ek
### aux functions for detecting factors.
def get_optimal_threshold(num, thres, ncomp = 2000):
"""
selects number of factors for truncated SVD
"""
from dask_ml.decomposition import TruncatedSVD
import dask.array as da
nrows = num.shape[0] # this shows num cells and is required for svd
numgenes = num.shape[1] # this is to make sure if less 2000
if numgenes < ncomp:
ncomp = numgenes - 1
print(ncomp)
numm = num.rechunk((nrows, 10))
svd = TruncatedSVD(n_components=ncomp, n_iter=5, random_state=42)
svd.fit(numm)
x = np.cumsum(svd.explained_variance_ratio_)
y = np.argmax(x>thres)
if y == 0:
y = ncomp
X = svd.components_[0:y]
v = svd.singular_values_[0:y]
return x, y, X, v
#!/usr/bin/env python3
import asyncio
import logging
import tempfile
import argparse
import sys
import numpy as np
from aiocron import crontab
from pyppeteer import launch
from PIL import Image
# Import the waveshare folder (containing the waveshare display drivers) without refactoring it to a module
# find the lastest waveshare drivers here:
# https://github.com/waveshare/e-Paper/blob/master/RaspberryPi%26JetsonNano/python/lib/waveshare_epd/
sys.path.insert(0, './waveshare')
import epd7in5b
# Global config
display_width = 480 # Width of the display
display_height = 800 # Height of the display
is_portrait = False # True if the display should be used in portrait orientation (make sure to adjust the width and height accordingly)
wait_to_load = 60 # Page load timeout
wait_after_load = 30 # Time to evaluate the JS afte the page load (f.e. to lazy-load the calendar data)
url = 'http://192.168.0.108:8088/' # URL to create the screenshot of
black_threshold = 35 # if pixels have less than this much color (ie, are quite dim), make them black
def reset_screen():
global display_width
global display_height
epd = epd7in5b.EPD()
epd.init()
epd.Clear()
epd.sleep()
async def create_screenshot(file_path):
global display_width
global display_height
global wait_to_load
global wait_after_load
global url
global is_portrait
logging.debug('Creating screenshot')
browser = await launch(headless=True, args=['--no-sandbox', '--disable-setuid-sandbox', '--headless', '--disable-gpu', '--disable-dev-shm-usage'], executablePath='/usr/bin/chromium-browser')
page = await browser.newPage()
if is_portrait:
await page.setViewport({"width": display_width,"height": display_height})
else:
await page.setViewport({"width": display_height,"height": display_width})
await page.goto(url, timeout=wait_to_load * 1000)
await page.waitFor(wait_after_load * 1000)
await page.screenshot({'path': file_path})
await browser.close()
logging.debug('Finished creating screenshot')
def get_images(image):
global black_threshold
red = (255,000,000)
black = (000,000,000)
white = (255,255,255)
img = image.convert('RGB')
data = np.array(img)
data_w = data.copy()
data_r = data.copy()
# If the value of the pixel is less than black_threshold, make it black
black_mask = (data[:,:,0] <= black_threshold) & (data[:,:,1] <= black_threshold) & (data[:,:,2] <= black_threshold)
# If the R value is higher than the G or B value, make red (assumes simple coloring)
red_mask = np.bitwise_or(data[:,:,0] > data[:,:,1], data[:,:,0] > data[:,:,2])
# Everything else should be white
white_mask = np.bitwise_not(np.bitwise_or(red_mask, black_mask))
#make most things 'black' (use inverted colors)
data_w[black_mask] = white
data_r[black_mask] = white
#make 'white' mask only for non-red lettering
data_w[white_mask] = black
data_w[red_mask] = white
#make 'red' mask only for red lettering
data_r[red_mask] = red
data_r[white_mask] = white
return Image.fromarray(data_w, mode='RGB'), Image.fromarray(data_r, mode='RGB')
def main():
try:
parser = argparse.ArgumentParser(description='Python EInk MagicMirror')
parser.add_argument('-d', '--debug', action='store_true', dest='debug',
help='Enable debug logs.', default=False)
parser.add_argument('-c', '--cron', action='store', dest='cron',
help='Sets a schedule using cron syntax')
parser.add_argument('-r', '--reset', action='store_true', dest='reset',
help='Ignore all other settings and just reset the screen.', default=False)
args = parser.parse_args()
level = logging.DEBUG if args.debug else logging.INFO
logging.basicConfig(level=level, format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
#save an image with the correct dimensions as 'yo.png' and put it in your project directory
image = Image.open("yo.png")
image_w, image_r = get_images(image)
# save black/white + black/red versions so you can tell what is going to make up the image
image_w.save("yo_aliased_w.png")
image_r.save("yo_aliased_r.png")
logging.info('Initializing / waking screen.')
epd = epd7in5b.EPD()
epd.init()
# takes about 18 seconds
logging.info('Sending image to screen.')
epd.display(epd.getbuffer(image_w), epd.getbuffer(image_r))
# go to no-power state
logging.info('Sending display back to sleep.')
epd.sleep()
logging.info('Refresh finished.')
except KeyboardInterrupt:
logging.info('Shutting down after receiving a keyboard interrupt.')
finally:
#logging.info('Resetting screen.')
#reset_screen()
pass
if __name__ == '__main__':
main()
/*
* The MIT License (MIT)
*
* Copyright (c) 2018 Sylko Olzscher
*
*/
#include "test-async-005.h"
#include <iostream>
#include <boost/test/unit_test.hpp>
#include <cyng/async/mux.h>
#include <cyng/io/io_chrono.hpp>
#include <iomanip>
#include <atomic>
#include <fstream>
// unit_test --run_test=ASYNC/async_005
namespace cyng
{
bool test_async_005()
{
//
// thread pool - 4 threads
//
async::scheduler ctx(4);
//
// fill a vector with 100 strands
//
std::vector<dispatcher_t> dispatchers;
dispatchers.reserve(100);
for (std::size_t pos = 0; pos < 100; pos++)
{
dispatchers.emplace_back(ctx.get_io_service());
}
//
// post the same lambda function to all strands
//
std::size_t counter{ 0 };
std::for_each(dispatchers.begin(), dispatchers.end(), [&counter](dispatcher_t& strand) {
++counter;
std::cerr << "post: " << counter << std::endl;
strand.post([counter]() {
std::cerr << "=> thread: " << counter << " / " << std::this_thread::get_id() << std::endl;
std::this_thread::sleep_for(std::chrono::seconds(1));
std::cerr << "<= thread: " << counter << " / " << std::this_thread::get_id() << std::endl;
});
});
//
// The output shows that tasks posted to different strands interleave
// across the pool threads, and that it can take a while before a posted
// lambda function actually starts running.
ctx.stop();
return true;
}
}
from typing import Callable, List, Union
import numpy as np
from numpy import argsort, ceil, exp, mod, zeros
from numpy.random import geometric, rand, randint, randn
from ..search_space import SearchSpace
from ..solution import Solution
from ..utils import dynamic_penalty, handle_box_constraint
__author__ = "Hao Wang"
# TODO: improve efficiency, e.g. compile it with cython
class MIES:
"""Mixed-integer Evolution Strategy"""
def __init__(
self,
search_space: SearchSpace,
obj_func: Callable,
eq_func: Callable = None,
ineq_func: Callable = None,
x0: Union[List, Solution] = None,
ftarget: float = None,
max_eval: float = np.inf,
minimize: bool = True,
elitism: bool = False,
mu_: int = 4,
lambda_: int = 10,
sigma0: float = None,
eta0: float = None,
P0: float = None,
verbose: bool = False,
eval_type: str = "list",
):
# TODO: constructor is too long...
self.mu_ = mu_
self.lambda_ = lambda_
self.eval_count = 0
self.iter_count = 0
self.minimize = minimize
self.obj_func = obj_func
self.eq_func = eq_func
self.ineq_func = ineq_func
self.stop_dict = {}
self.verbose = verbose
self.max_eval = max_eval
self.ftarget = ftarget
self.elitism = elitism
self._penalty_func = dynamic_penalty
self._eval_type = eval_type
self._space = search_space
self.var_names = self._space.var_name
self.param_type = self._space.var_type
if self._eval_type == "list":
self._to_pheno = lambda x: x
elif self._eval_type == "dict":
self._to_pheno = lambda x: x.to_dict(space=self._space)
        # index of each type of variable in the dataframe
self.id_r = self._space.real_id # index of continuous variable
self.id_i = self._space.integer_id # index of integer variable
self.id_d = self._space.categorical_id # index of categorical variable
        # the number of variables of each type
self.N_r = len(self.id_r)
self.N_i = len(self.id_i)
self.N_d = len(self.id_d)
self.dim = self.N_r + self.N_i + self.N_d
# by default, we use individual step sizes for continuous and
# integer variables and global strength for the nominal variables
        self.N_p = min(self.N_d, 1)
# total length of the solution vector
self._len = self.dim + self.N_r + self.N_i + self.N_p
# unpack interval bounds
self.bounds_r = np.asarray([self._space.bounds[_] for _ in self.id_r])
self.bounds_i = np.asarray([self._space.bounds[_] for _ in self.id_i])
# NOTE: bounds might be ragged
self.bounds_d = [self._space.bounds[_] for _ in self.id_d]
self._check_bounds(self.bounds_r)
self._check_bounds(self.bounds_i)
        # set default step-sizes/mutation strengths
par_name = []
if sigma0 is None and self.N_r:
sigma0 = 0.05 * (self.bounds_r[:, 1] - self.bounds_r[:, 0])
par_name += ["sigma" + str(_) for _ in range(self.N_r)]
if eta0 is None and self.N_i:
eta0 = 0.05 * (self.bounds_i[:, 1] - self.bounds_i[:, 0])
par_name += ["eta" + str(_) for _ in range(self.N_i)]
if P0 is None and self.N_d:
P0 = 1.0 / self.N_d
par_name += ["P" + str(_) for _ in range(self.N_p)]
# column indices: used for slicing
self._id_var = np.arange(self.dim)
self._id_sigma = np.arange(self.N_r) + len(self._id_var)
self._id_eta = np.arange(self.N_i) + len(self._id_var) + len(self._id_sigma)
self._id_p = (
np.arange(self.N_p) + len(self._id_var) + len(self._id_sigma) + len(self._id_eta)
)
self._id_hyperpar = np.arange(self.dim, self._len)
# initialize the populations
if x0 is not None: # given x0
par = []
if self.N_r:
par += [sigma0]
if self.N_i:
par += [eta0]
if self.N_p:
par += [P0] * self.N_p
self.pop = Solution(
np.tile(np.r_[x0, par], (self.mu_, 1)),
var_name=self.var_names + par_name,
verbose=self.verbose,
)
fitness0 = self.evaluate(self.pop[0])
self.fitness = np.repeat(fitness0, self.mu_)
self.xopt = x0
self.fopt = sum(fitness0)
else: # uniform sampling
x = np.asarray(self._space.sample(self.mu_), dtype="object")
par = []
if self.N_r:
par += [np.tile(sigma0, (self.mu_, 1))]
if self.N_i:
par += [np.tile(eta0, (self.mu_, 1))]
if self.N_p:
par += [np.tile([P0] * self.N_p, (self.mu_, 1))]
par = np.concatenate(par, axis=1)
x = np.c_[x, par].tolist()
self.pop = Solution(x, var_name=self.var_names + par_name, verbose=self.verbose)
self.fitness = self.evaluate(self.pop)
self.fopt = min(self.fitness) if self.minimize else max(self.fitness)
_ = np.nonzero(self.fopt == self.fitness)[0][0]
self.xopt = self.pop[_, self._id_var]
self.offspring = self.pop[0] * self.lambda_
self.f_offspring = np.repeat(self.fitness[0], self.lambda_)
self._set_hyperparameter()
# stopping criteria
self.tolfun = 1e-5
self.nbin = int(3 + ceil(30.0 * self.dim / self.lambda_))
self.histfunval = zeros(self.nbin)
def _check_bounds(self, bounds):
if len(bounds) == 0:
return
if any(bounds[:, 0] >= bounds[:, 1]):
raise ValueError("lower bounds must be smaller than upper bounds")
def _set_hyperparameter(self):
# hyperparameters: mutation strength adaptation
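        # These are the standard self-adaptation learning rates from the ES
        # literature: a global rate tau = 1 / sqrt(2 * n) plus a coordinate-wise
        # rate tau' = 1 / sqrt(2 * sqrt(n)) for each variable type.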
if self.N_r:
self.tau_r = 1 / np.sqrt(2 * self.N_r)
self.tau_p_r = 1 / np.sqrt(2 * np.sqrt(self.N_r))
if self.N_i:
self.tau_i = 1 / np.sqrt(2 * self.N_i)
self.tau_p_i = 1 / np.sqrt(2 * np.sqrt(self.N_i))
if self.N_d:
self.tau_d = 1 / np.sqrt(2 * self.N_d)
self.tau_p_d = 1 / np.sqrt(2 * np.sqrt(self.N_d))
def recombine(self, id1, id2):
p1 = self.pop[id1].copy() # IMPORTANT: make a copy
if id1 != id2:
p2 = self.pop[id2]
# intermediate recombination for the mutation strengths
p1[self._id_hyperpar] = (
np.array(p1[self._id_hyperpar]) + np.array(p2[self._id_hyperpar])
) / 2
# dominant recombination for solution parameters
(_,) = np.nonzero(randn(self.dim) > 0.5)
p1[_] = p2[_]
return p1
def select(self):
pop = self.pop + self.offspring if self.elitism else self.offspring
fitness = np.r_[self.fitness, self.f_offspring] if self.elitism else self.f_offspring
rank = argsort(fitness)
if not self.minimize:
rank = rank[::-1]
_ = rank[: self.mu_]
self.pop = pop[_]
self.fitness = fitness[_]
def evaluate(self, pop, return_penalized=True):
X = self._to_pheno(pop[:, self._id_var])
if len(pop.shape) == 1: # one solution
X = [X]
pop.fitness = np.array(list(map(self.obj_func, X))).ravel()
self.eval_count += pop.N
_penalized_fitness = (
self._penalty_func(
X, self.iter_count + 1, self.eq_func, self.ineq_func, minimize=self.minimize
)
+ pop.fitness
)
return _penalized_fitness if return_penalized else pop.fitness
def mutate(self, individual):
if self.N_r:
self._mutate_r(individual)
if self.N_i:
self._mutate_i(individual)
if self.N_d:
self._mutate_d(individual)
return individual
def _mutate_r(self, individual):
sigma = np.asarray(individual[self._id_sigma], dtype="float")
# mutate step-sizes
if len(self._id_sigma) == 1:
sigma = sigma * exp(self.tau_r * randn())
else:
sigma = sigma * exp(self.tau_r * randn() + self.tau_p_r * randn(self.N_r))
# Gaussian mutation
R = randn(self.N_r)
x = np.asarray(individual[self.id_r], dtype="float")
x_ = x + sigma * R
# Interval Bounds Treatment
x_ = handle_box_constraint(x_, self.bounds_r[:, 0], self.bounds_r[:, 1])
# rounding if a coarser numerical precision is provided
x_ = self._space[self._space.real_id].round(x_).ravel()
# NOTE: experimental correction to the step-size when the box constraints are violated
        # the constraint handling method will (by chance) turn bad candidates (generated
        # by large step-sizes) into good ones, which confuses the self-adaptation of step-sizes.
        if True:  # experimental correction enabled (see NOTE above)
individual[self._id_sigma] = np.abs((x_ - x) / R)
else:
individual[self._id_sigma] = sigma
individual[self.id_r] = x_
def _mutate_i(self, individual):
eta = np.asarray(individual[self._id_eta].tolist(), dtype="float")
x = np.asarray(individual[self.id_i], dtype="int")
if len(self._id_eta) == 1:
eta = eta * exp(self.tau_i * randn())
else:
eta = eta * exp(self.tau_i * randn() + self.tau_p_i * randn(self.N_i))
eta[eta > 1] = 1
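        # Rudolph-style integer mutation: the step is the difference of two
        # geometric variables, with p chosen so the expected step size matches
        # the mutation strength eta.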
p = 1 - (eta / self.N_i) / (1 + np.sqrt(1 + (eta / self.N_i) ** 2.0))
x_ = x + geometric(p) - geometric(p)
# Interval Bounds Treatment
x_ = np.asarray(
handle_box_constraint(x_, self.bounds_i[:, 0], self.bounds_i[:, 1]), dtype="int"
)
individual[self.id_i] = x_
individual[self._id_eta] = eta
def _mutate_d(self, individual):
P = np.asarray(individual[self._id_p], dtype="float")
# Unbiased mutation on the mutation probability
P = 1.0 / (1.0 + (1.0 - P) / P * exp(-self.tau_d * randn()))
individual[self._id_p] = handle_box_constraint(P, 1.0 / (3.0 * self.N_d), 0.5)
(idx,) = np.nonzero(rand(self.N_d) < P)
# TODO: this can be accelerated
for i in idx:
levels = self.bounds_d[i]
individual[self.id_d[i]] = levels[randint(0, len(levels))]
def stop(self):
if self.eval_count > self.max_eval:
self.stop_dict["max_eval"] = True
if self.eval_count != 0 and self.iter_count != 0:
fitness = self.f_offspring
# sigma = np.atleast_2d([__[self._id_sigma] for __ in self.pop])
# sigma_mean = np.mean(sigma, axis=0)
# tolerance on fitness in history
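            # histfunval is a ring buffer holding the best fitness of the last
            # `nbin` generations; when its spread drops below tolfun, the
            # search is considered converged.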
self.histfunval[int(mod(self.eval_count / self.lambda_ - 1, self.nbin))] = fitness[0]
if (
mod(self.eval_count / self.lambda_, self.nbin) == 0
and (max(self.histfunval) - min(self.histfunval)) < self.tolfun
):
self.stop_dict["tolfun"] = True
# flat fitness within the population
if fitness[0] == fitness[int(min(ceil(0.1 + self.lambda_ / 4.0), self.mu_ - 1))]:
self.stop_dict["flatfitness"] = True
# TODO: implement more stop criteria
# if any(sigma_mean < 1e-10) or any(sigma_mean > 1e10):
# self.stop_dict['sigma'] = True
# if cond(self.C) > 1e14:
# if is_stop_on_warning:
# self.stop_dict['conditioncov'] = True
# else:
# self.flg_warning = True
# # TolUPX
# if any(sigma*sqrt(diagC)) > self.tolupx:
# if is_stop_on_warning:
# self.stop_dict['TolUPX'] = True
# else:
# self.flg_warning = True
return any(self.stop_dict.values())
def _better(self, f1, f2):
return f1 < f2 if self.minimize else f1 > f2
# TODO: optimize -> run
def optimize(self):
while not self.stop():
# TODO: vectorize this part
for i in range(self.lambda_):
p1, p2 = randint(0, self.mu_), randint(0, self.mu_)
individual = self.recombine(p1, p2)
self.offspring[i] = self.mutate(individual)
# NOTE: `self.fitness` and `self.f_offspring` are penalized function values
self.f_offspring[:] = self.evaluate(self.offspring)
self.select()
curr_best = self.pop[0]
xopt_, fopt_ = curr_best[self._id_var], self.fitness[0]
if self._better(fopt_, self.fopt):
self.xopt, self.fopt = xopt_, fopt_
self.iter_count += 1
if self.verbose:
print("iteration {}, fopt: {}".format(self.iter_count + 1, self.fopt))
print(self.xopt)
self.stop_dict["funcalls"] = self.eval_count
return self.xopt.tolist(), self.fopt, self.stop_dict
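# Minimal usage sketch (hypothetical, not part of the original module):
# assuming `space` is a SearchSpace and `f` an objective callable,
#     opt = MIES(space, f, max_eval=1000, minimize=True)
#     xopt, fopt, stop_dict = opt.optimize()
# `optimize` returns the best decision vector (as a list), its penalized
# fitness, and the dictionary of triggered stopping criteria.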
|
{"hexsha": "5b47355b857ec0c865c5569289af8bdee20cc68c", "size": 13254, "ext": "py", "lang": "Python", "max_stars_repo_path": "bayes_optim/acquisition_optim/mies.py", "max_stars_repo_name": "zdanial/Bayesian-Optimization", "max_stars_repo_head_hexsha": "a4779e992da15d21fa3fc425293cfb1f2621f81f", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 29, "max_stars_repo_stars_event_min_datetime": "2020-03-18T14:48:05.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T13:58:20.000Z", "max_issues_repo_path": "bayes_optim/acquisition_optim/mies.py", "max_issues_repo_name": "zdanial/Bayesian-Optimization", "max_issues_repo_head_hexsha": "a4779e992da15d21fa3fc425293cfb1f2621f81f", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-10-21T13:10:55.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-23T16:36:43.000Z", "max_forks_repo_path": "bayes_optim/acquisition_optim/mies.py", "max_forks_repo_name": "zdanial/Bayesian-Optimization", "max_forks_repo_head_hexsha": "a4779e992da15d21fa3fc425293cfb1f2621f81f", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-10-14T20:29:02.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-05T01:39:08.000Z", "avg_line_length": 37.5467422096, "max_line_length": 98, "alphanum_fraction": 0.5623208088, "include": true, "reason": "import numpy,from numpy", "num_tokens": 3455}
|
import face_recognition
from torch.utils.data import Dataset
from facenet_pytorch.models.mtcnn import MTCNN
from PIL import Image
import cv2
from typing import List
from collections import OrderedDict
from abc import ABC, abstractmethod
import os
import numpy as np
from retinaface.pre_trained_models import get_model
import time
from preprocessing.retinaface.detect import FaceDetector
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"
cv2.ocl.setUseOpenCL(False)
cv2.setNumThreads(0)
class VideoFaceDetector(ABC):
def __init__(self, **kwargs) -> None:
super().__init__()
@property
@abstractmethod
def _batch_size(self) -> int:
pass
@abstractmethod
def _detect_faces(self, frames) -> List:
pass
class FacenetDetector(VideoFaceDetector):
def __init__(self, detector="MTCNN", device="cuda:0") -> None:
super().__init__()
self.detector_type = detector
if detector == "MTCNN":
self.detector = MTCNN(margin=0, thresholds=[
0.85, 0.95, 0.95], device=device)
if detector == "face_recognition":
self.detector = face_recognition
if detector == "retinaface":
self.detector = FaceDetector(
network="mobile0.25", weights="./weights/retinaface/mobilenet0.25_Final.pth")
def _detect_faces(self, frames) -> List:
batch_boxes = None
if self.detector_type == "MTCNN":
batch_boxes, *_ = self.detector.detect(frames, landmarks=False)
if self.detector_type == "face_recognition":
results = []
for frame in frames:
batch_box = self.detector.face_locations(np.array(frame))
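                # face_recognition returns boxes as (top, right, bottom, left);
                # reorder to (xmin, ymin, xmax, ymax) to match the other detectors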
ymin, xmax, ymax, xmin = [b for b in batch_box[0]]
batch_box[0] = (xmin, ymin, xmax, ymax)
results.append(batch_box)
batch_boxes = np.stack(results, axis=0)
if self.detector_type == "retinaface":
results = []
for frame in frames:
start = time.time()
annotations, _, _ = self.detector.detect(
np.array(frame, dtype=np.float32), landmarks=True)
batch_box = annotations["bbox"]
results.append(np.array(batch_box))
try:
# batch_boxes = np.stack(results, axis=0)
batch_boxes = np.array(results)
print(batch_boxes.shape)
except Exception as e:
print(e)
[print(result.shape) for result in results]
exit()
return [b.tolist() if b is not None else None for b in batch_boxes]
    @property
def _batch_size(self):
return 32
class VideoDataset(Dataset):
def __init__(self, videos) -> None:
super().__init__()
self.videos = videos
def __getitem__(self, index: int):
video = self.videos[index]
capture = cv2.VideoCapture(video)
frames_num = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
frames = OrderedDict()
for i in range(frames_num):
capture.grab()
success, frame = capture.retrieve()
if not success:
continue
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = Image.fromarray(frame)
frame = frame.resize(size=[s // 2 for s in frame.size])
frames[i] = frame
return video, list(frames.keys()), list(frames.values())
def __len__(self) -> int:
return len(self.videos)
|
{"hexsha": "8ab7f1ff677b4be796a741940d7c087a44b0e0ed", "size": 3652, "ext": "py", "lang": "Python", "max_stars_repo_path": "preprocessing/face_detector.py", "max_stars_repo_name": "windysavage/dfdc_deepfake_challenge", "max_stars_repo_head_hexsha": "d10b54cf933282366157a031954b046d87d57009", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "preprocessing/face_detector.py", "max_issues_repo_name": "windysavage/dfdc_deepfake_challenge", "max_issues_repo_head_hexsha": "d10b54cf933282366157a031954b046d87d57009", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "preprocessing/face_detector.py", "max_forks_repo_name": "windysavage/dfdc_deepfake_challenge", "max_forks_repo_head_hexsha": "d10b54cf933282366157a031954b046d87d57009", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.4827586207, "max_line_length": 93, "alphanum_fraction": 0.5985761227, "include": true, "reason": "import numpy", "num_tokens": 823}
|
// Copyright Louis Dionne 2013-2016
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
#include <boost/hana/assert.hpp>
#include <boost/hana/core/tag_of.hpp>
#include <boost/hana/ext/std/integral_constant.hpp>
#include <boost/hana/integral_constant.hpp>
#include <boost/hana/tuple.hpp>
#include <laws/base.hpp>
#include <laws/comparable.hpp>
#include <laws/constant.hpp>
#include <laws/euclidean_ring.hpp>
#include <laws/group.hpp>
#include <laws/hashable.hpp>
#include <laws/logical.hpp>
#include <laws/monoid.hpp>
#include <laws/orderable.hpp>
#include <laws/ring.hpp>
#include <type_traits>
using namespace boost::hana;
struct inherit_simple : std::integral_constant<int, 3> { };
struct inherit_no_default : std::integral_constant<int, 3> {
inherit_no_default() = delete;
};
struct incomplete;
struct empty_type { };
struct non_pod { virtual ~non_pod() { } };
int main() {
auto ints = make<tuple_tag>(
std::integral_constant<int, -10>{},
std::integral_constant<int, -2>{},
std::integral_constant<int, 0>{},
std::integral_constant<int, 1>{},
std::integral_constant<int, 3>{}
);
(void)ints;
#if BOOST_HANA_TEST_PART == 1
//////////////////////////////////////////////////////////////////////////
// Make sure the tag is detected properly
//////////////////////////////////////////////////////////////////////////
{
static_assert(std::is_same<
tag_of_t<inherit_simple>,
ext::std::integral_constant_tag<int>
>{}, "");
static_assert(std::is_same<
tag_of_t<inherit_no_default>,
ext::std::integral_constant_tag<int>
>{}, "");
static_assert(std::is_same<
tag_of_t<std::is_pointer<int*>>,
ext::std::integral_constant_tag<bool>
>{}, "");
static_assert(!std::is_same<
tag_of_t<incomplete>,
ext::std::integral_constant_tag<int>
>{}, "");
static_assert(!std::is_same<
tag_of_t<empty_type>,
ext::std::integral_constant_tag<int>
>{}, "");
static_assert(!std::is_same<
tag_of_t<non_pod>,
ext::std::integral_constant_tag<int>
>{}, "");
static_assert(!std::is_same<
tag_of_t<void>,
ext::std::integral_constant_tag<int>
>{}, "");
}
//////////////////////////////////////////////////////////////////////////
// Interoperation with hana::integral_constant
//////////////////////////////////////////////////////////////////////////
{
BOOST_HANA_CONSTANT_CHECK(std::integral_constant<int, 1>{} == int_c<1>);
BOOST_HANA_CONSTANT_CHECK(std::integral_constant<int, 1>{} == long_c<1>);
BOOST_HANA_CONSTANT_CHECK(std::integral_constant<int, 2>{} != int_c<3>);
}
#elif BOOST_HANA_TEST_PART == 2
//////////////////////////////////////////////////////////////////////////
// Constant
//////////////////////////////////////////////////////////////////////////
{
// value
static_assert(value(std::integral_constant<int, 0>{}) == 0, "");
static_assert(value(std::integral_constant<int, 1>{}) == 1, "");
static_assert(value(std::integral_constant<int, 3>{}) == 3, "");
// laws
test::TestConstant<ext::std::integral_constant_tag<int>>{ints, tuple_t<int, long, long long>};
}
#elif BOOST_HANA_TEST_PART == 3
//////////////////////////////////////////////////////////////////////////
// Monoid, Group, Ring, EuclideanRing
//////////////////////////////////////////////////////////////////////////
{
test::TestMonoid<ext::std::integral_constant_tag<int>>{ints};
test::TestGroup<ext::std::integral_constant_tag<int>>{ints};
test::TestRing<ext::std::integral_constant_tag<int>>{ints};
test::TestEuclideanRing<ext::std::integral_constant_tag<int>>{ints};
}
#elif BOOST_HANA_TEST_PART == 4
//////////////////////////////////////////////////////////////////////////
// Logical
//////////////////////////////////////////////////////////////////////////
{
auto t = test::ct_eq<3>{};
auto e = test::ct_eq<4>{};
// eval_if
{
BOOST_HANA_CONSTANT_CHECK(equal(
eval_if(std::true_type{}, always(t), always(e)),
t
));
BOOST_HANA_CONSTANT_CHECK(equal(
eval_if(std::false_type{}, always(t), always(e)),
e
));
}
// not_
{
BOOST_HANA_CONSTANT_CHECK(equal(
not_(std::true_type{}),
std::false_type{}
));
BOOST_HANA_CONSTANT_CHECK(equal(
not_(std::false_type{}),
std::true_type{}
));
}
auto ints = make<tuple_tag>(
std::integral_constant<int, -2>{},
std::integral_constant<int, 0>{},
std::integral_constant<int, 1>{},
std::integral_constant<int, 3>{}
);
auto bools = make<tuple_tag>(std::true_type{}, std::false_type{});
// laws
test::TestLogical<ext::std::integral_constant_tag<int>>{ints};
test::TestLogical<ext::std::integral_constant_tag<bool>>{bools};
}
#elif BOOST_HANA_TEST_PART == 5
//////////////////////////////////////////////////////////////////////////
// Comparable and Hashable
//////////////////////////////////////////////////////////////////////////
test::TestComparable<ext::std::integral_constant_tag<int>>{ints};
test::TestHashable<ext::std::integral_constant_tag<void>>{ints};
#elif BOOST_HANA_TEST_PART == 6
//////////////////////////////////////////////////////////////////////////
// Orderable
//////////////////////////////////////////////////////////////////////////
test::TestOrderable<ext::std::integral_constant_tag<int>>{ints};
#endif
}
|
{"hexsha": "5d9624412d03adac260aca847b96a908bcd41d23", "size": 6087, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "3rdParty/boost/1.62.0/libs/hana/test/ext/std/integral_constant.cpp", "max_stars_repo_name": "sita1999/arangodb", "max_stars_repo_head_hexsha": "6a4f462fa209010cd064f99e63d85ce1d432c500", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 18.0, "max_stars_repo_stars_event_min_datetime": "2016-03-04T15:44:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-31T11:06:25.000Z", "max_issues_repo_path": "3rdParty/boost/1.62.0/libs/hana/test/ext/std/integral_constant.cpp", "max_issues_repo_name": "lipper/arangodb", "max_issues_repo_head_hexsha": "66ea1fd4946668192e3f0d1060f0844f324ad7b8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 49.0, "max_issues_repo_issues_event_min_datetime": "2016-02-29T17:59:52.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-05T04:59:26.000Z", "max_forks_repo_path": "3rdParty/boost/1.62.0/libs/hana/test/ext/std/integral_constant.cpp", "max_forks_repo_name": "lipper/arangodb", "max_forks_repo_head_hexsha": "66ea1fd4946668192e3f0d1060f0844f324ad7b8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2015-11-02T09:37:09.000Z", "max_forks_repo_forks_event_max_datetime": "2017-05-05T06:38:49.000Z", "avg_line_length": 34.3898305085, "max_line_length": 102, "alphanum_fraction": 0.4875965172, "num_tokens": 1261}
|
# Copyright (c) 2020 DeNA Co., Ltd.
# Licensed under The MIT License [see LICENSE for details]
# agent classes
import random
import numpy as np
from .util import softmax, get_action_code, get_random_action
class RandomAgent:
def reset(self, env, show=False):
pass
def action(self, env, player, show=False):
obs = env.observation(player)
legal_actions = env.legal_actions(player)
action = get_random_action(obs=obs, legal_actions=legal_actions)
return action
def observe(self, env, player, show=False):
return [0.0]
class RuleBasedAgent(RandomAgent):
def action(self, env, player, show=False):
if hasattr(env, 'rule_based_action'):
return env.rule_based_action(player)
else:
return random.choice(env.legal_actions(player))
def print_outputs(env, prob, v):
if hasattr(env, 'print_outputs'):
env.print_outputs(prob, v)
else:
print('v = %f' % v)
print('p = %s' % (prob * 1000).astype(int))
class Agent:
def __init__(self, model, observation=False, temperature=0.0):
# model might be a neural net, or some planning algorithm such as game tree search
self.model = model
self.hidden = None
self.observation = observation
self.temperature = temperature
def reset(self, env, show=False):
self.hidden = self.model.init_hidden()
def plan(self, obs):
outputs = self.model.inference(obs, self.hidden)
self.hidden = outputs.pop('hidden', None)
return outputs
def action(self, env, player, show=False):
obs = env.observation(player)
outputs = self.plan(obs=obs)
legal_actions = env.legal_actions(player)
# p = outputs['policy']
v = outputs.get('value', None)
# mask = np.ones_like(p)
# mask[actions] = 0
# p = p - mask * 1e32
# if self.temperature == 0:
        # # choose the highest-probability action
# ap_list = sorted([(a, p[a]) for a in legal_actions], key=lambda x: -x[1])
# return ap_list[0][0]
# else:
# return random.choices(np.arange(len(p)), weights=softmax(p / self.temperature))[0]
_, p, action = get_action_code(
obs=obs, policy=outputs['policy'], legal_actions=legal_actions, temperature=self.temperature, mode="e"
)
if show:
print_outputs(env, softmax(p), v)
return action
def observe(self, env, player, show=False):
v = None
if self.observation:
outputs = self.plan(env.observation(player))
v = outputs.get('value', None)
if show:
print_outputs(env, None, v)
return v if v is not None else [0.0]
class EnsembleAgent(Agent):
def reset(self, env, show=False):
self.hidden = [model.init_hidden() for model in self.model]
    def plan(self, obs):
        outputs = {}
        for i, model in enumerate(self.model):
            o = model.inference(obs, self.hidden[i])
            for k, v in o.items():  # inference returns a dict
                if k == 'hidden':
                    self.hidden[i] = v
                else:
                    outputs[k] = outputs.get(k, []) + [v]
        # average each output (e.g. policy, value) over the ensemble
        for k, vl in outputs.items():
            outputs[k] = np.mean(vl, axis=0)
        return outputs
class SoftAgent(Agent):
def __init__(self, model, observation=False):
super().__init__(model, observation=observation, temperature=1.0)
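# Hypothetical sketch, not part of the original module: inferred from
# `reset`/`plan` above, a model usable with `Agent` only needs `init_hidden()`
# and `inference(obs, hidden)`, where `inference` returns a dict containing at
# least 'policy' and, optionally, 'value' and 'hidden'. The toy stand-in below
# plays uniformly over `n_actions`.
class _UniformModel:
    def __init__(self, n_actions):
        self.n_actions = n_actions
    def init_hidden(self):
        return None  # stateless toy model
    def inference(self, obs, hidden):
        # flat logits give a uniform policy; value/hidden are neutral defaults
        return {'policy': np.zeros(self.n_actions), 'value': 0.0, 'hidden': None}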
|
{"hexsha": "0728cb8c10fcf93b5a73c48167f6c549ecc06573", "size": 3505, "ext": "py", "lang": "Python", "max_stars_repo_path": "HandyRL/handyrl/agent.py", "max_stars_repo_name": "Fkaneko/kaggle_lux_ai", "max_stars_repo_head_hexsha": "cc2e7ad88b8817cb96e081061e363e357831e132", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-13T02:31:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-13T02:31:56.000Z", "max_issues_repo_path": "HandyRL/handyrl/agent.py", "max_issues_repo_name": "Fkaneko/kaggle_lux_ai", "max_issues_repo_head_hexsha": "cc2e7ad88b8817cb96e081061e363e357831e132", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "HandyRL/handyrl/agent.py", "max_forks_repo_name": "Fkaneko/kaggle_lux_ai", "max_forks_repo_head_hexsha": "cc2e7ad88b8817cb96e081061e363e357831e132", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-13T02:32:06.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-13T02:32:06.000Z", "avg_line_length": 30.7456140351, "max_line_length": 114, "alphanum_fraction": 0.5911554922, "include": true, "reason": "import numpy", "num_tokens": 829}
|
# -*- coding: utf-8 -*-
"""
run file for neural walker
@author: hongyuan
"""
import pickle
import time
import numpy
import theano
from theano import sandbox
import theano.tensor as tensor
import os
import scipy.io
from collections import defaultdict
from theano.tensor.shared_randomstreams import RandomStreams
import modules.utils as utils
import modules.models as models
import modules.optimizers as optimizers
import modules.trainers as trainers
import modules.data_processers as data_processers
import run_model
dtype=theano.config.floatX
file_save = './test.l.dim100.results.pkl'
with open(file_save, 'rb') as f:
results = pickle.load(f)
success_results = [
result for result in results if result['success'] == True
]
print "# of samples and success samples are : ", (
len(results), len(success_results)
)
print "success rate is : ", round(1.0*len(success_results) / len(results), 4)
list_idx_tosee = [
0, 5, 10, 15, 20
]
for idx_tosee in list_idx_tosee:
result = success_results[idx_tosee]
print " "
print "the reference path is : "
print result['path_ref']
print "the generated path is : "
print result['path_gen']
print "the destination is : "
print result['pos_destination']
print "the current position is : "
print result['pos_current']
print " "
|
{"hexsha": "21428b40f271f5639df03ea68f63cd0cfb89a526", "size": 1327, "ext": "py", "lang": "Python", "max_stars_repo_path": "check_results.py", "max_stars_repo_name": "jam-world/hongyuan_code", "max_stars_repo_head_hexsha": "8bfb6872fdb4cbe6742d6df4ec5942fb2d9fc87f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "check_results.py", "max_issues_repo_name": "jam-world/hongyuan_code", "max_issues_repo_head_hexsha": "8bfb6872fdb4cbe6742d6df4ec5942fb2d9fc87f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "check_results.py", "max_forks_repo_name": "jam-world/hongyuan_code", "max_forks_repo_head_hexsha": "8bfb6872fdb4cbe6742d6df4ec5942fb2d9fc87f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.8793103448, "max_line_length": 77, "alphanum_fraction": 0.724189902, "include": true, "reason": "import numpy,import scipy,import theano,from theano", "num_tokens": 326}
|
[STATEMENT]
lemma Diff_triv_mset: "M \<inter># N = {#} \<Longrightarrow> M - N = M"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. M \<inter># N = {#} \<Longrightarrow> M - N = M
[PROOF STEP]
by (metis diff_intersect_left_idem diff_zero)
|
{"llama_tokens": 103, "file": "Nested_Multisets_Ordinals_Multiset_More", "length": 1}
|
import numpy as np
import pandas as pd
pd.options.mode.chained_assignment = None
from src.utility.config import Config, Option
from pipeline import SentimentAnalyzer
evaluate_exp_name = "exp-p1-2.1"
evaluate_fe_option = "bert"
evaluate_clf_option = "bert"
config = Config(evaluate_exp_name)
option = Option(evaluate_fe_option, evaluate_clf_option)
pipeline = SentimentAnalyzer(config, option)
pipeline.build()
pipeline.load()
test = pd.read_csv("data/test.csv")
samples_text = test['review']
labels = test['sentiment'].map({'neutral': 0, 'negative': 2, 'positive': 1})
res, pred = pipeline.evaluate(samples_text, labels)
human_label_pred = pipeline.loader.reverse_labels(pred)
result = pd.DataFrame({
"text" : test['review'],
"label": human_label_pred
})
result.to_csv("prediction.csv", index=False)
|
{"hexsha": "ffc99a7d04f36d824d883e7fcde16de44ef3f7ff", "size": 811, "ext": "py", "lang": "Python", "max_stars_repo_path": "evaluate.py", "max_stars_repo_name": "William9923/IF4072-SentimentClassification", "max_stars_repo_head_hexsha": "5e22a6da418056955243c310bab0382e4683b781", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "evaluate.py", "max_issues_repo_name": "William9923/IF4072-SentimentClassification", "max_issues_repo_head_hexsha": "5e22a6da418056955243c310bab0382e4683b781", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "evaluate.py", "max_forks_repo_name": "William9923/IF4072-SentimentClassification", "max_forks_repo_head_hexsha": "5e22a6da418056955243c310bab0382e4683b781", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.037037037, "max_line_length": 75, "alphanum_fraction": 0.7632552404, "include": true, "reason": "import numpy", "num_tokens": 195}
|
# -*- coding: utf-8 -*-
"""
Created on Sun May 24 11:05:18 2020
@author: Nicolai
"""
import sys
sys.path.append("../../testbed/pde0A/")
import CiPde0A as pde0A
sys.path.append("../../testbed/pde0B/")
import CiPde0B as pde0B
sys.path.append("../../testbed/pde1/")
import CiPde1 as pde1
sys.path.append("../../testbed/pde2/")
import CiPde2 as pde2
sys.path.append("../../testbed/pde3/")
import CiPde3 as pde3
sys.path.append("../../testbed/pde4/")
import CiPde4 as pde4
sys.path.append("../../testbed/pde5/")
import CiPde5 as pde5
sys.path.append("../../testbed/pde6/")
import CiPde6 as pde6
sys.path.append("../../testbed/pde7/")
import CiPde7 as pde7
sys.path.append("../../testbed/pde8/")
import CiPde8 as pde8
sys.path.append("../../testbed/pde9/")
import CiPde9 as pde9
sys.path.append("../../opt_algo")
import OptAlgoMemeticJADE as oaMemJade
sys.path.append("../../kernels")
import KernelGauss as gk
import numpy as np
sys.path.append("../../post_proc/")
import post_proc as pp
if __name__ == "__main__":
    # experiment parameters
replications = 20
max_fe = 1*10**4
min_err = 0
gkernel = gk.KernelGauss()
# collocation points for 0A and 0B
nc2 = []
omega = np.arange(-1.6, 2.0, 0.4)
for x0 in omega:
for x1 in omega:
nc2.append((x0, x1))
# boundary points for 0A and 0B
nb2 = []
nby = np.hstack((-2*np.ones(10), np.arange(-2.0, 2.0, 0.4), 2*np.ones(10), np.arange(2.0, -2.0, -0.4)))
nbx = np.hstack((np.arange(-2.0, 2.0, 0.4), 2*np.ones(10), np.arange(2.0, -2.0, -0.4), -2*np.ones(10)))
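    # nbx/nby trace the perimeter of the square [-2, 2]^2 once around,
    # 10 points per edge, yielding the 40 boundary points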
for i in range(len(nby)):
nb2.append((nbx[i], nby[i]))
# collocation points
nc1 = []
omega = np.arange(0.1, 1.0, 0.1)
for x0 in omega:
for x1 in omega:
nc1.append((x0, x1))
# boundary points
nb1 = []
nby = np.hstack((np.zeros(10), np.arange(0.0, 1.0, 0.1), np.ones(10), np.arange(1.0, 0.0, -0.1)))
nbx = np.hstack((np.arange(0.0, 1.0, 0.1), np.ones(10), np.arange(1.0, 0.0, -0.1), np.zeros(10)))
for i in range(40):
nb1.append((nbx[i], nby[i]))
    # warm-up run to remove the first-run outlier from the memory measurement
initialPop = np.random.randn(40,20)
mJade = oaMemJade.OptAlgoMemeticJADE(initialPop, max_fe, min_err)
remove_outlier = pde0A.CiPde0A(mJade, gkernel, nb2, nc2)
remove_outlier.solve()
##########################
# testbed problem 0A #
##########################
cipde0A = []
for i in range(replications):
initialPop = np.random.randn(40,20)
mJade = oaMemJade.OptAlgoMemeticJADE(initialPop, max_fe, min_err)
cipde0A.append(pde0A.CiPde0A(mJade, gkernel, nb2, nc2))
for i in range(replications):
cipde0A[i].solve()
pp.saveExpObject(cipde0A[i], "D:/Nicolai/MA_Data/experiment0/cipde0a_rep_" + str(i) + ".json")
print("D:/Nicolai/MA_Data/experiment0/cipde0a_rep_" + str(i) + ".json" + " -> saved")
##########################
# testbed problem 0B #
##########################
cipde0B = []
for i in range(replications):
initialPop = np.random.randn(40,20)
mJade = oaMemJade.OptAlgoMemeticJADE(initialPop, max_fe, min_err)
cipde0B.append(pde0B.CiPde0B(mJade, gkernel, nb2, nc2))
for i in range(replications):
cipde0B[i].solve()
pp.saveExpObject(cipde0B[i], "D:/Nicolai/MA_Data/experiment0/cipde0b_rep_" + str(i) + ".json")
print("D:/Nicolai/MA_Data/experiment0/cipde0b_rep_" + str(i) + ".json" + " -> saved")
##########################
# testbed problem 1 #
##########################
cipde1 = []
for i in range(replications):
initialPop = np.random.randn(40,20)
mJade = oaMemJade.OptAlgoMemeticJADE(initialPop, max_fe, min_err)
cipde1.append(pde1.CiPde1(mJade, gkernel, nb1, nc1))
for i in range(replications):
cipde1[i].solve()
pp.saveExpObject(cipde1[i], "D:/Nicolai/MA_Data/experiment0/cipde1_rep_" + str(i) + ".json")
print("D:/Nicolai/MA_Data/experiment0/cipde1_rep_" + str(i) + ".json" + " -> saved")
##########################
# testbed problem 2 #
##########################
cipde2 = []
for i in range(replications):
initialPop = np.random.randn(40,20)
mJade = oaMemJade.OptAlgoMemeticJADE(initialPop, max_fe, min_err)
cipde2.append(pde2.CiPde2(mJade, gkernel, nb1, nc1))
for i in range(replications):
cipde2[i].solve()
pp.saveExpObject(cipde2[i], "D:/Nicolai/MA_Data/experiment0/cipde2_rep_" + str(i) + ".json")
print("D:/Nicolai/MA_Data/experiment0/cipde2_rep_" + str(i) + ".json" + " -> saved")
##########################
# testbed problem 3 #
##########################
cipde3 = []
for i in range(replications):
initialPop = np.random.randn(40,20)
mJade = oaMemJade.OptAlgoMemeticJADE(initialPop, max_fe, min_err)
cipde3.append(pde3.CiPde3(mJade, gkernel, nb1, nc1))
for i in range(replications):
cipde3[i].solve()
pp.saveExpObject(cipde3[i], "D:/Nicolai/MA_Data/experiment0/cipde3_rep_" + str(i) + ".json")
print("D:/Nicolai/MA_Data/experiment0/cipde3_rep_" + str(i) + ".json" + " -> saved")
##########################
# testbed problem 4 #
##########################
cipde4 = []
for i in range(replications):
initialPop = np.random.randn(40,20)
mJade = oaMemJade.OptAlgoMemeticJADE(initialPop, max_fe, min_err)
cipde4.append(pde4.CiPde4(mJade, gkernel, nb1, nc1))
for i in range(replications):
cipde4[i].solve()
pp.saveExpObject(cipde4[i], "D:/Nicolai/MA_Data/experiment0/cipde4_rep_" + str(i) + ".json")
print("D:/Nicolai/MA_Data/experiment0/cipde4_rep_" + str(i) + ".json" + " -> saved")
##########################
# testbed problem 5 #
##########################
cipde5 = []
for i in range(replications):
initialPop = np.random.randn(40,20)
mJade = oaMemJade.OptAlgoMemeticJADE(initialPop, max_fe, min_err)
cipde5.append(pde5.CiPde5(mJade, gkernel, nb1, nc1))
for i in range(replications):
cipde5[i].solve()
pp.saveExpObject(cipde5[i], "D:/Nicolai/MA_Data/experiment0/cipde5_rep_" + str(i) + ".json")
print("D:/Nicolai/MA_Data/experiment0/cipde5_rep_" + str(i) + ".json" + " -> saved")
##########################
# testbed problem 6 #
##########################
cipde6 = []
for i in range(replications):
initialPop = np.random.randn(40,20)
mJade = oaMemJade.OptAlgoMemeticJADE(initialPop, max_fe, min_err)
cipde6.append(pde6.CiPde6(mJade, gkernel, nb1, nc1))
for i in range(replications):
cipde6[i].solve()
pp.saveExpObject(cipde6[i], "D:/Nicolai/MA_Data/experiment0/cipde6_rep_" + str(i) + ".json")
print("D:/Nicolai/MA_Data/experiment0/cipde6_rep_" + str(i) + ".json" + " -> saved")
##########################
# testbed problem 7 #
##########################
cipde7 = []
for i in range(replications):
initialPop = np.random.randn(40,20)
mJade = oaMemJade.OptAlgoMemeticJADE(initialPop, max_fe, min_err)
cipde7.append(pde7.CiPde7(mJade, gkernel, nb1, nc1))
for i in range(replications):
cipde7[i].solve()
pp.saveExpObject(cipde7[i], "D:/Nicolai/MA_Data/experiment0/cipde7_rep_" + str(i) + ".json")
print("D:/Nicolai/MA_Data/experiment0/cipde7_rep_" + str(i) + ".json" + " -> saved")
##########################
# testbed problem 8 #
##########################
cipde8 = []
for i in range(replications):
initialPop = np.random.randn(40,20)
mJade = oaMemJade.OptAlgoMemeticJADE(initialPop, max_fe, min_err)
cipde8.append(pde8.CiPde8(mJade, gkernel, nb1, nc1))
for i in range(replications):
cipde8[i].solve()
pp.saveExpObject(cipde8[i], "D:/Nicolai/MA_Data/experiment0/cipde8_rep_" + str(i) + ".json")
print("D:/Nicolai/MA_Data/experiment0/cipde8_rep_" + str(i) + ".json" + " -> saved")
##########################
# testbed problem 9 #
##########################
cipde9 = []
for i in range(replications):
initialPop = np.random.randn(40,20)
mJade = oaMemJade.OptAlgoMemeticJADE(initialPop, max_fe, min_err)
cipde9.append(pde9.CiPde9(mJade, gkernel, nb1, nc1))
for i in range(replications):
cipde9[i].solve()
pp.saveExpObject(cipde9[i], "D:/Nicolai/MA_Data/experiment0/cipde9_rep_" + str(i) + ".json")
print("D:/Nicolai/MA_Data/experiment0/cipde9_rep_" + str(i) + ".json" + " -> saved")
|
{"hexsha": "7eef58c3d9077143ab5b1ba05979b954596eecab", "size": 8884, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/experiments/experiment_0/experiment_0_time.py", "max_stars_repo_name": "nicolai-schwartze/Masterthesis", "max_stars_repo_head_hexsha": "7857af20c6b233901ab3cedc325bd64704111e16", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-06-13T10:02:02.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-13T10:02:02.000Z", "max_issues_repo_path": "code/experiments/experiment_0/experiment_0_time.py", "max_issues_repo_name": "nicolai-schwartze/Masterthesis", "max_issues_repo_head_hexsha": "7857af20c6b233901ab3cedc325bd64704111e16", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/experiments/experiment_0/experiment_0_time.py", "max_forks_repo_name": "nicolai-schwartze/Masterthesis", "max_forks_repo_head_hexsha": "7857af20c6b233901ab3cedc325bd64704111e16", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.8630705394, "max_line_length": 107, "alphanum_fraction": 0.5642728501, "include": true, "reason": "import numpy", "num_tokens": 2906}
|
// Andrew Naplavkov
#ifndef BARK_DB_SLIPPY_LAYERS_HPP
#define BARK_DB_SLIPPY_LAYERS_HPP
#include <bark/db/provider.hpp>
#include <bark/db/slippy/detail/arcgis.hpp>
#include <bark/db/slippy/detail/bing.hpp>
#include <bark/db/slippy/detail/cartodb.hpp>
#include <bark/db/slippy/detail/double_gis.hpp>
#include <bark/db/slippy/detail/google.hpp>
#include <bark/db/slippy/detail/osm.hpp>
#include <bark/db/slippy/detail/sputnik.hpp>
#include <boost/fusion/algorithm.hpp>
#include <boost/fusion/container.hpp>
#include <boost/lexical_cast.hpp>
#include <stdexcept>
#include <string>
namespace bark::db::slippy {
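// Compile-time list of the supported slippy-map tile providers;
// operator[] performs a linear lookup over the fusion list by qualified name.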
class layers {
public:
layer* operator[](const qualified_name& name)
{
layer* res = nullptr;
boost::fusion::for_each(layers_, [&](auto& lr) {
if (lr.name() == name)
res = &lr;
});
return res ? res
: throw std::out_of_range(
boost::lexical_cast<std::string>(name));
}
std::map<qualified_name, meta::layer_type> dir()
{
std::map<qualified_name, meta::layer_type> res;
boost::fusion::for_each(layers_, [&](auto& lr) {
res.insert({lr.name(), meta::layer_type::Raster});
});
return res;
}
private:
boost::fusion::list<arcgis_imagery,
arcgis_topo_map,
bing_aerials,
bing_maps,
cartodb,
double_gis,
google_hybrid,
google_map,
google_satellite,
osm,
sputnik>
layers_;
};
} // namespace bark::db::slippy
#endif // BARK_DB_SLIPPY_LAYERS_HPP
|
{"hexsha": "8b4190d4c4e7d0d86e53eda262eacaa101d8fe72", "size": 1772, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "db/slippy/detail/layers.hpp", "max_stars_repo_name": "storm-ptr/bark", "max_stars_repo_head_hexsha": "e4cd481183aba72ec6cf996eff3ac144c88b79b6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2019-11-05T10:27:35.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-02T06:25:53.000Z", "max_issues_repo_path": "db/slippy/detail/layers.hpp", "max_issues_repo_name": "storm-ptr/bark", "max_issues_repo_head_hexsha": "e4cd481183aba72ec6cf996eff3ac144c88b79b6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "db/slippy/detail/layers.hpp", "max_forks_repo_name": "storm-ptr/bark", "max_forks_repo_head_hexsha": "e4cd481183aba72ec6cf996eff3ac144c88b79b6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2019-12-01T18:01:02.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-07T08:34:04.000Z", "avg_line_length": 28.126984127, "max_line_length": 65, "alphanum_fraction": 0.565462754, "num_tokens": 414}
|
import io
import zipfile
import matplotlib.pyplot as plt
import networkx as nx
import urllib.request as urllib
from zipfile import ZipFile
import pandas as pd
def football():
print('Loading football network...')
url = "http://websensors.net.br/projects/biased-deep-walk/football.zip"
sock = urllib.urlopen(url) # open URL
s = io.BytesIO(sock.read()) # read into BytesIO "file"
sock.close()
zf = zipfile.ZipFile(s) # zipfile object
txt = zf.read('football.txt').decode() # read info file
gml = zf.read('football.gml').decode() # read gml data
# throw away bogus first line with # from mejn files
gml = gml.split('\n')[1:]
G = nx.parse_gml(gml) # parse gml data
for item in G.nodes(data=True):
G.nodes[item[0]]['label'] = G.nodes[item[0]]['value']+0
num_classes = 12
print('Loading footbal network... OK!')
return G,num_classes
def blogcatalog3():
print('Loading BlogCatalog network...')
url = "http://websensors.net.br/projects/biased-deep-walk/BlogCatalog-dataset.zip"
    # use urlretrieve directly: URLopener is deprecated and the old variable
    # name shadowed the `zipfile` module
    urllib.urlretrieve(url, 'BlogCatalog-dataset.zip')
with ZipFile('BlogCatalog-dataset.zip', 'r') as zipObj:
        # Extract all the contents of the zip file into a separate directory
zipObj.extractall('BlogCatalog-dataset')
df_edges = pd.read_csv('BlogCatalog-dataset/BlogCatalog-dataset/data/edges.csv',header=None)
df_edges.columns = ['source', 'target']
df_edges['source'] = df_edges['source'].astype(str)+':n'
df_edges['target'] = df_edges['target'].astype(str)+':n'
G = nx.Graph()
G = nx.from_pandas_edgelist(df_edges, create_using=G)
df_groups = pd.read_csv('BlogCatalog-dataset/BlogCatalog-dataset/data/group-edges.csv',header=None)
for index,row in df_groups.iterrows():
G.nodes[str(row[0])+':n']['label'] = row[1]
num_classes = 39
return G,num_classes
def ppi_homo_sapiens():
print('Loading PPI-HomoSapiens-MonoLabel network...')
url = "http://websensors.net.br/projects/biased-deep-walk/PPI-HomoSapiens-MonoLabel.zip"
    urllib.urlretrieve(url, 'PPI-HomoSapiens-MonoLabel.zip')
with ZipFile('PPI-HomoSapiens-MonoLabel.zip', 'r') as zipObj:
        # Extract all the contents of the zip file into a separate directory
zipObj.extractall('PPI-HomoSapiens-MonoLabel')
df_edges = pd.read_csv('PPI-HomoSapiens-MonoLabel/edges.csv')
df_edges['source'] = df_edges['source'].astype(str)+':n'
df_edges['target'] = df_edges['target'].astype(str)+':n'
G = nx.Graph()
G = nx.from_pandas_edgelist(df_edges, create_using=G)
df_groups = pd.read_csv('PPI-HomoSapiens-MonoLabel/groups.csv')
for index,row in df_groups.iterrows():
G.nodes[str(row[0])+':n']['label'] = row[1]
num_classes = 50
return G,num_classes
def wikipedia():
print('Loading Wikipedia-Term-PoS network...')
url = "http://websensors.net.br/projects/biased-deep-walk/Wikipedia-Term-PoS.zip"
    urllib.urlretrieve(url, 'Wikipedia-Term-PoS.zip')
with ZipFile('Wikipedia-Term-PoS.zip', 'r') as zipObj:
        # Extract all the contents of the zip file into a separate directory
zipObj.extractall('Wikipedia-Term-PoS')
df_edges = pd.read_csv('Wikipedia-Term-PoS/edges.csv')
df_edges['source'] = df_edges['source'].astype(str)+':n'
df_edges['target'] = df_edges['target'].astype(str)+':n'
G = nx.Graph()
G = nx.from_pandas_edgelist(df_edges, create_using=G)
df_groups = pd.read_csv('Wikipedia-Term-PoS/groups.csv')
for index,row in df_groups.iterrows():
G.nodes[str(row[0])+':n']['label'] = row[1]
num_classes = 36
return G,num_classes
def terrorist_rel():
print('Loading TerroristRel network...')
url = "http://websensors.net.br/projects/biased-deep-walk/TerroristRel.zip"
    urllib.urlretrieve(url, 'TerroristRel.zip')
with ZipFile('TerroristRel.zip', 'r') as zipObj:
        # Extract all the contents of the zip file into a separate directory
zipObj.extractall('TerroristRel')
df_edges = pd.read_csv('TerroristRel/TerroristRel.edges',sep=",",header=None)
df_edges.columns = ['source', 'target']
df_edges['source'] = df_edges['source'].astype(str)+':n'
df_edges['target'] = df_edges['target'].astype(str)+':n'
G = nx.Graph()
G = nx.from_pandas_edgelist(df_edges, create_using=G)
df_groups = pd.read_csv('TerroristRel/TerroristRel.node_labels',sep=",",header=None,usecols=range(2))
for index,row in df_groups.iterrows():
G.nodes[str(row[0])+':n']['label'] = row[1]
num_classes = 2
return G,num_classes
def cora():
print('Loading Cora network...')
url = "http://websensors.net.br/projects/biased-deep-walk/cora.zip"
    urllib.urlretrieve(url, 'cora.zip')
with ZipFile('cora.zip', 'r') as zipObj:
        # Extract all the contents of the zip file into a separate directory
zipObj.extractall('cora')
df_edges = pd.read_csv('cora/cora.edges',sep=",",header=None,usecols=range(2))
df_edges.columns = ['source', 'target']
df_edges['source'] = df_edges['source'].astype(str)+':n'
df_edges['target'] = df_edges['target'].astype(str)+':n'
G = nx.Graph()
G = nx.from_pandas_edgelist(df_edges, create_using=G)
df_groups = pd.read_csv('cora/cora.node_labels',sep=",",header=None,usecols=range(2))
for index,row in df_groups.iterrows():
G.nodes[str(row[0])+':n']['label'] = row[1]
num_classes = 7
return G,num_classes
def gene():
print('Loading Gene network...')
url = "http://websensors.net.br/projects/biased-deep-walk/gene.zip"
    urllib.urlretrieve(url, 'gene.zip')
with ZipFile('gene.zip', 'r') as zipObj:
        # Extract all the contents of the zip file into a separate directory
zipObj.extractall('gene')
df_edges = pd.read_csv('gene/gene.edges',sep=",",header=None,usecols=range(2))
df_edges.columns = ['source', 'target']
df_edges['source'] = df_edges['source'].astype(str)+':n'
df_edges['target'] = df_edges['target'].astype(str)+':n'
G = nx.Graph()
G = nx.from_pandas_edgelist(df_edges, create_using=G)
df_groups = pd.read_csv('gene/gene.node_labels',sep=",",header=None,usecols=range(2))
for index,row in df_groups.iterrows():
G.nodes[str(row[0])+':n']['label'] = row[1]
num_classes = 2
return G,num_classes
def webkb_wisc():
print('Loading WebKB Wisk network...')
url = "http://websensors.net.br/projects/biased-deep-walk/webkb-wisc.zip"
    urllib.urlretrieve(url, 'webkb-wisc.zip')
with ZipFile('webkb-wisc.zip', 'r') as zipObj:
        # Extract all the contents of the zip file into a separate directory
zipObj.extractall('webkb-wisc')
df_edges = pd.read_csv('webkb-wisc/webkb-wisc.edges',sep=" ",header=None,usecols=range(2))
df_edges.columns = ['source', 'target']
df_edges['source'] = df_edges['source'].astype(str)+':n'
df_edges['target'] = df_edges['target'].astype(str)+':n'
G = nx.Graph()
G = nx.from_pandas_edgelist(df_edges, create_using=G)
df_groups = pd.read_csv('webkb-wisc/webkb-wisc.node_labels',sep=" ",header=None,usecols=range(2))
for index,row in df_groups.iterrows():
G.nodes[str(row[0])+':n']['label'] = row[1]
num_classes = 5
return G,num_classes
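# Usage sketch (hypothetical, not part of the original module): every loader
# returns a networkx Graph whose nodes carry a 'label' attribute, plus the
# number of classes, e.g.
#     G, num_classes = football()
#     print(G.number_of_nodes(), num_classes)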
|
{"hexsha": "b34cefa67819d17d2b57a099814143603008c223", "size": 7284, "ext": "py", "lang": "Python", "max_stars_repo_path": "bdw/load_networks.py", "max_stars_repo_name": "rmarcacini/bdw", "max_stars_repo_head_hexsha": "55adc880b5dd4621ed48c94a6e084a90be571bd6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-10-14T02:12:21.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-14T02:12:21.000Z", "max_issues_repo_path": "bdw/load_networks.py", "max_issues_repo_name": "rmarcacini/bdw", "max_issues_repo_head_hexsha": "55adc880b5dd4621ed48c94a6e084a90be571bd6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bdw/load_networks.py", "max_forks_repo_name": "rmarcacini/bdw", "max_forks_repo_head_hexsha": "55adc880b5dd4621ed48c94a6e084a90be571bd6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2618025751, "max_line_length": 103, "alphanum_fraction": 0.6835529929, "include": true, "reason": "import networkx", "num_tokens": 2093}
|
"""
Generate samples with GPT-2 and filter out those that are likely to be
memorized samples from the training set.
"""
import logging
logging.basicConfig(level='ERROR')
import argparse
import numpy as np
from pprint import pprint
import sys
import torch
import zlib
from transformers import AutoTokenizer, AutoModelForCausalLM
from tqdm import tqdm
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def calculatePerplexity(sentence, model, tokenizer):
"""
exp(loss)
"""
input_ids = torch.tensor(tokenizer.encode(sentence)).unsqueeze(0)
input_ids = input_ids.to(device)
with torch.no_grad():
outputs = model(input_ids, labels=input_ids)
loss, logits = outputs[:2]
    return torch.exp(loss).item()  # .item() so the scores can later be packed into numpy arrays
def print_best(metric, samples, name1, scores1, name2=None, scores2=None, n=10):
"""
print the `n` best samples according to the given `metric`
"""
idxs = np.argsort(metric)[::-1][:n]
for i, idx in enumerate(idxs):
if scores2 is not None:
print(f"{i+1}: {name1}={scores1[idx]:.3f}, {name2}={scores2[idx]:.3f}, score={metric[idx]:.3f}")
else:
print(f"{i+1}: {name1}={scores1[idx]:.3f}, , score={metric[idx]:.3f}")
print()
#for line in samples[i].split("\n"):
# print(f"\t {line.rstrip()}")
pprint(samples[i])
print()
print()
def parse_commoncrawl(wet_file):
"""
Quick and ugly parsing of a WET file.
Tested for the May 2021 crawl.
"""
with open(wet_file) as f:
lines = f.readlines()
start_idxs = [i for i in range(len(lines)) if "WARC/1.0" in lines[i]]
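    # each "WARC/1.0" line opens a new WARC record; the slice between two
    # consecutive markers is one record (headers followed by the payload)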
all_eng = ""
count_eng = 0
for i in range(len(start_idxs)-1):
start = start_idxs[i]
end = start_idxs[i+1]
if "WARC-Identified-Content-Language: eng" in lines[start+7]:
count_eng += 1
for j in range(start+10, end):
all_eng += lines[j]
return all_eng
def main():
print(f"using device: {device}")
if args.internet_sampling:
print("Loading common crawl...")
cc = parse_commoncrawl(args.wet_file)
# number of tokens to generate
seq_len = 256
# sample from the top_k tokens output by the model
top_k = 40
print("Loading GPT-Venus...")
tokenizer = AutoTokenizer.from_pretrained('simplabs/GPT-Venus')
tokenizer.padding_side = "left"
tokenizer.pad_token = tokenizer.eos_token
model1 = AutoModelForCausalLM.from_pretrained('simplabs/GPT-Venus', return_dict=True).to(device)
model1.config.pad_token_id = model1.config.eos_token_id
model2 = AutoModelForCausalLM.from_pretrained('simplabs/GPT-Venus', return_dict=True).to(device)
model1.eval()
model2.eval()
samples = []
scores = {"XL": [], "S": [], "Lower": [], "zlib": []}
num_batches = int(np.ceil(args.N / args.batch_size))
with tqdm(total=args.N) as pbar:
for i in range(num_batches):
# encode the prompts
if args.internet_sampling:
# pick a random 10-token prompt in common crawl
input_len = 10
input_ids = []
attention_mask = []
while len(input_ids) < args.batch_size:
# take some random words in common crawl
r = np.random.randint(0, len(cc))
prompt = " ".join(cc[r:r+100].split(" ")[1:-1])
# make sure we get the same number of tokens for each prompt to enable batching
inputs = tokenizer(prompt, return_tensors="pt", max_length=input_len, truncation=True)
if len(inputs['input_ids'][0]) == input_len:
input_ids.append(inputs['input_ids'][0])
attention_mask.append(inputs['attention_mask'][0])
inputs = {'input_ids': torch.stack(input_ids),
'attention_mask': torch.stack(attention_mask)}
# the actual truncated prompts
prompts = tokenizer.batch_decode(inputs['input_ids'], skip_special_tokens=True)
else:
prompts = ["<|endoftext|>"] * args.batch_size
input_len = 1
inputs = tokenizer(prompts, return_tensors="pt", padding=True)
# batch generation
output_sequences = model1.generate(
input_ids=inputs['input_ids'].to(device),
attention_mask=inputs['attention_mask'].to(device),
max_length=input_len + seq_len,
do_sample=True,
top_k=top_k,
top_p=1.0
)
texts = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)
for text in texts:
                # perplexity under model1 ("XL") and model2 ("S")
p1 = calculatePerplexity(text, model1, tokenizer)
p2 = calculatePerplexity(text, model2, tokenizer)
# perplexity on lower-case sample
p_lower = calculatePerplexity(text.lower(), model1, tokenizer)
# Zlib "entropy" of sample
zlib_entropy = len(zlib.compress(bytes(text, 'utf-8')))
samples.append(text)
scores["XL"].append(p1)
scores["S"].append(p2)
scores["Lower"].append(p_lower)
scores["zlib"].append(zlib_entropy)
pbar.update(args.batch_size)
scores["XL"] = np.asarray(scores["XL"])
scores["S"] = np.asarray(scores["S"])
scores["Lower"] = np.asarray(scores["Lower"])
scores["zlib"] = np.asarray(scores["zlib"])
# Sort by perplexity
metric = -np.log(scores["XL"])
print(f"======== top sample by XL perplexity: ========")
print_best(metric, samples, "PPL", scores["XL"])
print()
print()
# Sort by ratio of log perplexities of S and XL models
metric = np.log(scores["S"]) / np.log(scores["XL"])
print(f"======== top sample by ratio of S and XL perplexities: ========")
print_best(metric, samples, "PPL-XL", scores["XL"], "PPL-S", scores["S"])
print()
print()
# Sort by ratio of log perplexities of lower-case and normal-case perplexities
metric = np.log(scores["Lower"]) / np.log(scores["XL"])
print(f"======== top sample by ratio of lower-case and normal-case perplexities: ========")
print_best(metric, samples, "PPL-XL", scores["XL"], "PPL-XL-Lower", scores["Lower"])
print()
print()
# Sort by ratio of Zlib entropy and XL perplexity
metric = scores["zlib"] / np.log(scores["XL"])
print(f"======== top sample by ratio of Zlib entropy and XL perplexity: ========")
print_best(metric, samples, "PPL-XL", scores["XL"], "Zlib", scores["zlib"])
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--N', type=int, default=1000, help="Number of samples to generate")
parser.add_argument('--batch-size', type=int, default=10, help="Batch size for generation")
parser.add_argument('--internet-sampling', action='store_true', help="condition the generation using commoncrawl")
parser.add_argument('--wet-file', type=str, default=None, help="path to a commoncrawl WET file")
return parser.parse_args(argv)
if __name__ == '__main__':
args = parse_arguments(sys.argv[1:])
main()
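# Example invocation (values and WET-file path are placeholders):
#   python3 extraction.py --N 1000 --batch-size 10 --internet-sampling --wet-file commoncrawl.wet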
# --- end of extraction.py (repo: puffy310/LM_Memorization, MIT license) ---
from __future__ import print_function
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.rcParams["font.family"] = "serif"
plt.rcParams["mathtext.fontset"] = "cm"
#plt.rcParams["mathtext.fontset"] = "dejavuserif"
from orphics import maps,io,cosmology,mpi,stats
from pixell import enmap,curvedsky,utils as putils
import numpy as np
import os,sys,shutil
from actsims.util import seed_tracker
from soapack import interfaces as sints
from enlib import bench
from tilec import pipeline,utils as tutils
import healpy as hp
from szar import foregrounds as fgs
from datetime import datetime
from tilec.pipeline import get_input
region = 'deep56'
tdir = "/scratch/r/rbond/msyriac/data/depot/tilec/"
dcomb = 'joint'
if False:  # disabled diagnostic block: compare foreground-residual sim power spectra across seeds
qids = "d56_01,d56_02,d56_03,d56_04,d56_05,d56_06,p01,p02,p03,p04,p05,p06,p07,p08".split(',')
#qids = "d56_01,d56_02".split(',')
pows = {}
seeds = [12,11]
for seed in seeds:
sim_idx = seed
set_idx = 0
fgres_seed = seed_tracker.get_fg_seed(set_idx, sim_idx, 'srcfree')
jsim = pipeline.JointSim(qids,fg_res_version="fgfit_deep56",ellmax=8101,bandpassed=True,no_act_color_correction=False,ccor_exp=-1)
alms = curvedsky.rand_alm_healpy(jsim.cfgres, seed = fgres_seed)
narrays = alms.shape[0]
for i in range(narrays):
for j in range(i,narrays):
qid1 = qids[i]
qid2 = qids[j]
cls = hp.alm2cl(alms[i],alms[j])
ls = np.arange(cls.size)
pows[qid1+qid2+str(seed)] = cls.copy()
for i in range(narrays):
for j in range(i,narrays):
qid1 = qids[i]
qid2 = qids[j]
pl = io.Plotter(xyscale='linlog',scalefn = lambda x: x**2./2./np.pi, xlabel='l',ylabel='D')
for seed in seeds:
cls = pows[qid1+qid2+str(seed)]
pl.add(ls,cls,label=str(seed))
pl.done(f"fgres_simpow_{qid1}_{qid2}.png")
p = lambda x: (x*x.conj()).real
bin_edges = np.arange(20,6000,20)
#versions = ['test_sim_galtest_nofg','test_sim_galtest_withfg_fgfit','test_sim_galtest_withfg_test']
#seeds = [11,12]
# versions = ['test_sim_galtest_sim_updates','test_sim_galtest_rc_commit']
# seeds = [12]
versions = ['test_sim_galtest_final']
seeds = [12,13]
for version in versions:
pl = io.Plotter(xyscale='linlog',scalefn = lambda x: x**2./2./np.pi,xlabel='l',ylabel='D')
for seed in seeds:
csfile = tutils.get_generic_fname(tdir,region,'cmb',deproject=None,data_comb=dcomb,version=version,sim_index=seed)
imap = enmap.read_map(csfile)
modlmap = imap.modlmap()
k = enmap.fft(imap,normalize='phys')
p2d = p(k)
io.power_crop(p2d,300,f"cp2d_{version}.png")
binner = stats.bin2D(modlmap,bin_edges)
cents,p1d = binner.bin(p2d)
pl.add(cents,p1d,lw=1,alpha=0.8,label=f'{seed}')
pl._ax.set_ylim(10,3e5)
pl.done("cpowall_%s.png" % version)
# #This snippet discovered that sim_index=12 is the first instance of break-down
# nsims = 13
# p = lambda x: (x*x.conj()).real
# bin_edges = np.arange(20,6000,20)
# pl = io.Plotter(xyscale='linlog',scalefn = lambda x: x**2./2./np.pi,xlabel='l',ylabel='D')
# for i in range(nsims):
# csfile = tutils.get_generic_fname(tdir,region,'cmb',deproject=None,data_comb=dcomb,version="sim_baseline",sim_index=i)
# imap = enmap.read_map(csfile)
# modlmap = imap.modlmap()
# k = enmap.fft(imap,normalize='phys')
# p2d = p(k)
# binner = stats.bin2D(modlmap,bin_edges)
# cents,p1d = binner.bin(p2d)
# pl.add(cents,p1d,lw=1,alpha=0.8)
# if np.any(p1d*cents**2>1e5): print(i)
# pl.done("cpow.png")
# --- end of bin/investigate_sims.py (repo: ACTCollaboration/tilec, BSD-3-Clause license) ---
# Estimators.
function estimator1(R::Float64, beta::Float64, int::Integrator,
s::State{P,D,A,N}) where {P,D,A,N}
function estim()
result = 2.0 / R # 1 / nm
dxdr = 0.5 .* normalize(com_disp(CN1, CN2, CJ, s))
for a in 1:A
for d in 1:D
for j in 1:P
result += dxdr[d] * get_force(int)[j, d, a, CN1] * beta
result -= dxdr[d] * get_force(int)[j, d, a, CN2] * beta
end
end
end
result
end
end
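# Note: estimator1 sums the bead forces over every bead j in 1:P, whereas estimator2
# below uses only the centroid bead CJ and adds primitive kinetic terms built from the
# discrete second difference of neighbouring beads. CN1, CN2, CJ, MS and hbar are
# module-level constants assumed to be defined elsewhere in the package.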
function estimator2(R::Float64, beta::Float64, int::Integrator,
s::State{P,D,A,N}) where {P,D,A,N}
function estim()
result = 2.0 / R # 1 / nm
dxdr = 0.5 .* normalize(com_disp(CN1, CN2, CJ, s))
for a in 1:A
for d in 1:D
result += dxdr[d] * get_force(int)[CJ, d, a, CN1] * beta
result -= dxdr[d] * get_force(int)[CJ, d, a, CN2] * beta
diff1 = (2 * s.qs[CJ, d, a, CN1]
- s.qs[bead_next(P, CJ), d, a, CN1]
- s.qs[bead_prev(P, CJ), d, a, CN1]) # nm
result -= dxdr[d] * diff1 * MS[a] * P / (hbar^2 * beta)
diff2 = (2 * s.qs[CJ, d, a, CN2]
- s.qs[bead_next(P, CJ), d, a, CN2]
- s.qs[bead_prev(P, CJ), d, a, CN2]) # nm
result += dxdr[d] * diff2 * MS[a] * P / (hbar^2 * beta)
end
end
result
end
end
# --- end of src/estimator.jl (repo: 0/WaterMeanMeanForce.jl, MIT license) ---
### Analyze object sizes ###
import json
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from collections import defaultdict
from collections import Counter
data = json.load(open('/BS/rshetty-wrk/archive00/data/cocoDataStuff/datasetBoxAnn.json','r'))
catid2attr = {}
select_attr_list = set(['person', 'book', 'car', 'bird', 'chair'])
for i, attr in enumerate(data['categories']):
catid2attr[attr['id']] = attr['name']
objectSizes = defaultdict(list)
for img in data['images']:
for bb in img['bboxAnn']:
if catid2attr[bb['cid']] in select_attr_list:
objectSizes[catid2attr[bb['cid']]].append(bb['bbox'][2]*bb['bbox'][3])
colors = ['r','g','b','k','c']
bins = np.logspace(-2, 2, 50)  # percentage-area bin edges (assumption: the original definition of `bins` is missing)
for i,k in enumerate(objectSizes):
    cnt, cent = np.histogram(objectSizes[k], bins=bins/100.)
    plt.loglog((cent[:-1]+cent[1:])/2.*100., cnt, colors[i])
plt.xlabel('Percentage area of the image occupied by the object')
plt.ylabel('Number of Instances of the object')
plt.legend(objectSizes.keys())
plt.show()
sizeOfLargestObj = []
for img in data['images']:
if len(img['bboxAnn']):
sizeOfLargestObj.append(max([bb['bbox'][2]*bb['bbox'][3] for bb in img['bboxAnn']]))
#######################################################################
# ipdb debugging transcript (kept verbatim for reference; wrapped in a string so the file stays parseable):
"""
ipdb> plt.imshow(((fak_x.numpy()[0,[0,1,2],:,:].transpose(1,2,0)+1.0)*255./2.0).astype(np.uint8)); plt.show()
*** NameError: name 'fak_x' is not defined
ipdb> plt.imshow(((fake_x.numpy()[0,[0,1,2],:,:].transpose(1,2,0)+1.0)*255./2.0).astype(np.uint8)); plt.show()
*** AttributeError: 'Variable' object has no attribute 'numpy'
ipdb> plt.imshow(((fake_x.data.numpy()[0,[0,1,2],:,:].transpose(1,2,0)+1.0)*255./2.0).astype(np.uint8)); plt.show()
*** RuntimeError: can't convert CUDA tensor to numpy (it doesn't support GPU arrays). Use .cpu() to move the tensor to host memory first.
ipdb> plt.imshow(((fake_x.data.cpu().numpy()[0,[0,1,2],:,:].transpose(1,2,0)+1.0)*255./2.0).astype(np.uint8)); plt.show()
<matplotlib.image.AxesImage object at 0x7fb660035a50>
ipdb> plt.imshow(((fake_x.data.cpu().numpy()[1,[0,1,2],:,:].transpose(1,2,0)+1.0)*255./2.0).astype(np.uint8)); plt.show()
<matplotlib.image.AxesImage object at 0x7fb649c9e8d0>
ipdb> plt.imshow(((mask.data.cpu().numpy()[1,[0,1,2],:,:].transpose(1,2,0))*255.).astype(np.uint8)); plt.show()
<matplotlib.image.AxesImage object at 0x7fb649b966d0>
ipdb> plt.imshow(((1-mask.data.cpu().numpy()[1,[0,1,2],:,:].transpose(1,2,0))*255.).astype(np.uint8)); plt.show()
<matplotlib.image.AxesImage object at 0x7fb649ae8ad0>
ipdb> plt.imshow(((diffimg.data.cpu().numpy()[1,[0,1,2],:,:].transpose(1,2,0)+1.0)*255./2.0).astype(np.uint8)); plt.show()
<matplotlib.image.AxesImage object at 0x7fb649145bd0>
ipdb> plt.imshow(((fake_x.data.cpu().numpy()[2,[0,1,2],:,:].transpose(1,2,0)+1.0)*255./2.0).astype(np.uint8)); plt.show()
<matplotlib.image.AxesImage object at 0x7fb64908acd0>
ipdb> plt.imshow(((diffimg.data.cpu().numpy()[2,[0,1,2],:,:].transpose(1,2,0)+1.0)*255./2.0).astype(np.uint8)); plt.show()
<matplotlib.image.AxesImage object at 0x7fb648fcccd0>
ipdb> plt.imshow(((1-mask.data.cpu().numpy()[2,[0,1,2],:,:].transpose(1,2,0))*255.).astype(np.uint8)); plt.show()
<matplotlib.image.AxesImage object at 0x7fb648f1e410>
"""
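# Takeaway from the transcript above: CUDA tensors must be moved to host memory with
# .data.cpu().numpy() before plt.imshow can display them.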
#
from utils.data_loader_stargan import get_dataset
import matplotlib.pyplot as plt
import numpy as np
dataset = get_dataset(config.celebA_image_path, config.metadata_path, config.celebA_crop_size, config.image_size, config.dataset, config.mode, select_attrs=config.selected_attrs, datafile=config.datafile,bboxLoader=config.train_boxreconst)
img, imgLab, boxImg, boxLab, mask = dataset[0]
plt.figure(); plt.imshow(((img.numpy()[[0,1,2],:,:].transpose(1,2,0)+1.0)*255./2.0).astype(np.uint8))
plt.figure(); plt.imshow(((boxImg.numpy()[[0,1,2],:,:].transpose(1,2,0)+1.0)*255./2.0).astype(np.uint8))
plt.figure(); plt.imshow(((mask.numpy()[[0,0,0],:,:].transpose(1,2,0)+1.0)*255./2.0).astype(np.uint8))
plt.figure(); plt.imshow((((img*mask).numpy()[[0,1,2],:,:].transpose(1,2,0)+1.0)*255./2.0).astype(np.uint8))
plt.show()
###----------------------------------------------------------------------------------------------------------------
import numpy as np
from collections import defaultdict
from tqdm import tqdm
imgList = open('trainval.txt').read().splitlines()
imgListVal = open('../Segmentation/val.txt').read().splitlines()
trainList = set(imgList) - set(imgListVal)
allImgs = {}
classes = ['person', 'bird', 'cat', 'cow', 'dog', 'horse', 'sheep', 'airplane', 'bicycle', 'boat', 'bus', 'car', 'motorcycle', 'train', 'bottle', 'couch', 'dining table', 'potted plant', 'chair', 'tv']
clsToPascalCls = {c:c for c in classes}
clsToPascalCls['dining table'] = 'diningtable'
clsToPascalCls['tv'] = 'tvmonitor'
clsToPascalCls['motorcycle'] = 'motorbike'
clsToPascalCls['couch'] = 'sofa'; clsToPascalCls['airplane'] = 'aeroplane'
clsToPascalCls['potted plant'] = 'pottedplant'
pascalToCoco = {clsToPascalCls[cls]:cls for cls in clsToPascalCls}
clsToidx = {cls:i for i,cls in enumerate(classes)}
imgToLbl = defaultdict(list)
for cls in tqdm(classes):
clasToimg = open(clsToPascalCls[cls]+'_trainval.txt').read().splitlines()
for line in tqdm(clasToimg):
lsp = line.split()
if (lsp[0] in trainList) and (int(lsp[1]) == 1):
imgToLbl[lsp[0]].append(cls)
import numpy as np
from collections import defaultdict
import matplotlib.pyplot as plt
import seaborn
import torch  # needed for torch.FloatTensor below
import torch.nn.functional as FN
import json
from matplotlib.pyplot import cm
from scipy.stats import binned_statistic
from sklearn.metrics import precision_recall_curve  # used before its later import further down
def sigmoid(x):
    return 1. / (1. + np.exp(-x))
def plot_binned_stat(allIouVsCls, val_to_plot, bins=10, pltAll = 0, linestyle = ':', plttype = 'stat',applysigx = True , applysigy = False, plt_unitline=False):
color=cm.rainbow(np.linspace(0,1, len(allIouVsCls.keys())))
legendK = []
if pltAll:
legendK = allIouVsCls.keys()
for i, cls in enumerate(allIouVsCls):
xval = FN.sigmoid(torch.FloatTensor(allIouVsCls[cls][val_to_plot[0]])).numpy() if applysigx else allIouVsCls[cls][val_to_plot[0]]
yval = FN.sigmoid(torch.FloatTensor(allIouVsCls[cls][val_to_plot[1]])).numpy() if applysigy else allIouVsCls[cls][val_to_plot[1]]
if plttype == 'stat':
aClsVsRec = binned_statistic(xval, yval,statistic='mean', bins=bins)
aClsVsRec_std = binned_statistic(xval, yval,statistic=np.std, bins=bins)
#plt.plot((aClsVsRec[1][:-1]+aClsVsRec[1][1:])/2, aClsVsRec[0],color=color[i],marker='o',linestyle=linestyle);
plt.errorbar((aClsVsRec[1][:-1]+aClsVsRec[1][1:])/2, aClsVsRec[0], yerr = aClsVsRec_std[0], color=color[i],marker='o',linestyle=linestyle);
else:
plt.scatter(xval, yval,alpha=0.5,color=color[i],s=20)
if pltAll < 2:
legendK = legendK + ['all']
allX = np.concatenate([allIouVsCls[cls][val_to_plot[0]] for cls in allIouVsCls])
allY = np.concatenate([allIouVsCls[cls][val_to_plot[1]] for cls in allIouVsCls])
xval = FN.sigmoid(torch.FloatTensor(allX)).numpy() if applysigx else allX
yval = FN.sigmoid(torch.FloatTensor(allY)).numpy() if applysigy else allY
if plttype == 'stat':
aClsVsRec = binned_statistic(xval, yval,statistic='mean', bins=bins)
aClsVsRec_std = binned_statistic(xval, yval,statistic=np.std, bins=bins)
#plt.plot((aClsVsRec[1][:-1]+aClsVsRec[1][1:])/2, aClsVsRec[0],color=color[-1],marker='o',linestyle='-', linewidth=2);
plt.errorbar((aClsVsRec[1][:-1]+aClsVsRec[1][1:])/2, aClsVsRec[0], yerr = aClsVsRec_std[0], color=color[-1],marker='o',linestyle='-', linewidth=2);
else:
plt.scatter(xval,yval,alpha=0.4,color=color[-1],s=20)
plt.xlabel(val_to_plot[0])
plt.ylabel(val_to_plot[1])
plt.legend(legendK)
if plt_unitline:
plt.plot(xval,xval, 'k-');
plt.show()
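# Usage sketch (hypothetical call, mirroring the val_to_plot pairs used below):
#   plot_binned_stat(allIouVsCls, ['ocls', 'recall'], bins=10, plttype='stat')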
fname = 'removeEvalResults/fullres/train_checkpoint_stargan_coco_fulleditor_LowResMask_pascal_RandDiscrWdecay_wgan_30pcUnion_noGT_reg_biasM_randRot_fixedD_randDisc_smM_fixInp_imnet_IN_maxPool_V2_180_1227'
tr_res = json.load(open(fname,'r'))
selected_attrs = ['person', 'bird', 'cat', 'cow', 'dog', 'horse', 'sheep', 'airplane', 'bicycle', 'boat', 'bus', 'car', 'motorcycle', 'train', 'bottle', 'couch', "dining table", "potted plant", 'chair','tv']
attToIdx = {att:i for i,att in enumerate(selected_attrs)}
res = tr_res
allIouVsCls = {}
for key,img in res['images'].items():
for cls in img['perclass']:
if cls not in allIouVsCls:
            allIouVsCls[cls] = {'iou':[], 'recall':[], 'precision':[], 'ocls':[], 'acls':[], 'rSucc':[], 'gtsize':[], 'predsize':[], 'false_damage':[], 'n_obj':[], 'diff':[]}  # 'rSucc' added: it is appended to below
allIouVsCls[cls]['iou'].append(img['perclass'][cls]['iou'])
allIouVsCls[cls]['recall'].append(img['perclass'][cls]['rec'])
allIouVsCls[cls]['precision'].append(img['perclass'][cls]['prec'])
allIouVsCls[cls]['ocls'].append(img['real_scores'][attToIdx[cls]])
allIouVsCls[cls]['acls'].append(img['perclass'][cls]['remove_scores'][attToIdx[cls]])
allIouVsCls[cls]['rSucc'].append(float(img['perclass'][cls]['remove_scores'][attToIdx[cls]]<0.))
allIouVsCls[cls]['diff'].append(img['real_scores'][attToIdx[cls]] - img['perclass'][cls]['remove_scores'][attToIdx[cls]])
allIouVsCls[cls]['gtsize'].append(img['perclass'][cls]['gtSize'])
allIouVsCls[cls]['predsize'].append(img['perclass'][cls]['predSize'])
#allIouVsCls[cls]['false_damage'].append(np.max([img['real_scores'][oclsId] - img['perclass'][cls]['remove_scores'][oclsId] for oclsId in img['real_label'] if selected_attrs[oclsId]!=cls])/(len(img['real_label'])-1+1e-6) )
allIouVsCls[cls]['false_damage'].append(np.max([img['real_scores'][oclsId] - img['perclass'][cls]['remove_scores'][oclsId] for oclsId in img['real_label'] if selected_attrs[oclsId]!=cls]) if len(img['real_label'])>1 else np.nan)
allIouVsCls[cls]['n_obj'].append(len(img['real_label']))
val_to_plot = ['ocls','recall']
import subprocess
cat2id = {}
data = {}; data['images'] = {}
for ann in train_ann:
annSp = ann.split()
imgid = int(annSp[0].split('.')[0])
cls = annSp[1].lower()
if imgid not in data['images']:
finfo = subprocess.check_output(['file', 'flickr_logos_27_dataset_images/'+annSp[0]])
data['images'][imgid] = {'bboxAnn': [], 'id': imgid, 'filename':annSp[0], 'split':'train','imgSize': map(int, finfo.split(',')[-2].split('x'))}
if cls not in cat2id:
cat2id[cls] = len(cat2id)
bbox = map(int,annSp[-4:])
img_w,img_h = data['images'][imgid]['imgSize']
bbox = [float(bbox[0])/float(img_w), float(bbox[1])/float(img_h), float(bbox[2]-bbox[0])/float(img_w), float(bbox[3] - bbox[1])/float(img_h)]
data['images'][imgid]['bboxAnn'].append({'bbox': bbox, 'cid': cat2id[cls]})
data['categories'] = [{'id':cat2id[cat], 'name':cat} for cat in cat2id]
for ann in val_ann:
annSp = ann.split()
imgid = int(annSp[0].split('.')[0])
cls = annSp[1].lower()
if imgid not in data['images']:
finfo = subprocess.check_output(['file', 'flickr_logos_27_dataset_images/'+annSp[0]])
data['images'][imgid] = {'bboxAnn': [], 'id': imgid, 'filename':annSp[0], 'split':'train','imgSize': map(int, finfo.split(',')[-2].split('x'))}
if cls not in cat2id:
cat2id[cls] = len(cat2id)
bbox = [0., 0., 1., 1.]
data['images'][imgid]['bboxAnn'].append({'bbox': bbox, 'cid': cat2id[cls]})
for ann in val_ann:
annSp = ann.split()
imgid = int(annSp[0].split('.')[0])
cls = annSp[1].lower()
    data['images'][imid2index[imgid]]['split'] = 'val'  # imid2index (image id -> index) is assumed to be defined elsewhere
cat2id= {}
data = {} ; data['images'] = {}
for ann in tqdm(train_ann):
annSp = ann.split()
if annSp[4]:
imgid = int(annSp[2].split('.')[0])
cls = annSp[1].lower()
if imgid not in data['images']:
finfo = subprocess.check_output(['file', 'images/'+annSp[2]])
data['images'][imgid] = {'bboxAnn': [], 'id': imgid, 'filename':annSp[2], 'split':'train','imgSize': map(int, finfo.split(',')[-2].split('x'))}
if cls not in cat2id:
cat2id[cls] = len(cat2id)
bbox = map(int,annSp[-4:])
img_w,img_h = data['images'][imgid]['imgSize']
bbox = [float(bbox[0])/float(img_w), float(bbox[1])/float(img_h), float(bbox[2]-bbox[0])/float(img_w), float(bbox[3] - bbox[1])/float(img_h)]
data['images'][imgid]['bboxAnn'].append({'bbox': bbox, 'cid': cat2id[cls]})
data['categories'] = [{'id':cat2id[cat], 'name':cat} for cat in cat2id]
for fname in tqdm(notPresentImgs):
finfo = subprocess.check_output(['file', 'images/'+fname])
imgid = int(fname.split('.')[0])
    data['images'][imgid] = {'bboxAnn': [], 'id': imgid, 'filename':fname, 'split':'train','imgSize': map(int, finfo.split(',')[-2].split('x'))}  # data['images'] is a dict, so key by imgid rather than append
import matplotlib.pyplot as plt
import numpy as np
import seaborn
from PIL import Image
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
fig, ax = plt.subplots();
ax.imshow(img, origin='upper', extent=[0,128, 128,0]);
axins = zoomed_inset_axes(ax, zoom=3, loc=7)
extent = [50, 60, 70, 60]
axins.imshow(img, interpolation="nearest", origin='upper', extent=[0,128, 0,128])
axins.set_xlim(*extent[:2])
axins.set_ylim(*extent[2:])
axins.yaxis.get_major_locator().set_params(nbins=7)
axins.xaxis.get_major_locator().set_params(nbins=7)
plt.xticks(visible=False)
plt.yticks(visible=False)
mark_inset(ax, axins, loc1=1, loc2=3, fc="none", ec="0.5")
ax.set_axis_off()
plt.draw(); plt.show()
fig, ax = plt.subplots(frameon=False);
ax.imshow(img, origin='lower');
axins = zoomed_inset_axes(ax, zoom=3, loc=7)
extent = [55, 65, 44, 54]
axins.imshow(img, interpolation="nearest", origin='lower')
axins.set_xlim(*extent[:2])
axins.set_ylim(*extent[2:])
axins.yaxis.get_major_locator().set_params(nbins=7)
axins.xaxis.get_major_locator().set_params(nbins=7)
#axins.set_axis_off()
plt.xticks(visible=False)
plt.yticks(visible=False)
mark_inset(ax, axins, loc1=2, loc2=3, fc="none", ec="r")
ax.set_axis_off()
plt.draw(); plt.show()
import numpy as np
import json
from scipy.special import expit
import matplotlib.pyplot as plt
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=90)
plt.yticks(tick_marks, classes)
fmt = '.1f'
thresh = cm.max() / 2.
#for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
# plt.text(j, i, format(cm[i, j], fmt),
# horizontalalignment="center",
# color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('Removed Object')
plt.xlabel('Change in Classifier Scores after removal')
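# Minimal usage sketch (illustrative values only):
#   plot_confusion_matrix(np.array([[5., 1.], [2., 7.]]), ['person', 'car'], normalize=True); plt.show()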
allRes = json.load(open('testRobustModel/val_checkpoint_stargan_coco_fulleditor_LowResMask_pascal_RandDiscrWdecay_wgan_30pcUnion_noGT_imnet_V2_msz32_ftuneMask_withPmask_L1150_tv_nb4_styleloss3k_248_1570_withRobustClassifier','r'))
selected_attrs = ['person', 'bird', 'cat', 'cow', 'dog', 'horse', 'sheep', 'airplane', 'bicycle', 'boat', 'bus', 'car', 'motorcycle', 'train', 'bottle', 'couch', "dining table", "potted plant", 'chair', 'tv']
allCoOccur = np.zeros((2302,20,20))
allClassifierScores = np.zeros((2302,20))
allClassifierScoresEditNormChange = np.zeros((2302,20,20))
allClassifierScoresEdit = np.zeros((2302,20,20))
clsToids = {cls:i for i,cls in enumerate(selected_attrs)}
allLabels = np.zeros((2302,20))
for i,k in enumerate(allRes['images'].keys()):
allClassifierScores[i,:] = allRes['images'][k]['real_scores']
allLabels[i,allRes['images'][k]['real_label']] = 1
for cls in allRes['images'][k]['perclass']:
allCoOccur[i,clsToids[cls],allRes['images'][k]['real_label']] = 1
allClassifierScoresEdit[i,clsToids[cls],allRes['images'][k]['real_label']] = np.array(allRes['images'][k]['perclass'][cls]['remove_scores'])[allRes['images'][k]['real_label']] - allClassifierScores[i,allRes['images'][k]['real_label']]
allClassifierScoresEditNormChange[i,clsToids[cls],allRes['images'][k]['real_label']] = expit(np.array(allRes['images'][k]['perclass'][cls]['remove_scores'])[allRes['images'][k]['real_label']]) - expit(allClassifierScores[i,allRes['images'][k]['real_label']])
n_class = len(selected_attrs)
perclass_tp = np.zeros((2,len(selected_attrs)))
perclass_fn = np.zeros((2,len(selected_attrs)))
perclass_fp = np.zeros((2,len(selected_attrs)))
for k,img in allRes['images'].items():
nonexistLabel = [i for i in xrange(len(selected_attrs)) if i not in img['real_label']]
if (0 not in img['real_label']):
perclass_tp[0,img['real_label']] += (np.array(img['real_scores'])[img['real_label']]>0)
perclass_fn[0,img['real_label']] += (np.array(img['real_scores'])[img['real_label']]<=0)
perclass_fp[0, nonexistLabel] += (np.array(img['real_scores'])[nonexistLabel]>0)
else:
perclass_tp[1,img['real_label']] += (np.array(img['real_scores'])[img['real_label']]>0)
perclass_fn[1,img['real_label']] += (np.array(img['real_scores'])[img['real_label']]<=0)
perclass_fp[1, nonexistLabel] += (np.array(img['real_scores'])[nonexistLabel]>0)
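# allResNonRob (results from the non-robust classifier) is assumed to be loaded analogously to allRes above.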
perclass_tp_nr = np.zeros((2,len(selected_attrs)))
perclass_fn_nr = np.zeros((2,len(selected_attrs)))
perclass_fp_nr = np.zeros((2,len(selected_attrs)))
for k,img in allResNonRob['images'].items():
nonexistLabel = [i for i in xrange(len(selected_attrs)) if i not in img['real_label']]
if (0 not in img['real_label']):
perclass_tp_nr[0,img['real_label']] += (np.array(img['real_scores'])[img['real_label']]>0)
perclass_fn_nr[0,img['real_label']] += (np.array(img['real_scores'])[img['real_label']]<=0)
perclass_fp_nr[0, nonexistLabel] += (np.array(img['real_scores'])[nonexistLabel]>0)
else:
perclass_tp_nr[1,img['real_label']] += (np.array(img['real_scores'])[img['real_label']]>0)
perclass_fp_nr[1,nonexistLabel] += (np.array(img['real_scores'])[nonexistLabel]>0)
perclass_fn_nr[1,img['real_label']] += (np.array(img['real_scores'])[img['real_label']]<=0)
recall = perclass_tp/(perclass_tp+perclass_fn+1e-6)
precision = perclass_tp/(perclass_tp+perclass_fp+1e-6)
f1_score = 2.0* (recall*precision)/(recall+precision+1e-6)
recall_nr = perclass_tp_nr/(perclass_tp_nr+perclass_fn_nr+1e-6)
precision_nr = perclass_tp_nr/(perclass_tp_nr+perclass_fp_nr+1e-6)
f1_score_nr = 2.0* (recall_nr*precision_nr)/(recall_nr+precision_nr+1e-6)
allMf1s = []
allTh = []
alLabels = np.zeros((len(allRes['images']),n_class))
allPred = np.zeros((len(allRes['images']),n_class))
for i,k in enumerate(allRes['images']):
alLabels[i,allRes['images'][k]['real_label']] = 1
allPred[i,:] = allRes['images'][k]['real_scores']
for i in xrange(len(selected_attrs)):
pr,rec,th = precision_recall_curve(alLabels[:,i],allPred[:,i]);
f1s = 2*(pr*rec)/(pr+rec+1e-6); mf1idx = np.argmax(f1s);
print 'Max f1 = %.2f, th =%.2f'%(f1s[mf1idx], th[mf1idx]);
allMf1s.append(f1s[mf1idx])
allTh.append(th[mf1idx])
allMf1s_nr = []
allTh_nr = []
alLabels_nr = np.zeros((len(allRes['images']),n_class))
allPred_nr = np.zeros((len(allRes['images']),n_class))
for i,k in enumerate(allResNonRob['images']):
alLabels_nr[i,allResNonRob['images'][k]['real_label']] = 1
allPred_nr[i,:] = allResNonRob['images'][k]['real_scores']
for i in xrange(len(selected_attrs)):
pr,rec,th = precision_recall_curve(alLabels[:,i],allPred[:,i]);
f1s = 2*(pr*rec)/(pr+rec+1e-6); mf1idx = np.argmax(f1s);
print 'Max f1 = %.2f, th =%.2f'%(f1s[mf1idx], th[mf1idx]);
allMf1s_nr.append(f1s[mf1idx])
allTh_nr.append(th[mf1idx])
allTh = np.array(allTh)
allTh_nr = np.array(allTh_nr)
perclass_tp = np.zeros((2,len(selected_attrs)))
perclass_fn = np.zeros((2,len(selected_attrs)))
perclass_fp = np.zeros((2,len(selected_attrs)))
for k,img in allRes['images'].items():
nonexistLabel = [i for i in xrange(len(selected_attrs)) if i not in img['real_label']]
if (0 not in img['real_label']):
perclass_tp[0,img['real_label']] += (np.array(img['real_scores'])[img['real_label']]>allTh[img['real_label']])
perclass_fn[0,img['real_label']] += (np.array(img['real_scores'])[img['real_label']]<=allTh[img['real_label']])
perclass_fp[0, nonexistLabel] += (np.array(img['real_scores'])[nonexistLabel]>allTh[nonexistLabel])
else:
perclass_tp[1,img['real_label']] += (np.array(img['real_scores'])[img['real_label']]>allTh[img['real_label']])
perclass_fn[1,img['real_label']] += (np.array(img['real_scores'])[img['real_label']]<=allTh[img['real_label']])
perclass_fp[1, nonexistLabel] += (np.array(img['real_scores'])[nonexistLabel]>allTh[nonexistLabel])
perclass_tp_nr = np.zeros((2,len(selected_attrs)))
perclass_fn_nr = np.zeros((2,len(selected_attrs)))
perclass_fp_nr = np.zeros((2,len(selected_attrs)))
for k,img in allResNonRob['images'].items():
nonexistLabel = [i for i in xrange(len(selected_attrs)) if i not in img['real_label']]
if (0 not in img['real_label']):
perclass_tp_nr[0,img['real_label']] += (np.array(img['real_scores'])[img['real_label']]>allTh_nr[img['real_label']])
perclass_fn_nr[0,img['real_label']] += (np.array(img['real_scores'])[img['real_label']]<=allTh_nr[img['real_label']])
perclass_fp_nr[0, nonexistLabel] += (np.array(img['real_scores'])[nonexistLabel]>allTh_nr[nonexistLabel])
else:
perclass_tp_nr[1,img['real_label']] += (np.array(img['real_scores'])[img['real_label']]>allTh_nr[img['real_label']])
perclass_fp_nr[1,nonexistLabel] += (np.array(img['real_scores'])[nonexistLabel]>allTh_nr[nonexistLabel])
perclass_fn_nr[1,img['real_label']] += (np.array(img['real_scores'])[img['real_label']]<=allTh_nr[img['real_label']])
recall = perclass_tp/(perclass_tp+perclass_fn+1e-6)
precision = perclass_tp/(perclass_tp+perclass_fp+1e-6)
f1_score = 2.0* (recall*precision)/(recall+precision+1e-6)
recall_nr = perclass_tp_nr/(perclass_tp_nr+perclass_fn_nr+1e-6)
precision_nr = perclass_tp_nr/(perclass_tp_nr+perclass_fp_nr+1e-6)
f1_score_nr = 2.0* (recall_nr*precision_nr)/(recall_nr+precision_nr+1e-6)
recall_ovr = perclass_tp.sum()/(perclass_tp.sum()+perclass_fn.sum()+1e-6)
precision_ovr = perclass_tp.sum()/(perclass_tp.sum()+perclass_fp.sum()+1e-6)
recall_ovr_nr = perclass_tp_nr.sum()/(perclass_tp_nr.sum()+perclass_fn_nr.sum()+1e-6)
precision_ovr_nr = perclass_tp_nr.sum()/(perclass_tp_nr.sum()+perclass_fp_nr.sum()+1e-6)
f1_score_ovr = 2.0* (recall_ovr*precision_ovr)/(recall_ovr+precision_ovr+1e-6)
f1_score_ovr_nr = 2.0* (recall_ovr_nr*precision_ovr_nr)/(recall_ovr_nr+precision_ovr_nr+1e-6)
recall_ovr2 = perclass_tp.sum(axis=1)/(perclass_tp.sum(axis=1)+perclass_fn.sum(axis=1)+1e-6)
precision_ovr2 = perclass_tp.sum(axis=1)/(perclass_tp.sum(axis=1)+perclass_fp.sum(axis=1)+1e-6)
recall_ovr2_nr = perclass_tp_nr.sum(axis=1)/(perclass_tp_nr.sum(axis=1)+perclass_fn_nr.sum(axis=1)+1e-6)
precision_ovr2_nr = perclass_tp_nr.sum(axis=1)/(perclass_tp_nr.sum(axis=1)+perclass_fp_nr.sum(axis=1)+1e-6)
f1_score_ovr2 = 2.0* (recall_ovr2*precision_ovr2)/(recall_ovr2+precision_ovr2+1e-6)
f1_score_ovr2_nr = 2.0* (recall_ovr2_nr*precision_ovr2_nr)/(recall_ovr2_nr+precision_ovr2_nr+1e-6)
print f1_score_ovr2
print f1_score_ovr2_nr
print precision_ovr2
print precision_ovr2_nr
print recall_ovr2
print recall_ovr2_nr
print('Score : || %s |'%(' | '.join(['%6s'%att[:6] for att in selected_attrs])))
print('F1woPR: || %s |'%(' | '.join([' %.2f' % sc for sc in f1_score[0,:]])))
print('F1woP : || %s |'%(' | '.join([' %.2f' % sc for sc in f1_score_nr[0,:]])))
print('\nRwoPR : || %s |'%(' | '.join([' %.2f' % sc for sc in recall[0,:]])))
print('RwoP : || %s |'%(' | '.join([' %.2f' % sc for sc in recall_nr[0,:]])))
print('\nPwoPR : || %s |'%(' | '.join([' %.2f' % sc for sc in precision[0,:]])))
print('PwoP : || %s |'%(' | '.join([' %.2f' % sc for sc in precision_nr[0,:]])))
print('\n\nScore : || %s |'%(' | '.join(['%6s'%att[:6] for att in selected_attrs])))
print('F1wPR : || %s |'%(' | '.join([' %.2f' % sc for sc in f1_score[1,:]])))
print('F1wP : || %s |'%(' | '.join([' %.2f' % sc for sc in f1_score_nr[1,:]])))
print('\nRwPR : || %s |'%(' | '.join([' %.2f' % sc for sc in recall[1,:]])))
print('RwP : || %s |'%(' | '.join([' %.2f' % sc for sc in recall_nr[1,:]])))
print('\nPwPR : || %s |'%(' | '.join([' %.2f' % sc for sc in precision[1,:]])))
print('PwP : || %s |'%(' | '.join([' %.2f' % sc for sc in precision_nr[1,:]])))
selected_attrs = ['person' , 'bicycle' , 'car' , 'motorcycle' , 'airplane' , 'bus' , 'train' , 'truck' , 'boat' , 'traffic light' , 'fire hydrant' , 'stop sign' , 'parking meter' , 'bench' , 'bird' , 'cat' , 'dog' , 'horse' , 'sheep' , 'cow' , 'elephant' , 'bear' , 'zebra' , 'giraffe' , 'backpack' , 'umbrella' , 'handbag' , 'tie' , 'suitcase' , 'frisbee' , 'skis' , 'snowboard' , 'sports ball' , 'kite' , 'baseball bat' , 'baseball glove' , 'skateboard' , 'surfboard' , 'tennis racket' , 'bottle' , 'wine glass' , 'cup' , 'fork' , 'knife' , 'spoon' , 'bowl' , 'banana' , 'apple' , 'sandwich' , 'orange' , 'broccoli' , 'carrot' , 'hot dog' , 'pizza' , 'donut' , 'cake' , 'chair' , 'couch' , 'potted plant' , 'bed' , 'dining table' , 'toilet' , 'tv' , 'laptop' , 'mouse' , 'remote' , 'keyboard' , 'cell phone' , 'microwave' , 'oven' , 'toaster' , 'sink' , 'refrigerator' , 'book' , 'clock' , 'vase' , 'scissors' , 'teddy bear' , 'hair drier' , 'toothbrush']
for i in xrange(len(selected_attrs)):
pr,rec,th = precision_recall_curve(alLabels[:,i],allPred[:,i]);
plt.plot(pr,rec,label=selected_attrs[i]+'_r')
for i in xrange(len(selected_attrs)):
pr,rec,th = precision_recall_curve(alLabels_nr[:,i],allPred_nr[:,i]);
plt.plot(pr,rec,label=selected_attrs[i]+'_nr',linestyle=':')
plt.legend(ncol=2); plt.show()
imids = resRob.keys()
def VOCap(rec,prec):
nc = rec.shape[1]
mrec=np.concatenate([np.zeros((1,rec.shape[1])),rec,np.ones((1,rec.shape[1]))],axis=0)
mprec=np.concatenate([np.zeros((1,rec.shape[1])),prec,np.zeros((1,rec.shape[1]))],axis=0)
for i in reversed(np.arange(mprec.shape[0]-1)):
mprec[i,:]=np.maximum(mprec[i,:],mprec[i+1,:])
#-------------------------------------------------------
# Now do the step wise integration
# Original matlab code is
#-------------------------------------------------------
# i=find(mrec(2:end)~=mrec(1:end-1))+1;
# ap=sum((mrec(i)-mrec(i-1)).*mpre(i));
# Here we use boolean indexing of numpy instead of find
steps = (mrec[1:,:] != mrec[:-1,:])
ap = np.zeros(nc)
for i in xrange(nc):
ap[i]=sum((mrec[1:,:][steps[:,i], i] - mrec[:-1,:][steps[:,i], i])*mprec[1:,][steps[:,i],i])
return ap, mrec, mprec
def computeAP(allSc, allLb):
si = (-allSc).argsort(axis=0)
cid = np.arange(allLb.shape[1])
tp = allLb[si[:,cid],cid] > 0.
fp = allLb[si[:,cid],cid] == 0.
tp = tp.cumsum(axis=0).astype(np.float32)
fp = fp.cumsum(axis=0).astype(np.float32)
rec = (tp+1e-8)/((allLb>0.)+1e-8).sum(axis=0).astype(np.float32)
prec = (tp+1e-8)/ (tp+ fp+1e-8)
ap,mrec,mprec = VOCap(rec,prec)
return ap,mrec,mprec
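# Toy check of computeAP (illustrative): two samples, two classes, perfectly ranked,
# so the per-class AP should come out as ~1.0 for both classes.
#   sc = np.array([[0.9, 0.1], [0.2, 0.8]]); lb = np.array([[1., 0.], [0., 1.]])
#   ap, mrec, mprec = computeAP(sc, lb)   # ap ~ [1., 1.]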
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import precision_recall_curve
import json
n_class = len(selected_attrs)
alLabels = np.zeros((len(allRes['images']),n_class))
allPred = np.zeros((len(allRes['images']),n_class))
for i,k in enumerate(allRes['images']):
alLabels[i,allRes['images'][k]['real_label']] = 1
allPred[i,:] = allRes['images'][k]['real_scores']
alLabels_nr = np.zeros((len(allRes['images']),n_class))
allPred_nr = np.zeros((len(allRes['images']),n_class))
for i,k in enumerate(allRes['images']):
alLabels_nr[i,allResNonRob['images'][k]['real_label']] = 1
allPred_nr[i,:] = allResNonRob['images'][k]['real_scores']
ap_r, mrec_r, mprec_r = computeAP(allPred,alLabels)
ap_nr, mrec_nr, mprec_nr = computeAP(allPred_nr,alLabels_nr)
n_high= 15
print ap_r.mean(), ap_nr.mean()
mdiff = np.argsort(np.abs(ap_r - ap_nr))[::-1]
print('Score: || %s |'%(' | '.join(['%6s'%selected_attrs[mdiff[i]][:6] for i in xrange(n_high)])))
print('AP_R : || %s |'%(' | '.join([' %.2f' % ap_r[mdiff[i]] for i in xrange(n_high)])))
print('AP_NR: || %s |'%(' | '.join([' %.2f' % ap_nr[mdiff[i]] for i in xrange(n_high)])))
color=cm.Spectral(np.linspace(0,1,n_high))
for i in xrange(n_high):
#pr,rec,th = precision_recall_curve(alLabels[:,mdiff[i]],allPred[:,mdiff[i]]);
plt.plot(mrec_r[:,mdiff[i]],mprec_r[:,mdiff[i]],label=selected_attrs[mdiff[i]]+'_r',c=color[i])
for i in xrange(n_high):
#pr,rec,th = precision_recall_curve(alLabels_nr[:,mdiff[i]],allPred_nr[:,mdiff[i]]);
plt.plot(mrec_nr[:,mdiff[i]],mprec_nr[:,mdiff[i]],label=selected_attrs[mdiff[i]]+'_nr',linestyle=':',c=color[i])
plt.legend(ncol=2);
plt.xlabel('Recall'); plt.ylabel('Precision')
plt.title('Top %d classes with largest difference in ap'%(n_high))
plt.show()
isp = (alLabels[:,0] == 1.)
ap_r_wp, mrec_r_wp, mprec_r_wp = computeAP(allPred[isp,1:],alLabels[isp,1:])
ap_r_wop, mrec_r_wop, mprec_r_wop = computeAP(allPred[~isp,1:],alLabels[~isp,1:])
ap_nr_wp, mrec_nr_wp, mprec_nr_wp = computeAP(allPred_nr[isp,1:],alLabels_nr[isp,1:])
ap_nr_wop, mrec_nr_wop, mprec_nr_wop = computeAP(allPred_nr[~isp,1:],alLabels_nr[~isp,1:])
print ap_r_wp.mean(), ap_r_wop.mean()
print ap_nr_wp.mean(), ap_nr_wop.mean()
mdiff = np.argsort(np.abs(ap_r_wp - ap_nr_wp))[::-1]
print('Score: || %s |'%(' | '.join(['%6s'%selected_attrs[mdiff[i]][:6] for i in xrange(n_high)])))
print('AP_R : || %s |'%(' | '.join([' %.2f' % ap_r_wp[mdiff[i]] for i in xrange(n_high)])))
print('AP_NR: || %s |'%(' | '.join([' %.2f' % ap_nr_wp[mdiff[i]] for i in xrange(n_high)])))
color=cm.Spectral(np.linspace(0,1,n_high))
for i in xrange(n_high):
#pr,rec,th = precision_recall_curve(alLabels[isp,mdiff[i]],allPred[isp,mdiff[i]]);
plt.plot(mrec_r_wp[:,mdiff[i]],mprec_r_wp[:,mdiff[i]],label=selected_attrs[mdiff[i]]+'_r',c=color[i])
for i in xrange(n_high):
    #pr,rec,th = precision_recall_curve(alLabels_nr[isp,mdiff[i]],allPred_nr[isp,mdiff[i]]);
plt.plot(mrec_nr_wp[:,mdiff[i]],mprec_nr_wp[:,mdiff[i]],label=selected_attrs[mdiff[i]]+'_nr',linestyle=':',c=color[i])
plt.legend(ncol=2);
plt.xlabel('Recall'); plt.ylabel('Precision')
plt.title('Top %d classes with largest difference in ap'%(n_high))
plt.show()
#================================================================================================
#============================= Co ouccerence computations =====================================
#================================================================================================
def plot_cooccur_matrix(cm, classes,
normalize=False,
                        title='Co-occurrence matrix',
cmap=plt.cm.Blues, vmin=None, vmax=None):
"""
    This function prints and plots the co-occurrence matrix.
Normalization can be applied by setting `normalize=True`.
"""
    if normalize:
        cm = cm.astype('float') / cm.diagonal()[:, np.newaxis]
        print("Normalized co-occurrence matrix (w.r.t. primary class counts)")
    else:
        print('Co-occurrence matrix, without normalization')
    print(cm)
    if vmin is None:
        vmin = cm.min()
    if vmax is None:
        vmax = cm.max()  # was `vmin = cm.max()`, which left vmax unset
plt.imshow(cm, interpolation='nearest', cmap=cmap, vmin=vmin, vmax=vmax)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=90)
plt.yticks(tick_marks, classes)
fmt = '.1f'
thresh = cm.max() / 2.
#for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
# plt.text(j, i, format(cm[i, j], fmt),
# horizontalalignment="center",
# color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('Primary Class')
    plt.xlabel('Co-occurring class')
cidToCls = {cat['id']:cat['name'] for cat in data['categories']}
clsToids = {cls:i for i,cls in enumerate(selected_attrs)}
allCoOccur = np.zeros((80,80))
for img in data['images']:
if img['split'] == 'train':
imgCls = set()
for bb in img['bboxAnn']:
imgCls.add(cidToCls[bb['cid']])
for clsX in list(imgCls):
for clsY in list(imgCls):
allCoOccur[clsToids[clsX],clsToids[clsY]] +=1
plot_cooccur_matrix(np.log(allCoOccur+1e-1),selected_attrs,normalize=False,cmap=plt.cm.plasma); plt.show()
indivOccurenceNp = np.zeros(80)
clsCounts = np.zeros(80)
for img in data['images']:
if img['split'] == 'train':
imgCls = set()
for bb in img['bboxAnn']:
imgCls.add(cidToCls[bb['cid']])
imgCls = list(imgCls)
if len(imgCls)==1:
indivOccurenceNp[clsToids[imgCls[0]]] += 1
else:
clsCounts[[clsToids[cls] for cls in imgCls]] += 1
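# aps, rvox, rmax_reg, rmax_da and apReg used in the plots below are assumed to be
# computed beforehand (e.g. aps via computeAP above); only the plotting code is kept here.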
occurStats = np.log10(allCoOccur.diagonal())
fig, ax = plt.subplots()
ax.scatter(occurStats,aps)
for i, txt in enumerate(selected_attrs):
ax.annotate(txt, (occurStats[i],aps[i]))
fig, ax = plt.subplots()
texts = []
for i, txt in enumerate(selected_attrs):
texts.append(ax.annotate(txt, (aps[i]+0.003,rvox[i]+0.5),fontsize=14))
ax.scatter(aps,rvox,s=50,alpha=1.0)
plt.xlabel('Average precision for the class', fontsize=14)
plt.ylabel('% Violations to changes in context', fontsize=14)
plt.show()
fig, ax = plt.subplots()
ax.plot(np.arange(0.0,rmax_reg.max(),0.05),np.arange(0.0, rmax_reg.max(),0.05),'k-');
sax = ax.scatter(rmax_reg, rmax_da, c=apReg, s=24,cmap=plt.cm.plasma);
cbar = fig.colorbar(sax,ax=ax)
cbar.set_label('Average precision of the class', fontsize=16)
ax.set_xlabel('% violations in the original model', fontsize=16)
ax.set_ylabel('% violations in the Data augmented model',fontsize=16)
texts = []
th = 0.1
for i, txt in enumerate(selected_attrs):
if rmax_reg[i] - rmax_da[i] > th:
texts.append(ax.annotate(txt, (rmax_reg[i]-0.03,rmax_da[i]-0.03),fontsize=14))
elif rmax_reg[i] - rmax_da[i] < -th:
texts.append(ax.annotate(txt, (rmax_reg[i]+0.03,rmax_da[i]+0.03),fontsize=14))
plt.show()
# --- end of utils/miscCode.py (repo: baudm/adversarial-object-removal, MIT license) ---
import random, math
from collections import deque, namedtuple
import itertools
import numpy as np
import gym
from gym import error, spaces, utils
from gym.utils import seeding
import matplotlib
matplotlib.use('Agg')  # the backend must be set before importing pyplot
import matplotlib.pyplot as plt
def generate_random_point_on_circle(radius=0.1):
"""
# using universality of uniform : assuming a random points on the circumvent of a unit, the distribution of points on
# the circle should follow p(x)=2x \for 0<x<1 (linear and it should sum up to 1). universality of uniform implies:
# x~p(x) then cdf(x)~uniform ,
# inverse of it implies cdf^-1(uniform) = p(x)
# it is easy to note that cdf^-1 in the circle distribution is sqrt()
# also more info is on the stackflow :
# https://stackoverflow.com/questions/5837572/generate-a-random-point-within-a-circle-uniformly/50746409#50746409
:return: returns random coordinates on a circle
"""
r = radius * math.sqrt(random.random())
return r
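# Illustrative helper (not part of the original environment): combining the sqrt-radius
# trick above with a uniform angle yields points uniform over the whole disk.
def sample_point_in_disk(radius=1.0):
    r = radius * math.sqrt(random.random())  # inverse CDF of p(r) = 2r / radius^2
    theta = 2 * math.pi * random.random()    # the angle is uniform on [0, 2*pi)
    return r * math.cos(theta), r * math.sin(theta)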
class Synthetic2DPlane(gym.Env):
metadata = {'render.modes': ['human']}
"""
Circle World Environment (Used in InfoGAIL paper):
Actions are the polar coordinates, directions A_t and P_t at discrete time t.
Observations at time t are cartesian coordinates from t-4 to t.
    The (unlabeled) expert demonstrations contain three distinct modes, each generated with a stochastic expert policy
    that produces circle-like trajectories.
"""
def __init__(self):
Mode = namedtuple('Mode', ['center', 'radius'])
self.action_space = np.zeros((2,1)) # actions are (r_t, p_t), the polar coordinates.
# observations are positions from t-4 to t, each of which is (x_t,y_t)
self.observation_space = np.zeros((8, 1))
self.modes = [Mode((0, 10), 10), Mode((0, -10), 10), Mode((0, 20), 20)]
self.random_radius = 0.0 # generates random perturbation to radius r
self.mode = random.choice(self.modes)
self.center, self.radius = self.mode
self.num_points_per_circle = 1 * self.radius # Number of steps to circle back to where it started.
self.delta_theta = 2 * math.pi / self.num_points_per_circle
# We keep the past coordinates form rendering/visualization
self.polar_coordinates_histories = []
self.state = deque(maxlen=4)
        # random angle initialization (currently disabled: theta is fixed to 0 on the next line)
        theta = 2 * math.pi * random.random()
        theta = 0
# stochastic distance to generate random policy
random_delta_distance = generate_random_point_on_circle(radius=self.random_radius*self.radius)
self.polar_coordinates_histories.append([self.radius + random_delta_distance, theta])
self.state.append([self.center[0] + (self.radius + random_delta_distance) * math.cos(theta),
self.center[1] + (self.radius + random_delta_distance) * math.sin(theta)])
# self.state.append([self.radius + random_delta_distance, theta])
# Observation/states at time t constitutes of the coordinate positions from t-4 to t
for time in range(3):
            # advance the angle by one step; observations are the last four positions
            theta = theta + self.delta_theta
# stochastic distance to generate random policy
random_delta_distance = generate_random_point_on_circle(radius=self.random_radius*self.radius)
self.polar_coordinates_histories.append([self.radius + random_delta_distance, theta])
self.state.append([self.center[0] + (self.radius + random_delta_distance) * math.cos(theta),
self.center[1] + (self.radius + random_delta_distance) * math.sin(theta)])
self.time = 0
self.done = 0
def stochastic_synthetic_policy(self):
"""
Built-in expert
:param action:
:return: actions which are numpy array of polar coordinates (directions)
"""
_, theta = self.polar_coordinates_histories[-1]
theta = theta + self.delta_theta
# stochastic distance to generate random policy
random_delta_distance = generate_random_point_on_circle(radius=self.random_radius*self.radius)
action = [random_delta_distance, self.delta_theta]
return np.array(action)
def step(self, action):
"""
:param action: which are polar coordinates
:return: states which are the the cartesian coordinates
"""
self.time += 1
if self.time > self.num_points_per_circle+1:
reward = 1/self.time
self.done = 1
return [np.array(list(itertools.chain.from_iterable(self.state))), reward, self.done, self.mode]
else:
delta_radius, delta_theta = action
_, theta = self.polar_coordinates_histories[-1]
theta = theta + delta_theta
radius = self.radius + delta_radius
new_coordinate = [radius, theta]
self.polar_coordinates_histories.append(new_coordinate)
self.state.append([self.center[0] + radius * math.cos(theta),
self.center[1] + radius * math.sin(theta)])
reward = 0
return [np.array(list(itertools.chain.from_iterable(self.state))), reward, self.done, self.mode]
def reset(self):
# restarts the polar coordinates
self.mode = random.choice(self.modes)
self.center, self.radius = self.mode
self.num_points_per_circle = 1 * self.radius # Number of steps to circle back to where it started.
self.delta_theta = 2 * math.pi / self.num_points_per_circle
self.polar_coordinates_histories = []
self.state = deque(maxlen=4)
        # random angle initialization (currently disabled: theta is fixed to 0 on the next line)
        theta = 2 * math.pi * random.random()
        theta = 0
# stochastic distance to generate random policy
random_delta_distance = generate_random_point_on_circle(radius=self.random_radius*self.radius)
self.polar_coordinates_histories.append([self.radius + random_delta_distance, theta])
self.state.append([self.center[0] + (self.radius + random_delta_distance) * math.cos(theta),
self.center[1] + (self.radius + random_delta_distance) * math.sin(theta)])
# Observation/states at time t constitutes of the positions from t-4 to t
for time in range(3):
            # advance the angle by one step; observations are the last four positions
            theta = theta + self.delta_theta
# stochastic distance to generate random policy
random_delta_distance = generate_random_point_on_circle(radius=self.random_radius*self.radius)
self.polar_coordinates_histories.append([self.radius + random_delta_distance, theta])
self.state.append([self.center[0] + (self.radius + random_delta_distance) * math.cos(theta),
self.center[1] + (self.radius + random_delta_distance) * math.sin(theta)])
self.time = 0
self.done = 0
return np.array(list(itertools.chain.from_iterable(self.state)))
def render(self):
# polar to cartesian
x_cartesian_coordinates = [self.center[0] + r * math.cos(p) for r, p in self.polar_coordinates_histories[4:]]
y_cartesian_coordinates = [self.center[1] + r * math.sin(p) for r, p in self.polar_coordinates_histories[4:]]
if self.center[1]==10:
color ='b'
zorder = 10
elif self.center[1]==-10:
color ='g'
zorder = 5
elif self.center[1]==20:
color='r'
zorder = 1
plt.plot(x_cartesian_coordinates, y_cartesian_coordinates, color=color, linestyle='dotted', zorder = zorder)
plt.gca().set_aspect('equal', adjustable='box')
# Turn interactive plotting off
plt.ioff()
plt.savefig('circle_world')
# plt.clf()
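# Illustrative rollout (assumed usage; not part of the original module):
if __name__ == '__main__':
    env = Synthetic2DPlane()
    obs = env.reset()
    done = 0
    while not done:
        action = env.stochastic_synthetic_policy()  # built-in expert
        obs, reward, done, mode = env.step(action)
    env.render()  # saves 'circle_world.png' via the Agg backend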
# --- end of synthetic2Dplane_env_backup.py (repo: azarafrooz/corgail, MIT license) ---
import sys
from glob import glob
from os import path
from pathlib import Path
from unittest import TestCase
from PIL import Image
from numpy import array
sys.path.append("..")
from src.solver.captcha import get_captcha_text
class TestCaptcha(TestCase):
def test_captcha(self):
cwd = Path(__file__).parent
sample_path = Path(f'{cwd}/samples').resolve()
print(sample_path)
samples = glob(path.join(sample_path, '*.jpg'))
print(samples)
for sample in samples:
image = array(Image.open(sample).convert('L'))
result = get_captcha_text(image)
expected_result = Path(sample).stem
print(f'Expected {expected_result}, got {result}')
self.assertEqual(result, expected_result)
# --- end of tests/test_tesseract.py (repo: mt-hack/nptu-auto-checkin, MIT license) ---
import os
import functools as fun
import itertools as it
import collections as coll
import re
import numpy as np
from scipy import ndimage as nd
from skimage import io
from scipy.stats.mstats import mquantiles as quantiles
from skimage import morphology as skmorph, filter as imfilter
import skimage.filter.rank as rank
import skimage
import cytoolz as tlz
def morphop(im, operation='open', radius=5):
"""Perform a morphological operation with spherical structuring element.
Parameters
----------
im : array, shape (M, N[, P])
2D or 3D grayscale image.
operation : string, optional
The operation to perform. Choices are 'opening', 'closing',
'erosion', and 'dilation'. Imperative verbs also work, e.g.
'dilate'.
radius : int, optional
The radius of the structuring element (disk or ball) used.
Returns
-------
imout : array, shape (M, N[, P])
The transformed image.
Raises
------
    ValueError : if the image is not 2D or 3D, or the operation is unrecognized.
"""
if im.ndim == 2:
selem = skmorph.disk(radius)
elif im.ndim == 3:
selem = skmorph.ball(radius)
else:
raise ValueError("Image input to 'morphop' should be 2D or 3D"
", got %iD" % im.ndim)
    if operation.startswith('open'):
        imout = nd.grey_opening(im, footprint=selem)
    elif operation.startswith('clos'):
        imout = nd.grey_closing(im, footprint=selem)
    elif operation.startswith('dila'):
        imout = nd.grey_dilation(im, footprint=selem)
    elif operation.startswith('ero'):
        imout = nd.grey_erosion(im, footprint=selem)
    else:
        raise ValueError("Unknown morphological operation: %s" % operation)
return imout
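# e.g. opened = morphop(image, 'open', radius=3)   # grey opening with a disk/ball of radius 3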
def basefn(fn):
"""Get the filename without the extension.
Parameters
----------
fn : string
A filename.
Returns
-------
outfn : string
`fn` with the extension stripped.
Examples
--------
>>> file_name = 'file_name.ext'
>>> basefn(file_name)
'file_name'
"""
return os.path.splitext(fn)[0]
def max_mask_iter(fns, offset=0, close_radius=0, erode_radius=0):
"""Find masks for a set of images having brightness artifacts.
Parameters
----------
fns : list of string
The images being examined.
offset : int, optional
Offset the threshold automatically found.
close_radius : int, optional
Perform a morphological closing of the mask of this radius.
erode_radius : int, optional
Perform a morphological erosion of the mask, after any closing,
of this radius.
Returns
-------
maxes : iterator of bool array
The max mask image corresponding to each input image.
"""
ms = maxes(fns)
t = imfilter.threshold_otsu(ms)
ims = it.imap(io.imread, fns)
masks = ((im < t + offset) for im in ims)
if close_radius > 0:
masks = (morphop(mask, 'close', close_radius) for mask in masks)
if erode_radius > 0:
masks = (morphop(mask, 'erode', erode_radius) for mask in masks)
return masks
def write_max_masks(fns, offset=0, close_radius=0, erode_radius=0,
suffix='.mask.tif'):
"""Find a mask for images having a brightness artifact.
This function iterates over a set of images and finds the maximum
value of each. Then, Otsu's threshold is applied to the set of
maxima, and any element brighter than this in *any* image is
masked out.
Parameters
----------
fns : list of string
The images being examined.
offset : int, optional
Offset the threshold automatically found.
close_radius : int, optional
Perform a morphological closing of the mask of this radius.
erode_radius : int, optional
Perform a morphological erosion of the mask, after any closing,
of this radius.
suffix : string, optional
Save an image next to the original, with this suffix.
Returns
-------
n, m : int
The number of images for which a mask was created, and the
total number of images
"""
masks = max_mask_iter(fns, offset, close_radius, erode_radius)
n = 0
m = 0
for fn, mask in it.izip(fns, masks):
outfn = basefn(fn) + suffix
m += 1
if not mask.all():
# we multiply by 255 to make the image easy to look at
io.imsave(outfn, mask.astype(np.uint8) * 255)
n += 1
return n, m
def maxes(fns):
"""Return an array of the maximum intensity of each image.
Parameters
----------
fns : list of string
The filenames of the images.
Returns
-------
maxes : 1D array
The maximum value of each image examined.
"""
ims = it.imap(io.imread, fns)
maxes = np.array(map(np.max, ims))
return maxes
def stretchlim(im, bottom=0.01, top=None, mask=None):
"""Stretch the image so new image range corresponds to given quantiles.
Parameters
----------
im : array, shape (M, N, [...,] P)
The input image.
bottom : float, optional
The lower quantile.
top : float, optional
The upper quantile. If not provided, it is set to 1 - `bottom`.
mask : array of bool, shape (M, N, [...,] P), optional
Only consider intensity values where `mask` is ``True``.
Returns
-------
out : np.ndarray of float
The stretched image.
"""
if mask is None:
mask = np.ones(im.shape, dtype=bool)
if top is None:
top = 1. - bottom
im = im.astype(float)
q0, q1 = quantiles(im[mask], [bottom, top])
out = (im - q0) / (q1 - q0)
out[out < 0] = 0
out[out > 1] = 1
return out
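# A worked example of the quantile stretch, assuming the `quantiles`
# helper (defined elsewhere in this module) returns the requested
# quantiles of the data. With bottom=0., q0 and q1 are the min and max:
# >>> stretchlim(np.array([2., 4., 6.]), bottom=0.)
# array([ 0. ,  0.5,  1. ])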
def run_quadrant_stitch(fns, re_string='(.*)_(s[1-4])_(w[1-3]).*',
re_quadrant_group=1):
"""Read images, stitched them, and write out to same directory.
Parameters
----------
fns : list of string
The filenames to be processed.
re_string : string, optional
The regular expression to match the filename.
re_quadrant_group : int, optional
The group from the re.match object that will contain quadrant info.
Returns
-------
fns_out : list of string
The output filenames
"""
qd = group_by_quadrant(fns, re_string, re_quadrant_group)
fns_out = []
for fn_pattern, fns in qd.items():
new_filename = '_'.join(fn_pattern) + '_stitched.tif'
ims = map(io.imread, sorted(fns))
im = quadrant_stitch(*ims)
io.imsave(new_filename, im)
fns_out.append(new_filename)
return fns_out
def crop(im, slices=(slice(100, -100), slice(250, -300))):
"""Crop an image to contain only plate interior.
Parameters
----------
im : array
The image to be cropped.
slices : tuple of slice objects, optional
The slices defining the crop. The default values are for
stitched images from the Marcelle screen.
Returns
-------
imc : array
The cropped image.
Examples
--------
>>> im = np.zeros((5, 5), int)
>>> im[1:4, 1:4] = 1
>>> crop(im, slices=(slice(1, 4), slice(1, 4)))
array([[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
"""
return im[slices]
def group_by_channel(fns, re_string='(.*)_(w[1-3]).*',
re_channel_group=1):
"""Group filenames by channel to prepare for illumination estimation.
Intended to be run *after* quadrant stitching.
Parameters
----------
fns : list of string
The filenames to be processed.
re_string : string, optional
The regular expression to match the filename.
    re_channel_group : int, optional
The group from the re.match object that will contain channel info.
Returns
-------
grouped : dict mapping tuple of string to list of string
The filenames, grouped into lists containing all images of the same
channel. The keys are the channel regular expression group, useful for
composing a filename for the illumination image.
Examples
--------
>>> fn_numbering = it.product(range(2), range(1, 4))
>>> fns = ['image_%i_w%i.tif' % (i, j) for i, j in fn_numbering]
>>> fns
['image_0_w1.tif', 'image_0_w2.tif', 'image_0_w3.tif', 'image_1_w1.tif', 'image_1_w2.tif', 'image_1_w3.tif']
>>> sorted(group_by_channel(fns).items())
[('w1', ['image_0_w1.tif', 'image_1_w1.tif']), ('w2', ['image_0_w2.tif', 'image_1_w2.tif']), ('w3', ['image_0_w3.tif', 'image_1_w3.tif'])]
"""
re_match = fun.partial(re.match, re_string)
match_objs = map(re_match, fns)
fns = [fn for fn, match in zip(fns, match_objs) if match is not None]
match_objs = filter(lambda x: x is not None, match_objs)
matches = map(lambda x: x.groups(), match_objs)
keys = [m[re_channel_group] for m in matches]
grouped = {}
for k, fn in zip(keys, fns):
grouped.setdefault(k, []).append(fn)
return grouped
def group_by_quadrant(fns, re_string='(.*)_(s[1-4])_(w[1-3]).*',
re_quadrant_group=1):
"""Group filenames by quadrant to prepare for stitching.
Parameters
----------
fns : list of string
The filenames to be processed.
re_string : string, optional
The regular expression to match the filename.
re_quadrant_group : int, optional
The group from the re.match object that will contain quadrant info.
Returns
-------
grouped : dict mapping tuple of string to tuple of string
The filenames, grouped into tuples containing four quadrants of the
same image. The keys are all the regular expression match groups
*other* than the quadrant group, useful for composing a filename for
the stitched images.
Examples
--------
>>> fn_numbering = it.product(range(2), range(1, 5))
>>> fns = ['image_%i_s%i_w1.TIF' % (i, j) for i, j in fn_numbering]
>>> fns
['image_0_s1_w1.TIF', 'image_0_s2_w1.TIF', 'image_0_s3_w1.TIF', 'image_0_s4_w1.TIF', 'image_1_s1_w1.TIF', 'image_1_s2_w1.TIF', 'image_1_s3_w1.TIF', 'image_1_s4_w1.TIF']
>>> sorted(group_by_quadrant(fns).items())
[(('image_0', 'w1'), ['image_0_s1_w1.TIF', 'image_0_s2_w1.TIF', 'image_0_s3_w1.TIF', 'image_0_s4_w1.TIF']), (('image_1', 'w1'), ['image_1_s1_w1.TIF', 'image_1_s2_w1.TIF', 'image_1_s3_w1.TIF', 'image_1_s4_w1.TIF'])]
"""
re_match = fun.partial(re.match, re_string)
match_objs = map(re_match, fns)
fns = [fn for fn, match in zip(fns, match_objs) if match is not None]
match_objs = filter(lambda x: x is not None, match_objs)
matches = map(lambda x: x.groups(), match_objs)
keys = map(tuple, [[m[i] for i in range(len(m)) if i != re_quadrant_group]
for m in matches])
grouped = {}
for k, fn in zip(keys, fns):
grouped.setdefault(k, []).append(fn)
return grouped
def quadrant_stitch(nw, ne, sw, se):
"""Stitch four seamless quadrant images into a single big image.
Parameters
----------
nw, ne, sw, se : np.ndarray, shape (Mi, Ni)
The four quadrant images, corresponding to the cardinal directions of
north-west, north-east, south-west, south-east.
Returns
-------
stitched : np.ndarray, shape (M0+M2, N0+N1)
The image resulting from stitching the four input images
Examples
--------
>>> imbase = np.ones((2, 3), int)
>>> nw, ne, sw, se = [i * imbase for i in range(4)]
>>> quadrant_stitch(nw, ne, sw, se)
array([[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 1, 1],
[2, 2, 2, 3, 3, 3],
[2, 2, 2, 3, 3, 3]])
"""
x1 = nw.shape[0]
x2 = sw.shape[0]
y1 = nw.shape[1]
y2 = ne.shape[1]
stitched = np.zeros((x1 + x2, y1 + y2), nw.dtype)
stitched[:x1, :y1] = nw
stitched[:x1, y1:] = ne
stitched[x1:, :y1] = sw
stitched[x1:, y1:] = se
return stitched
def rescale_to_11bits(im_float):
"""Rescale a float image in [0, 1] to integers in [0, 2047].
This operation makes rank filtering much faster.
Parameters
----------
im_float : array of float in [0, 1]
The float image. The range and type are *not* checked prior to
conversion!
Returns
-------
im11 : array of uint16 in [0, 2047]
The converted image.
Examples
--------
>>> im = np.array([0., 0.5, 1.])
>>> rescale_to_11bits(im)
array([ 0, 1024, 2047], dtype=uint16)
"""
im11 = np.round(im_float * 2047.).astype(np.uint16)
return im11
def rescale_from_11bits(im11):
"""Rescale a uint16 image with range in [0, 2047] to float in [0., 1.]
Parameters
----------
im11 : array of uint16, range in [0, 2047]
The input image, encoded in uint16 but having 11-bit range.
Returns
-------
imfloat : array of float, same shape as `im11`
The output image.
Examples
--------
>>> im = np.array([0, 1024, 2047], dtype=np.uint16)
>>> rescale_from_11bits(im)
array([ 0. , 0.5002, 1. ])
Notes
-----
Designed to be a no-op with the above `rescale_to_11bits` function,
although this is subject to approximation errors.
"""
return np.round(im11 / 2047., decimals=4)
def unpad(im, pad_width):
"""Remove padding from a padded image.
Parameters
----------
im : array
The input array.
pad_width : int or sequence of int
The width of padding: a number for the same width along each
dimension, or a sequence for different widths.
Returns
-------
imc : array
The unpadded image.
Examples
--------
>>> im = np.zeros((5, 5), int)
>>> im[1:4, 1:4] = 1
>>> unpad(im, 1)
array([[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
"""
if not isinstance(pad_width, coll.Iterable):
pad_width = [pad_width] * im.ndim
slices = tuple([slice(p, -p) for p in pad_width])
return im[slices]
def _reduce_with_count(pairwise, iterator, accumulator=None):
"""Return both the result of the reduction and the number of elements.
Parameters
----------
pairwise : function (a -> b -> a)
The function with which to reduce the `iterator` sequence.
iterator : iterable
The sequence being reduced.
accumulator : type "a", optional
An initial value with which to perform the reduction.
Returns
-------
result : type "a"
The result of the reduce operation.
count : int
The number of elements that were accumulated.
Examples
--------
>>> x = [5, 6, 7]
>>> _reduce_with_count(np.add, x)
(18, 3)
"""
def new_pairwise(a, b):
(elem1, c1), (elem2, c2) = a, b
return pairwise(elem1, elem2), c2
new_iter = it.izip(iterator, it.count(1))
new_acc = (0, accumulator)
return tlz.reduce(new_pairwise, new_iter, new_acc)
def find_background_illumination(fns, radius=51, quantile=0.05,
stretch_quantile=0., method='mean'):
"""Use a set of related images to find uneven background illumination.
Parameters
----------
fns : list of string
A list of image file names
radius : int, optional
The radius of the structuring element used to find background.
default: 51
quantile : float in [0, 1], optional
The desired quantile to find background. default: 0.05
stretch_quantile : float in [0, 1], optional
Stretch image to full dtype limit, saturating above this quantile.
method : 'mean', 'average', 'median', or 'histogram', optional
        How to combine the smoothed intensities of the input images
to infer the illumination field:
- 'mean' or 'average': Use the mean value of the smoothed
images at each pixel as the illumination field.
- 'median': use the median value. Since all images need to be
in-memory to compute this, use only for small sets of images.
- 'histogram': use the median value approximated by a
histogram. This can be computed on-line for large sets of
images.
Returns
-------
illum : np.ndarray, float, shape (M, N)
The estimated illumination over the image field.
See Also
--------
``correct_image_illumination``.
"""
# This function follows the "PyToolz" streaming data model to
# obtain the illumination estimate. First, define each processing
# step:
read = io.imread
normalize = (tlz.partial(stretchlim, bottom=stretch_quantile)
if stretch_quantile > 0
else skimage.img_as_float)
rescale = rescale_to_11bits
pad = fun.partial(skimage.util.pad, pad_width=radius, mode='reflect')
rank_filter = fun.partial(rank.percentile, selem=skmorph.disk(radius),
p0=quantile)
_unpad = fun.partial(unpad, pad_width=radius)
unscale = rescale_from_11bits
# Next, compose all the steps, apply to all images (streaming)
bg = (tlz.pipe(fn, read, normalize, rescale, pad, rank_filter, _unpad,
unscale)
for fn in fns)
# Finally, reduce all the images and compute the estimate
if method == 'mean' or method == 'average':
illum, count = _reduce_with_count(np.add, bg)
illum = skimage.img_as_float(illum) / count
elif method == 'median':
illum = np.median(list(bg), axis=0)
elif method == 'histogram':
raise NotImplementedError('histogram background illumination method '
'not yet implemented.')
else:
raise ValueError('Method "%s" of background illumination finding '
'not recognised.' % method)
return illum
def correct_image_illumination(im, illum, stretch_quantile=0, mask=None):
"""Divide input image pointwise by the illumination field.
Parameters
----------
im : np.ndarray of float
The input image.
illum : np.ndarray of float, same shape as `im`
The illumination field.
stretch_quantile : float, optional
Stretch the image intensity to saturate the top and bottom
quantiles given.
mask : array of bool, same shape as im, optional
Only stretch the image intensity where `mask` is ``True``.
Returns
-------
imc : np.ndarray of float, same shape as `im`
The corrected image.
"""
if im.dtype != np.float:
imc = skimage.img_as_float(im)
else:
imc = im.copy()
imc /= illum
lim = stretch_quantile
imc = stretchlim(imc, lim, 1-lim, mask)
return imc
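# A minimal end-to-end sketch of the illumination pipeline defined above
# (filenames are hypothetical):
# >>> fns = run_quadrant_stitch(['plate1_s%i_w1.TIF' % i for i in range(1, 5)])
# >>> illum = find_background_illumination(fns, radius=51, quantile=0.05)
# >>> corrected = [correct_image_illumination(io.imread(fn), illum)
# ...              for fn in fns]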
|
{"hexsha": "afa96043fa776acd0fcf81791d70530109ae10ad", "size": 18721, "ext": "py", "lang": "Python", "max_stars_repo_path": "husc/preprocess.py", "max_stars_repo_name": "gitter-badger/husc", "max_stars_repo_head_hexsha": "6e7ae2879caef304de7bfb77f19f99d5308ca256", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "husc/preprocess.py", "max_issues_repo_name": "gitter-badger/husc", "max_issues_repo_head_hexsha": "6e7ae2879caef304de7bfb77f19f99d5308ca256", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "husc/preprocess.py", "max_forks_repo_name": "gitter-badger/husc", "max_forks_repo_head_hexsha": "6e7ae2879caef304de7bfb77f19f99d5308ca256", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.3584589615, "max_line_length": 218, "alphanum_fraction": 0.6056834571, "include": true, "reason": "import numpy,from scipy", "num_tokens": 5007}
|
import numpy as np
#import pandas as pd
import matplotlib
# import seaborn as sns
import matplotlib.pyplot as plt
names = "LR SVM CNN LSTM BERT".split(" ")
colors = ['tab:gray', 'tab:green','tab:orange', 'tab:red', 'tab:blue', 'orange' ]
patterns = ('--', '\\', '////', '\\\\', '\\\\', '\\\\', '.', '*')
fills = [False, True, False, False, True, True, False]
#colors = ['xkcd:light red', 'xkcd:dark red','xkcd:bright blue', 'xkcd:blue', 'xkcd:dodger blue', 'orange']
#colors = ['xkcd:red', 'xkcd:red','xkcd:blue', 'xkcd:blue', 'xkcd:blue', 'orange']
#patterns = ('--', '\\', '////', '\\\\', '\\\\', '\\\\', '.', '*')
#fills = [True, True, True, True, True, True, True]
#fills = [False, False, False, False, False, False, False]
#colors = ['tab:blue', 'tab:blue','tab:blue', 'tab:blue', 'tab:blue', 'blue' ]
#patterns = ('--', '++', 'x', '*', '.')
#fills = [False, False, False, False, False, False, False]
font = {'family' : 'normal',
'weight' : 'normal',
'size' : 16}
params = {'legend.fontsize': 16,
'legend.handlelength': 2}
group1 = [["HOTEL", "SENT", "PARA", "REQ", "REF", "QUOTE", "SUPPORT", "AGAINST"], ["FUNNY-", "BOOK-"]]
group2 = [["SUGG", "HOMO", "HETER", "TV", "EVAL", "FACT", "ARGUE"], ["AMAZON-", "YELP", "FUNNY*", "BOOK*"]]
def plot(ds, datasets):
plt.rcParams.update(params)
plt.figure(figsize=(6,4))
matplotlib.rc('font', **font)
bar_width = 0.7
for i in range(len(names)):
plt.bar(i, datasets[ds][i], bar_width, color=colors[i],
edgecolor=colors[i], hatch=patterns[i], fill = fills[i],
label=names[i])
#bars = plt.bar(names, datasets[ds])
#plt.ylim([0.0, 1.0])
#for bar, pattern, color in zip(bars, patterns, colors):
# bar.set_color(color)
# bar.set_hatch(pattern)
plt.title(ds)
plt.xticks([i for i in range(len(names))], names)
plt.ylabel('AUC')
plt.tight_layout()
plt.savefig('%s.pdf' % ds, format='pdf')
def plot_all(datasets):
plt.rcParams.update(params)
plt.figure(figsize=(24,10))
matplotlib.rc('font', **font)
bar_width = 0.14
bar_break = 0.02
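    # Bars for the five models sit in slots of width (bar_width + bar_break)
    # around each integer tick i: the (j - 2.5) factor spreads the slots
    # roughly symmetrically about the tick, and + bar_width/2 moves from a
    # slot edge to the bar centre.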
xaxis = [i for i in range(len(datasets.keys()))]
xticks = list(datasets.keys())
for i in range(len(xticks)):
for j in range(len(names)):
plt.bar(i+(j-2.5)*(bar_width+bar_break)+bar_width/2,
datasets[xticks[i]][j], bar_width, color=colors[j],
edgecolor=colors[j], hatch=patterns[j], fill = fills[j],
label=names[j])
plt.xticks(xaxis, xticks, rotation=20)
plt.ylabel('AUC')
plt.tight_layout()
plt.savefig('all.pdf', format='pdf')
def plot_per_group(groups, datasets, name):
plt.rcParams.update(params)
plt.figure(figsize=(20,6))
matplotlib.rc('font', **font)
bar_width = 0.14
bar_break = 0.02
size = sum([len(g) for g in groups])+len(groups)-1
xaxis = [i for i in range(13)]
xticks = []
for g in groups:
xticks += g + [""]
while len(xticks) < len(xaxis):
xticks.append("")
print(len(xticks))
for i in range(len(xticks)):
if len(xticks[i]) < 1:
continue
for j in range(len(names)):
if i == 0:
plt.bar(i+(j-2.5)*(bar_width+bar_break)+bar_width/2,
datasets[xticks[i]][j], bar_width, color=colors[j],
edgecolor=colors[j], hatch=patterns[j], fill = fills[j],
label=names[j])
else:
plt.bar(i+(j-2.5)*(bar_width+bar_break)+bar_width/2,
datasets[xticks[i]][j], bar_width, color=colors[j],
edgecolor=colors[j], hatch=patterns[j], fill = fills[j])
# TODO: finish the annotation.
annotations = ["Small", "Large"]
for i in range(len(groups)):
start = len(groups[i-1])+0.5 if i > 0 else -0.5
end = start + len(groups[i])
plt.annotate("", xy=(start, -0.14), xytext=(end, -0.14),
arrowprops=dict(arrowstyle="<->", connectionstyle="arc3"),
annotation_clip=False)
plt.annotate(annotations[i], xy = ((end+start-(len(annotations[i])/3))/2, -0.15),
xytext=((end+start-(len(annotations[i])/10))/2, -0.15), bbox=dict(boxstyle="round", edgecolor="w", fc="w"),
annotation_clip=False)
plt.ylim([0.0, 1.0])
plt.xticks(xaxis, xticks, rotation=20)
plt.ylabel('AUC')
plt.legend(ncol=len(names), loc='upper center',
fancybox="False", mode="expand", framealpha=0.0, bbox_to_anchor=(0, 1.01, 1, 0.1))
plt.tight_layout()
plt.savefig(name, format='pdf')
# F1 score
#data = """Dataset LR SVM CNN LSTM BERT
#SUGG 0.79 0.77 0.77 0.68 0.86
#HOTEL 0.53 0.55 0.46 0.59 0.67
#SENT 0.5 0.51 0.43 0.45 0.57
#PARA 0.56 0.59 0.5 0.48 0.65
#FUNNY 0.29 0.38 0.08 0.12 0.32
#HOMO 0.87 0.89 0.9 0.9 0.95
#HETER 0.87 0.87 0.87 0.86 0.93
#TV 0.7 0.68 0.54 0.63 0.81
#BOOK 0.17 0.15 0.06 0.11 0.15
#EVAL 0.72 0.73 0.75 0.73 0.81
#REQ 0.69 0.69 0.67 0.7 0.84
#FACT 0.69 0.69 0.74 0.73 0.82
#REF 0.8 0.79 0.78 0.83 0.93
#QUOTE 0.1 0.1 0.23 0.22 0.66
#ARGUE 0.72 0.72 0.7 0.72 0.78
#SUPPORT 0.46 0.45 0.41 0.41 0.54
#AGAINST 0.53 0.51 0.41 0.43 0.62
#AMAZON 0.91 0.92 0.89 0.86 0.96
#YELP 0.94 0.96 0.94 0.93 0.96
#FUNNY* 0.81 0.81 0.68 0.73 0.82
#BOOK* 0.72 0.7 0.7 0.67 0.74"""
# Accuracy
#data = """Dataset LR SVM CNN LSTM BERT
#SUGG,0.777027027,0.77027027,0.744932432,0.755067568,0.861486486
#HOTEL,0.932979429,0.95487724,0.946914399,0.942269409,0.96549436
#SENT,0.869068541,0.922671353,0.894551845,0.898945518,0.90202109
#PARA,0.839421613,0.869101979,0.853120244,0.853120244,0.869101979
#FUNNY-,0.90558868,0.975737803,0.930510695,0.910500895,0.97461281
#HOMO,0.855555556,0.86,0.888888889,0.875555556,0.92
#HETER,0.81741573,0.823033708,0.828651685,0.803370787,0.896067416
#TV,0.667569397,0.658090724,0.665538253,0.648612051,0.742721733
#BOOK-,0.914113574,0.968200397,0.942775715,0.91,0.9592625
#EVAL,0.785085882,0.795978215,0.780896523,0.784666946,0.852953498
#REQ,0.88646837,0.894009217,0.858818601,0.873062421,0.932970256
#FACT,0.781315459,0.818181818,0.810640972,0.806032677,0.871386678
#REF,0.989526602,0.990364474,0.99287809,0.994553833,0.995391705
#QUOTE,0.963133641,0.987431923,0.986594051,0.980310013,0.99078341
#ARGUE,0.703073008,0.727931102,0.722450577,0.731454296,0.788804071
#SUPPORT,0.670972793,0.808377373,0.752201996,0.762967313,0.815032296
#AGAINST,0.680955177,0.769426502,0.736347622,0.766294774,0.800156586
#AMAZON-,0.894551318,0.911326108,0.877901526,0.872251597,0.952900589
#YELP,0.942157895,0.955184211,0.939368421,0.931315789,0.961447368
#FUNNY*,0.791903612,0.80294972,0.748271489,0.726956,0.815079982
#BOOK*,0.713554785,0.701421739,0.698933696,0.69,0.744381061"""
# AUC
data = """Dataset LR SVM CNN LSTM BERT
SUGG,0.843989682,0.846512053,0.832667549,0.835235573,0.929750274
HOTEL,0.933139064,0.937242998,0.899991905,0.755091468,0.96838271
SENT,0.859050857,0.860315396,0.8034626,0.850297558,0.915473934
PARA,0.828795752,0.834996156,0.824876391,0.811097715,0.881528593
FUNNY-,0.876396507,0.863728718,0.79,0.64207622,0.866466344
HOMO,0.921278334,0.924137227,0.945757607,0.938712477,0.967658771
HETER,0.868184405,0.889015367,0.89660406,0.8592676,0.956137355
TV,0.727967457,0.730249127,0.730840228,0.703787461,0.823063063
BOOK-,0.742188288,0.699554912,0.67,0.65,0.733362075
EVAL,0.858437675,0.869008869,0.861581387,0.853642442,0.928040946
REQ,0.941446811,0.941994922,0.889681851,0.872395961,0.970358167
FACT,0.871573184,0.885937257,0.880619514,0.88302968,0.941695462
REF,0.99586718,0.997033989,0.991084153,0.994540046,0.999109306
QUOTE,0.943260858,0.921819377,0.881332494,0.921381237,0.972205488
ARGUE,0.812698093,0.811934308,0.803741955,0.814103078,0.878756466
SUPPORT,0.752139796,0.7498748,0.723139355,0.730673894,0.840962338
AGAINST,0.765041736,0.757056379,0.726550871,0.742615807,0.839675757
AMAZON-,0.959394175,0.969422931,0.942824764,0.938451354,0.987827378
YELP,0.986247747,0.991189875,0.794138377,0.977483331,0.993182922
FUNNY*,0.878091689,0.882675787,0.814906659,0.774294325,0.895337897
BOOK*,0.789396637,0.77512214,0.61,0.748828551,0.824213805"""
datasets = {}
for line in data.split("\n")[1:]:
#items = line.split(" ")
items = line.split(",")
datasets[items[0]] = [float(v) for v in items[1:]]
#for ds in datasets:
# plot(ds, datasets)
#plot_all(datasets)
plot_per_group(group1, datasets, "low.pdf")
plot_per_group(group2, datasets, "high.pdf")
|
{"hexsha": "cbd7a2b9373878ddd24d74d27416d1d31cd8b675", "size": 8564, "ext": "py", "lang": "Python", "max_stars_repo_path": "appendix/auc_accuracy/xl_plot.py", "max_stars_repo_name": "rit-git/tagging", "max_stars_repo_head_hexsha": "b075ce1553492be7088026b67f525a529bf03770", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-11-21T03:45:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T00:40:20.000Z", "max_issues_repo_path": "appendix/auc_accuracy/xl_plot.py", "max_issues_repo_name": "rit-git/tagging", "max_issues_repo_head_hexsha": "b075ce1553492be7088026b67f525a529bf03770", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "appendix/auc_accuracy/xl_plot.py", "max_forks_repo_name": "rit-git/tagging", "max_forks_repo_head_hexsha": "b075ce1553492be7088026b67f525a529bf03770", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-09-21T15:07:21.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-02T20:25:36.000Z", "avg_line_length": 41.1730769231, "max_line_length": 112, "alphanum_fraction": 0.6410555815, "include": true, "reason": "import numpy", "num_tokens": 3545}
|
import torch
import random
import numpy as np
from PIL import Image, ImageOps, ImageFilter
class Normalize(object):
"""Normalize a tensor image with mean and standard deviation.
Args:
mean (tuple): means for each channel.
std (tuple): standard deviations for each channel.
"""
def __init__(self, mean=(0., 0., 0.), std=(1., 1., 1.)):
self.mean = mean
self.std = std
def __call__(self, sample):
img = sample['image']
#mask = sample['label']
img = np.array(img).astype(np.float32)
#mask = np.array(mask).astype(np.float32)
img /= 255.0
img -= self.mean
img /= self.std
return {'image': img}
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample):
# swap color axis because
# numpy image: H x W x C
# torch image: C X H X W
img = sample['image']
#mask = sample['label']
img = np.array(img).astype(np.float32).transpose((2, 0, 1))
#mask = np.array(mask).astype(np.float32)
img = torch.from_numpy(img).float()
#mask = torch.from_numpy(mask).float()
return {'image': img}
class FixedResize(object):
def __init__(self, fix_w, fix_h):
        self.size = (fix_w, fix_h) # size: (w, h), the order PIL's Image.resize expects
#self.output_size = (output_w, output_h)
def __call__(self, sample):
img = sample['image']
#mask = sample['label']
#assert img.size == mask.size
img = img.resize(self.size, Image.BILINEAR)
#mask = mask.resize(self.output_size, Image.NEAREST)
return {'image': img}
#return {'image': img,
# 'label': mask}
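# A minimal usage sketch (assumes torchvision is available; the file name
# is hypothetical). The sample dict mirrors the interface above:
# from torchvision import transforms
# composed = transforms.Compose([FixedResize(640, 360), Normalize(), ToTensor()])
# sample = composed({'image': Image.open('frame.jpg')})
# sample['image'].shape  # torch.Size([3, 360, 640])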
|
{"hexsha": "e05c33be05d74c291614f0a51d80fa37aa90439a", "size": 1728, "ext": "py", "lang": "Python", "max_stars_repo_path": "dataloaders/custom_transforms_test.py", "max_stars_repo_name": "SteveSZF/Traffic-Lane-Detection", "max_stars_repo_head_hexsha": "8217808178cdf2d655d02632eb71c543d39f5258", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-10-08T08:52:43.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-08T08:55:37.000Z", "max_issues_repo_path": "dataloaders/custom_transforms_test.py", "max_issues_repo_name": "SteveSZF/Traffic-Lane-Detection", "max_issues_repo_head_hexsha": "8217808178cdf2d655d02632eb71c543d39f5258", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dataloaders/custom_transforms_test.py", "max_forks_repo_name": "SteveSZF/Traffic-Lane-Detection", "max_forks_repo_head_hexsha": "8217808178cdf2d655d02632eb71c543d39f5258", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.4285714286, "max_line_length": 67, "alphanum_fraction": 0.572337963, "include": true, "reason": "import numpy", "num_tokens": 447}
|
"""This is how I made the digits_of_pi.txt file because I wanted to go
a little further than the book did."""
# Install using pipinstall mpmath
from mpmath import mp
import os
THIS_FOLDER = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(THIS_FOLDER, "pi_digits.txt")
# Set the number of digits of pi to compute, then get pi as a string with the decimal point removed.
mp.dps = 1000
pi_to_str = str(mp.pi)
pi_to_str = pi_to_str.replace(".", "")
# Break up pi into 10 digit chunks
x = 0
pi_list = []
while x < len(pi_to_str):
pi_list.append(pi_to_str[x : x + 10])
x += 10
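# e.g. with mp.dps = 25, pi_to_str is '3141592653589793238462643', which
# splits into ['3141592653', '5897932384', '62643'].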
# Clear out the file
with open(filename, 'w') as f:
for chunk in pi_list:
if chunk == pi_to_str[0:10]:
chunk_list = list(chunk)
chunk_list.insert(1, ".")
chunk = ''.join(chunk_list)
f.write(f"{chunk}\n")
|
{"hexsha": "1077c3d2fff72a0b2604d5719751971d9b2f46ec", "size": 839, "ext": "py", "lang": "Python", "max_stars_repo_path": "Jupyter/PythonCrashCourse2ndEdition/ch10_files/generate_pi.py", "max_stars_repo_name": "awakun/LearningPython", "max_stars_repo_head_hexsha": "578f9290c8065df37ade49abe4b0ab4e6b35a1bd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Jupyter/PythonCrashCourse2ndEdition/ch10_files/generate_pi.py", "max_issues_repo_name": "awakun/LearningPython", "max_issues_repo_head_hexsha": "578f9290c8065df37ade49abe4b0ab4e6b35a1bd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Jupyter/PythonCrashCourse2ndEdition/ch10_files/generate_pi.py", "max_forks_repo_name": "awakun/LearningPython", "max_forks_repo_head_hexsha": "578f9290c8065df37ade49abe4b0ab4e6b35a1bd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.064516129, "max_line_length": 76, "alphanum_fraction": 0.6543504172, "include": true, "reason": "from mpmath", "num_tokens": 232}
|
import code
import sys
import argparse
import os
import time
import json
import shutil
import cv2
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.optim.lr_scheduler as lrsched
from Network import Net5
from Progress import ProgressBar
# This class is concerned with controlling the training process for
# the top level network (the one that classifies by group). It handles
# training, recording error, learning rate schedulers, optimization
# algorithms, etc.
class Controller:
def __init__(self, training_set, network, seed=None, **kwargs):
if seed is not None:
torch.manual_seed(seed)
np.random.seed(seed)
self.processKwargs(kwargs)
self.validateArgs()
self.network = network
self.opt = self.initializeOptimizer()
self.criterion = nn.CrossEntropyLoss()
self.training_inputs = training_set[0]
self.training_labels = training_set[1]
self.validation_inputs = training_set[2]
self.validation_labels = training_set[3]
if self.sched:
self.scheduler = lrsched.ReduceLROnPlateau(
self.opt, mode='max',
threshold=0.0001,
patience=10,
verbose=self.verbose,
factor=0.75
)
def getClassificationAccuracy(self, actual, correct):
n_correct = 0
for row in range(actual.shape[0]):
act = actual[row, :]
cor = correct[row]
if torch.argmax(act) == cor:
n_correct += 1
return n_correct / actual.shape[0]
def train(self):
training_acc = []
validation_acc = []
cross_entropy_loss = []
timings = []
for idx in range(self.max_iter):
try:
self.opt.zero_grad()
start = time.time_ns()
output = self.network(self.training_inputs)
stop = time.time_ns()
timings.append(stop - start)
loss = self.criterion(output, self.training_labels)
loss.backward()
self.opt.step()
            except KeyboardInterrupt:
                # Allow a manual Ctrl+C to stop training early without
                # losing the results accumulated so far.
print("")
print("Terminating early . . . ")
break
cross_entropy_loss.append(loss.item())
if idx % self.val_freq == 0:
with torch.no_grad():
output = self.network(self.training_inputs, train=False)
t_acc = self.getClassificationAccuracy(
output,
self.training_labels
)
output = self.network(self.validation_inputs, train=False)
v_acc = self.getClassificationAccuracy(
output,
self.validation_labels
)
training_acc.append(t_acc)
validation_acc.append(v_acc)
if self.verbose:
print("iteration: %06d / %06d, "%(idx, self.max_iter), end='')
print("cel: %1.4f, t_acc: %1.4f, v_acc: %1.4f"%(
loss.item(), t_acc, v_acc
))
if self.sched:
self.scheduler.step(v_acc)
return (training_acc, validation_acc, cross_entropy_loss, timings)
def initializeOptimizer(self):
init = {
'sgd' : optim.SGD,
'adadelta' : optim.Adadelta,
'adagrad' : optim.Adagrad,
'adam' : optim.Adam,
'adamw' : optim.AdamW,
'adamax' : optim.Adamax,
'asgd' : optim.ASGD
}
if self.optimizer == 'sgd':
opt = init[self.optimizer](
self.network.parameters(),
lr=self.lr,
weight_decay=self.l2rs,
momentum=self.momentum
)
else:
opt = init[self.optimizer](
self.network.parameters(),
lr=self.lr,
weight_decay=self.l2rs
)
return opt
def validateArgs(self):
valid_optimizers = [
'sgd', 'adadelta', 'adagrad', 'adam',
'adamw', 'adamax', 'asgd'
]
self.optimizer = self.optimizer.lower()
if self.optimizer not in valid_optimizers:
raise Exception("Unrecognized optimizer \'%s\'"%self.optimizer)
if self.lr <= 0.0:
raise Exception("lr must be >= 0.0")
if self.momentum != 0.0 and self.optimizer != 'sgd':
raise Exception("Momentum is only valid for \'sgd\' optimizer.")
if self.l2rs < 0.0:
raise Exception("l2rs must be >= 0.0")
if self.max_iter < 1:
raise Exception("max_iter must be >= 1")
if self.val_freq < 1:
raise Exception("val_freq must be >= 1")
    def processKwargs(self, kwargs):
        valid_keys = [
            'optimizer', 'lr', 'momentum',
            'l2rs', 'sched', 'verbose',
            'max_iter', 'stop_fn', 'val_freq'
        ]
        defaults = [
            'adamw', 0.01, 0.0,
            0.01, True, True,
            10000, None, 25
        ]
        for k in kwargs:
            if k not in valid_keys:
                raise Exception("Invalid argument: %s"%k)
        # Set each option from kwargs, falling back to its default value.
        for key, default in zip(valid_keys, defaults):
            setattr(self, key, kwargs.get(key, default))
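# A minimal usage sketch (the tensors and the Net5 constructor call are
# hypothetical; adapt them to the actual data pipeline):
# net = Net5()
# ctrl = Controller((train_x, train_y, val_x, val_y), net, seed=0,
#                   optimizer='adam', lr=1e-3, max_iter=500)
# t_acc, v_acc, cel, timings = ctrl.train()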
|
{"hexsha": "c7900cabe8d091ef40eb948f083291759e014ebc", "size": 5560, "ext": "py", "lang": "Python", "max_stars_repo_path": "L2/TrainingController.py", "max_stars_repo_name": "derangedhk417/ML-Lessons", "max_stars_repo_head_hexsha": "3433e3fa6324791b74771fcfd8a6c5361ba69c53", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "L2/TrainingController.py", "max_issues_repo_name": "derangedhk417/ML-Lessons", "max_issues_repo_head_hexsha": "3433e3fa6324791b74771fcfd8a6c5361ba69c53", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "L2/TrainingController.py", "max_forks_repo_name": "derangedhk417/ML-Lessons", "max_forks_repo_head_hexsha": "3433e3fa6324791b74771fcfd8a6c5361ba69c53", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.8626609442, "max_line_length": 70, "alphanum_fraction": 0.6559352518, "include": true, "reason": "import numpy", "num_tokens": 1560}
|
# %%
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append("../shared")
from analytic_tools import fractal_latent_heat_alex
from wednesdaySPEED import simulation
# %%
tau = 9
pi_2_vals = [0.0, 0.1, 0.2, 0.3, 0.5]
plt.figure(figsize=(10,5))
for i, val in enumerate(pi_2_vals):
G,P,_,S,X,D,T,U,C, initial_account_balance = simulation(trigger = False, bound = False, pd = 0.05, pe = 0.01,
ph = 0.0485, pa = 0.7, N0=1000, N1 = 100, A = 4, a=1, h=1,
pi1 = 0.5, pi2 = val, pi3 = 0.5 - val)
q_vals, C_q, S_q, X_q = fractal_latent_heat_alex(np.array(S), tau, 100)
plt.plot(q_vals[2:], -S_q[:-1], label=f"Pi2 = {val}")
plt.ylabel("S - Entropy")
plt.xlabel("Temperature")
plt.xlim(-20, 20)
plt.grid()
plt.legend()
plt.show()
# %%
tau = 9
pi_2_vals = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]
plt.figure(figsize=(10,5))
for i, val in enumerate(pi_2_vals):
G,P,_,S,X,D,T,U,C, initial_account_balance = simulation(trigger = False, bound = False, pd = 0.05, pe = 0.01,
ph = 0.0485, pa = 0.7, N0=1000, N1 = 100, A = 4, a=1, h=1,
pi1 = 0.5, pi2 = val, pi3 = 0.5 - val)
q_vals, C_q, S_q, X_q = fractal_latent_heat_alex(np.array(S), tau, 100)
plt.plot(q_vals[2:],C_q, label=f"Pi2 = {val}")
plt.ylabel("C_p - Specific heat")
plt.xlabel("Temperature")
plt.xlim(-5, 5)
plt.grid()
plt.legend()
plt.show()
# %%
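# Unlike the sweeps above (pi1 fixed at 0.5 and pi3 = 0.5 - pi2), this one
# trades pi1 against pi2 directly while holding pi3 at 0.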
tau = 9
pi_2_vals = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]
plt.figure(figsize=(10,5))
for i, val in enumerate(pi_2_vals):
G,P,_,S,X,D,T,U,C, initial_account_balance = simulation(trigger = False, bound = False, pd = 0.05, pe = 0.01,
ph = 0.0485, pa = 0.7, N0=1000, N1 = 100, A = 4, a=1, h=1,
pi1 = 1 - val, pi2 = val, pi3 = 0)
q_vals, C_q, S_q, X_q = fractal_latent_heat_alex(np.array(S), tau, 100)
plt.plot(q_vals[2:], -S_q[:-1], label=f"Pi2 = {val}")
plt.ylabel("S - Entropy")
plt.xlabel("Temperature")
plt.xlim(-20, 20)
plt.grid()
plt.legend()
plt.show()
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# plt.figure(figsize=(10,5))
# plt.plot(q_vals/40, X_q/np.max(X_q), c="r", label="Free Energy - H")
# plt.plot(q_vals[2:]/40, -S_q[:-1]/np.max(-S_q), c="b", label="Entropy - dH/dT")
# plt.plot(q_vals[2:]/40,C_q/np.max(C_q), c="g", label="Specific heat- dH^2/dT^2")
# plt.ylabel("")
# plt.xlabel("Temperature")
# plt.legend()
# plt.show()
# plt.figure(figsize=(10,5))
# plt.plot(q_vals, X_q)
# plt.ylabel("H - Free Energy")
# plt.xlabel("Temperature")
# plt.show()
plt.figure(figsize=(10,5))
plt.plot(q_vals[2:], -S_q[:-1])
plt.ylabel("S - Entropy")
plt.xlabel("Temperature")
plt.show()
plt.figure(figsize=(10,5))
plt.plot(q_vals[2:],C_q)
plt.ylabel("C_p - Specific heat")
plt.xlabel("Temperature")
plt.show()
# %%
# %%
|
{"hexsha": "7324471ad5f7eb701c44cc42ce5e268c8586c20c", "size": 2844, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/james/OTHER/entropy_test.py", "max_stars_repo_name": "charelF/ComplexSystems", "max_stars_repo_head_hexsha": "3efc9b577ec777fcecbd5248bbbaf77b7d90fc65", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/james/OTHER/entropy_test.py", "max_issues_repo_name": "charelF/ComplexSystems", "max_issues_repo_head_hexsha": "3efc9b577ec777fcecbd5248bbbaf77b7d90fc65", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/james/OTHER/entropy_test.py", "max_forks_repo_name": "charelF/ComplexSystems", "max_forks_repo_head_hexsha": "3efc9b577ec777fcecbd5248bbbaf77b7d90fc65", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.8301886792, "max_line_length": 113, "alphanum_fraction": 0.5843881857, "include": true, "reason": "import numpy", "num_tokens": 1031}
|
# coding: utf-8
# In[1]:
import pandas as pd
import gensim
import os
import collections
import smart_open
import random
import numpy as np
# In[2]:
df = pd.read_csv(open('library.corr','rU'), encoding='utf8',header=None, engine='c',delimiter=',', error_bad_lines=False, low_memory=False, index_col=None)
# In[3]:
df = df[[0,1,2]].dropna()
# In[4]:
corpus = []
for index, row in df.iterrows():
tagged_d = gensim.models.doc2vec.TaggedDocument(gensim.utils.simple_preprocess(row[2]), [index])
corpus.append(tagged_d)
# In[5]:
train = corpus[:int(len(corpus)*0.8)]
test = corpus[int(len(corpus)*0.8):]
# In[6]:
model = gensim.models.doc2vec.Doc2Vec(size=75, min_count=2, iter=1000)
model.build_vocab(train)
# In[7]:
model.train(train, total_examples=model.corpus_count, epochs=model.iter)
model.save('modelDoc2Vec.model')
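# A follow-up sketch (hedged; the query text is invented): reload the saved
# model, infer a vector for unseen text, and find similar training docs.
# model = gensim.models.doc2vec.Doc2Vec.load('modelDoc2Vec.model')
# vec = model.infer_vector(gensim.utils.simple_preprocess('some query text'))
# sims = model.docvecs.most_similar([vec], topn=5)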
|
{"hexsha": "caf947a8cefa3cf1ba838be28b84034afbc96160", "size": 848, "ext": "py", "lang": "Python", "max_stars_repo_path": "doc2vec.py", "max_stars_repo_name": "Skeftical/Research-Paper-Recommender", "max_stars_repo_head_hexsha": "14f676f54627bf7f834f594787b17fc52d3530d4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "doc2vec.py", "max_issues_repo_name": "Skeftical/Research-Paper-Recommender", "max_issues_repo_head_hexsha": "14f676f54627bf7f834f594787b17fc52d3530d4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-06-01T22:28:26.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-01T22:28:26.000Z", "max_forks_repo_path": "doc2vec.py", "max_forks_repo_name": "Skeftical/Research-Paper-Recommender", "max_forks_repo_head_hexsha": "14f676f54627bf7f834f594787b17fc52d3530d4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-11-07T03:03:11.000Z", "max_forks_repo_forks_event_max_datetime": "2018-11-07T03:03:11.000Z", "avg_line_length": 16.96, "max_line_length": 155, "alphanum_fraction": 0.7004716981, "include": true, "reason": "import numpy", "num_tokens": 252}
|
[STATEMENT]
lemma in_outs_rpv [iff]: "out \<in> outs'_rpv rpv \<longleftrightarrow> (\<exists>input. out \<in> outs'_gpv (rpv input))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (out \<in> outs'_rpv rpv) = (\<exists>input. out \<in> outs'_gpv (rpv input))
[PROOF STEP]
by(simp add: outs'_rpv_def)
|
{"llama_tokens": 135, "file": "CryptHOL_Generative_Probabilistic_Value", "length": 1}
|
import pickle
import numpy as np
import cf_recommender as cf
import similarity_functions as sf
import movie_reviews_compiler as mrc
path = '../data/'
def run_test_top_k(cosine=True,k=10):
'''compute the predictions for masked values in the testing set (user review vectors) using the training set (critic review matrix)
    model for predictions: average of top k critics using cosine similarity'''
#get testing data
audience_names = pickle.load(open(path + 'audience_names.pkl','rb'))
audience_review_test_set = pickle.load(open(path + 'audience_test_data.pkl','rb'))
#get training data
movie_critic, critic_movie, matrix, movie_keys, critic_keys = mrc.import_pickle()
#store results for pickle
top_k_results = {}
for aud_review_index in range(len(audience_review_test_set)):
name = audience_names[aud_review_index].split("'s")[0]
print('\nTest Vector: ' + name)
test_vector = audience_review_test_set[aud_review_index]
#find indicies of masks for testing
reviewed_indicies = [i for i in range(len(test_vector)) if test_vector[i] != 0]
        #if there is more than one review for the user:
if(len(reviewed_indicies) > 1):
actual_vals = []
            predicted_vals = []
av = []
pv = []
for mask in reviewed_indicies:
#mask selected index
vector = [i for i in test_vector]
vector[mask] = 0
#compute predicted value
if(cosine):
critics_sim = sf.run_cosine(vector,matrix,movie_critic,movie_keys,critic_keys)
else:
critics_sim = sf.run_pearson(vector,matrix,movie_critic,movie_keys,critic_keys)
result_vector = cf.user_based_top_k(vector,critics_sim,movie_keys,critic_keys,movie_critic,k)
print('\tPredicted for index ' + str(mask) + ': ' + str(result_vector[mask]))
print('\tActual for index ' + str(mask) + ': ' + str(test_vector[mask]))
                predicted_vals.append(result_vector[mask])
actual_vals.append(test_vector[mask])
av.append((mask,test_vector[mask]))
pv.append((mask,result_vector[mask]))
#calculate accuracy using the root mean square error value
            RMSE = float(((sum([(actual_vals[i]-predicted_vals[i])**2 for i in range(len(reviewed_indicies))]))/len(reviewed_indicies))**0.5)
print('\n\tRMSE for Test Vector: ' + str(RMSE))
top_k_results[name] = {'actual':av,'predicted':pv,'RMSE':RMSE}
else:
            print('\n\tOnly 1 review available; not predictable')
top_k_results[name] = 'Error'
#export weighted sums results
if(cosine):
pickle.dump(top_k_results, open(path + "top_k_results_cosine.pkl", "wb" ) )
else:
pickle.dump(top_k_results, open(path + "top_k_results_pearson.pkl", "wb" ) )
return top_k_results
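#example call: evaluate with Pearson similarity over the 5 most similar critics
#results = run_test_top_k(cosine=False, k=5)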
|
{"hexsha": "2be8cfe203783df00627c55aa54172eb2f784462", "size": 2656, "ext": "py", "lang": "Python", "max_stars_repo_path": "analysis/test_data_top_k.py", "max_stars_repo_name": "alaw1290/CS591B1", "max_stars_repo_head_hexsha": "57e9a3425e84405f1bfff76cc14e14e4501acc1e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2016-11-02T18:10:42.000Z", "max_stars_repo_stars_event_max_datetime": "2016-11-03T19:03:47.000Z", "max_issues_repo_path": "analysis/test_data_top_k.py", "max_issues_repo_name": "alaw1290/CS591B1", "max_issues_repo_head_hexsha": "57e9a3425e84405f1bfff76cc14e14e4501acc1e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "analysis/test_data_top_k.py", "max_forks_repo_name": "alaw1290/CS591B1", "max_forks_repo_head_hexsha": "57e9a3425e84405f1bfff76cc14e14e4501acc1e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6202531646, "max_line_length": 132, "alphanum_fraction": 0.7134789157, "include": true, "reason": "import numpy", "num_tokens": 714}
|
//==================================================================================================
/*!
@file
@Copyright 2016 Numscale SAS
@copyright 2016 J.T.Lapreste
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
*/
//==================================================================================================
#ifndef BOOST_SIMD_ARCH_X86_SSE4_1_SIMD_FUNCTION_MAX_HPP_INCLUDED
#define BOOST_SIMD_ARCH_X86_SSE4_1_SIMD_FUNCTION_MAX_HPP_INCLUDED
#include <boost/simd/detail/overload.hpp>
namespace boost { namespace simd { namespace ext
{
namespace bd = boost::dispatch;
namespace bs = boost::simd;
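  // SSE4.1 supplies the packed integer min/max variants missing from SSE2
  // (which only provides _mm_max_epi16 and _mm_max_epu8), hence the
  // epi8/epu16/epi32/epu32 overloads below.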
BOOST_DISPATCH_OVERLOAD ( max_
, (typename A0)
, bs::sse4_1_
, bs::pack_<bd::int8_<A0>, bs::sse_>
, bs::pack_<bd::int8_<A0>, bs::sse_>
)
{
BOOST_FORCEINLINE A0 operator() ( const A0 & a0
, const A0 & a1 ) const BOOST_NOEXCEPT
{
return _mm_max_epi8(a0,a1);
}
};
BOOST_DISPATCH_OVERLOAD ( max_
, (typename A0)
, bs::sse4_1_
, bs::pack_<bd::uint16_<A0>, bs::sse_>
, bs::pack_<bd::uint16_<A0>, bs::sse_>
)
{
BOOST_FORCEINLINE A0 operator() ( const A0 & a0
, const A0 & a1 ) const BOOST_NOEXCEPT
{
return _mm_max_epu16(a0,a1);
}
};
BOOST_DISPATCH_OVERLOAD ( max_
, (typename A0)
, bs::sse4_1_
, bs::pack_<bd::uint32_<A0>, bs::sse_>
, bs::pack_<bd::uint32_<A0>, bs::sse_>
)
{
BOOST_FORCEINLINE A0 operator() ( const A0 & a0
, const A0 & a1 ) const BOOST_NOEXCEPT
{
return _mm_max_epu32(a0,a1);
}
};
BOOST_DISPATCH_OVERLOAD ( max_
, (typename A0)
, bs::sse4_1_
, bs::pack_<bd::int32_<A0>, bs::sse_>
, bs::pack_<bd::int32_<A0>, bs::sse_>
)
{
BOOST_FORCEINLINE A0 operator() ( const A0 & a0
, const A0 & a1 ) const BOOST_NOEXCEPT
{
return _mm_max_epi32(a0,a1);
}
};
} } }
#endif
|
{"hexsha": "7c1f797fe477f756aaf4c666d7fa3ef8b385a2f2", "size": 2556, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/boost/simd/arch/x86/sse4_1/simd/function/max.hpp", "max_stars_repo_name": "yaeldarmon/boost.simd", "max_stars_repo_head_hexsha": "561316cc54bdc6353ca78f3b6d7e9120acd11144", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/boost/simd/arch/x86/sse4_1/simd/function/max.hpp", "max_issues_repo_name": "yaeldarmon/boost.simd", "max_issues_repo_head_hexsha": "561316cc54bdc6353ca78f3b6d7e9120acd11144", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/boost/simd/arch/x86/sse4_1/simd/function/max.hpp", "max_forks_repo_name": "yaeldarmon/boost.simd", "max_forks_repo_head_hexsha": "561316cc54bdc6353ca78f3b6d7e9120acd11144", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.3544303797, "max_line_length": 100, "alphanum_fraction": 0.4401408451, "num_tokens": 614}
|
MODULE Euler_CharacteristicDecompositionModule_NonRelativistic_TABLE
USE KindModule, ONLY: &
DP, &
Zero, &
Half, &
One
USE GeometryFieldsModule, ONLY: &
nGF, &
iGF_Gm_dd_11, &
iGF_Gm_dd_22, &
iGF_Gm_dd_33
USE UnitsModule, ONLY: &
Gram, &
Centimeter
USE FluidFieldsModule, ONLY: &
nCF, &
iCF_D, &
iCF_S1, &
iCF_S2, &
iCF_S3, &
iCF_E, &
iCF_Ne
USE Euler_UtilitiesModule_NonRelativistic, ONLY: &
ComputePrimitive_Euler_NonRelativistic
USE EquationOfStateModule_TABLE, ONLY: &
ComputeSpecificInternalEnergy_TABLE, &
ComputeAuxiliary_Fluid_TABLE, &
ComputePressure_TABLE
USE TimersModule_Euler, ONLY: &
TimersStart_Euler, &
TimersStop_Euler, &
Timer_Euler_SL_CharDecomp
IMPLICIT NONE
PRIVATE
LOGICAL, PARAMETER :: Debug = .FALSE.
REAL(DP), PARAMETER :: dCs_Threshold = 0.1_DP
REAL(DP), PARAMETER :: D_Threshold = 1.0d16 * Gram / Centimeter**3
REAL(DP), PARAMETER, DIMENSION(6,6) :: &
I_6x6 = RESHAPE( (/1, 0, 0, 0, 0, 0, &
0, 1, 0, 0, 0, 0, &
0, 0, 1, 0, 0, 0, &
0, 0, 0, 1, 0, 0, &
0, 0, 0, 0, 1, 0, &
0, 0, 0, 0, 0, 1/), &
(/6,6/) )
PUBLIC :: ComputeCharacteristicDecomposition_Euler_NonRelativistic_TABLE
CONTAINS
SUBROUTINE ComputeCharacteristicDecomposition_Euler_NonRelativistic_TABLE &
( iDim, G, U, R, invR, FJ, cs_T, UseAnalytic_Option )
INTEGER, INTENT(in) :: iDim
REAL(DP), INTENT(in) :: G(nGF)
REAL(DP), INTENT(in) :: U(nCF)
REAL(DP), INTENT(out) :: R(nCF,nCF)
REAL(DP), INTENT(out) :: invR(nCF,nCF)
LOGICAL, INTENT(in) , OPTIONAL :: UseAnalytic_Option
REAL(DP), INTENT(out), OPTIONAL :: cs_T
REAL(DP), INTENT(out), OPTIONAL :: FJ(nCF,nCF)
LOGICAL :: UseAnalytic
REAL(DP) :: Gmdd11, Gmdd22, Gmdd33
REAL(DP) :: D, Vu1, Vu2, Vu3, Vd1, Vd2, Vd3
REAL(DP) :: E, Ne, P, Cs, Cs_table
REAL(DP) :: K, H, Tau, T, Y, Vsq, CsSq, W, Em, Gm, S
REAL(DP) :: dPdD, dPdT, dPdY
REAL(DP) :: dEdD, dEdT, dEdY
REAL(DP) :: dPdE, dPdDe, dPdTau
REAL(DP) :: dFdU(nCF,nCF)
IF ( PRESENT ( UseAnalytic_Option ) ) THEN
UseAnalytic = UseAnalytic_Option
ELSE
UseAnalytic = .TRUE.
END IF
CALL TimersStart_Euler( Timer_Euler_SL_CharDecomp )
CALL ComputePrimitive_Euler_NonRelativistic &
( U(iCF_D ), U(iCF_S1), U(iCF_S2), &
U(iCF_S3), U(iCF_E ), U(iCF_Ne), &
D, Vu1, Vu2, Vu3, E, Ne, &
G(iGF_Gm_dd_11), &
G(iGF_Gm_dd_22), &
G(iGF_Gm_dd_33) )
CALL ComputeAuxiliary_Fluid_TABLE &
( D, E, Ne, P, T, Y, S, Em, Gm, Cs_table )
CALL ComputeSpecificInternalEnergy_TABLE &
( D, T, Y, Em, dEdD, dEdT, dEdY )
CALL ComputePressure_TABLE &
( D, T, Y, P, dPdD, dPdT, dPdY )
Gmdd11 = G(iGF_Gm_dd_11)
Gmdd22 = G(iGF_Gm_dd_22)
Gmdd33 = G(iGF_Gm_dd_33)
! --- Distinguish co-/contra-variant velocities.
Vd1 = Gmdd11 * Vu1
Vd2 = Gmdd22 * Vu2
Vd3 = Gmdd33 * Vu3
Tau = 1.0_DP / D
dPdE = dPdT / dEdT
dPdDe = ( Tau ) * ( dPdY - dEdY * dPdE )
dPdTau = (dPdDe * Y + dEdD * dPdE - dPdD) / (Tau**2)
Vsq = Vu1 * Vd1 + Vu2 * Vd2 + Vu3 * Vd3
CsSq = Tau**2 * ( P * dPdE - dPdTau ) + Y * dPdDe
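    ! --- Sound speed squared, assembled from the tabulated pressure
    !     derivatives with respect to (Tau, Em, Ye). ---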
IF ( CsSq .LT. Zero .OR. D .GT. D_Threshold ) THEN
R = I_6x6
invR = I_6x6
IF ( PRESENT( cs_T ) )THEN
cs_T = Cs_table
END IF
IF( PRESENT( FJ ) ) THEN
FJ = I_6x6
END IF
RETURN
ELSE
Cs = SQRT( CsSq )
IF ( ABS( Cs - Cs_table ) / Cs_table .GT. dCs_Threshold ) THEN
R = I_6x6
invR = I_6x6
IF ( PRESENT( cs_T ) )THEN
cs_T = Cs_table
END IF
IF( PRESENT( FJ ) ) THEN
FJ = I_6x6
END IF
RETURN
END IF
END IF
K = ( ( - ( Y / Tau ) * dPdDe + dPdE * ( &
Half * Vsq + Em ) + dPdTau * Tau ) / ( dPdE ) )
H = ( Cs**2 / ( dPdE * Tau ) ) + K
W = Tau * ( dPdE * ( Vsq - 2.0_DP * Em ) &
- 2.0_DP * dPdTau * Tau )
! --- Compute the flux Jacobian for debugging/use in numeric routine ---
SELECT CASE( iDim )
CASE(1)
CALL ComputeFluxJacobian_X1 &
( Gmdd11, Tau, T, Y, Vu1, Vu2, Vu3, Vd1, Vd2, Vd3, Vsq, Em, H, dPdTau, dPdE, dPdDe, dFdU )
CASE(2)
CALL ComputeFluxJacobian_X2 &
( Gmdd22, Tau, T, Y, Vu1, Vu2, Vu3, Vd1, Vd2, Vd3, Vsq, Em, H, dPdTau, dPdE, dPdDe, dFdU )
CASE(3)
CALL ComputeFluxJacobian_X3 &
( Gmdd33, Tau, T, Y, Vu1, Vu2, Vu3, Vd1, Vd2, Vd3, Vsq, Em, H, dPdTau, dPdE, dPdDe, dFdU )
END SELECT
IF( PRESENT( FJ ) ) THEN
FJ = dFdU
END IF
IF ( UseAnalytic ) THEN
CALL ComputeCharacteristicDecomposition_Analytic &
( iDim, Gmdd11, Gmdd22, Gmdd33, &
D, Vu1, Vu2, Vu3, Vd1, Vd2, Vd3, E, &
Ne, P, Cs, K, H, Tau, T, Y, VSq, W, Em, &
dPdD, dPdT, dPdY, dEdD, dEdT, dEdY, dPdE, &
dPdDe, dPdTau, R, invR )
ELSE
CALL ComputeCharacteristicDecomposition_Numeric( R, invR, dFdU )
END IF
CALL TimersStop_Euler( Timer_Euler_SL_CharDecomp )
    ! --- Optionally return the table sound speed for debugging. ---
IF ( PRESENT( cs_T ) )THEN
cs_T = Cs_table
END IF
END SUBROUTINE ComputeCharacteristicDecomposition_Euler_NonRelativistic_TABLE
SUBROUTINE ComputeCharacteristicDecomposition_Analytic &
( iDim, Gmdd11, Gmdd22, Gmdd33, &
D, Vu1, Vu2, Vu3, Vd1, Vd2, Vd3, E, &
Ne, P, Cs, K, H, Tau, T, Y, VSq, W, Em, &
dPdD, dPdT, dPdY, dEdD, dEdT, dEdY, dPdE, &
dPdDe, dPdTau, R, invR )
INTEGER, INTENT(in) :: iDim
REAL(DP), INTENT(in) :: Gmdd11, Gmdd22, Gmdd33
REAL(DP), INTENT(in) :: D, Vu1, Vu2, Vu3, Vd1, Vd2, Vd3
REAL(DP), INTENT(in) :: E, Ne, P, Cs
REAL(DP), INTENT(in) :: K, H, Tau, T, Y, Vsq, W, Em
REAL(DP), INTENT(in) :: dPdD, dPdT, dPdY
REAL(DP), INTENT(in) :: dEdD, dEdT, dEdY
REAL(DP), INTENT(in) :: dPdE, dPdDe, dPdTau
INTEGER :: i
REAL(DP) :: X, Alpha, B, Delta, Zero2, invCsSq
REAL(DP), DIMENSION(3) :: Phi_u, Phi_d
REAL(DP), INTENT(out) :: R(nCF,nCF)
REAL(DP), INTENT(out) :: invR(nCF,nCF)
Phi_u(1) = dPdE * Tau * Vu1
Phi_u(2) = dPdE * Tau * Vu2
Phi_u(3) = dPdE * Tau * Vu3
Phi_d(1) = dPdE * Tau * Vd1
Phi_d(2) = dPdE * Tau * Vd2
Phi_d(3) = dPdE * Tau * Vd3
invCsSq = 1.0_DP / Cs**2
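    ! --- Note: only iDim = 1 and 2 are treated below; for iDim = 3 the
    !     SELECT falls through and R / invR are left unset. ---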
SELECT CASE( iDim )
CASE( 1 )
Delta = Vu1 * Vd1 - Vu2 * Vd2 - Vu3 * Vd3
B = 0.5_DP * (Delta + 2.0_DP * Em + &
(2.0_DP * dPdTau * Tau)/dPdE)
X = (dPdE * ( Delta + 2.0_DP * Em) + 2.0_DP * dPdTau * Tau )
Alpha = 2.0_DP * (Y) * dPdDe - X * Tau
R(:,1) = [ One, Vd1 - Cs * SQRT( Gmdd11 ), Vd2, &
Vd3, H - Cs * SQRT( Gmdd11 ) * Vu1, Y ]
R(:,2) = [ Zero, Zero, One, Zero, Vu2, Zero ]
R(:,3) = [ One, Vd1, Zero, Zero, B, Zero ]
R(:,4) = [ One, Vd1, Zero, Zero, Zero, &
(Tau * X) / (2.0_DP * dPdDe) ]
R(:,5) = [ Zero, Zero, Zero, One, Vu3, Zero ]
R(:,6) = [ One, Vd1 + Cs * SQRT( Gmdd11 ), Vd2, &
Vd3, H + Cs * SQRT( Gmdd11 ) * Vu1, Y ]
invR(:,1) = invCsSq * &
[ + 0.25_DP * (W + 2.0_DP * Cs * SQRT( Gmdd11 ) * Vu1), &
- Half * Vd2 * W, &
+ (2.0_DP * Cs**2 * X + Alpha * W / Tau)/(2.0_DP * X), &
- (Y) * dPdDe * W / (X * Tau), &
- Half * Vd3 * W, &
+ 0.25_DP * (W - 2.0_DP * Cs * SQRT( Gmdd11 ) * Vu1) ]
invR(:,2) = invCsSq * &
[ - Half * ( ( Cs / SQRT( Gmdd11 ) ) + Phi_u(1) ), &
+ Phi_u(1) * Vd2, &
- Phi_u(1) * Alpha / (X * Tau), &
+ 2.0_DP * Y * dPdDe * Phi_u(1) / (X * Tau), &
+ Phi_u(1) * Vd3, &
+ Half * ( ( Cs / SQRT( Gmdd11 ) ) - Phi_u(1) ) ]
invR(:,3) = invCsSq * &
[ - Half * Phi_u(2), &
+ Cs**2 + Phi_u(2) * Vd2, &
- Phi_u(2) * Alpha / (X * Tau), &
+ 2.0_DP * Y * dPdDe * Phi_u(2) / (X * Tau), &
+ Phi_u(2) * Vd3, &
- Half * Phi_u(2) ]
invR(:,4) = invCsSq * &
[ - Half * Phi_u(3), &
+ Phi_u(3) * Vd2, &
- Phi_u(3) * Alpha / (X * Tau), &
+ 2.0_DP * Y * dPdDe * Phi_u(3) / (X * Tau), &
+ Cs**2 + Phi_u(3) * Vd3, &
- Half * Phi_u(3) ]
invR(:,5) = invCsSq * &
[ + Half * dPdE * Tau, &
- Phi_d(2), &
+ dPdE * Alpha / X, &
- ((2.0_DP * Y * dPdDe * dPdE)/ X), &
- Phi_d(3), &
+ Half * dPdE * Tau ]
invR(:,6) = invCsSq * &
[ + Half * dPdDe, &
- Vd2 * dPdDe, &
+ (dPdDe * (-2.0_DP * Cs**2 + Alpha))/(Tau * X), &
+ 2.0_DP * dPdDe * (Cs**2 - Y * dPdDe)/(Tau * X), &
- Vd3 * dPdDe, &
+ Half * dPdDe ]
IF( Debug )THEN
WRITE(*,*)
WRITE(*,'(A4,A)') '', 'invR * R (X1):'
WRITE(*,*)
DO i = 1, 6
WRITE(*,'(A4,6ES16.7E2)') '', MATMUL( invR(i,:), R(:,:) )
END DO
END IF
CASE(2)
Delta = Vu2 * Vd2 - Vu1 * Vd1 - Vu3 * Vd3
B = 0.5_DP * (Delta + 2.0_DP * Em + &
(2.0_DP * dPdTau * Tau)/dPdE)
X = (dPdE * ( Delta + 2.0_DP * Em) + 2.0_DP * dPdTau * Tau )
Alpha = 2.0_DP * (Y) * dPdDe - X * Tau
R(:,1) = [ One, Vd1, Vd2 - Cs * SQRT( Gmdd22 ), &
Vd3, H - Cs * SQRT( Gmdd22 ) * Vu2, Y ]
R(:,2) = [ Zero, One, Zero, Zero, Vu1, Zero ]
R(:,3) = [ One, Zero, Vd2, Zero, B, Zero ]
R(:,4) = [ One, Zero, Vd2, Zero, Zero, &
(Tau * X) / (2.0_DP * dPdDe) ]
R(:,5) = [ Zero, Zero, Zero, One, Vu3, Zero ]
R(:,6) = [ One, Vd1, Vd2 + Cs * SQRT( Gmdd22 ), &
Vd3, H + Cs * SQRT( Gmdd22 ) * Vu2, Y ]
invR(:,1) = invCsSq * &
[ + 0.25_DP * (W + 2.0_DP * Cs * SQRT( Gmdd22 ) * Vu2), &
- Half * Vd1 * W, &
+ (2.0_DP * Cs**2 * X + Alpha * W / Tau)/(2.0_DP * X), &
- (Y) * dPdDe * W / (X * Tau), &
- Half * Vd3 * W, &
+ 0.25_DP * (W - 2.0_DP * Cs * SQRT( Gmdd22 ) * Vu2) ]
invR(:,2) = invCsSq * &
[ - Half * (Phi_u(1)), &
+ Cs**2 + Phi_u(1) * Vd1, &
- Phi_u(1) * Alpha / (X * Tau), &
+ 2.0_DP * Y * dPdDe * Phi_u(1) / (X * Tau), &
+ Phi_u(1) * Vd3, &
- Half * (Phi_u(1)) ]
invR(:,3) = invCsSq * &
        [ - Half * ( ( Cs / SQRT( Gmdd22 ) ) + Phi_u(2)), &
+ Phi_u(2) * Vd1, &
- Phi_u(2) * Alpha / (X * Tau), &
+ 2.0_DP * Y * dPdDe * Phi_u(2) / (X * Tau), &
+ Phi_u(2) * Vd3, &
          + Half * ( ( Cs / SQRT( Gmdd22 ) ) - Phi_u(2)) ]
invR(:,4) = invCsSq * &
[ - Half * Phi_u(3), &
+ Phi_u(3) * Vd1, &
- Phi_u(3) * Alpha / (X * Tau), &
+ 2.0_DP * Y * dPdDe * Phi_u(3) / (X * Tau), &
+ Cs**2 + Phi_u(3) * Vd3, &
- Half * Phi_u(3) ]
invR(:,5) = invCsSq * &
[ + Half * dPdE * Tau, &
- Phi_d(1), &
+ dPdE * Alpha / X, &
- ((2.0_DP * Y * dPdDe * dPdE)/ X), &
- Phi_d(3), &
+ Half * dPdE * Tau ]
invR(:,6) = invCsSq * &
[ + Half * dPdDe, &
- Vd1 * dPdDe, &
+ (dPdDe * (-2.0_DP * Cs**2 + Alpha))/(Tau * X), &
+ 2.0_DP * dPdDe * (Cs**2 - Y * dPdDe)/(Tau * X), &
- Vd3 * dPdDe, &
+ Half * dPdDe ]
IF( Debug )THEN
WRITE(*,*)
WRITE(*,'(A4,A)') '', 'invR * R (X2):'
WRITE(*,*)
DO i = 1, 6
WRITE(*,'(A4,6ES16.7E2)') '', MATMUL( invR(i,:), R(:,:) )
END DO
END IF
END SELECT
END SUBROUTINE ComputeCharacteristicDecomposition_Analytic
SUBROUTINE ComputeCharacteristicDecomposition_Numeric( R, invR, dFdU )
REAL(DP), INTENT(in) :: dFdU(nCF,nCF)
REAL(DP), INTENT(out) :: R(nCF,nCF)
REAL(DP), INTENT(out) :: invR(nCF,nCF)
REAL(DP) :: Lambda(nCF,nCF)
INTEGER :: i, INFO, LWORK
INTEGER :: IPIV(nCF)
REAL(DP) :: WR(nCF)
REAL(DP) :: WI(nCF)
REAL(DP) :: TEMP(1)
REAL(DP) :: dFdU_Copy(nCF,nCF)
REAL(DP) :: invR_Copy(nCF,nCF)
REAL(DP), ALLOCATABLE :: WORK1(:), WORK2(:)
! --- Copy to avoid overwriting dFdU ---
dFdU_Copy = dFdU
! --- Workspace query to determine LWORK. ---
CALL DGEEV( 'V', 'N', nCF, dFdU_Copy, nCF, WR, &
WI, invR, nCF, 0, nCF, TEMP, &
-1, INFO )
LWORK = INT( TEMP(1) )
ALLOCATE( WORK1(LWORK) )
Lambda(:,:) = Zero
! --- Actual computation of the eigendecomposition. ---
CALL DGEEV( 'V', 'N', nCF, dFdU_Copy, nCF, WR, &
WI, invR, nCF, 0, nCF, WORK1, &
LWORK, INFO )
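! --- DGEEV with JOBVL='V' returns the left eigenvectors as columns of
!     invR; transposing stores them as rows, i.e., the inverse of the
!     right eigenvector matrix (up to row scaling). ---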
invR = TRANSPOSE( invR )
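! --- Recover the right eigenvector matrix R by explicitly inverting
!     invR (LU factorization followed by inversion). ---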
invR_Copy = invR
CALL DGETRF( nCF, nCF, invR_Copy, nCF, IPIV, INFO )
LWORK = -1
CALL DGETRI( nCF, invR_Copy, nCF, IPIV, TEMP, LWORK, INFO )
LWORK = INT( TEMP(1) )
ALLOCATE( WORK2(LWORK) )
CALL DGETRI( nCF, invR_Copy, nCF, IPIV, WORK2, LWORK, INFO )
R = invR_Copy
IF ( ( INFO .NE. 0 ) .OR. ( ANY( ABS( WI ) > 1d-15 ) ) ) THEN
! --- Lambda is populated here only for diagnostic output ---
DO i = 1, nCF
Lambda(i,i) = WR(i)
END DO
PRINT*, 'INFO: ', INFO
PRINT*, 'WR: ', WR
PRINT*, 'WI: ', WI
DO i = 1, nCF
PRINT*, 'Lambda(i,:) : ', Lambda(i,:)
END DO
END IF
END SUBROUTINE ComputeCharacteristicDecomposition_Numeric
SUBROUTINE ComputeFluxJacobian_X1( Gmdd11, Tau, T, Y, Vu1, Vu2, Vu3, &
Vd1, Vd2, Vd3, Vsq, Em, H, dPdTau, &
dPdE, dPdDe, dFdU_X1 )
REAL(DP), INTENT(in) :: Gmdd11
REAL(DP), INTENT(in) :: Tau, T, Y
REAL(DP), INTENT(in) :: Vu1, Vu2, Vu3, Vd1, Vd2, Vd3
REAL(DP), INTENT(in) :: Vsq, Em, H
REAL(DP), INTENT(in) :: dPdTau, dPdE, dPdDe
REAL(DP), INTENT(out) :: dFdU_X1(nCF,nCF)
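! --- Rows/columns follow the conserved-variable ordering
!     (D, S_1, S_2, S_3, E, N_e). ---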
dFdU_X1(1,:) = [ 0.0_DP, 1.0_DP / Gmdd11, 0.0_DP, 0.0_DP, 0.0_DP, 0.0_DP ]
dFdU_X1(2,1) = - Vu1 * Vd1 - Tau**2 * dPdTau &
- Tau * dPdE * ( Em - 0.5_DP * Vsq )
dFdU_X1(2,2) = Vu1 * ( 2.0_DP - Tau * dPdE )
dFdU_X1(2,3) = - dPdE * Tau * Vu2
dFdU_X1(2,4) = - dPdE * Tau * Vu3
dFdU_X1(2,5) = dPdE * Tau
dFdU_X1(2,6) = dPdDe
dFdU_X1(3,:) = [ - Vu1 * Vd2, Vd2 / Gmdd11, Vu1, 0.0_DP, 0.0_DP, 0.0_DP ]
dFdU_X1(4,:) = [ - Vu1 * Vd3, Vd3 / Gmdd11, 0.0_DP, Vu1, 0.0_DP, 0.0_DP ]
dFdU_X1(5,1) = Vu1 * ( - H - dPdTau * Tau**2 &
- Tau * dPdE * ( Em - 0.5_DP * Vsq ) )
dFdU_X1(5,2) = ( H / Gmdd11 ) - dPdE * Tau * Vu1**2
dFdU_X1(5,3) = - dPdE * Tau * Vu1 * Vu2
dFdU_X1(5,4) = - dPdE * Tau * Vu1 * Vu3
dFdU_X1(5,5) = Vu1 * ( 1.0_DP + dPdE * Tau )
dFdU_X1(5,6) = Vu1 * dPdDe
dFdU_X1(6,:) = [ - Vu1 * Y, Y / Gmdd11, 0.0_DP, 0.0_DP, 0.0_DP, Vu1 ]
END SUBROUTINE ComputeFluxJacobian_X1
SUBROUTINE ComputeFluxJacobian_X2( Gmdd22, Tau, T, Y, Vu1, Vu2, Vu3, &
Vd1, Vd2, Vd3, Vsq, Em, H, dPdTau, &
dPdE, dPdDe, dFdU_X2 )
REAL(DP), INTENT(in) :: Gmdd22
REAL(DP), INTENT(in) :: Tau, T, Y
REAL(DP), INTENT(in) :: Vu1, Vu2, Vu3, Vd1, Vd2, Vd3
REAL(DP), INTENT(in) :: Vsq, Em, H
REAL(DP), INTENT(in) :: dPdTau, dPdE, dPdDe
REAL(DP), INTENT(out) :: dFdU_X2(nCF,nCF)
dFdU_X2(1,:) = [ 0.0_DP, 0.0_DP, 1.0_DP / Gmdd22, 0.0_DP, 0.0_DP, 0.0_DP ]
dFdU_X2(2,:) = [ - Vu2 * Vd1, Vu2, Vd1 / Gmdd22, 0.0_DP, 0.0_DP, 0.0_DP ]
dFdU_X2(3,1) = - Vu2 * Vd2 - Tau**2 * dPdTau &
- Tau * dPdE * ( Em - 0.5_DP * Vsq )
dFdU_X2(3,2) = - dPdE * Tau * Vu1
dFdU_X2(3,3) = Vu2 * ( 2.0_DP - Tau * dPdE )
dFdU_X2(3,4) = - dPdE * Tau * Vu3
dFdU_X2(3,5) = dPdE * Tau
dFdU_X2(3,6) = dPdDe
dFdU_X2(4,:) = [ - Vu2 * Vd3, 0.0_DP, Vd3 / Gmdd22, Vu2, 0.0_DP, 0.0_DP ]
dFdU_X2(5,1) = Vu2 * ( - H - dPdTau * Tau**2 &
- Tau * dPdE * ( Em - 0.5_DP * Vsq ) )
dFdU_X2(5,2) = - dPdE * Tau * Vu1 * Vu2
dFdU_X2(5,3) = ( H / Gmdd22 ) - dPdE * Tau * Vu2**2
dFdU_X2(5,4) = - dPdE * Tau * Vu2 * Vu3
dFdU_X2(5,5) = Vu2 * ( 1.0_DP + dPdE * Tau )
dFdU_X2(5,6) = Vu2 * dPdDe
dFdU_X2(6,:) = [ - Vu2 * Y, 0.0_DP, Y / Gmdd22, 0.0_DP, 0.0_DP, Vu2 ]
END SUBROUTINE ComputeFluxJacobian_X2
SUBROUTINE ComputeFluxJacobian_X3( Gmdd33, Tau, T, Y, Vu1, Vu2, Vu3, &
Vd1, Vd2, Vd3, Vsq, Em, H, dPdTau, &
dPdE, dPdDe, dFdU_X3 )
REAL(DP), INTENT(in) :: Gmdd33
REAL(DP), INTENT(in) :: Tau, T, Y
REAL(DP), INTENT(in) :: Vu1, Vu2, Vu3, Vd1, Vd2, Vd3
REAL(DP), INTENT(in) :: Vsq, Em, H
REAL(DP), INTENT(in) :: dPdTau, dPdE, dPdDe
REAL(DP), INTENT(out) :: dFdU_X3(nCF,nCF)
dFdU_X3(1,:) = [ 0.0_DP, 0.0_DP, 0.0_DP, 1.0_DP / Gmdd33, 0.0_DP, 0.0_DP ]
dFdU_X3(2,:) = [ - Vu3 * Vd1, Vu3, 0.0_DP, Vd1 / Gmdd33, 0.0_DP, 0.0_DP ]
dFdU_X3(3,:) = [ - Vu3 * Vd2, 0.0_DP, Vu3, Vd2 / Gmdd33, 0.0_DP, 0.0_DP ]
dFdU_X3(4,1) = - Vu3 * Vd3 - Tau**2 * dPdTau &
- Tau * dPdE * ( Em - 0.5_DP * Vsq )
dFdU_X3(4,2) = - dPdE * Tau * Vu1
dFdU_X3(4,3) = - dPdE * Tau * Vu2
dFdU_X3(4,4) = Vu3 * ( 2.0_DP - Tau * dPdE )
dFdU_X3(4,5) = dPdE * Tau
dFdU_X3(4,6) = dPdDe
dFdU_X3(5,1) = Vu3 * ( - H - dPdTau * Tau**2 &
- Tau * dPdE * ( Em - 0.5_DP * Vsq ) )
dFdU_X3(5,2) = - dPdE * Tau * Vu3 * Vu1
dFdU_X3(5,3) = ( H / Gmdd33 ) - dPdE * Tau * Vu3**2
dFdU_X3(5,4) = - dPdE * Tau * Vu3 * Vu2
dFdU_X3(5,5) = Vu3 * ( 1.0_DP + dPdE * Tau )
dFdU_X3(5,6) = Vu3 * dPdDe
dFdU_X3(6,:) = [ - Vu3 * Y, 0.0_DP, 0.0_DP, Y / Gmdd33, 0.0_DP, Vu3 ]
END SUBROUTINE ComputeFluxJacobian_X3
END MODULE Euler_CharacteristicDecompositionModule_NonRelativistic_TABLE
|
{"hexsha": "bf0d1dcf04821f73041f51971be2166463e1537b", "size": 18599, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "Modules/Euler/Euler_CharacteristicDecompositionModule_NonRelativistic_TABLE.f90", "max_stars_repo_name": "srichers/thornado", "max_stars_repo_head_hexsha": "bc6666cbf9ae8b39b1ba5feffac80303c2b1f9a8", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-12-08T16:16:55.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-24T19:31:21.000Z", "max_issues_repo_path": "Modules/Euler/Euler_CharacteristicDecompositionModule_NonRelativistic_TABLE.f90", "max_issues_repo_name": "srichers/thornado", "max_issues_repo_head_hexsha": "bc6666cbf9ae8b39b1ba5feffac80303c2b1f9a8", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2019-07-10T20:13:15.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-11T13:21:00.000Z", "max_forks_repo_path": "Modules/Euler/Euler_CharacteristicDecompositionModule_NonRelativistic_TABLE.f90", "max_forks_repo_name": "srichers/thornado", "max_forks_repo_head_hexsha": "bc6666cbf9ae8b39b1ba5feffac80303c2b1f9a8", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2018-11-14T01:13:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-24T02:08:20.000Z", "avg_line_length": 31.7931623932, "max_line_length": 105, "alphanum_fraction": 0.4705629335, "num_tokens": 7792}
|
"""Minimum jerk trajectory."""
import numpy as np
from .base import PointToPointMovement
from .data._minimum_jerk import generate_minimum_jerk
class MinimumJerkTrajectory(PointToPointMovement):
"""Precomputed point to point movement with minimum jerk.
Parameters
----------
n_dims : int
State space dimensions.
execution_time : float
Execution time of the trajectory.
dt : float, optional (default: 0.01)
Time difference between steps.
"""
def __init__(self, n_dims, execution_time, dt=0.01):
super(MinimumJerkTrajectory, self).__init__(n_dims, n_dims)
self.X = None
self.Xd = None
self.execution_time = execution_time
self.dt = dt
self.step_idx = 0
self.initialized = False
def reset(self):
"""Reset initial state and time."""
self.step_idx = 0
self.initialized = False
def step(self, last_y, last_yd):
"""Perform step.
Parameters
----------
last_y : array, shape (n_dims,)
Last state.
last_yd : array, shape (n_dims,)
Last time derivative of state (e.g., velocity).
Returns
-------
y : array, shape (n_dims,)
Next state.
yd : array, shape (n_dims,)
Next time derivative of state (e.g., velocity).
"""
if not self.initialized:
self.X, self.Xd, _ = generate_minimum_jerk(
self.start_y, self.goal_y, self.execution_time, self.dt)
self.initialized = True
self.current_y = self.X[self.step_idx]
self.current_yd = self.Xd[self.step_idx]
self.step_idx += 1
return np.copy(self.current_y), np.copy(self.current_yd)
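# Minimal usage sketch (hypothetical values; assumes the start and goal are
# set through the PointToPointMovement interface, e.g. via configure()):
#
#     mjt = MinimumJerkTrajectory(n_dims=2, execution_time=1.0, dt=0.01)
#     mjt.configure(start_y=np.zeros(2), goal_y=np.ones(2))
#     y, yd = np.zeros(2), np.zeros(2)
#     for _ in range(int(1.0 / 0.01)):
#         y, yd = mjt.step(y, yd)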
|
{"hexsha": "183072fccb2a7a806b23652b48413b0e0c4a3189", "size": 1771, "ext": "py", "lang": "Python", "max_stars_repo_path": "movement_primitives/minimum_jerk_trajectory.py", "max_stars_repo_name": "maotto/movement_primitives", "max_stars_repo_head_hexsha": "b79c78a5a0667cc24a26b7b6cc64a5762d8f4dd4", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2021-11-17T15:36:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T08:49:25.000Z", "max_issues_repo_path": "movement_primitives/minimum_jerk_trajectory.py", "max_issues_repo_name": "DavidYaonanZhu/movement_primitives", "max_issues_repo_head_hexsha": "ce355837f06cb5fada24be7259cb0305e8ea5d91", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2021-12-01T10:33:04.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-23T12:41:39.000Z", "max_forks_repo_path": "movement_primitives/minimum_jerk_trajectory.py", "max_forks_repo_name": "DavidYaonanZhu/movement_primitives", "max_forks_repo_head_hexsha": "ce355837f06cb5fada24be7259cb0305e8ea5d91", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2021-11-25T03:53:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T03:19:25.000Z", "avg_line_length": 27.671875, "max_line_length": 72, "alphanum_fraction": 0.5934500282, "include": true, "reason": "import numpy", "num_tokens": 413}
|
/-
Copyright (c) 2018 Jeremy Avigad. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Jeremy Avigad, Mario Carneiro, Simon Hudon, Alex Keizer
-/
import Qpf.MathlibPort.Fin2
import Qpf.Util.HEq
-- import Mathlib
universe u v w
abbrev DVec {n : Nat} (αs : Fin2 n → Type u) : Type _
:= (i : Fin2 n) → αs i
abbrev Vec (α : Type _) (n : Nat)
:= @DVec n fun _ => α
namespace Vec
def append1 {α : Type u} {n} (tl : Vec α n) (hd : α) : Vec α (.succ n)
| .fs i => tl i
| .fz => hd
-- infixl:67 " ::: " => append1
/-- Drop the last element from a `Vec` -/
def drop (v : Vec α (n+1)) : Vec α n
:= fun i => v <| .fs i
def constVec {α : Type _} (a : α) (n : Nat) : Vec α n
:= fun _ => a
end Vec
unif_hint (n : Nat) where |- Fin2 n → Type u =?= Vec.{u+1} (Type u) n
unif_hint {α : Type _} (n : Nat) where |- DVec.{u+1} (Vec.constVec α n) =?= Vec.{u+1} α n
namespace DVec
/-- Return the last element from a `DVec` -/
abbrev last (v : @DVec (n+1) αs ) : αs 0
:= v 0
/-- Drop the last element from a `DVec` -/
def drop (v : DVec αs) : DVec (Vec.drop αs)
:= fun i => v <| .fs i
@[reducible]
def nil : @DVec 0 αs
:= fun emp => by contradiction
@[reducible]
def append1 {α : Type u} {αs : Vec (Type u) n} (tl : DVec αs) (hd : α) : DVec (Vec.append1 αs α)
| .fs i => tl i
| .fz => hd
-- infixl:67 " ::: " => append1
end DVec
namespace Vec
variable {α : Type _} {n : Nat}
abbrev nil : Vec α 0 := DVec.nil
abbrev last : Vec α n.succ → α := DVec.last
end Vec
/-
# Notation macros
-/
syntax "!![" term,* "]" : term
macro_rules
| `(!![]) => `(Vec.nil)
| `(!![$x]) => `(Vec.append1 !![] $x)
| `(!![ $xs,* , $x]) => `(Vec.append1 !![$xs,*] $x)
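-- e.g., `!![1, 2, 3] : Vec Nat 3`; index `0` (i.e., `.fz`) points at the
-- right-most element, `3`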
namespace Vec
theorem drop_append1 {v : Vec α n} {a : α} {i : PFin2 n} :
drop (append1 v a) i = v i :=
rfl
theorem drop_append1' {v : Vec α n} {a : α} :
drop (append1 v a) = v :=
by funext x; rfl
theorem last_append1 {v : Vec α n} {a : α} :
last (append1 v a) = a
:= rfl
@[simp]
theorem append1_drop_last (v : Vec α (n+1)) : append1 (drop v) (last v) = v :=
funext $ fun i => by cases i; rfl; rfl
def reverse (v : Vec α n) : Vec α n :=
fun i => v i.inv
@[simp]
theorem reverse_involution {v : Vec α n} :
v.reverse.reverse = v :=
by
funext i;
dsimp only [reverse]
apply congrArg;
simp only [Fin2.inv, PFin2.toFin2_ofFin2_iso, PFin2.inv_involution, PFin2.ofFin2_toFin2_iso]
end Vec
namespace Vec
/-- Create a `Vec` from a `List`. Note that this conceptually reverses the list, since in a `Vec`
the 0th index points to the right-most element
-/
def ofList : (as : List α) → Vec α (as.length)
| List.nil => Vec.nil
| List.cons a as => Vec.append1 (ofList as) a
/-- Create a `List` from a `Vec`. Note that this conceptually reverses the vector, since in a `Vec`
the 0th index points to the right-most element
-/
def toList : {n : Nat} → Vec α n → List α
| 0, _ => List.nil
| _+1, v => List.cons v.last (toList v.drop)
@[simp]
theorem toList_length_eq_n {v : Vec α n} :
v.toList.length = n :=
by
induction n
case zero => rfl
case succ n ih =>
dsimp only [toList, List.length]
dsimp only [HAdd.hAdd, Add.add, Nat.add]
apply congrArg
apply ih
@[simp]
theorem ofList_toList_iso {v : Vec α n} :
HEq (ofList (toList v)) v :=
by
apply HEq.trans (b := cast (β:=Vec α (List.length (toList v))) ?hc v);
case hc =>
simp only [toList_length_eq_n]
case h₂ =>
apply cast_heq
case h₁ =>
apply heq_of_eq;
funext i;
apply eq_of_heq;
rw[cast_arg] <;> try (solve | simp);
simp_heq
induction n <;> cases i;
case succ.fz n ih => {
dsimp[ofList, toList, append1, last, DVec.last]
apply hcongr <;> (try solve | intros; rfl)
simp_heq;
simp only [OfNat.ofNat]
apply hcongr <;> (try solve | intros; rfl)
simp
}
case succ.fs n ih i => {
dsimp[ofList, toList, append1, drop]
apply HEq.trans (@ih (fun i => v (.fs i)) i);
apply hcongr <;> (try solve | intros; rfl)
simp_heq
apply hcongr;
case H₂ => apply cast_heq
case H₃ => apply congrArg; simp
case H₄ => intro _; apply congrArg; simp
apply hcongr <;> (try solve | intros; rfl);
simp
}
theorem ofList_toList_iso' {v : Vec α n} :
HEq (fun (j : PFin2.{u} (toList v).length) => ofList (toList v) j.toFin2)
(fun (j : PFin2.{u} (toList v).length) => v <| PFin2.toFin2 <| cast (by rw[toList_length_eq_n]) j) :=
by
apply HEq.funext
. rfl
intro j
have n_eq : (toList v).length = n := toList_length_eq_n;
apply hcongr
. apply ofList_toList_iso
. intros
apply hcongr <;> intros <;> (try rw[n_eq])
. simp_heq
. intros; simp
. rw[n_eq]
@[simp]
theorem toList_ofList_iso {as : List α} :
toList (ofList as) = as :=
by
induction as;
case nil => rfl
case cons a as ih => simp only [toList, ofList, append1, last, DVec.last, drop, ih]
instance : Coe (Vec (Type u) n) (TypeVec.{u} n) where
coe v i := v i
instance : Coe (TypeVec.{u} n) (Vec (Type u) n) where
coe v i := v i
instance : Coe (Fin n → α) (Vec α n) where
coe f i := f (Fin2.inv i)
end Vec
|
{"author": "alexkeizer", "repo": "qpf4", "sha": "980f97425b9d5a5e3897073df33794192b3b3124", "save_path": "github-repos/lean/alexkeizer-qpf4", "path": "github-repos/lean/alexkeizer-qpf4/qpf4-980f97425b9d5a5e3897073df33794192b3b3124/Qpf/Util/Vec.lean"}
|
import sys
import numpy as np
from matplotlib import pyplot as plt
sys.path.append("../../")
from spook import SpookPosL1
from spook.utils import normalizedATA
np.random.seed(1996)
Na = 17
Nb = 9
Ns = 10000
Ng = 11
A = np.random.rand(Ns, Na) * 50
Xtrue = np.zeros((Na, Nb))
bb, aa = np.meshgrid(np.arange(Nb), np.arange(Na))
for p1, p2 in zip([1,-1],[1,-1]):
tmp = 0.1*(Na+Nb) - abs((aa - Na//2) + p1* (bb - Nb//2) - p2* 0.2*(Na+Nb))
tmp[tmp<0] = 0
Xtrue += tmp
plt.ion()
fig = plt.figure()
plt.pcolormesh(Xtrue)
plt.colorbar()
G = np.identity(Ng) - 0.2*np.diag(np.ones(Ng-1),k=-1) - 0.2*np.diag(np.ones(Ng-1),k=1)
G = G[:,:Nb]
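# G is a tridiagonal stencil (1 on the diagonal, -0.2 on both off-diagonals),
# truncated to Nb columns so it maps the Nb-dim response onto the Ng-dim grid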
B0 = A @ Xtrue
B1 = B0 @ (G.T)
B0 += 1e-3*np.linalg.norm(B0) * np.random.randn(*(B0.shape))
B1 += 1e-3*np.linalg.norm(B1) * np.random.randn(*(B1.shape))
SpookPosL1.verbose = True
spk0 = SpookPosL1(B0, A, "raw", lsparse=1, lsmooth=(0.1,0.01))
AtA, sA = normalizedATA(A)
spk0f= SpookPosL1((A.T @ B0)/sA, AtA, "contracted", lsparse=1, lsmooth=(0.1,0.01), pre_normalize=False)
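# spk0f is fed the pre-contracted quantities (A^T B scaled by 1/sA and the
# normalized A^T A), so after undoing the 1/sA scaling its solution should
# match the one computed from the raw (B0, A) pair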
X0 = spk0.getXopt()
X0f= spk0f.getXopt()/sA
print(np.allclose(X0, X0f))
X0 = spk0.getXopt(0.1, (1e-4,1e-4))
fig = plt.figure()
plt.pcolormesh(X0)
plt.colorbar()
|
{"hexsha": "da972869f01f1df2a8777bd38ba34e3e3ba8845d", "size": 1177, "ext": "py", "lang": "Python", "max_stars_repo_path": "spook/unittest/testPreNorm.py", "max_stars_repo_name": "congzlwag/spook", "max_stars_repo_head_hexsha": "0c728086b811ce829c6226e0a9a10a350772ec15", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "spook/unittest/testPreNorm.py", "max_issues_repo_name": "congzlwag/spook", "max_issues_repo_head_hexsha": "0c728086b811ce829c6226e0a9a10a350772ec15", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "spook/unittest/testPreNorm.py", "max_forks_repo_name": "congzlwag/spook", "max_forks_repo_head_hexsha": "0c728086b811ce829c6226e0a9a10a350772ec15", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.6346153846, "max_line_length": 103, "alphanum_fraction": 0.6355140187, "include": true, "reason": "import numpy", "num_tokens": 487}
|
#!/usr/bin/env python
from setuptools import setup
from setuptools import Extension
import numpy
short_desc = "Package for evaluating the NRSur7dq2 surrogate model"
long_desc = \
"""
NRSur7dq2 is a surrogate model for gravitational waves from numerical
relativity simulations of binary black hole mergers.
It is described in Blackman et al. 2017:
https://arxiv.org/abs/1705.07089
https://journals.aps.org/prd/abstract/10.1103/PhysRevD.96.024058
This package provides a class NRSurrogate7dq2 for evaluating the NRSur7dq2
surrogate model.
See the NRSurrogate7dq2 class docstring for usage.
A tutorial Jupyter notebook can be found at
https://www.black-holes.org/surrogates/
"""
extensions = [
Extension(
'_NRSur7dq2_utils',
sources=['NRSur7dq2_utils/src/NRSur7dq2_utils.c'],
include_dirs = ['NRSur7dq2_utils/include', numpy.get_include()],
language='c',
extra_compile_args = ['-fPIC', '-O3'],
)
]
setup(
name = 'NRSur7dq2',
version = '1.0.6',
description = short_desc,
long_description = long_desc,
author = 'Jonathan Blackman',
author_email = 'jonathan.blackman.0@gmail.com',
url = 'https://www.black-holes.org/surrogates/',
packages = ['NRSur7dq2'],
package_data = {'NRSur7dq2': ['NRSur7dq2.h5']},
ext_modules = extensions,
install_requires = ['numpy', 'scipy', 'h5py'],
)
|
{"hexsha": "4d9ae7e413b14d0620e5106d7cecae562a35a11c", "size": 1536, "ext": "py", "lang": "Python", "max_stars_repo_path": "setup.py", "max_stars_repo_name": "jblackma/NRSur7dq2", "max_stars_repo_head_hexsha": "fd2383f5a49f180598af25dd5ea2994dccfab2f0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2018-10-30T04:43:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-16T14:05:18.000Z", "max_issues_repo_path": "setup.py", "max_issues_repo_name": "jblackma/NRSur7dq2", "max_issues_repo_head_hexsha": "fd2383f5a49f180598af25dd5ea2994dccfab2f0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-11-11T03:40:05.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-11T19:03:52.000Z", "max_forks_repo_path": "setup.py", "max_forks_repo_name": "jblackma/NRSur7dq2", "max_forks_repo_head_hexsha": "fd2383f5a49f180598af25dd5ea2994dccfab2f0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-04-01T00:27:04.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-11T03:33:20.000Z", "avg_line_length": 32.6808510638, "max_line_length": 80, "alphanum_fraction": 0.6334635417, "include": true, "reason": "import numpy", "num_tokens": 400}
|
import numpy as np
import matrix
import tkinter as tk
from PIL import Image, ImageTk
class MyApp(tk.Tk):
def __init__(self):
super().__init__()
self.geometry("600x400+10+10")
self.bind('<Button-1>', self.callback)
game = matrix.create_field(12,8)
matrix.rand_array(game,15)
self.game = matrix.modify(game)
def callback(self, event):
    print("Pointer now at x = %d and y = %d" % (event.x, event.y))
    x = event.x // 50
    y = event.y // 50
    print("game =", x, y)
    print(self.game[y, x])
    cnv = tk.Canvas(bg="white", height=50, width=50)
    img = ImageTk.PhotoImage(file='/home/rv/code/tmp/pointer/1.gif')
    cnv.image = img  # keep a reference so the image is not garbage-collected
    cnv.create_image(50, 50, image=img)
    cnv.place(x=x*50, y=y*50)
def main():
myField = MyApp()
myField.mainloop()
if __name__ == '__main__':
main()
|
{"hexsha": "18cbc25fd89bd90868b841105c2e18893c7d77b1", "size": 925, "ext": "py", "lang": "Python", "max_stars_repo_path": "form.py", "max_stars_repo_name": "rvgorod122/pointer", "max_stars_repo_head_hexsha": "99bb95e6b75bc5de3a35f4b8a2d300dd6294292c", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "form.py", "max_issues_repo_name": "rvgorod122/pointer", "max_issues_repo_head_hexsha": "99bb95e6b75bc5de3a35f4b8a2d300dd6294292c", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "form.py", "max_forks_repo_name": "rvgorod122/pointer", "max_forks_repo_head_hexsha": "99bb95e6b75bc5de3a35f4b8a2d300dd6294292c", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.0, "max_line_length": 74, "alphanum_fraction": 0.5535135135, "include": true, "reason": "import numpy", "num_tokens": 256}
|
using Ejemplo
using Test
@testset "Ejemplo.jl" begin
@test f_xy(1,5) == 6  # Write your tests here.
end
|
{"hexsha": "dd6b051524fbeb0f9d1adf2c28fbd53e3745cbe3", "size": 107, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "EdwardAngelino/Ejemplo.jl", "max_stars_repo_head_hexsha": "e59cd0c957eefbbd5259190fd30e6cb34c4660c2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "EdwardAngelino/Ejemplo.jl", "max_issues_repo_head_hexsha": "e59cd0c957eefbbd5259190fd30e6cb34c4660c2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "EdwardAngelino/Ejemplo.jl", "max_forks_repo_head_hexsha": "e59cd0c957eefbbd5259190fd30e6cb34c4660c2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.2857142857, "max_line_length": 48, "alphanum_fraction": 0.691588785, "num_tokens": 39}
|
#pragma once
#include "AxisString.hpp"
#include "SymbolTable.hpp"
#include <boost/spirit/include/qi.hpp>
#include <list>
#include "foundation/definitions/AxisInputLanguage.hpp"
#include "services/language/primitives/OrExpressionParser.hpp"
#include "services/language/primitives/GeneralExpressionParser.hpp"
namespace axis { namespace application { namespace parsing { namespace preprocessing {
/// <summary>
/// Defines an existence expression parser with the default
/// behavior.
/// </summary>
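/// <remarks>
/// Typical usage (sketch): construct with a symbol table, optionally check
/// syntax with <c>IsSyntacticallyValid</c>, then call <c>Evaluate</c> and
/// read the outcome with <c>GetLastResult</c>.
/// </remarks>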
class ExistenceExpressionParser
{
public:
/// <summary>
/// Creates a new instance of this class.
/// </summary>
/// <param name="st">Associated symbol table to be used when evaluating an expression.</param>
ExistenceExpressionParser(axis::application::parsing::preprocessing::SymbolTable& st);
/// <summary>
/// Destroys this object.
/// </summary>
~ExistenceExpressionParser(void);
/// <summary>
/// Returns if the supplied expression is syntactically valid.
/// </summary>
/// <param name="e">The expression to be validated.</param>
bool IsSyntacticallyValid(const axis::String& e);
/// <summary>
/// Evaluates the supplied expression.
/// </summary>
/// <param name="e">The expression to be evaluated.</param>
/// <returns>
/// Returns True if preprocessing was successful, False otherwise.
/// </returns>
bool Evaluate(const axis::String& e);
/// <summary>
/// Evaluates the supplied expression.
/// </summary>
/// <param name="e">The expression to be evaluated.</param>
/// <param name="expectedSymbolDefinedState">State (defined or not) expected when evaluating each identifier in expression.</param>
/// <returns>
/// Returns True if preprocessing was successful, False otherwise.
/// </returns>
bool Evaluate(const axis::String& e, bool expectedSymbolDefinedState);
/// <summary>
/// Returns the result of the last evaluated expression.
/// </summary>
/// <remarks>
/// If no expression has been evaluated by this object yet, False is returned.
/// </remarks>
bool GetLastResult( void ) const;
private:
// our temporary work list of tokens
typedef std::list<axis::application::parsing::preprocessing::Symbol> token_list;
/// <summary>
/// Process a token obtained when parsing an expression.
/// </summary>
/// <param name="type">Type identifier of the token.</param>
/// <param name="name">Token name.</param>
void ProcessToken(axis::foundation::definitions::TokenType type, axis::String const& name);
/// <summary>
/// Process a token obtained when parsing an expression.
/// </summary>
/// <param name="type">Type identifier of the token.</param>
/// <param name="name">Token name.</param>
/// <param name="precedence">Precedence number of the token.</param>
/// <param name="associativity">Associativity number of the token.</param>
void ProcessToken(axis::foundation::definitions::TokenType type, axis::String const& name,
int precedence, axis::foundation::definitions::OperatorAssociativity associativity);
void EvaluateParseTree(const axis::services::language::parsing::ParseTreeNode & parseNode);
bool IsValidId(const axis::String& id) const;
// our grammar rules
axis::services::language::primitives::OrExpressionParser _binary_op;
axis::services::language::primitives::OrExpressionParser _unary_op;
axis::services::language::primitives::OrExpressionParser _expression;
axis::services::language::primitives::GeneralExpressionParser _expression_alt1;
axis::services::language::primitives::OrExpressionParser _expression2;
axis::services::language::primitives::GeneralExpressionParser _expression2_alt1;
axis::services::language::primitives::GeneralExpressionParser _expression2_alt2;
axis::services::language::primitives::OrExpressionParser _term;
axis::services::language::primitives::GeneralExpressionParser _term_alt1;
axis::services::language::primitives::OrExpressionParser _operand;
axis::services::language::primitives::GeneralExpressionParser _group;
axis::services::language::primitives::OrExpressionParser _invalidIdExpression;
// associated symbol table
axis::application::parsing::preprocessing::SymbolTable& _symbolTable;
// used when evaluating an expression
token_list _expressionTokens;
token_list _operatorStack;
bool _lastResult;
};
} } } } // namespace axis::application::parsing::preprocessing
|
{"hexsha": "8f252e2fe7afede74c087403a1349df08483e0ec", "size": 4359, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "Axis.Core/application/parsing/preprocessing/ExistenceExpressionParser.hpp", "max_stars_repo_name": "renato-yuzup/axis-fem", "max_stars_repo_head_hexsha": "2e8d325eb9c8e99285f513b4c1218ef53eb0ab22", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2021-07-23T08:49:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-29T22:07:30.000Z", "max_issues_repo_path": "Axis.Core/application/parsing/preprocessing/ExistenceExpressionParser.hpp", "max_issues_repo_name": "renato-yuzup/axis-fem", "max_issues_repo_head_hexsha": "2e8d325eb9c8e99285f513b4c1218ef53eb0ab22", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Axis.Core/application/parsing/preprocessing/ExistenceExpressionParser.hpp", "max_forks_repo_name": "renato-yuzup/axis-fem", "max_forks_repo_head_hexsha": "2e8d325eb9c8e99285f513b4c1218ef53eb0ab22", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.9908256881, "max_line_length": 132, "alphanum_fraction": 0.7435191558, "num_tokens": 1008}
|
import os
import os.path as osp
from argparse import ArgumentParser
import mmcv
import numpy as np
from xtcocotools.coco import COCO
from mmpose.apis import (inference_pose_lifter_model,
inference_top_down_pose_model, vis_3d_pose_result)
from mmpose.apis.inference import init_pose_model
from mmpose.core import SimpleCamera
def _keypoint_camera_to_world(keypoints,
camera_params,
image_name=None,
dataset='Body3DH36MDataset'):
"""Project 3D keypoints from the camera space to the world space.
Args:
keypoints (np.ndarray): 3D keypoints in shape [..., 3]
camera_params (dict): Parameters for all cameras.
image_name (str): The image name to specify the camera.
dataset (str): The dataset type, e.g. Body3DH36MDataset.
"""
cam_key = None
if dataset == 'Body3DH36MDataset':
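# e.g. 'S1_Directions_1.54138969_000001.jpg' -> cam_key = ('S1', '54138969')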
subj, rest = osp.basename(image_name).split('_', 1)
_, rest = rest.split('.', 1)
camera, rest = rest.split('_', 1)
cam_key = (subj, camera)
else:
raise NotImplementedError
camera = SimpleCamera(camera_params[cam_key])
keypoints_world = keypoints.copy()
keypoints_world[..., :3] = camera.camera_to_world(keypoints[..., :3])
return keypoints_world
def main():
parser = ArgumentParser()
parser.add_argument(
'pose_lifter_config',
help='Config file for the 2nd stage pose lifter model')
parser.add_argument(
'pose_lifter_checkpoint',
help='Checkpoint file for the 2nd stage pose lifter model')
parser.add_argument(
'--pose-detector-config',
type=str,
default=None,
help='Config file for the 1st stage 2D pose detector')
parser.add_argument(
'--pose-detector-checkpoint',
type=str,
default=None,
help='Checkpoint file for the 1st stage 2D pose detector')
parser.add_argument('--img-root', type=str, default='', help='Image root')
parser.add_argument(
'--json-file',
type=str,
default=None,
help='Json file containing image and bbox information. Optionally,'
'The Json file can also contain 2D pose information. See'
'"only-second-stage"')
parser.add_argument(
'--camera-param-file',
type=str,
default=None,
help='Camera parameter file for converting 3D pose predictions from '
'the camera space to the world space. If None, no conversion will be '
'applied.')
parser.add_argument(
'--only-second-stage',
action='store_true',
help='If true, load 2D pose detection result from the Json file and '
'skip the 1st stage. The pose detection model will be ignored.')
parser.add_argument(
'--rebase-keypoint-height',
action='store_true',
help='Rebase the predicted 3D pose so its lowest keypoint has a '
'height of 0 (landing on the ground). This is useful for '
'visualization when the model does not predict the global position '
'of the 3D pose.')
parser.add_argument(
'--show-ground-truth',
action='store_true',
help='If True, show ground truth if it is available. The ground truth '
'should be contained in the annotations in the Json file with the key '
'"keypoints_3d" for each instance.')
parser.add_argument(
'--show',
action='store_true',
default=False,
help='whether to show img')
parser.add_argument(
'--out-img-root',
type=str,
default=None,
help='Root of the output visualization images. '
'Default not saving the visualization images.')
parser.add_argument(
'--device', default='cuda:0', help='Device for inference')
parser.add_argument('--kpt-thr', type=float, default=0.3)
parser.add_argument(
'--radius',
type=int,
default=4,
help='Keypoint radius for visualization')
parser.add_argument(
'--thickness',
type=int,
default=1,
help='Link thickness for visualization')
args = parser.parse_args()
assert args.show or (args.out_img_root != '')
coco = COCO(args.json_file)
# First stage: 2D pose detection
pose_det_results_list = []
if args.only_second_stage:
from mmpose.apis.inference import _xywh2xyxy
print('Stage 1: load 2D pose results from Json file.')
for image_id, image in coco.imgs.items():
image_name = osp.join(args.img_root, image['file_name'])
ann_ids = coco.getAnnIds(image_id)
pose_det_results = []
for ann_id in ann_ids:
ann = coco.anns[ann_id]
keypoints = np.array(ann['keypoints']).reshape(-1, 3)
keypoints[..., 2] = keypoints[..., 2] >= 1
keypoints_3d = np.array(ann['keypoints_3d']).reshape(-1, 4)
keypoints_3d[..., 3] = keypoints_3d[..., 3] >= 1
bbox = np.array(ann['bbox']).reshape(1, -1)
pose_det_result = {
'image_name': image_name,
'bbox': _xywh2xyxy(bbox),
'keypoints': keypoints,
'keypoints_3d': keypoints_3d
}
pose_det_results.append(pose_det_result)
pose_det_results_list.append(pose_det_results)
else:
print('Stage 1: 2D pose detection.')
pose_det_model = init_pose_model(
args.pose_detector_config,
args.pose_detector_checkpoint,
device=args.device.lower())
assert pose_det_model.cfg.model.type == 'TopDown', 'Only "TopDown" ' \
'model is supported for the 1st stage (2D pose detection)'
dataset = pose_det_model.cfg.data['test']['type']
img_keys = list(coco.imgs.keys())
for i in mmcv.track_iter_progress(range(len(img_keys))):
# get bounding box annotations
image_id = img_keys[i]
image = coco.loadImgs(image_id)[0]
image_name = osp.join(args.img_root, image['file_name'])
ann_ids = coco.getAnnIds(image_id)
# make person results for single image
person_results = []
for ann_id in ann_ids:
person = {}
ann = coco.anns[ann_id]
person['bbox'] = ann['bbox']
person_results.append(person)
pose_det_results, _ = inference_top_down_pose_model(
pose_det_model,
image_name,
person_results,
bbox_thr=None,
format='xywh',
dataset=dataset,
return_heatmap=False,
outputs=None)
for res in pose_det_results:
res['image_name'] = image_name
pose_det_results_list.append(pose_det_results)
# Second stage: Pose lifting
print('Stage 2: 2D-to-3D pose lifting.')
pose_lift_model = init_pose_model(
args.pose_lifter_config,
args.pose_lifter_checkpoint,
device=args.device.lower())
assert pose_lift_model.cfg.model.type == 'PoseLifter', 'Only ' \
'"PoseLifter" model is supported for the 2nd stage ' \
'(2D-to-3D lifting)'
dataset = pose_lift_model.cfg.data['test']['type']
camera_params = None
if args.camera_param_file is not None:
camera_params = mmcv.load(args.camera_param_file)
for i, pose_det_results in enumerate(
mmcv.track_iter_progress(pose_det_results_list)):
# 2D-to-3D pose lifting
# Note that the pose_det_results are regarded as a single-frame pose
# sequence
pose_lift_results = inference_pose_lifter_model(
pose_lift_model,
pose_results_2d=[pose_det_results],
dataset=dataset,
with_track_id=False)
image_name = pose_det_results[0]['image_name']
# Pose processing
pose_lift_results_vis = []
for idx, res in enumerate(pose_lift_results):
keypoints_3d = res['keypoints_3d']
# project to world space
if camera_params is not None:
keypoints_3d = _keypoint_camera_to_world(
keypoints_3d,
camera_params=camera_params,
image_name=image_name,
dataset=dataset)
# rebase height (z-axis)
if args.rebase_keypoint_height:
keypoints_3d[..., 2] -= np.min(
keypoints_3d[..., 2], axis=-1, keepdims=True)
res['keypoints_3d'] = keypoints_3d
# Add title
det_res = pose_det_results[idx]
instance_id = det_res.get('track_id', idx)
res['title'] = f'Prediction ({instance_id})'
pose_lift_results_vis.append(res)
# Add ground truth
if args.show_ground_truth:
if 'keypoints_3d' not in det_res:
print('Fail to show ground truth. Please make sure that'
' the instance annotations from the Json file'
' contain "keypoints_3d".')
else:
gt = res.copy()
gt['keypoints_3d'] = det_res['keypoints_3d']
gt['title'] = f'Ground truth ({instance_id})'
pose_lift_results_vis.append(gt)
# Visualization
if args.out_img_root is None:
out_file = None
else:
os.makedirs(args.out_img_root, exist_ok=True)
out_file = osp.join(args.out_img_root, f'vis_{i}.jpg')
vis_3d_pose_result(
pose_lift_model,
result=pose_lift_results_vis,
img=image_name,
out_file=out_file)
if __name__ == '__main__':
main()
|
{"hexsha": "730622c3f850fe3e700ed8a94f3e5c67de944aed", "size": 9999, "ext": "py", "lang": "Python", "max_stars_repo_path": "demo/body3d_two_stage_img_demo.py", "max_stars_repo_name": "jlgzb/mmpose", "max_stars_repo_head_hexsha": "0ecf06e3580f141f6ab44645768a0d6d8ba48383", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-10-02T08:21:43.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-01T07:25:24.000Z", "max_issues_repo_path": "demo/body3d_two_stage_img_demo.py", "max_issues_repo_name": "jlgzb/mmpose", "max_issues_repo_head_hexsha": "0ecf06e3580f141f6ab44645768a0d6d8ba48383", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "demo/body3d_two_stage_img_demo.py", "max_forks_repo_name": "jlgzb/mmpose", "max_forks_repo_head_hexsha": "0ecf06e3580f141f6ab44645768a0d6d8ba48383", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.6263736264, "max_line_length": 79, "alphanum_fraction": 0.5878587859, "include": true, "reason": "import numpy", "num_tokens": 2214}
|
\section{\module{hmac} ---
Keyed-Hashing for Message Authentication}
\declaremodule{standard}{hmac}
\modulesynopsis{Keyed-Hashing for Message Authentication (HMAC)
implementation for Python.}
\moduleauthor{Gerhard H{\"a}ring}{ghaering@users.sourceforge.net}
\sectionauthor{Gerhard H{\"a}ring}{ghaering@users.sourceforge.net}
\versionadded{2.2}
This module implements the HMAC algorithm as described by \rfc{2104}.
\begin{funcdesc}{new}{key\optional{, msg\optional{, digestmod}}}
Return a new hmac object. If \var{msg} is present, the method call
\code{update(\var{msg})} is made. \var{digestmod} is the digest
module for the HMAC object to use. It defaults to the
\refmodule{md5} module.
\end{funcdesc}
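For example, the following computes an HMAC-MD5 digest (a minimal sketch;
the key and message strings are placeholders):

\begin{verbatim}
import hmac

h = hmac.new("my-secret-key")
h.update("message to authenticate")
digest = h.hexdigest()   # 32 hexadecimal digits for the default md5
\end{verbatim}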
An HMAC object has the following methods:
\begin{methoddesc}[hmac]{update}{msg}
Update the hmac object with the string \var{msg}. Repeated calls
are equivalent to a single call with the concatenation of all the
arguments: \code{m.update(a); m.update(b)} is equivalent to
\code{m.update(a + b)}.
\end{methoddesc}
\begin{methoddesc}[hmac]{digest}{}
Return the digest of the strings passed to the \method{update()}
method so far. This is a 16-byte string (for \refmodule{md5}) or a
20-byte string (for \refmodule{sha}) which may contain non-\ASCII{}
characters, including NUL bytes.
\end{methoddesc}
\begin{methoddesc}[hmac]{hexdigest}{}
Like \method{digest()} except the digest is returned as a string of
length 32 for \refmodule{md5} (40 for \refmodule{sha}), containing
only hexadecimal digits. This may be used to exchange the value
safely in email or other non-binary environments.
\end{methoddesc}
\begin{methoddesc}[hmac]{copy}{}
Return a copy (``clone'') of the hmac object. This can be used to
efficiently compute the digests of strings that share a common
initial substring.
\end{methoddesc}
|
{"hexsha": "1d49417853ceb34813d8057fc02ef05b2278eac8", "size": 1879, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Doc/lib/libhmac.tex", "max_stars_repo_name": "deadsnakes/python2.4", "max_stars_repo_head_hexsha": "f493d5415b662e99a73d017bcafe2148c5bc8fb5", "max_stars_repo_licenses": ["PSF-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Doc/lib/libhmac.tex", "max_issues_repo_name": "deadsnakes/python2.4", "max_issues_repo_head_hexsha": "f493d5415b662e99a73d017bcafe2148c5bc8fb5", "max_issues_repo_licenses": ["PSF-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Doc/lib/libhmac.tex", "max_forks_repo_name": "deadsnakes/python2.4", "max_forks_repo_head_hexsha": "f493d5415b662e99a73d017bcafe2148c5bc8fb5", "max_forks_repo_licenses": ["PSF-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.3469387755, "max_line_length": 69, "alphanum_fraction": 0.7339010112, "num_tokens": 526}
|
#!/usr/bin/env python3
# bagus@ep.its.ac.id,
# changelog:
# 2019-04-16: init code from avec
# 2019-07-02: modify to extract 10039 iemocap data
import numpy as np
import os
import time
import ntpath
import pickle
feature_type = 'egemaps'
exe_opensmile = '~/opensmile-2.3.0/bin/linux_x64_standalone_static/SMILExtract'
path_config = '~/opensmile-2.3.0/config/'
iemocap_path = '/media/bagustris/bagus/dataset/IEMOCAP_full_release/'
with open(iemocap_path+'data_collected_full.pickle', 'rb') as handle:
data = pickle.load(handle)
if feature_type=='mfcc':
folder_output = '../audio_features_mfcc/' # output folder
conf_smileconf = path_config + 'MFCC12_0_D_A.conf' # MFCCs 0-12 with delta and acceleration coefficients
opensmile_options = '-configfile ' + conf_smileconf + ' -appendcsv 0 -timestampcsv 1 -headercsv 1' # options from standard_data_output_lldonly.conf.inc
outputoption = '-csvoutput' # options from standard_data_output_lldonly.conf.inc
elif feature_type=='egemaps':
folder_output = './audio_features_egemaps_10039/' # output folder
conf_smileconf = path_config + 'gemaps/eGeMAPSv01a.conf' # eGeMAPS feature set
opensmile_options = '-configfile ' + conf_smileconf + ' -appendcsvlld 0 -timestampcsvlld 1 -headercsvlld 1' # options from standard_data_output.conf.inc
outputoption = '-lldcsvoutput' # options from standard_data_output.conf.inc
else:
print('Error: Feature type ' + feature_type + ' unknown!')
if not os.path.exists(folder_output):
os.mkdir(folder_output)
listfile = [item['id'] for item in data]
for fn in listfile:
filename = iemocap_path+'Session'+fn[4]+'/sentences/wav/'+fn[:-5]+'/'+fn+'.wav'
instname = fn #os.path.splitext(filename)[0]
outfilename = folder_output + instname + '.csv'
opensmile_call = exe_opensmile + ' ' + opensmile_options + ' -inputfile ' + filename + ' ' + outputoption + ' ' + outfilename + ' -instname ' + instname + ' -output ?'  # '-output ?' disables the default HTK output
os.system(opensmile_call)
time.sleep(0.01)
os.remove('smile.log')
|
{"hexsha": "7a11d41cac91c0bb07d08d9d442bce352c67fc54", "size": 2096, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/extract_feature/save_egemaps_10039-pc.py", "max_stars_repo_name": "bagustris/dimensional-ser", "max_stars_repo_head_hexsha": "ce9bfae1d962b3581dd7022e4f145429615e2771", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2020-07-09T08:31:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-18T10:23:54.000Z", "max_issues_repo_path": "code/extract_feature/save_egemaps_10039-pc.py", "max_issues_repo_name": "bagustris/dimensional-ser", "max_issues_repo_head_hexsha": "ce9bfae1d962b3581dd7022e4f145429615e2771", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2020-01-28T23:16:11.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-11T00:28:10.000Z", "max_forks_repo_path": "code/extract_feature/save_egemaps_10039-pc.py", "max_forks_repo_name": "bagustris/dimensional-ser", "max_forks_repo_head_hexsha": "ce9bfae1d962b3581dd7022e4f145429615e2771", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-06-23T01:22:33.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-23T01:22:33.000Z", "avg_line_length": 42.7755102041, "max_line_length": 196, "alphanum_fraction": 0.7089694656, "include": true, "reason": "import numpy", "num_tokens": 610}
|
from __future__ import division
import itertools
import logging
import numpy as np
import time
import cv2 as cv
import sys
import tqdm
from scipy import stats
def exhaustive_search_block_matching(reference_img, search_img, block_size=16, max_search_range=16, norm='l1',
verbose=False):
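"""Full exhaustive-search block matching between two frames.

Returns the block-wise motion-compensated prediction of `reference_img`
built from `search_img`, the per-block optical flow, a dense per-pixel
version of that flow, and the elapsed time in seconds.
"""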
logger = logging.getLogger(__name__)
norm_options = {'l1', 'l2'}
start_time = time.time()
height = reference_img.shape[0]
width = reference_img.shape[1]
# Assertions
assert block_size > 0, 'Block size should be bigger than 0 pixels'
assert max_search_range > 0, 'Max search range should be bigger than 0 pixels'
assert norm in norm_options, '{} norm not supported. Choose one of {}'.format(norm, norm_options)
# Pad reference image to have dimensions multiple of block size
pad_ref_height = int(block_size * np.ceil(height / block_size))
pad_ref_width = int(block_size * np.ceil(width / block_size))
pad_y = pad_ref_height - height
pad_x = pad_ref_width - width
pad_reference_img = np.pad(reference_img, ((0, pad_y), (0, pad_x)), mode='constant')
# Placeholder for predicted frame and optical flow
pad_predicted_frame = np.empty_like(pad_reference_img, dtype=np.uint8)
num_blocks_width, num_blocks_height = int(pad_ref_width / block_size), int(pad_ref_height / block_size)
optical_flow = np.zeros((num_blocks_height, num_blocks_width, 2))
total_number_blocks = num_blocks_width * num_blocks_height
# Loop through every NxN block in the target image
for (block_row, block_col) in tqdm.tqdm(itertools.product(
range(0, pad_ref_height - (block_size - 1), block_size),
range(0, pad_ref_width - (block_size - 1), block_size)
), desc='Exhaustive Block Matching progress', total=total_number_blocks, file=sys.stdout):
# Current block in the reference image
block = pad_reference_img[block_row:block_row + block_size, block_col:block_col + block_size]
# Placeholders for minimum norm and matching block
dfd_n_min = np.inf
matching_block = np.zeros((block_size, block_size))
dx, dy = 0, 0  # default displacement in case no candidate block is valid
# Search in a surrounding region whose extent is set by max_search_range
search_range = range(-max_search_range, block_size + max_search_range)
for (search_col, search_row) in itertools.product(search_range, search_range):
# Up left corner of the candidate block
up_left_y = block_row + search_row
up_left_x = block_col + search_col
# Bottom right corner of the candidate block
bottom_right_y = block_row + search_row + block_size - 1
bottom_right_x = block_col + search_col + block_size - 1
# Do not search if upper left corner is defined outside the reference image
if up_left_y < 0 or up_left_x < 0:
continue
# Do not search if bottom right corner is defined outside the reference image
if bottom_right_y >= height or bottom_right_x >= width:
continue
# Get the candidate block
candidate_block = search_img[up_left_y:bottom_right_y + 1, up_left_x:bottom_right_x + 1]
assert candidate_block.shape == (block_size, block_size)
# Compute the Displaced Frame Difference (DFD) and compute the specified norm
dfd = np.array(candidate_block, dtype=np.float32) - np.array(block, dtype=np.float32)
norm_order = 2 if norm == 'l2' else 1
candidate_dfd_norm = np.linalg.norm(dfd, ord=norm_order)
# Store the minimum norm and corresponding displacement vector
if candidate_dfd_norm < dfd_n_min:
dfd_n_min = candidate_dfd_norm
matching_block = candidate_block
dy = search_col
dx = search_row
# construct the predicted image with the block that matches this block
pad_predicted_frame[block_row:block_row + block_size, block_col:block_col + block_size] = matching_block
if verbose:
logger.info(
"Block [{blk_row}, {blk_col}] out of [{total_blks_rows}, {total_blks_cols}] --> "
"Displacement: ({dx}, {dy})\t Minimum DFD norm: {norm}".format(
blk_row=block_row // block_size,
blk_col=block_col // block_size,
total_blks_rows=num_blocks_height,
total_blks_cols=num_blocks_width,
dx=dx,
dy=dy,
norm=dfd_n_min,
)
)
# Store displacement of this block in each direction
optical_flow[block_row // block_size, block_col // block_size, 0] = dx
optical_flow[block_row // block_size, block_col // block_size, 1] = dy
# Create dense optical flow to match input image dimensions by repeating values
dense_optical_flow = np.repeat(optical_flow, block_size, axis=0)
dense_optical_flow = np.repeat(dense_optical_flow, block_size, axis=1)
end_time = time.time()
total_time = end_time - start_time
logger.info('Total time: {:.2f} s\tTime per block: {:.2f} s'.format(
total_time, total_time / (num_blocks_height * num_blocks_width)
))
# Crop results to match real input dimensions
if pad_y != 0 and pad_x != 0:
predicted_frame = pad_predicted_frame[:-pad_y, :-pad_x]
dense_optical_flow = dense_optical_flow[:-pad_y, :-pad_x]
elif pad_y != 0:
predicted_frame = pad_predicted_frame[:-pad_y, :]
dense_optical_flow = dense_optical_flow[:-pad_y, :]
elif pad_x != 0:
predicted_frame = pad_predicted_frame[:, :-pad_x]
dense_optical_flow = dense_optical_flow[:, :-pad_x]
else:
predicted_frame = pad_predicted_frame
dense_optical_flow = dense_optical_flow
return predicted_frame, optical_flow, dense_optical_flow, total_time
def opencv_optflow(ref_img_data, search_img_data, block_size):
farneback_params = {
'pyr_scale': 0.5,
'levels': 3,
'winsize': block_size,
'iterations': 3,
'poly_n': 5,
'poly_sigma': 1.2,
'flags': cv.OPTFLOW_USE_INITIAL_FLOW
}
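# The Farneback signature changed between OpenCV 2.4 and 3.x: 3.x takes an
# explicit initial-flow argument, while 2.4 accepts the options directly.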
if '3' in cv.__version__:
dense_flow = cv.calcOpticalFlowFarneback(ref_img_data, search_img_data, None, **farneback_params)
elif '2.4' in cv.__version__:
dense_flow = cv.calcOpticalFlowFarneback(ref_img_data, search_img_data, **farneback_params)
else:
dense_flow = None
return dense_flow
def video_stabilization(image, flow, optical_flow_mode, strategy, area_search, acc_direction, previous_direction,
running_avg_weight, **kwargs):
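"""Shift `image` to compensate the global motion estimated from `flow`.

The per-frame displacement is estimated with one of three strategies
('max' 2D-histogram mode, 'trimmed_mean', or 'background_blocks'), smoothed
with a running average, accumulated over frames, and applied as an integer
translation of the image.
"""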
# Unpack directions
previous_u, previous_v = previous_direction
acc_u, acc_v = acc_direction
# Find compensation optical_flow_mode
if strategy == 'max':
xedges = np.arange(-area_search, area_search + 2)
yedges = np.arange(-area_search, area_search + 2)
flow_hist2d, _, _ = np.histogram2d(
np.ravel(flow[:, :, 0]), np.ravel(flow[:, :, 1]), bins=(xedges, yedges)
)
flow_hist2d = flow_hist2d.T
max_pos = np.argwhere(flow_hist2d == flow_hist2d.max())
y_max_pos = max_pos[0, 0]
x_max_pos = max_pos[0, 1]
u_compensate = xedges[x_max_pos]
v_compensate = yedges[y_max_pos]
elif strategy == 'trimmed_mean':
# Use the mean optical_flow_mode to compensate
u_compensate = int(np.round(stats.trim_mean(flow[:, :, 0], 0.2, axis=None)))
v_compensate = int(np.round(stats.trim_mean(flow[:, :, 1], 0.2, axis=None)))
elif strategy == 'background_blocks':
center_positions = kwargs['center_positions']
neighborhood = kwargs['neighborhood']
u_compensate = 0
v_compensate = 0
for center_i, center_j in center_positions:
u_compensate_vals = flow[center_i-neighborhood:center_i+neighborhood,
center_j-neighborhood:center_j+neighborhood, 0]
v_compensate_vals = flow[center_i-neighborhood:center_i+neighborhood,
center_j-neighborhood:center_j+neighborhood, 1]
u_compensate += np.mean(u_compensate_vals)
v_compensate += np.mean(v_compensate_vals)
u_compensate /= len(center_positions)
v_compensate /= len(center_positions)
else:
raise ValueError('Strategy {!r} not supported. Use one of: [max, trimmed_mean, background_blocks]'.format(
strategy
))
print('Displacement (before running avg.): (%s,%s)' % (u_compensate, v_compensate))
# Compute a running average
u_compensate = running_avg_weight * previous_u + (1 - running_avg_weight) * u_compensate
v_compensate = running_avg_weight * previous_v + (1 - running_avg_weight) * v_compensate
print('Displacement (after running avg.): (%s,%s)' % (u_compensate, v_compensate))
if optical_flow_mode == 'forward':
acc_u = int(acc_u - u_compensate)
acc_v = int(acc_v - v_compensate)
else:
acc_u = int(acc_u + u_compensate)
acc_v = int(acc_v + v_compensate)
print('Accumulated displacement: (%s,%s)' % (acc_u, acc_v))
rect_image = np.zeros(image.shape)
if acc_u == 0 and acc_v == 0:
rect_image = image
elif acc_u == 0:
if acc_v > 0:
rect_image[:, acc_v:, :] = image[:, :-acc_v, :]
else:
rect_image[:, :acc_v, :] = image[:, -acc_v:, :]
elif acc_v == 0:
if acc_u > 0:
rect_image[acc_u:, :, :] = image[:-acc_u, :, :]
else:
rect_image[:acc_u, :, :] = image[-acc_u:, :, :]
elif acc_u > 0:
if acc_v > 0:
rect_image[acc_u:, acc_v:, :] = image[:-acc_u, :-acc_v, :]
else:
rect_image[acc_u:, :acc_v, :] = image[:-acc_u, -acc_v:, :]
else:
if acc_v > 0:
rect_image[:acc_u, acc_v:, :] = image[-acc_u:, :-acc_v, :]
else:
rect_image[:acc_u, :acc_v, :] = image[-acc_u:, -acc_v:, :]
return rect_image, (acc_u, acc_v), (u_compensate, v_compensate)
def video_stabilization_sota(prev_gray, cur_gray, prev_to_cur_transform, prev_corner):
cur_corner, status, err = cv.calcOpticalFlowPyrLK(prev_gray, cur_gray, prev_corner, None)
# storage for keypoints with status 1
prev_corner2 = []
cur_corner2 = []
for i, st in enumerate(status):
# if keypoint found in frame i & i-1
if st == 1:
# store coords of keypoints that appear in both
prev_corner2.append(prev_corner[i])
cur_corner2.append(cur_corner[i])
prev_corner2 = np.array(prev_corner2)
cur_corner2 = np.array(cur_corner2)
# estimate partial transform (resource: http://nghiaho.com/?p=2208)
T_new = cv.estimateRigidTransform(prev_corner2, cur_corner2, False)
if T_new is not None:
    T = T_new
else:
    # fall back to the identity transform when estimation fails
    T = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
# translation x
dx = T[0, 2]
# translation y
dy = T[1, 2]
# rotation
da = np.arctan2(T[1, 0], T[0, 0])
# store for saving to disk as table
prev_to_cur_transform.append([dx, dy, da])
return prev_to_cur_transform
def read_flo_flow(name):
flow = None
with open(name, 'rb') as f:
header = f.read(4)
if header.decode("utf-8") != 'PIEH':
raise Exception('Flow file header does not contain PIEH')
width = np.fromfile(f, np.int32, 1).squeeze()
height = np.fromfile(f, np.int32, 1).squeeze()
flow = np.fromfile(f, np.float32, width * height * 2).reshape((height, width, 2))
return flow.astype(np.float32)
def read_kitti_flow(img):
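# KITTI flow PNGs store (u, v) as 16-bit values: subtract 2**15 and divide
# by 64 to get pixel displacements; the third channel flags valid pixels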
# BGR -> RGB
img = img[:, :, ::-1]
optical_flow = img[:, :, :2].astype(float)
optical_flow -= 2 ** 15
optical_flow /= 64.0
valid_pixels = img[:, :, 2] == 1.0
return optical_flow, valid_pixels
|
{"hexsha": "d49ff39603ca028cce74d8e8eed29aed89f2925d", "size": 12021, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/optical_flow.py", "max_stars_repo_name": "mcv-m6-video/mcv-m6-2018-team5", "max_stars_repo_head_hexsha": "bb7ee72a06ce021bab751c8e8773ec128170b524", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tools/optical_flow.py", "max_issues_repo_name": "mcv-m6-video/mcv-m6-2018-team5", "max_issues_repo_head_hexsha": "bb7ee72a06ce021bab751c8e8773ec128170b524", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tools/optical_flow.py", "max_forks_repo_name": "mcv-m6-video/mcv-m6-2018-team5", "max_forks_repo_head_hexsha": "bb7ee72a06ce021bab751c8e8773ec128170b524", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.9368770764, "max_line_length": 114, "alphanum_fraction": 0.6373013892, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3045}
|
using Cairo
using Rsvg
abstract type AbstractGraphics <: AbstractResource end
mutable struct Texture{T} <: AbstractGraphics
ptr::Ptr{T}
width::Int
height::Int
center_x::Int
center_y::Int
end
function Texture(render_ptr::Ptr{SDL.Renderer}, sdl_surface::Ptr{SDL.Surface}; halign = 0.5, valign = 0.5)
texture_ptr = SDL.CreateTextureFromSurface(render_ptr, sdl_surface)
SDL.FreeSurface(sdl_surface)
width, height = Int[0], Int[0]
SDL.QueryTexture(texture_ptr, C_NULL, C_NULL, width, height)
self = Texture(texture_ptr, width[], height[], round(Int, width[]*halign), round(Int, height[]*valign))
finalizer(destroy!, self)
return self
end
function Texture(resources::Resources, filename::String; width::Int = -1, height::Int = -1, scale::Real = 1.0)
if filename in keys(resources)
@debug("resource already loaded: '$filename'")
return resources[filename]::Texture
end
ptr, width, height = load(query(filename), resources, width, height, scale)
self = Texture(ptr, width, height, round(Int, width/2), round(Int, height/2))
finalizer(destroy!, self)
resources[filename] = self
return self
end
function load(f::File{format"SVG"}, resources::Resources, width::Int, height::Int, scale::Real)
rsvg_handle = Rsvg.handle_new_from_file(f.filename)
Int(rsvg_handle.ptr) == 0 && error("'$(f.filename)' is not a valid SVG file")
rsvg_dim = Rsvg.handle_get_dimensions(rsvg_handle)
width = ceil(Int, scale*(width >= 0 ? width : rsvg_dim.width))
height = ceil(Int, scale*(height >= 0 ? height : rsvg_dim.height))
cairo_surface = Cairo.CairoImageSurface(fill(UInt32(0), (height, width)), Cairo.FORMAT_ARGB32)
cairo_context = Cairo.CairoContext(cairo_surface)
scale_x, scale_y = width / rsvg_dim.width, height / rsvg_dim.height
Cairo.scale(cairo_context, scale_x, scale_y)
Rsvg.handle_render_cairo(cairo_context, rsvg_handle)
Cairo.destroy(cairo_context)
width, height = ceil(Int, cairo_surface.width), ceil(Int, cairo_surface.height)
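# Cairo's ARGB32 data is native-endian; hand it to SDL with explicit
# channel masks so the byte order is interpreted correctly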
sdl_surface = SDL.CreateRGBSurfaceFrom(pointer(cairo_surface.data),
width, height, 32, Int32(width*4),
0x00_ff_00_00, 0x00_00_ff_00, 0x00_00_00_ff, 0xff_00_00_00)
texture_ptr = SDL.CreateTextureFromSurface(resources.render_ptr, sdl_surface)
SDL.FreeSurface(sdl_surface)
Cairo.destroy(cairo_surface)
if texture_ptr == C_NULL
error("Failed to load texture from: '$(f.filename)'")
end
return texture_ptr, width, height
end
function destroy!(texture::Texture{SDL.Texture})
SDL.DestroyTexture(texture.ptr)
texture.ptr = C_NULL
return nothing
end
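# Usage sketch (hypothetical file name; assumes a `Resources` object `res`
# constructed elsewhere in the package, holding a valid SDL renderer):
#     tex = Texture(res, "assets/sprite.svg", scale = 2.0)
#     @show tex.width tex.height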
|
{"hexsha": "a85cdfcdfbb8f1e739c0567989734607b85d1f59", "size": 2740, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/texture.jl", "max_stars_repo_name": "freemin7/Gloria.jl", "max_stars_repo_head_hexsha": "35ab01e48c19e414320b2bb2800a9cbd57e86075", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 42, "max_stars_repo_stars_event_min_datetime": "2018-09-01T11:30:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-04T22:13:02.000Z", "max_issues_repo_path": "src/texture.jl", "max_issues_repo_name": "freemin7/Gloria.jl", "max_issues_repo_head_hexsha": "35ab01e48c19e414320b2bb2800a9cbd57e86075", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2019-12-07T19:55:13.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-22T18:29:43.000Z", "max_forks_repo_path": "src/texture.jl", "max_forks_repo_name": "freemin7/Gloria.jl", "max_forks_repo_head_hexsha": "35ab01e48c19e414320b2bb2800a9cbd57e86075", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-09-07T10:15:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-16T10:02:59.000Z", "avg_line_length": 37.5342465753, "max_line_length": 110, "alphanum_fraction": 0.6919708029, "num_tokens": 713}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the Kramers-Kronig Calculator software package.
#
# Copyright (c) 2013 Benjamin Watts, Daniel J. Lauk
#
# The software is licensed under the terms of the zlib/libpng license.
# For details see LICENSE.txt
import kkcalc as kk
import numpy as np
import matplotlib.pyplot as plt
filename = 'Fe.cf' # full path and filename
chemical_formula = 'Fe'
x_min = 690
x_max = 740
output = kk.kk_calculate_real(filename,
chemical_formula,
load_options=None,
input_data_type='Beta',
merge_points=[x_min, x_max],
add_background=False,
fix_distortions=False,
curve_tolerance=None,
curve_recursion=50)
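# kk_calculate_real appears to return an array with columns [energy, f1, f2]
# (consistent with how it is indexed for the plot labels below).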
Stoichiometry = kk.data.ParseChemicalFormula(chemical_formula)
ASF_E, ASF_Data = kk.data.calculate_asf(Stoichiometry)
ASF_Data2 = kk.data.coeffs_to_ASF(ASF_E, np.vstack((ASF_Data, ASF_Data[-1])))
plt.figure()
plt.plot(output[:, 0], output[:, 1], label='f1 kkcalc')
plt.plot(output[:, 0], output[:, 2], label='f2 kkcalc')
plt.plot(ASF_E, ASF_Data2, label='Henke f2')
plt.legend()
plt.xlim(x_min, x_max)
plt.title('{:d} eV - {:d} eV'.format(x_min, x_max))
plt.xlabel('Energy [eV]')
plt.ylabel('f1, f2')
plt.show()
|
{"hexsha": "66493732f5f10b7673a567dc6ee63eb7118b7ba5", "size": 1414, "ext": "py", "lang": "Python", "max_stars_repo_path": "example/example.py", "max_stars_repo_name": "dschick/kkcalc", "max_stars_repo_head_hexsha": "9218d557fb3217ff1339dcc81230380e2cc0059b", "max_stars_repo_licenses": ["Zlib"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "example/example.py", "max_issues_repo_name": "dschick/kkcalc", "max_issues_repo_head_hexsha": "9218d557fb3217ff1339dcc81230380e2cc0059b", "max_issues_repo_licenses": ["Zlib"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "example/example.py", "max_forks_repo_name": "dschick/kkcalc", "max_forks_repo_head_hexsha": "9218d557fb3217ff1339dcc81230380e2cc0059b", "max_forks_repo_licenses": ["Zlib"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.4222222222, "max_line_length": 77, "alphanum_fraction": 0.6110325318, "include": true, "reason": "import numpy", "num_tokens": 350}
|
#!/usr/bin/env python
'''Testing for linker.py
@author: Zach Hafen
@contact: zachary.h.hafen@gmail.com
@status: Development
'''
import copy
import mock
import numpy as np
import numpy.testing as npt
import pytest
import unittest
import unyt
import galaxy_dive.read_data.ahf as read_ahf
import galaxy_dive.analyze_data.halo_data as analyze_halos
import galaxy_dive.galaxy_linker.linker as general_galaxy_linker
########################################################################
# Useful global test variables
########################################################################
gal_linker_kwargs = {
'length_scale': 'Rvir',
'redshift': 0.16946003,
'snum': 500,
'hubble': 0.70199999999999996,
'halo_data_dir': './tests/data/analysis_dir3',
'mtree_halos_index': 600,
'main_mt_halo_id': 0,
'halo_file_tag': 'smooth',
'galaxy_cut': 0.1,
'ids_to_return': [ 'halo_id', 'host_halo_id', 'gal_id', 'host_gal_id',
'mt_halo_id', 'mt_gal_id', 'd_gal',
'd_other_gal_scaled', '0.1_Rvir', ],
'minimum_criteria': 'n_star',
'minimum_value': 0,
'low_memory_mode': False,
}
########################################################################
class TestGalaxyLinker( unittest.TestCase ):
def setUp( self ):
# Get input data
comoving_halo_coords = np.array([
[ 29414.96458784, 30856.75007114, 32325.90901812],
[ 31926.42103071, 51444.46756529, 1970.1967437 ] ])
self.redshift = gal_linker_kwargs['redshift']
self.hubble = gal_linker_kwargs['hubble']
halo_coords = comoving_halo_coords / (1. + self.redshift) / self.hubble
# Make the necessary kwargs
self.kwargs = gal_linker_kwargs
self.galaxy_linker = general_galaxy_linker.GalaxyLinker(
halo_coords, **self.kwargs )
# Get the necessary reader.
self.galaxy_linker.halo_data.data_reader = read_ahf.AHFReader(
self.kwargs['halo_data_dir'] )
# Get the full needed ahf info.
self.galaxy_linker.halo_data.data_reader.get_halos( 500 )
########################################################################
@mock.patch( 'galaxy_dive.analyze_data.halo_data.HaloData.get_data' )
def test_valid_halo_inds( self, mock_get_halo_data ):
# Make sure we actually have a minimum
self.galaxy_linker.minimum_value = 10
# Mock the halo data
mock_get_halo_data.side_effect = [ np.array( [ 100, 5, 10, 0 ] ), ]
actual = self.galaxy_linker.valid_halo_inds
expected = np.array( [ 0, 2, ] )
npt.assert_allclose( expected, actual )
########################################################################
def test_dist_to_all_valid_halos( self ):
        '''Test computing the distances from each particle to all valid halos.'''
self.galaxy_linker.particle_positions = np.array([
# Right in the middle of mt halo 0 at snap 500
[ 29414.96458784, 30856.75007114, 32325.90901812],
# Just outside the scale radius of mt halo 0 at snap 500.
[ 29414.96458784 + 50., 30856.75007114, 32325.90901812],
# Just inside the scale radius of mt halo 0 at snap 500.
[ 29414.96458784, 30856.75007114 - 25., 32325.90901812],
])
self.galaxy_linker.particle_positions *= 1. / (1. + self.redshift) / \
self.hubble
self.galaxy_linker.n_particles = 3
actual = self.galaxy_linker.dist_to_all_valid_halos
# Build the expected output
n_halos = self.galaxy_linker.halo_data.data_reader.halos.index.size
n_particles = self.galaxy_linker.n_particles
expected_shape = ( n_particles, n_halos )
npt.assert_allclose( actual[ 0, 0 ], 0., atol=1e-7 )
npt.assert_allclose( actual[ 1, 0 ], 50. * 1. / (1. + self.redshift) /
self.hubble )
npt.assert_allclose( actual[ 2, 0 ], 25. * 1. / (1. + self.redshift) /
self.hubble )
self.assertEqual( actual.shape, expected_shape )
########################################################################
def test_dist_to_all_valid_halos_single_particle( self ):
'''Test that we can get the distance to all valid halos,
formatted correctly, even for a single particle.
'''
# Setup test data.
particle_positions = np.array( [
[ 29414.96458784, 30856.75007114, 32325.90901812 ]
] ) / ( ( 1. + self.redshift) * self.hubble )
actual = self.galaxy_linker.dist_to_all_valid_halos_fn(
particle_positions )
# Build the expected output
n_halos = self.galaxy_linker.halo_data.data_reader.halos.index.size
n_particles = 1
expected_shape = ( n_particles, n_halos )
npt.assert_allclose( actual[ 0, 0, ], 0., atol=1e-7 )
self.assertEqual( actual.shape, expected_shape )
########################################################################
def test_find_containing_halos( self ):
result = self.galaxy_linker.find_containing_halos()
# If none of the distances are within any of the halos,
# we have a problem.
assert result.sum() > 0
########################################################################
def test_find_containing_halos_strict( self ):
'''Here I'll restrict the fraction to a very small fraction of the
virial radius, such that the sum of the results should be two.
'''
result = self.galaxy_linker.find_containing_halos( 0.0001 )
# If none of the distances are within any of the halos,
# we have a problem.
npt.assert_allclose( 2, result.sum() )
########################################################################
def test_find_containing_halos_r_scale( self ):
'''Test that this works for using r_scale.'''
# Set the length scale
self.galaxy_linker.galaxy_cut = 1.
self.galaxy_linker.length_scale = 'r_scale'
r_scale_500 = 21.113602882685832
self.galaxy_linker.particle_positions = np.array([
[ 29414.96458784, 30856.75007114, 32325.90901812], # Right in the middle of mt halo 0 at snap 500
[ 29414.96458784 + r_scale_500*1.01, 30856.75007114, 32325.90901812], # Just outside the scale radius of mt halo 0 at snap 500.
[ 29414.96458784 + r_scale_500*0.99, 30856.75007114, 32325.90901812], # Just inside the scale radius of mt halo 0 at snap 500.
])
self.galaxy_linker.particle_positions *= 1./(1. + self.redshift)/self.hubble
self.galaxy_linker.n_particles = 3
actual = self.galaxy_linker.find_containing_halos( 1. )
# Build the expected output
n_halos = self.galaxy_linker.halo_data.data_reader.halos.index.size
expected = np.zeros( (self.galaxy_linker.particle_positions.shape[0], n_halos) ).astype( bool )
expected[ 0, 0 ] = True
expected[ 1, 0 ] = False
expected[ 2, 0 ] = True
npt.assert_allclose( actual, expected )
########################################################################
def test_find_containing_halos_nan_particle( self ):
# Anywhere the particle data has NaN values, we want that to read as False
self.galaxy_linker.particle_positions = np.array([
[ 29414.96458784, 30856.75007114, 32325.90901812], # Right in the middle of mt halo 0 at snap 500
[ np.nan, np.nan, np.nan ], # Invalid values, because a particle with that ID didn't exist
])
self.galaxy_linker.particle_positions *= 1./(1. + self.redshift)/self.hubble
self.galaxy_linker.n_particles = 2
actual = self.galaxy_linker.find_containing_halos()
n_halos = self.galaxy_linker.halo_data.data_reader.halos.index.size
expected = np.zeros( (self.galaxy_linker.particle_positions.shape[0], n_halos) ).astype( bool )
expected[ 0, 0 ] = True
npt.assert_allclose( actual, expected )
########################################################################
def test_find_mt_containing_halos( self ):
self.galaxy_linker.particle_positions = np.array([
[ 29414.96458784, 30856.75007114, 32325.90901812], # Right in the middle of mt halo 0 at snap 500
[ 29467.07226789, 30788.6179313 , 32371.38749237], # Right in the middle of mt halo 9 at snap 500.
# mt halo 9 is 0.5 Rvir_mt_0 (2 Rvir_mt_9) away from the center of mt halo 0
[ 29073.22333685, 31847.72434505, 32283.53620817], # Right in the middle of mt halo 19 at snap 500.
])
self.galaxy_linker.particle_positions *= 1./(1. + self.redshift)/self.hubble
actual = self.galaxy_linker.find_mt_containing_halos( 2.5 )
# Build the expected output
expected = np.zeros( (self.galaxy_linker.particle_positions.shape[0], 6) ).astype( bool )
expected[ 0, 0 ] = True
expected[ 0, -2 ] = True
expected[ 1, 0 ] = True
expected[ 1, -2 ] = True
expected[ 2, -1 ] = True
npt.assert_allclose( actual, expected )
########################################################################
def test_find_mt_containing_halos_r_scale( self ):
'''Test that this works for using r_scale.'''
# Set the length scale
self.galaxy_linker.galaxy_cut = 1.
self.galaxy_linker.mt_length_scale = 'r_scale'
r_scale_500 = 21.113602882685832
self.galaxy_linker.particle_positions = np.array([
[ 29414.96458784, 30856.75007114, 32325.90901812], # Right in the middle of mt halo 0 at snap 500
[ 29414.96458784 + r_scale_500*1.01, 30856.75007114, 32325.90901812], # Just outside the scale radius of mt halo 0 at snap 500.
[ 29414.96458784 + r_scale_500*0.99, 30856.75007114, 32325.90901812], # Just inside the scale radius of mt halo 0 at snap 500.
# (It will be. It currently isn't.)
])
self.galaxy_linker.particle_positions *= 1./(1. + self.redshift)/self.hubble
self.galaxy_linker.n_particles = 3
actual = self.galaxy_linker.find_mt_containing_halos( 1. )
# Build the expected output
n_halos = len( self.galaxy_linker.halo_data.data_reader.mtree_halos )
expected = np.zeros( (self.galaxy_linker.particle_positions.shape[0], n_halos) ).astype( bool )
expected[ 0, 0 ] = True
expected[ 1, 0 ] = False
expected[ 2, 0 ] = True
npt.assert_allclose( actual, expected )
########################################################################
def test_find_mt_containing_halos_nan_particles( self ):
        '''Test that NaN particle positions never register as contained in a halo.'''
self.galaxy_linker.particle_positions = np.array([
[ 29414.96458784, 30856.75007114, 32325.90901812], # Right in the middle of mt halo 0 at snap 500
            [ np.nan, np.nan, np.nan, ], # Invalid values, because a particle with that ID didn't exist
])
self.galaxy_linker.particle_positions *= 1./(1. + self.redshift)/self.hubble
self.galaxy_linker.n_particles = 2
actual = self.galaxy_linker.find_mt_containing_halos( 1. )
# Build the expected output
n_halos = len( self.galaxy_linker.halo_data.data_reader.mtree_halos )
expected = np.zeros( (self.galaxy_linker.particle_positions.shape[0], n_halos) ).astype( bool )
expected[ 0, 0 ] = True
npt.assert_allclose( actual, expected )
########################################################################
def test_find_smallest_host_halo( self ):
self.galaxy_linker.particle_positions = np.array([
[ 29414.96458784, 30856.75007114, 32325.90901812],
[ 31926.42103071, 51444.46756529, 1970.1967437 ],
[ 29467.07226789, 30788.6179313 , 32371.38749237],
[ 29459.32290246, 30768.32556725, 32357.26078864], # Halo 3783, host halo 3610
])
self.galaxy_linker.particle_positions *= 1./(1. + self.redshift)/self.hubble
self.galaxy_linker.n_particles = 4
expected = np.array( [0, 6962, 7, 3783] )
actual = self.galaxy_linker.find_halo_id()
npt.assert_allclose( expected, actual )
########################################################################
def test_find_smallest_host_halo_none( self ):
self.galaxy_linker.particle_positions = np.array([
[ 0., 0., 0. ],
[ 0., 0., 0. ],
])
expected = np.array( [-2, -2] )
actual = self.galaxy_linker.find_halo_id()
npt.assert_allclose( expected, actual )
########################################################################
def test_find_host_id( self ):
self.galaxy_linker.particle_positions = np.array([
[ 29414.96458784, 30856.75007114, 32325.90901812], # Halo 0, host halo 0
[ 30068.5541178 , 32596.72758226, 32928.1115097 ], # Halo 10, host halo 1
[ 29459.32290246, 30768.32556725, 32357.26078864], # Halo 3783, host halo 3610
])
self.galaxy_linker.particle_positions *= 1./(1. + self.redshift)/self.hubble
self.galaxy_linker.n_particles = 3
expected = np.array( [-1, 1, 3610] )
actual = self.galaxy_linker.find_host_id()
npt.assert_allclose( expected, actual )
########################################################################
def test_find_host_id_none( self ):
self.galaxy_linker.particle_positions = np.array([
[ 0., 0., 0. ],
[ 0., 0., 0. ],
])
expected = np.array( [-2, -2] )
actual = self.galaxy_linker.find_host_id()
npt.assert_allclose( expected, actual )
########################################################################
def test_find_mt_halo_id( self ):
self.galaxy_linker.particle_positions = np.array([
[ 29414.96458784, 30856.75007114, 32325.90901812], # Right in the middle of mt halo 0 at snap 500
[ 29467.07226789, 30788.6179313 , 32371.38749237], # Right in the middle of mt halo 9 at snap 500.
# mt halo 9 is 0.5 Rvir_mt_0 (2 Rvir_mt_9) away from the center of mt halo 0
[ 29073.22333685, 31847.72434505, 32283.53620817], # Right in the middle of mt halo 19 at snap 500.
[ 0., 0., 0.], # The middle of nowhere.
])
self.galaxy_linker.particle_positions *= 1./(1. + self.redshift)/self.hubble
self.galaxy_linker.n_particles = 4
actual = self.galaxy_linker.find_halo_id( 2.5, 'mt_halo_id' )
# Build the expected output
expected = np.array([ 0, 0, 19, -2 ])
npt.assert_allclose( actual, expected )
########################################################################
def test_find_mt_halo_id_early_universe( self ):
'''Test that, when there are no galaxies formed, we return an mt halo value of -2'''
# Set it to early redshifts
self.galaxy_linker.snum = 0
        # It doesn't really matter where the particles are, because there shouldn't be any galaxies yet.
self.galaxy_linker.particle_positions = np.array([
[ 29414.96458784, 30856.75007114, 32325.90901812], # Right in the middle of mt halo 0 at snap 500
[ 29467.07226789, 30788.6179313 , 32371.38749237], # Right in the middle of mt halo 9 at snap 500.
# mt halo 9 is 0.5 Rvir_mt_0 (2 Rvir_mt_9) away from the center of mt halo 0
[ 29073.22333685, 31847.72434505, 32283.53620817], # Right in the middle of mt halo 19 at snap 500.
[ 0., 0., 0.], # The middle of nowhere.
])
self.galaxy_linker.particle_positions *= 1./(1. + 30.)/self.hubble
self.galaxy_linker.n_particles = 4
actual = self.galaxy_linker.find_halo_id( 2.5, 'mt_halo_id' )
# Build the expected output
expected = np.array([ -2, -2, -2, -2 ])
npt.assert_allclose( actual, expected )
########################################################################
def test_extract_additional_keys( self ):
self.galaxy_linker.particle_positions = np.array([
[ 29414.96458784, 30856.75007114, 32325.90901812],
[ 31926.42103071, 51444.46756529, 1970.1967437 ],
[ 29467.07226789, 30788.6179313 , 32371.38749237],
[ 29459.32290246, 30768.32556725, 32357.26078864], # Halo 3783, host halo 3610
])
self.galaxy_linker.particle_positions *= 1./(1. + self.redshift)/self.hubble
self.galaxy_linker.n_particles = 4
self.galaxy_linker.supplementary_data_keys = [ 'Xc', 'Yc', 'Zc' ]
expected_id, expected = (
np.array( [0, 6962, 7, 3783] ),
{
'Xc': np.array([ 29414.96458784, 31926.42103071, 29467.07226789, 29459.32290246, ]),
'Yc': np.array([ 30856.75007114, 51444.46756529, 30788.6179313 , 30768.32556725, ]),
'Zc': np.array([ 32325.90901812, 1970.1967437, 32371.38749237, 32357.26078864, ]),
},
)
actual_id, actual = self.galaxy_linker.find_halo_id( supplementary_data=True )
assert len( expected.keys() ) == len( actual.keys() )
for key in expected.keys():
npt.assert_allclose( expected[key], actual[key] )
npt.assert_allclose( expected_id, actual_id )
########################################################################
def test_find_ids( self ):
particle_positions = np.array([
[ 29414.96458784, 30856.75007114, 32325.90901812], # Halo 0, host halo 0
[ 30068.5541178 , 32596.72758226, 32928.1115097 ], # Halo 10, host halo 1
[ 29459.32290246, 30768.32556725, 32357.26078864], # Halo 3783, host halo 3610
])
particle_positions *= 1./(1. + self.redshift)/self.hubble
# Move one just off to the side to help with testing
particle_positions[0] += 5.
used_kwargs = copy.deepcopy( self.kwargs )
used_kwargs['ids_to_return'].append( '0.00000001_Rvir' )
used_kwargs['ids_with_supplementary_data'] = [ 'gal_id', 'd_gal' ]
used_kwargs['supplementary_data_keys'] = [ 'Xc', 'Yc', 'Zc' ]
expected = {
'd_gal': np.array( [ 5.*np.sqrt( 3 ), 0., 0., ] ),
'host_halo_id': np.array( [-1, 1, 3610] ),
'halo_id': np.array( [0, 10, 3783] ),
'host_gal_id': np.array( [-1, 1, 3610] ),
'gal_id': np.array( [0, 10, 3783] ),
'mt_gal_id': np.array( [0, -2, -2] ),
'mt_halo_id': np.array( [0, 1, 0] ),
'0.00000001_Rvir': np.array( [-2, 10, 3783] ),
'gal_id_Xc': np.array([ 29414.96458784, 30068.5541178, 29459.32290246, ]),
'gal_id_Yc': np.array([ 30856.75007114, 32596.72758226, 30768.32556725, ]),
'gal_id_Zc': np.array([ 32325.90901812, 32928.1115097, 32357.26078864, ]),
'd_gal_Xc': np.array([ 29414.96458784, 30068.5541178, 29459.32290246, ]),
'd_gal_Yc': np.array([ 30856.75007114, 32596.72758226, 30768.32556725, ]),
'd_gal_Zc': np.array([ 32325.90901812, 32928.1115097, 32357.26078864, ]),
}
# Do the actual calculation
galaxy_linker = general_galaxy_linker.GalaxyLinker(
particle_positions,
**used_kwargs
)
actual = galaxy_linker.find_ids()
for key in expected.keys():
try:
npt.assert_allclose( expected[key], actual[key], atol=1e-10 )
except AssertionError:
raise AssertionError( 'key = {}, expected = {}, actual = {}'.format(
key,
expected[key],
actual[key],
)
)
########################################################################
def test_find_ids_different_mt_scale( self ):
particle_positions = np.array([
[ 29414.96458784, 30856.75007114, 32325.90901812], # Halo 0, host halo 0
[ 30068.5541178 , 32596.72758226, 32928.1115097 ], # Halo 10, host halo 1
[ 29459.32290246, 30768.32556725, 32357.26078864], # Halo 3783, host halo 3610
])
# Shift the first particle over slightly to put it outside Rmax, which
# we'll use as our length scale.
particle_positions[0][0] += 4.
particle_positions *= 1./(1. + self.redshift)/self.hubble
expected = {
'd_gal': np.array( [ 4., 0., 0., ] )/(1. + self.redshift)/self.hubble,
'host_gal_id': np.array( [-1, 1, 3610] ),
'gal_id': np.array( [0, 10, 3783] ),
'mt_gal_id': np.array( [-2, -2, -2] ),
}
kwargs = copy.deepcopy( self.kwargs )
kwargs['mt_length_scale'] = 'Rmax'
kwargs['galaxy_cut'] = 0.1
kwargs['ids_to_return'] = [
'd_gal',
'd_other_gal_scaled',
'host_gal_id',
'gal_id',
'mt_gal_id',
]
# Do the actual calculation
galaxy_linker = general_galaxy_linker.GalaxyLinker(
particle_positions,
**kwargs
)
actual = galaxy_linker.find_ids()
for key in expected.keys():
print(key)
npt.assert_allclose( expected[key], actual[key], atol=1e-10 )
########################################################################
def test_find_ids_rockstar( self ):
'''Test that this works for Rockstar as well.'''
# Update the arguments
used_kwargs = copy.copy( self.kwargs )
used_kwargs['ids_to_return'] = [
'd_gal',
'gal_id',
]
used_kwargs['halo_data_dir'] = './tests/data/rockstar_dir'
used_kwargs['halo_finder'] = 'Rockstar'
used_kwargs['minimum_criteria'] = 'Np'
used_kwargs['minimum_value'] = 10
used_kwargs['length_scale'] = 'Rs'
used_kwargs['halo_length_scale'] = 'R200b'
particle_positions = np.array([
[ 29534.48, 29714.94, 32209.94, ], # Halo 6811
[ 28495.69, 30439.15, 31563.17, ], # Halo 7339
[ 28838.19, 30631.51, 32055.31, ], # Halo 9498
])
particle_positions *= 1./(1. + self.redshift)/self.hubble
expected = {
'd_gal': np.array( [ 0., 0., 0., ] ),
'gal_id': np.array( [ 6811, 7339, 9498 ] ),
}
# Do the actual calculation
galaxy_linker = general_galaxy_linker.GalaxyLinker(
particle_positions,
**used_kwargs
)
actual = galaxy_linker.find_ids()
for key in expected.keys():
print(key)
npt.assert_allclose( expected[key], actual[key], atol=1e-10 )
########################################################################
def test_find_ids_snap0( self ):
particle_positions = np.array([
[ 29414.96458784, 30856.75007114, 32325.90901812], # Halo 0, host halo 0
[ 30068.5541178 , 32596.72758226, 32928.1115097 ], # Halo 10, host halo 1
[ 29459.32290246, 30768.32556725, 32357.26078864], # Halo 3783, host halo 3610
])
particle_positions *= 1./(1. + 30.)/self.hubble
expected = {
'host_halo_id': np.array( [-2, -2, -2] ),
'halo_id': np.array( [-2, -2, -2] ),
'host_gal_id': np.array( [-2, -2, -2] ),
'gal_id': np.array( [-2, -2, -2] ),
'mt_gal_id': np.array( [-2, -2, -2] ),
'mt_halo_id': np.array( [-2, -2, -2] ),
}
# Setup the input parameters
snap0_kwargs = copy.deepcopy( self.kwargs )
snap0_kwargs['snum'] = 0
# Do the actual calculation
galaxy_linker = general_galaxy_linker.GalaxyLinker( particle_positions, **snap0_kwargs )
actual = galaxy_linker.find_ids()
for key in expected.keys():
print(key)
npt.assert_allclose( expected[key], actual[key] )
########################################################################
def test_find_ids_early_universe( self ):
particle_positions = np.array([
[ 29414.96458784, 30856.75007114, 32325.90901812], # Halo 0, host halo 0
[ 30068.5541178 , 32596.72758226, 32928.1115097 ], # Halo 10, host halo 1
[ 29459.32290246, 30768.32556725, 32357.26078864], # Halo 3783, host halo 3610
])
particle_positions *= 1./(1. + 28.)/self.hubble
expected = {
'host_halo_id': np.array( [-2, -2, -2] ),
'halo_id': np.array( [-2, -2, -2] ),
'host_gal_id': np.array( [-2, -2, -2] ),
'gal_id': np.array( [-2, -2, -2] ),
'mt_gal_id': np.array( [-2, -2, -2] ),
'mt_halo_id': np.array( [-2, -2, -2] ),
'0.1_Rvir': np.array( [-2, -2, -2] ),
}
# Setup the input parameters
snap0_kwargs = copy.deepcopy( self.kwargs )
snap0_kwargs['snum'] = 1
# Do the actual calculation
galaxy_linker = general_galaxy_linker.GalaxyLinker( particle_positions, **snap0_kwargs )
actual = galaxy_linker.find_ids()
for key in expected.keys():
print(key)
npt.assert_allclose( expected[key], actual[key] )
########################################################################
def test_pass_halo_data( self ):
        '''Test that it still works when we pass in a halo_data object.'''
particle_positions = np.array([
[ 29414.96458784, 30856.75007114, 32325.90901812], # Halo 0, host halo 0
[ 30068.5541178 , 32596.72758226, 32928.1115097 ], # Halo 10, host halo 1
[ 29459.32290246, 30768.32556725, 32357.26078864], # Halo 3783, host halo 3610
])
particle_positions *= 1./(1. + self.redshift)/self.hubble
expected = {
'host_halo_id': np.array( [-1, 1, 3610] ),
'halo_id': np.array( [0, 10, 3783] ),
'host_gal_id': np.array( [-1, 1, 3610] ),
'gal_id': np.array( [0, 10, 3783] ),
'mt_gal_id': np.array( [0, -2, -2] ),
'mt_halo_id': np.array( [0, 1, 0] ),
}
# Prepare an halo_data to pass along.
halo_data = analyze_halos.HaloData(
self.kwargs['halo_data_dir'],
)
# Muck it up by making it try to retrieve data
halo_data.data_reader.get_halos( 600 )
halo_data.data_reader.get_mtree_halos( 600, tag='smooth' )
# Do the actual calculation
galaxy_linker = general_galaxy_linker.GalaxyLinker( particle_positions, halo_data=halo_data, **self.kwargs )
actual = galaxy_linker.find_ids()
for key in expected.keys():
print(key)
npt.assert_allclose( expected[key], actual[key] )
########################################################################
def test_find_d_gal( self ):
        '''This tests that we can find the shortest distance to the nearest
        galaxy.
        '''
# Setup the distance so we don't have to calculate it.
self.galaxy_linker._dist_to_all_valid_halos = np.array([
[ 0.5, 1.0, 0.5, ],
[ 15., 5., 3., ],
[ 0.2, 2.5e-4, 4., ],
])
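        # find_d_gal should reduce to the row-wise minimum over all valid halos.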
actual = self.galaxy_linker.find_d_gal()
expected = np.array([ 0.5, 3., 2.5e-4, ])
npt.assert_allclose( expected, actual )
########################################################################
def test_find_d_other_gal( self ):
        '''This tests that we can find the shortest distance to the nearest
        other galaxy, i.e. excluding the main galaxy.
        '''
# Setup the distance so we don't have to calculate it.
self.galaxy_linker._dist_to_all_valid_halos = np.array([
[ 0.5, 1.0, 0.75, ],
[ 15., 5., 3., ],
[ 0.2, 2.5e-4, 4., ],
])
actual = self.galaxy_linker.find_d_other_gal()
expected = np.array([ 0.75, 3., 2.5e-4, ])
npt.assert_allclose( expected, actual )
########################################################################
def test_find_d_other_gal_main_halo_id_not_0( self ):
        '''This tests that we can find the distance to the nearest other
        galaxy when the main merger-tree halo is not halo 0.
        '''
self.galaxy_linker.main_mt_halo_id = 1
# Setup the distance so we don't have to calculate it.
self.galaxy_linker._dist_to_all_valid_halos = np.array([
[ 0.5, 1.0, 0.75, ],
[ 15., 5., 3., ],
[ 0.2, 2.5e-4, 4., ],
])
actual = self.galaxy_linker.find_d_other_gal()
expected = np.array([ 0.5, 3., 0.2, ])
npt.assert_allclose( expected, actual )
########################################################################
def test_find_d_other_gal_early_universe( self ):
        '''Same as above, but at an early-universe snapshot, where the main
        halo does not exist yet.
        '''
self.galaxy_linker.snum = 1
# Setup the distance so we don't have to calculate it.
self.galaxy_linker._dist_to_all_valid_halos = np.array([
[ 0.5, 1.0, 0.75, ],
[ 15., 5., 3., ],
[ 0.2, 2.5e-4, 4., ],
])
actual = self.galaxy_linker.find_d_other_gal()
expected = np.array([ 0.5, 3., 2.5e-4, ])
npt.assert_allclose( expected, actual )
########################################################################
def test_find_d_other_gal_scaled( self ):
        '''This tests that we can find the distance to the nearest other
        galaxy, scaled by each halo's length scale.
        '''
# Setup dummy data
self.galaxy_linker._ahf_halos_length_scale_pkpc = np.array([ 1., 2., 3., 4., 5., ])
self.galaxy_linker._valid_halo_inds = np.array([ 0, 1, 2, 3, ])
self.galaxy_linker._dist_to_all_valid_halos = np.array([
[ 2., 4., 6., 8. ],
[ 4., 3., 2., 1., ],
[ 10., 8., 6., 7., ],
])
# Make sure we set the number of particles correctly, to match the number we're using
self.galaxy_linker.n_particles = 3
actual = self.galaxy_linker.find_d_other_gal( scaled=True )
expected = np.array([ 2., 0.25, 2., ])
npt.assert_allclose( expected, actual )
########################################################################
def test_find_d_other_gal_scaled_early_universe( self ):
        '''Same as the scaled test above, but at an early-universe snapshot.
        '''
# Setup dummy data
self.galaxy_linker.snum = 1
self.galaxy_linker._ahf_halos_length_scale_pkpc = np.array([ 1., 2., 3., 4., 5., ])
self.galaxy_linker._valid_halo_inds = np.array([ 0, 1, 2, 3, ])
self.galaxy_linker._dist_to_all_valid_halos = np.array([
[ 2., 4., 6., 8. ],
[ 4., 3., 2., 1., ],
[ 10., 8., 6., 7., ],
])
# Make sure we set the number of particles correctly, to match the number we're using
self.galaxy_linker.n_particles = 3
actual = self.galaxy_linker.find_d_other_gal( scaled=True )
expected = np.array([ 2., 0.25, 2., ])
npt.assert_allclose( expected, actual )
########################################################################
def test_find_d_other_gal_only_valid_halo_is_main( self ):
'''Test that things still work when there are other halos, but the
only valid halo is the main halo.
'''
# Setup dummy data
self.galaxy_linker.minimum_value = 10
self.galaxy_linker.snum = 10
self.galaxy_linker.halo_data.data_reader.data_dir = \
'./tests/data/analysis_dir5'
self.galaxy_linker.halo_data.data_reader.get_halos( 10 )
# Make sure we set the number of particles correctly, to match the number we're using
#self.galaxy_linker.n_particles = 3
actual = self.galaxy_linker.find_d_other_gal()
expected = np.array([ -2., -2., ])
npt.assert_allclose( expected, actual )
########################################################################
def test_find_d_other_gal_scaled_no_halos( self ):
        '''Test that scaled distances still behave sensibly when no halos
        have formed yet.
        '''
# Setup dummy data
self.galaxy_linker.snum = 1
self.galaxy_linker.halo_data.data_reader.data_dir = './tests/data/analysis_dir4'
self.galaxy_linker.halo_data.data_reader.get_halos( 1 )
# Make sure we set the number of particles correctly, to match the number we're using
#self.galaxy_linker.n_particles = 3
actual = self.galaxy_linker.find_d_other_gal( scaled=True )
expected = np.array([ -2., -2., ])
npt.assert_allclose( expected, actual )
########################################################################
@mock.patch( 'galaxy_dive.read_data.ahf.AHFReader.get_halos_add' )
def test_find_d_other_gal_scaled_no_halos_with_sufficient_mass( self, mock_get_halos_add ):
        '''Test that scaled distances still behave sensibly when no halos
        pass the minimum-mass criterion.
        '''
# Setup dummy data
self.galaxy_linker.snum = 12
self.galaxy_linker.minimum_value = 10
self.galaxy_linker.halo_data.data_reader.data_dir = './tests/data/analysis_dir4'
self.galaxy_linker.halo_data.data_reader.get_halos( 12 )
# Make sure we set the number of particles correctly, to match the number we're using
#self.galaxy_linker.n_particles = 3
actual = self.galaxy_linker.find_d_other_gal( scaled=True )
expected = np.array([ -2., -2., ])
npt.assert_allclose( expected, actual )
########################################################################
def test_find_d_other_gal_scaled_main_halo_id_not_0( self ):
        '''Test scaled distances to the nearest other galaxy when the main
        merger-tree halo is not halo 0.
        '''
# Setup dummy data
self.galaxy_linker.main_mt_halo_id = 3
self.galaxy_linker._ahf_halos_length_scale_pkpc = np.array([ 1., 2., 3., 4., 5., ])
self.galaxy_linker._valid_halo_inds = np.array([ 0, 1, 2, 3, ])
self.galaxy_linker._dist_to_all_valid_halos = np.array([
[ 2., 4., 6., 8. ],
[ 4., 3., 2., 1., ],
[ 10., 8., 6., 7., ],
])
# Make sure we set the number of particles correctly, to match the number we're using
self.galaxy_linker.n_particles = 3
actual = self.galaxy_linker.find_d_other_gal( scaled=True )
expected = np.array([ 2., 2./3., 2., ])
npt.assert_allclose( expected, actual )
########################################################################
class TestGalaxyLinkerMinimumStellarMass( unittest.TestCase ):
'''Test that we're properly applying a minimum stellar mass for a halo to be counted as containing a galaxy.'''
def setUp( self ):
gal_linker_kwargs_min_mstar = {
'minimum_criteria': 'M_star',
'minimum_value': 1e6,
'redshift': 6.1627907799999999,
'snum': 50,
'hubble': 0.70199999999999996,
'halo_data_dir': './tests/data/analysis_dir3',
'mtree_halos_index': 600,
'halo_file_tag': 'smooth',
'main_mt_halo_id': 0,
'galaxy_cut': 0.1,
'length_scale': 'Rvir',
'ids_to_return': [ 'halo_id', 'host_halo_id', 'gal_id', 'host_gal_id', 'mt_halo_id', 'mt_gal_id', 'd_gal', 'd_other_gal_scaled', ],
}
# Get input data
comoving_particle_positions = np.array([
[ 30252.60118534, 29483.51635481, 31011.17715464], # Right in the middle of mt halo 0 (AHF halo id 3) at snum 50.
# This halo contains a galaxy with 1e6.7 Msun of stars at this redshift.
[ 28651.1193359, 29938.7253038, 32168.1380575], # Right in the middle of mt halo 19 (AHF halo id 374) at snum 50
            # This halo has no stars at this redshift.
])
self.redshift = gal_linker_kwargs_min_mstar['redshift']
self.hubble = gal_linker_kwargs_min_mstar['hubble']
particle_positions = comoving_particle_positions/(1. + self.redshift)/self.hubble
# Make the necessary kwargs
self.kwargs = gal_linker_kwargs_min_mstar
self.galaxy_linker = general_galaxy_linker.GalaxyLinker( particle_positions, **self.kwargs )
# Get the necessary reader.
self.galaxy_linker.halo_data.data_reader = read_ahf.AHFReader( self.kwargs['halo_data_dir'] )
# Get the full needed ahf info.
self.galaxy_linker.halo_data.data_reader.get_halos( 50 )
########################################################################
def test_find_containing_halos( self ):
actual = self.galaxy_linker.find_containing_halos( 1. )
# Build the expected output
n_halos = self.galaxy_linker.halo_data.data_reader.halos.index.size
expected = np.zeros( (self.galaxy_linker.particle_positions.shape[0], n_halos) ).astype( bool )
expected[ 0, 3 ] = True # Should only be in the galaxy with sufficient stellar mass.
npt.assert_allclose( expected, actual )
########################################################################
def test_find_mt_containing_halos( self ):
actual = self.galaxy_linker.find_mt_containing_halos( 1. )
# Build the expected output
n_halos = len( self.galaxy_linker.halo_data.data_reader.mtree_halos )
expected = np.zeros( (self.galaxy_linker.particle_positions.shape[0], n_halos) ).astype( bool )
        expected[ 0, 0 ] = True # Should only be in the galaxy with sufficient stellar mass.
npt.assert_allclose( expected, actual )
########################################################################
def test_find_ids_custom( self ):
self.galaxy_linker.ids_to_return = [
'gal_id',
'halo_id',
'0.1_Rvir',
'1.0_Rvir',
'5_r_scale',
]
results = self.galaxy_linker.find_ids()
npt.assert_allclose( results['gal_id'], results['0.1_Rvir'] )
npt.assert_allclose( results['halo_id'], results['1.0_Rvir'] )
########################################################################
########################################################################
class TestGalaxyLinkerMinimumNumStars( unittest.TestCase ):
'''Test that we're properly applying a minimum number of stars for a halo to be counted as containing a galaxy.'''
def setUp( self ):
gal_linker_kwargs_min_nstar = {
'minimum_criteria': 'n_star',
'minimum_value': 10,
'redshift': 6.1627907799999999,
'snum': 50,
'hubble': 0.70199999999999996,
'halo_data_dir': './tests/data/analysis_dir3',
'mtree_halos_index': 600,
'halo_file_tag': 'smooth',
'main_mt_halo_id': 0,
'galaxy_cut': 0.1,
'length_scale': 'Rvir',
'ids_to_return': [ 'halo_id', 'host_halo_id', 'gal_id', 'host_gal_id', 'mt_halo_id', 'mt_gal_id', 'd_gal', 'd_other_gal_scaled', ],
}
# Get input data
comoving_particle_positions = np.array([
[ 30252.60118534, 29483.51635481, 31011.17715464], # Right in the middle of mt halo 0 (AHF halo id 3) at snum 50.
# This halo contains a galaxy with 1e6.7 Msun of stars at this redshift.
[ 28651.1193359, 29938.7253038, 32168.1380575], # Right in the middle of mt halo 19 (AHF halo id 374) at snum 50
            # This halo has no stars at this redshift.
])
self.redshift = gal_linker_kwargs_min_nstar['redshift']
self.hubble = gal_linker_kwargs_min_nstar['hubble']
particle_positions = comoving_particle_positions/(1. + self.redshift)/self.hubble
# Make the necessary kwargs
self.kwargs = gal_linker_kwargs_min_nstar
self.galaxy_linker = general_galaxy_linker.GalaxyLinker( particle_positions, **self.kwargs )
# Get the necessary reader.
self.galaxy_linker.halo_data.data_reader = read_ahf.AHFReader( self.kwargs['halo_data_dir'] )
# Get the full needed ahf info.
self.galaxy_linker.halo_data.data_reader.get_halos( 50 )
########################################################################
def test_find_containing_halos( self ):
actual = self.galaxy_linker.find_containing_halos( 1. )
# Build the expected output
n_halos = self.galaxy_linker.halo_data.data_reader.halos.index.size
expected = np.zeros( (self.galaxy_linker.particle_positions.shape[0], n_halos) ).astype( bool )
        expected[ 0, 3 ] = True # Should only be in the galaxy with a sufficient number of stars.
npt.assert_allclose( expected, actual )
########################################################################
def test_find_mt_containing_halos( self ):
actual = self.galaxy_linker.find_mt_containing_halos( 1. )
# Build the expected output
n_halos = len( self.galaxy_linker.halo_data.data_reader.mtree_halos )
expected = np.zeros( (self.galaxy_linker.particle_positions.shape[0], n_halos) ).astype( bool )
        expected[ 0, 0 ] = True # Should only be in the galaxy with a sufficient number of stars.
npt.assert_allclose( expected, actual )
########################################################################
########################################################################
class TestFindMassRadii( unittest.TestCase ):
def setUp( self ):
# Test Data
self.kwargs = dict( gal_linker_kwargs )
self.kwargs['particle_masses'] = np.array([ 1., 2., 3., 4., ])
particle_positions = np.array([
[ 0., 0., 0., ],
[ 0., 0., 0., ],
[ 0., 0., 0., ],
[ 0., 0., 0., ],
]) # These shouldn't ever be used directly, since we're relying on the results of previous functions.
self.galaxy_linker = general_galaxy_linker.GalaxyLinker( particle_positions, **self.kwargs )
########################################################################
def test_mass_inside_galaxy_cut( self ):
# Test Data
self.galaxy_linker._ahf_halos_length_scale_pkpc = np.array([ 200., 10., 100., 50., ])
self.galaxy_linker._valid_halo_inds = np.array([ 0, 1, 2, ])
self.galaxy_linker._dist_to_all_valid_halos = np.array([
[ 0., 10., 500., ],
[ 15., 5., 485., ],
[ 10., 0., 490., ],
[ 500., 490., 0., ],
])
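        # With galaxy_cut = 0.1 the cut radii are [20., 1., 10.] pkpc, so the
        # enclosed masses are 1+2+3 = 6, 3, and 4 for the three valid halos.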
expected = np.array([ 6., 3., 4., ])
actual = self.galaxy_linker.mass_inside_galaxy_cut
npt.assert_allclose( expected, actual )
########################################################################
def test_mass_inside_galaxy_cut_no_valid_inds( self ):
'''Test we still get a reasonable result out, even when there's not a single valid halo.'''
# Test Data
self.galaxy_linker._ahf_halos_length_scale_pkpc = np.array([ 200., 10., 100., 50., ])
self.galaxy_linker._valid_halo_inds = np.array([])
self.galaxy_linker._dist_to_all_valid_halos = np.array([
[ 0., 10., 500., ],
[ 15., 5., 485., ],
[ 10., 0., 490., ],
[ 500., 490., 0., ],
])
actual = self.galaxy_linker.mass_inside_galaxy_cut
expected = np.array([])
npt.assert_allclose( expected, actual )
########################################################################
def test_mass_inside_galaxy_cut_no_halos( self ):
'''Test we still get a reasonable result out, even when there aren't any halos formed yet.'''
# Test Data
self.galaxy_linker._ahf_halos_length_scale_pkpc = np.array([])
self.galaxy_linker._valid_halo_inds = np.array([])
actual = self.galaxy_linker.mass_inside_galaxy_cut
expected = np.array([])
npt.assert_allclose( expected, actual )
########################################################################
def test_mass_inside_galaxy_cut_no_inside_cut( self ):
'''Make sure we give the right results when no particles are inside the cut.'''
# Test Data
self.galaxy_linker._ahf_halos_length_scale_pkpc = np.array([ 0.1, 0.1, 0.1, 0.1, ])
self.galaxy_linker._valid_halo_inds = np.array([ 0, 1, 2, ])
self.galaxy_linker._dist_to_all_valid_halos = np.array([
[ 100., 10., 500., ],
[ 15., 5., 485., ],
[ 10., 100., 490., ],
[ 500., 490., 100., ],
])
expected = np.array([ 0., 0., 0., ])
actual = self.galaxy_linker.mass_inside_galaxy_cut
npt.assert_allclose( expected, actual )
########################################################################
def test_mass_inside_all_halos( self ):
# Test Data
self.galaxy_linker._ahf_halos_length_scale_pkpc = np.array([ 200., 10., 100., 50., np.nan, ])
self.galaxy_linker._valid_halo_inds = np.array([ 0, 1, 2, 4, ])
self.galaxy_linker._dist_to_all_valid_halos = np.array([
[ 0., 10., 500., np.nan, ],
[ 15., 5., 485., np.nan, ],
[ 10., 0., 490., np.nan, ],
[ 500., 490., 0., np.nan, ],
])
expected = np.array([ 6., 3., 4., np.nan, np.nan ])
actual = self.galaxy_linker.mass_inside_all_halos
npt.assert_allclose( expected, actual )
########################################################################
def test_mass_inside_all_halos_no_valid_gals( self ):
# Test Data
self.galaxy_linker._ahf_halos_length_scale_pkpc = np.array([ 200., 10., 100., 50., np.nan, ])
self.galaxy_linker._valid_halo_inds = np.array([])
expected = np.array([ np.nan, np.nan, np.nan, np.nan, np.nan ])
actual = self.galaxy_linker.mass_inside_all_halos
npt.assert_allclose( expected, actual )
########################################################################
    def test_cumulative_mass_valid_halos( self ):
# Test Data
self.galaxy_linker._dist_to_all_valid_halos = np.array([
[ 0., 500., ],
[ 480., 450., ],
[ 50., 20., ],
[ 490., 10., ],
])
expected = np.array([
[ 1., 10., ],
[ 6., 9., ],
[ 4., 7., ],
[ 10., 4., ],
])
actual = self.galaxy_linker.cumulative_mass_valid_halos
npt.assert_allclose( expected, actual )
########################################################################
def test_get_mass_radius( self ):
# Test Data
self.galaxy_linker._ahf_halos_length_scale_pkpc = np.array([ 0., 0., 0. ]) # Values shouldn't matter here
self.galaxy_linker._valid_halo_inds = np.array([ 0, 1, ])
self.galaxy_linker._mass_inside_galaxy_cut = np.array([ 10., 19, ])
self.galaxy_linker._dist_to_all_valid_halos = np.array([
[ 0., 500., ],
[ 480., 450., ],
[ 50., 20., ],
[ 490., 10., ],
])
self.galaxy_linker._cumulative_mass_valid_halos = np.array([
[ 1., 10., ],
[ 6., 9., ],
[ 4., 7., ],
[ 10., 4., ],
        ])
# Expected result, in comoving coords.
expected = np.array( [ 50., 450., np.nan ] )*( 1. + 0.16946003 )*0.702
actual = self.galaxy_linker.get_mass_radius( 0.5 )
npt.assert_allclose( expected, actual )
########################################################################
########################################################################
class TestSummedQuantityInsideGalaxy( unittest.TestCase ):
'''Test that we can calculate a more general attribute inside a galaxy.'''
def setUp( self ):
# Test Data
self.kwargs = dict( gal_linker_kwargs )
self.kwargs['particle_masses'] = np.array([ 2., 2., 1., 3., ])
particle_positions = np.array([
[ 0., 0., 0., ],
[ 0., 0., 0., ],
[ 0., 0., 0., ],
[ 0., 0., 0., ],
]) # These shouldn't ever be used directly, since we're relying on the results of previous functions.
self.galaxy_linker = general_galaxy_linker.GalaxyLinker( particle_positions, **self.kwargs )
########################################################################
def test_summed_quantity_inside_galaxy_valid_halos( self ):
# Test Data
self.galaxy_linker._ahf_halos_length_scale_pkpc = np.array([ 200., 10., 100., 50., ])
self.galaxy_linker._valid_halo_inds = np.array([ 0, 1, 2, ])
self.galaxy_linker._dist_to_all_valid_halos = np.array([
[ 0., 10., 500., ],
[ 15., 5., 485., ],
[ 10., 0., 490., ],
[ 500., 490., 0., ],
])
particle_quantities = np.array([ 1., 2., 3., 4., ])
actual = self.galaxy_linker.summed_quantity_inside_galaxy_valid_halos( particle_quantities, np.nan )
expected = np.array([ 6., 3., 4., ])
npt.assert_allclose( expected, actual )
########################################################################
def test_summed_quantity_valid_halos_no_inside_cut( self ):
'''Make sure we give the right results when no particles are inside the cut.'''
# Test Data
self.galaxy_linker._ahf_halos_length_scale_pkpc = np.array([ 0.1, 0.1, 0.1, 0.1, ])
self.galaxy_linker._valid_halo_inds = np.array([ 0, 1, 2, ])
self.galaxy_linker._dist_to_all_valid_halos = np.array([
[ 100., 10., 500., ],
[ 15., 5., 485., ],
[ 10., 100., 490., ],
[ 500., 490., 100., ],
])
particle_quantities = np.array([ 1., 2., 3., 4., ])
expected = np.array([ np.nan, np.nan, np.nan, ])
actual = self.galaxy_linker.summed_quantity_inside_galaxy_valid_halos( particle_quantities, np.nan )
npt.assert_allclose( expected, actual )
########################################################################
@mock.patch( 'galaxy_dive.galaxy_linker.linker.GalaxyLinker.dist_to_all_valid_halos_fn' )
def test_summed_quantity_inside_galaxy_low_memory_mode( self, mock_dist_all_valid ):
        '''Test that we can get the summed quantity inside the galaxy while
        reducing memory consumption (at the cost of speed) by computing the
        sum for fewer galaxies at a time.
        '''
# Test Data
self.galaxy_linker._ahf_halos_length_scale_pkpc = np.array([ 200., 10., 100., 50., ])
self.galaxy_linker._valid_halo_inds = np.array([ 0, 1, 2, ])
mock_dist_all_valid.side_effect = [
np.array( [ [ 0., 10., 500., ], ] ),
np.array( [ [ 15., 5., 485., ], ] ),
np.array( [ [ 10., 0., 490., ], ] ),
np.array( [ [ 500., 490., 0., ], ] ),
np.array( [] ),
np.array( [] ),
np.array( [] ),
np.array( [] ),
np.array( [] ),
np.array( [] ),
]
particle_quantities = np.array([ 1., 2., 3., 4., ])
        # Change parameters of the galaxy finder to run in low memory mode.
self.galaxy_linker.low_memory_mode = True
actual = self.galaxy_linker.summed_quantity_inside_galaxy_valid_halos( particle_quantities )
expected = np.array([ 6., 3., 4., ])
npt.assert_allclose( expected, actual )
########################################################################
def test_summed_quantity_inside_galaxy( self ):
# Test Data
self.galaxy_linker._ahf_halos_length_scale_pkpc = np.array([ 200., 10., 100., 50., np.nan, ])
self.galaxy_linker._valid_halo_inds = np.array([ 0, 1, 2, 4, ])
self.galaxy_linker._dist_to_all_valid_halos = np.array([
[ 0., 10., 500., np.nan, ],
[ 15., 5., 485., np.nan, ],
[ 10., 0., 490., np.nan, ],
[ 500., 490., 0., np.nan, ],
])
particle_quantities = np.array([ 1., 2., 3., 4., ])
expected = np.array([ 6., 3., 4., np.nan, np.nan ])
actual = self.galaxy_linker.summed_quantity_inside_galaxy(
particle_quantities,
np.nan,
)
npt.assert_allclose( expected, actual )
########################################################################
def test_weighted_summed_quantity_inside_galaxy( self ):
# Test Data
self.galaxy_linker._ahf_halos_length_scale_pkpc = np.array([ 200., 10., 100., 50., np.nan, ])
self.galaxy_linker._valid_halo_inds = np.array([ 0, 1, 2, 4, ])
self.galaxy_linker._dist_to_all_valid_halos = np.array([
[ 0., 10., 500., np.nan, ],
[ 15., 5., 485., np.nan, ],
[ 10., 0., 490., np.nan, ],
[ 500., 490., 0., np.nan, ],
])
particle_quantities = np.array([ 1., 2., 3., 4., ])
particle_weights = np.array([ 4., 3., 2., 1., ])
actual = self.galaxy_linker.weighted_summed_quantity_inside_galaxy( particle_quantities, particle_weights, np.nan )
expected = np.array([ 16./9., 3., 4., np.nan, np.nan, ])
npt.assert_allclose( expected, actual )
|
{"hexsha": "10f57350fb7b26cf453486ea9ad46dcf88ba8301", "size": 54829, "ext": "py", "lang": "Python", "max_stars_repo_path": "galaxy_dive/tests/test_galaxy_linker/test_linker.py", "max_stars_repo_name": "zhafen/galaxy-dive", "max_stars_repo_head_hexsha": "e1127da25d10f699b3ada01b1b4635255f4f3917", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "galaxy_dive/tests/test_galaxy_linker/test_linker.py", "max_issues_repo_name": "zhafen/galaxy-dive", "max_issues_repo_head_hexsha": "e1127da25d10f699b3ada01b1b4635255f4f3917", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-12-17T21:11:18.000Z", "max_issues_repo_issues_event_max_datetime": "2018-12-17T21:11:18.000Z", "max_forks_repo_path": "galaxy_dive/tests/test_galaxy_linker/test_linker.py", "max_forks_repo_name": "zhafen/galaxy-dive", "max_forks_repo_head_hexsha": "e1127da25d10f699b3ada01b1b4635255f4f3917", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.7311594203, "max_line_length": 194, "alphanum_fraction": 0.5352459465, "include": true, "reason": "import numpy", "num_tokens": 14759}
|
/*****************************************************************************/
/* Copyright (c) 2017, Aleksandrs Ecins */
/* All rights reserved. */
/* */
/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */
/* */
/* 1. Redistributions of source code must retain the above copyright */
/* notice, this list of conditions and the following disclaimer. */
/* */
/* 2. Redistributions in binary form must reproduce the above copyright */
/* notice, this list of conditions and the following disclaimer in the */
/* documentation and/or other materials provided with the distribution. */
/* */
/* 3. Neither the name of the copyright holder nor the names of its */
/* contributors may be used to endorse or promote products derived from */
/* this software without specific prior written permission. */
/* */
/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR */
/* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT */
/* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, */
/* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT */
/* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, */
/* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY */
/* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */
/* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE */
/* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
/*****************************************************************************/
#ifndef MIN_CUT_HPP
#define MIN_CUT_HPP
// Boost includes
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/boykov_kolmogorov_max_flow.hpp>
// Utilities includes
#include <graph/graph_weighted.hpp>
// Typedefs
// NOTE! These typedefs should be moved inside the functions so that they don't leak outside this header.
// The likely solution is to put the whole mincut algorithm into its own class.
typedef boost::adjacency_list_traits < boost::vecS, boost::vecS, boost::directedS > Traits;
typedef boost::adjacency_list < boost::vecS, // Container used for vertices
boost::vecS, // Container used for edges
boost::directedS, // Directional graph
boost::property < boost::vertex_name_t, std::string, // Vertex properties
boost::property < boost::vertex_index_t, long,
boost::property < boost::vertex_color_t, boost::default_color_type,
boost::property < boost::vertex_distance_t, long,
boost::property < boost::vertex_predecessor_t, Traits::edge_descriptor > > > > >,
boost::property < boost::edge_capacity_t, float, // Edge properties
boost::property < boost::edge_residual_capacity_t, float,
boost::property < boost::edge_reverse_t, Traits::edge_descriptor > > > > GraphBoost;
typedef boost::property_map< GraphBoost, boost::edge_capacity_t >::type CapacityMap;
typedef boost::property_map< GraphBoost, boost::edge_reverse_t>::type ReverseEdgeMap;
typedef boost::property_map< GraphBoost, boost::vertex_color_t, boost::default_color_type>::type VertexColorMap;
////////////////////////////////////////////////////////////////////////////////
// "inline" because this function is defined in a header that may be included in several translation units
inline bool addEdge (Traits::vertex_descriptor &v1, Traits::vertex_descriptor &v2, GraphBoost &graph, const float weight, CapacityMap &capacity_map, ReverseEdgeMap &reverse_edge_map)
{
Traits::edge_descriptor edge, reverse_edge;
bool edge_was_added, reverse_edge_was_added;
boost::tie (edge, edge_was_added) = boost::add_edge ( v1, v2, graph );
boost::tie (reverse_edge, reverse_edge_was_added) = boost::add_edge ( v2, v1, graph );
if ( !edge_was_added || !reverse_edge_was_added )
return (false);
capacity_map[edge] = weight;
capacity_map[reverse_edge] = weight;
reverse_edge_map[edge] = reverse_edge;
reverse_edge_map[reverse_edge] = edge;
return true;
}
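// Note: both edge directions receive the same capacity, i.e. the unary and
// pairwise potentials are treated as undirected edges in the flow network.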
namespace utl
{
/** \brief Perform a min cut on a graph
* \param[in] source_potentials weights between nodes and source node
* \param[in] sink_potentials weights between nodes and sink node
* \param[in] binary_potentials binary potential structure and weights
* \param[out] source_points points belonging to source
   * \param[out] sink_points points belonging to sink
   * \return the maximum flow value (equal to the cost of the minimum cut), or -1.0 on invalid input
   */
  inline double mincut ( const std::vector<float> &source_potentials,
const std::vector<float> &sink_potentials,
const utl::GraphWeighted &binary_potentials,
std::vector<int> &source_points,
std::vector<int> &sink_points
)
{
////////////////////////////////////////////////////////////////////////////
// Check input
if (! ( (source_potentials.size() == sink_potentials.size()) &&
(source_potentials.size() == binary_potentials.getNumVertices())))
{
std::cout << "[utl::minCut] number of vertices in source potentials, sink potentials and binary potentials are not equal." << std::endl;
return -1.0;
}
////////////////////////////////////////////////////////////////////////////
// Build graph
int numVertices = source_potentials.size();
GraphBoost graph;
std::vector< Traits::vertex_descriptor > vertices;
Traits::vertex_descriptor source;
Traits::vertex_descriptor sink;
CapacityMap capacity = boost::get (boost::edge_capacity, graph);
ReverseEdgeMap reverseEdgeMap = boost::get (boost::edge_reverse, graph);
VertexColorMap vertexColorMap = boost::get (boost::vertex_color, graph);
// Add vertices
vertices.resize(numVertices + 2);
for (size_t i = 0; i < static_cast<size_t>(numVertices + 2); i++)
vertices[i] = boost::add_vertex(graph);
source = vertices[source_potentials.size()];
sink = vertices[source_potentials.size()+1];
// Add source and sink edges
for (size_t i = 0; i < static_cast<size_t>(numVertices); i++)
{
addEdge(vertices[i], source, graph, source_potentials[i], capacity, reverseEdgeMap);
addEdge(vertices[i], sink, graph, sink_potentials[i], capacity, reverseEdgeMap);
}
// Add binary edges
for (size_t edgeId = 0; edgeId < binary_potentials.getNumEdges(); edgeId++)
{
// Get edge information
int vtx1Id, vtx2Id;
float weight;
if (!binary_potentials.getEdge(edgeId, vtx1Id, vtx2Id, weight))
{
std::cout << "[utl::minCut] could not add binary edges to Boost graph." << std::endl;
abort();
}
// Add it to Boost graph
Traits::vertex_descriptor v1 = vertices[vtx1Id];
Traits::vertex_descriptor v2 = vertices[vtx2Id];
addEdge(v1, v2, graph, weight, capacity, reverseEdgeMap);
}
////////////////////////////////////////////////////////////////////////////
// Compute maximum flow
double flow = boost::boykov_kolmogorov_max_flow(graph, source, sink);
////////////////////////////////////////////////////////////////////////////
// Find foreground and background points
source_points.clear();
sink_points.clear();
for (size_t i = 0; i < static_cast<size_t>(numVertices); i++)
{
if (vertexColorMap[vertices[i]] == 0)
source_points.push_back(i);
else
sink_points.push_back(i);
}
return flow;
}
}
#endif  // MIN_CUT_HPP
|
{"hexsha": "02c7ebf358a27fbf5a7813e90a74a15f55c13473", "size": 9022, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "utilities/graph/min_cut.hpp", "max_stars_repo_name": "Cznielsen/symseg", "max_stars_repo_head_hexsha": "b1c1e1e2f21f6a3d8b65e4f68d3516bc0bbbf06e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 51.0, "max_stars_repo_stars_event_min_datetime": "2017-05-15T15:16:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-01T22:37:29.000Z", "max_issues_repo_path": "utilities/graph/min_cut.hpp", "max_issues_repo_name": "Cznielsen/symseg", "max_issues_repo_head_hexsha": "b1c1e1e2f21f6a3d8b65e4f68d3516bc0bbbf06e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 5.0, "max_issues_repo_issues_event_min_datetime": "2017-05-31T05:32:53.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-08T01:35:49.000Z", "max_forks_repo_path": "utilities/graph/min_cut.hpp", "max_forks_repo_name": "Cznielsen/symseg", "max_forks_repo_head_hexsha": "b1c1e1e2f21f6a3d8b65e4f68d3516bc0bbbf06e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 12.0, "max_forks_repo_forks_event_min_datetime": "2017-08-08T17:43:07.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-29T17:59:08.000Z", "avg_line_length": 50.9717514124, "max_line_length": 175, "alphanum_fraction": 0.5428951452, "num_tokens": 1760}
|
import numpy as np
from layers.base import Layer
class Output(Layer):
def __init__(self, input_layers, output_shape, loss_function=None, learning_rate=0.1):
super().__init__(input_layers, output_shape)
self.loss_function = loss_function
self.cur_y_true = None
self.learning_rate = learning_rate
self.delta = np.zeros([0, self.output_shape])
self.cur_loss = None
self.cur_accuracy = None
def get_cur_outputs(self):
return self.cur_outputs
    def forward(self):
        # The output layer simply passes through the activations of its single
        # input layer.
        self.cur_outputs = self.cur_inputs[0]
        self.clear_cur_inputs_flags()
    def backward(self):
        # The loss function returns (loss, delta, accuracy); the delta is then
        # handed back to every input layer.
        self.cur_loss, self.delta, self.cur_accuracy = self.loss_function(
            self.cur_y_true, self.cur_outputs, self.learning_rate)
        for layer in self.input_layers:
            layer.append_cur_delta(self, np.array(self.delta))
        self.clear_cur_deltas_flags()
def set_cur_y_true(self, y_values):
self.cur_y_true = y_values
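# ---- Minimal usage sketch (illustrative; not part of the original module) ----
# The loss callable must match the signature Output.backward expects:
# (y_true, y_pred, learning_rate) -> (loss, delta, accuracy).
def mse_loss(y_true, y_pred, learning_rate):
    diff = y_pred - y_true
    loss = np.mean(diff ** 2)
    delta = learning_rate * 2.0 * diff / len(y_true)
    accuracy = float(np.mean(np.argmax(y_pred, axis=1) == np.argmax(y_true, axis=1)))
    return loss, delta, accuracy
# `prev_layer` below stands for any upstream layer of this framework:
# out = Output([prev_layer], output_shape=10, loss_function=mse_loss, learning_rate=0.1)
# out.set_cur_y_true(y_batch); out.forward(); out.backward()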
|
{"hexsha": "051900c2ab26a6db2fae2e94a6048f0db0789230", "size": 1099, "ext": "py", "lang": "Python", "max_stars_repo_path": "layers/output.py", "max_stars_repo_name": "WallFacer5/ArtifIdiot", "max_stars_repo_head_hexsha": "698aac564901f64138b1e6287ab1996792a8f2fa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "layers/output.py", "max_issues_repo_name": "WallFacer5/ArtifIdiot", "max_issues_repo_head_hexsha": "698aac564901f64138b1e6287ab1996792a8f2fa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-02-22T07:58:39.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-24T09:20:37.000Z", "max_forks_repo_path": "layers/output.py", "max_forks_repo_name": "WallFacer5/models", "max_forks_repo_head_hexsha": "698aac564901f64138b1e6287ab1996792a8f2fa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.4516129032, "max_line_length": 108, "alphanum_fraction": 0.6460418562, "include": true, "reason": "import numpy", "num_tokens": 228}
|
# Information Retrieval in High Dimensional Data
## Lab #6, 23.11.2017
## Principal Component Analysis
### Task 1
In this task we will once again work with the MNIST training set as provided on Moodle. Choose three digit classes, e.g. 1, 2 and 3, and load N=1000 images from each of the classes into the workspace. Store the data in a normalized matrix $X$ of type double and size (784, 3\*N). Furthermore, generate a color label matrix $C$ of dimensions (3\*N, 3). Each row of $C$ assigns an RGB color vector to the respective column of $X$ as an indicator of the digit class. Choose [0, 0, 1], [0, 1, 0] and [1, 0, 0] for the three digit classes.
```python
import os
import numpy as np
import matplotlib.pyplot as plt
import imageio
```
```python
N = 1000
X = np.zeros((784, 3*N), dtype='float64')
for i in range(1, 4):
path = 'mnist/d{}/'.format(i)
filenames = sorted((fn for fn in os.listdir(path) if fn.endswith('.png')))
for idx, fn in enumerate(filenames):
im = imageio.imread(path + fn)
        X[:, idx + N*(i-1)] = np.reshape(im, 784) / 255.0  # normalize to [0, 1]
        if idx >= N - 1:
            break
```
```python
labels = np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]])
C = np.zeros((3*N, 3))
for i in range(3):
for j in range(1000):
C[N*i + j, :] = labels[i]
C
```
array([[ 0., 0., 1.],
[ 0., 0., 1.],
[ 0., 0., 1.],
...,
[ 1., 0., 0.],
[ 1., 0., 0.],
[ 1., 0., 0.]])
a) Compute the row-wise mean mu of $X$ and subtract it from each column of $X$.
Save the results as X_c.
```python
mu = np.mean(X, axis=1)
X_c = X-np.expand_dims(mu, axis=1)
```
b) Use np.linalg.svd with full_matrices=False to compute the singular value decomposition [U, Sigma, VT] of X_c. Make sure the matrices are sorted in descending order with respect to the singular values.
```python
U, Sigma, VT = np.linalg.svd(X_c, full_matrices=False)
```
c) Use reshape in order to convert mu and the first three columns of U to (28, 28)-matrices. Plot the resulting images. What do you see?
```python
plt.subplot(141)
plt.imshow(np.reshape(mu, (28,28)))
plt.subplot(142)
plt.imshow(np.reshape(U[:, 0], (28,28)))
plt.subplot(143)
plt.imshow(np.reshape(U[:, 1], (28,28)))
plt.subplot(144)
plt.imshow(np.reshape(U[:, 2], (28,28)))
plt.show()
```
d) Compute the matrix S=np.dot(np.diag(Sigma), VT). Note that this yields the same result as S=np.dot(U.T, X_c). The S matrix contains the 3\*N scores for the principal components 1 to 784. Create a 2D scatter plot with C as its color parameter in order to plot the scores for the first two principal components of the data.
```python
S = np.dot(np.diag(Sigma), VT)
plt.scatter(S[0, :], S[1, :], c=C)
plt.show()
```
### Task 2
In this task we consider the problem of choosing the number of principal vectors. Assuming that $\mathbf{X} \in \mathbb{R}^{p\times N}$ is the centered data matrix and $ \mathbf{P} = \mathbf{U}_k \mathbf{U}_k^T $ is the projector onto the $k$-dimensional principal subspace, the dimension $k$ is chosen such that the fraction of overall energy contained in the projection error does not exceed $\epsilon$, i.e.
\begin{equation} \frac{\|\mathbf{X} - \mathbf{PX}\|_F^2}{\| \mathbf{X}\|_F^2} = \frac{\sum_{i=1}^N \|\mathbf{x}_i - \mathbf{Px}_i\|^2}{\sum_{i=1}^N \| \mathbf{x}_i\|^2} \leq \epsilon ,\end{equation}
where $\epsilon$ is usually chosen to be between $0.01$ and $0.2$.
The MIT VisTex database as provided on Moodle consists of a set of 167 RGB texture images of sizes (512, 512, 3). Download the ZIP file, unpack it and make yourself familiar with the directory structure.
a) After preprocessing the entire image set (converting to normalized grayscale matrices), divide the images into non-overlapping tiles of size (64, 64) and create a centered data matrix X_c of size (p, N) from them, where p=64\*64 and N=167\*(512/64)\*(512/64).
```python
```
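The cell above is left empty in the source; a possible sketch follows (the directory name `vistex/` and the flat `.png` layout are assumptions — adapt them to the actual structure of the unpacked ZIP). It reuses the imports from Task 1:
```python
path = 'vistex/'  # assumed location of the unpacked VisTex images
tiles = []
for fn in sorted(fn for fn in os.listdir(path) if fn.endswith('.png')):
    im = imageio.imread(path + fn).astype('float64') / 255.0
    gray = im[..., :3].mean(axis=2)  # normalized grayscale
    for r in range(0, 512, 64):      # non-overlapping 64x64 tiles
        for c in range(0, 512, 64):
            tiles.append(gray[r:r+64, c:c+64].reshape(64*64))
X = np.stack(tiles, axis=1)          # shape (4096, 167*64)
X_c = X - np.mean(X, axis=1, keepdims=True)
```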
b) Compute the SVD of X_c and make sure the singular values are sorted in descending order.
```python
```
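A sketch for the empty cell above; as in Task 1, `np.linalg.svd` already returns the singular values in descending order:
```python
U, Sigma, VT = np.linalg.svd(X_c, full_matrices=False)
assert np.all(np.diff(Sigma) <= 0)  # descending order
```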
c) Plot the fraction of energy contained in the projection error for the principal subspace dimensions 0 to p. How many principal vectors do you need to retain 80%, 90%, 95% or 99% of the original data energy?
```python
```
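Since $\mathbf{P}$ projects onto the span of the first $k$ left singular vectors, the projection error energy is $\|\mathbf{X} - \mathbf{PX}\|_F^2 = \sum_{i>k} \sigma_i^2$, so the fraction follows directly from the singular values. A sketch for the empty cell above:
```python
energy = Sigma**2
err = 1.0 - np.cumsum(energy) / np.sum(energy)
err = np.concatenate(([1.0], err))  # k = 0 retains no energy
plt.plot(err)
plt.xlabel('principal subspace dimension $k$')
plt.ylabel('fraction of energy in projection error')
plt.show()
for retain in [0.8, 0.9, 0.95, 0.99]:
    k = int(np.argmax(err <= 1.0 - retain))  # smallest k meeting the bound
    print('{:.0%} of the energy: k = {}'.format(retain, k))
```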
d) Discuss: Can you imagine a scenario, where energy is a bad measure of useful information?
```python
```
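A possible answer, one scenario among many: energy measures variance, not relevance. If the images share a high-variance nuisance factor — e.g. global illumination changes that dominate the pixel variance — the leading principal components capture the lighting rather than the content, while the discriminative information lives in low-energy directions. An energy criterion would then retain the nuisance and could discard exactly the information we care about.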
|
{"hexsha": "0cf86713f443311e6980a06107baf12f561c033f", "size": 37081, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "information-retrieval-exercises/lab06.ipynb", "max_stars_repo_name": "achmart/inforet", "max_stars_repo_head_hexsha": "3596ff971207728a42b335e71608b0b96e241228", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "information-retrieval-exercises/lab06.ipynb", "max_issues_repo_name": "achmart/inforet", "max_issues_repo_head_hexsha": "3596ff971207728a42b335e71608b0b96e241228", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "information-retrieval-exercises/lab06.ipynb", "max_forks_repo_name": "achmart/inforet", "max_forks_repo_head_hexsha": "3596ff971207728a42b335e71608b0b96e241228", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 118.8493589744, "max_line_length": 16156, "alphanum_fraction": 0.8606564009, "converted": true, "num_tokens": 1340}
|
##################################################################
# Match two patterns in one two-pattern image
# Works for color images only
#
# Copyright (c) 2017 Alexey Yastrebov
# MIT License, see LICENSE file.
##################################################################
#coding=cp1251
from keras.models import load_model
from keras.preprocessing import image as image_utils
import numpy as np
import cv2
import os
import sys
cls_win_width, cls_win_height = 32, 60
def matchPatterns(img):
    # Add a batch dimension and run the classifier; the match score is the
    # single output of the network.
    np_images = np.expand_dims(img, axis=0)
    res = model.predict(np_images)
    return res[0][0]
def matchPatternsFromFile(imagefn):
    img = cv2.imread(imagefn)
img = cv2.resize(img,(cls_win_width,cls_win_height))
img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
img = img.astype(float)/255
match = matchPatterns(img)
return match
def matchPatternsFromDir(dir):
    fns = os.listdir(dir)
print("")
for fn in fns:
if fn.endswith(".png") or fn.endswith(".bmp") or fn.endswith(".jpg"):
v = matchPatternsFromFile(dir+"/"+fn)
print("{} {}".format(fn,v))
#===== main =====
model = load_model(sys.argv[1])
match = matchPatternsFromDir(sys.argv[2])
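# Example invocation (hypothetical file names):
#   python match_patterns.py model.h5 samples_dir
# prints one "<filename> <score>" line per image found in samples_dir.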
|
{"hexsha": "e52e633efc095d22e93630767e43fde1b80f5149", "size": 1509, "ext": "py", "lang": "Python", "max_stars_repo_path": "classifier/match_patterns.py", "max_stars_repo_name": "a-yastrebov/keratools", "max_stars_repo_head_hexsha": "6a20564174e11e0a8430edc052b60f3acca2b732", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-07-27T07:59:09.000Z", "max_stars_repo_stars_event_max_datetime": "2017-07-27T07:59:09.000Z", "max_issues_repo_path": "classifier/match_patterns.py", "max_issues_repo_name": "a-yastrebov/keratools", "max_issues_repo_head_hexsha": "6a20564174e11e0a8430edc052b60f3acca2b732", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "classifier/match_patterns.py", "max_forks_repo_name": "a-yastrebov/keratools", "max_forks_repo_head_hexsha": "6a20564174e11e0a8430edc052b60f3acca2b732", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.5223880597, "max_line_length": 72, "alphanum_fraction": 0.5878064944, "include": true, "reason": "import numpy", "num_tokens": 389}
|
from mod_copeland_yateesh import sample_complexity
args = {}
# args['heuristic'] = 'random'
args['heuristic'] = 'greedy'
# args['heuristic'] = 'mod_dcb'
args['n_voters'] = 4639
args['alpha'] = 0.05
args['seed'] = 42
args['ques_limit'] = 5
args['gamma'] = 0.5
args['probs'] = [0.05, 0.1, 0.2, 0.4]
q_limits = [1, 2, 3, 5, 8, 10, 13, 15, 20, 25, 30]
# q_limits = [1]
# gammas = [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]
# gammas = [0.0]
greedy_itrs = []
random_itrs = []
seeds = [0, 1, 2, 3, 4]
# seeds = [0]
# for seed in seeds:
# args['seed'] = seed
# itr, winner = sample_complexity(args)
# print("seed", seed, "itr", itr, "winner", winner)
# random_itrs.append(itr)
#
# print(random_itrs)
# print(sum(random_itrs)/6)
# for seed in seeds:
# seed_vals = []
# args['seed'] = seed
# for q_limit in q_limits:
# args['ques_limit'] = q_limit
# print("Que. limit ", q_limit, "started")
# itr, winner = sample_complexity(args)
# print("seed", seed, "itr", itr, "winner", winner)
# seed_vals.append(itr)
# greedy_itrs.append(seed_vals)
# ###
# print(greedy_itrs)
# print(sample_complexity(args))
greedy_itrs = [[283, 199, 167, 167, 167, 167, 167, 167, 167, 167, 167], [209, 160, 109, 93, 93, 93, 93, 93, 93, 93, 93], [216, 169, 112, 104, 104, 110, 104, 104, 104, 104, 104], [228, 124, 116, 116, 116, 116, 116, 116, 116, 116, 116], [479, 362, 363, 363, 362, 362, 363, 363, 363, 363, 363]]
dcb_itrs = [[499, 373, 343, 170, 180, 179, 180, 180, 180, 180, 180], [893, 714, 660, 546, 547, 468, 340, 79, 79, 79, 79], [672, 298, 231, 201, 207, 180, 166, 169, 169, 169, 169], [940, 432, 310, 310, 116, 175, 194, 198, 198, 198, 198], [481, 523, 357, 352, 446, 365, 346, 345, 345, 345, 345]]
dcb_mod_itrs = [[589, 385, 183, 157, 168, 168, 175, 179, 178, 175, 174], [711, 558, 553, 469, 427, 299, 291, 57, 117, 119, 118], [454, 349, 267, 214, 168, 168, 165, 153, 103, 72, 103], [648, 440, 302, 310, 117, 120, 181, 180, 198, 197, 28], [564, 364, 448, 361, 345, 447, 363, 56, 345, 345, 345]]
import numpy as np
import matplotlib.pyplot as plt
def convert(lst):
    # Element-wise average of the per-seed iteration counts over the 5 seeds.
    lst = [np.array(i) for i in lst]
    lst = sum(lst)
    lst = [i / 5 for i in lst]
    return lst
print(convert(greedy_itrs))
print(convert(dcb_itrs))
print(convert(dcb_mod_itrs))
import seaborn as sns
sns.set_theme()
plt.plot(q_limits, convert(greedy_itrs), label="Greedy (ours)")
# plt.plot(q_limits, convert(dcb_itrs), label="DCB")
plt.plot(q_limits, convert(dcb_mod_itrs), label="DCB Extended")
plt.xlabel("Num of questions asked")
plt.ylabel("Avg sample complexity")
plt.title("US election 2012 data (16 candidates)")
plt.legend()
plt.savefig("us_comp.png")
plt.show()
# greedy_itrs = [[257, 307, 377, 424, 297, 453], [252, 303, 377, 424, 297, 453], [251, 307, 377, 424, 297, 453], [254, 307, 377, 424, 297, 453], [254, 307, 377, 424, 297, 453], [254, 307, 377, 424, 297, 453]]
#
# print([sum(i)/6 for i in greedy_itrs])
#
# import matplotlib.pyplot as plt
#
# plt.plot(gammas, [sum(i)/6 for i in greedy_itrs])
# plt.xlabel("Gamma")
# plt.ylabel("Average sample complexity")
# plt.show()
# res = [[649, 496, 496, 496, 496, 496], [565, 496, 496, 496, 496, 496], [524, 747, 782, 526, 526, 526]]
#
# def suml(l1, l2):
# return [(l1[i] + l2[i]) for i in range(len(l1))]
#
# resf = suml(suml(res[0], res[1]), res[2])
# resf = [i/3 for i in resf]
#
# import matplotlib.pyplot as plt
#
# plt.plot(gammas, resf)
# plt.xlabel("Gamma Values")
# plt.ylabel("Sample complexity averaged over 3 seeds")
# plt.show()
|
{"hexsha": "40a0ac9ef1f0942324ba2dd8c8677bdc56cbdcb2", "size": 3525, "ext": "py", "lang": "Python", "max_stars_repo_path": "cswor/runner_us_data.py", "max_stars_repo_name": "satvikmashkaria/CS748_project", "max_stars_repo_head_hexsha": "57185b6a467e638d6db96cf1c7d3dbe8d6bf5032", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-16T18:39:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-16T18:39:51.000Z", "max_issues_repo_path": "cswor/runner_us_data.py", "max_issues_repo_name": "satvikmashkaria/CS748_project", "max_issues_repo_head_hexsha": "57185b6a467e638d6db96cf1c7d3dbe8d6bf5032", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cswor/runner_us_data.py", "max_forks_repo_name": "satvikmashkaria/CS748_project", "max_forks_repo_head_hexsha": "57185b6a467e638d6db96cf1c7d3dbe8d6bf5032", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.900990099, "max_line_length": 296, "alphanum_fraction": 0.6164539007, "include": true, "reason": "import numpy", "num_tokens": 1456}
|
from contextlib import contextmanager
from typing import Tuple
import numpy as np
import scipy.sparse as sp
@contextmanager
def random_state_context(seed: int):
state = np.random.get_state()
try:
np.random.seed(seed)
yield
finally:
np.random.set_state(state)
def with_cliques(
adjacency: sp.csr_matrix, clique_size: int, num_cliques: int = 1
) -> Tuple[sp.csr_matrix, np.ndarray]:
"""
Get adjacency matrix of dataset with cliques.
A clique is defined as a set of nodes where each node is a neighbor of every other
node.
Args:
adjacency: adjacency matrix to start with.
clique_size: size of each clique.
num_cliques: number of cliques to add.
Returns:
augmented_adjacency: adjacency with cliques added.
cliques: [num_cliques, clique_size] int32 array of indices of clique nodes.
"""
num_nodes = adjacency.shape[0]
adjacency = adjacency.tolil()
dtype = adjacency.dtype
rows = adjacency.rows
data = adjacency.data
cliques = np.empty((num_cliques, clique_size), dtype=np.int32)
for i in range(num_cliques):
clique = np.random.choice(num_nodes, clique_size, replace=False)
clique.sort()
cliques[i] = clique
for c in clique:
row = set(rows[c])
contains_c = c in row
row.update(clique)
if not contains_c:
row.remove(c)
rows[c] = sorted(row)
data[c] = np.ones((len(row),), dtype=dtype)
return adjacency.tocsr(), cliques
def with_attribute_anomolies(
node_attrs: sp.csr_matrix, num_candidates: int, num_anomolies: int = 1
) -> Tuple[sp.csr_matrix, np.ndarray]:
"""
Get attribute matrix with some rows replaced with others.
For each anomoly, we replace the attributes with those of the node with attributes
furthest away from the original w.r.t. Euclidean norm from `num_candidates`
candidates of the original.
Args:
node_attrs: [num_nodes, num_attrs] sparse attributes.
num_candidates: number of candidates per anomoly.
num_anomolies: number of anomolies to overwrite.
Returns:
augmented_node_attrs: node attributes with anomolous node attributes replaced.
mapping: [num_anomolies, 2] int32 array, where
`augmented_node_attrs[mapping[i, 1]] == node_attrs[mapping[i, 0]]`
"""
num_nodes = node_attrs.shape[0]
node_attrs_lil = node_attrs.tolil()
anomolies = np.random.choice(num_nodes, num_anomolies, replace=False)
anomolies.sort()
mapping = np.empty((num_anomolies, 2), dtype=np.int32)
for i, a in enumerate(anomolies):
candidates = np.random.choice(num_nodes, num_candidates, replace=False)
norms = np.linalg.norm(
node_attrs[a].todense() - node_attrs[candidates].todense(), axis=-1
)
max_norm = np.argmax(norms)
replacement = candidates[max_norm]
node_attrs_lil[a] = node_attrs[replacement]
mapping[i] = a, replacement
return node_attrs_lil.tocsr(), mapping
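# ---- Minimal usage sketch (synthetic sizes, purely illustrative) ----
if __name__ == "__main__":
    rng = sp.random(100, 100, density=0.05, format="csr")
    adjacency = ((rng + rng.T) > 0).astype(np.float64).tocsr()  # symmetric adjacency
    with random_state_context(0):
        adj_aug, cliques = with_cliques(adjacency, clique_size=5, num_cliques=2)
        attrs = sp.random(100, 32, density=0.1, format="csr")
        attrs_aug, mapping = with_attribute_anomolies(attrs, num_candidates=10,
                                                      num_anomolies=3)
    print(cliques.shape, mapping.shape)  # (2, 5) (3, 2)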
|
{"hexsha": "d92c3e021641a226137786a5342b693d51c519a6", "size": 3103, "ext": "py", "lang": "Python", "max_stars_repo_path": "gud/anomolies.py", "max_stars_repo_name": "jackd/gud", "max_stars_repo_head_hexsha": "7bd4befca53fa5a6b9d14a60e11d0898ade78267", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gud/anomolies.py", "max_issues_repo_name": "jackd/gud", "max_issues_repo_head_hexsha": "7bd4befca53fa5a6b9d14a60e11d0898ade78267", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gud/anomolies.py", "max_forks_repo_name": "jackd/gud", "max_forks_repo_head_hexsha": "7bd4befca53fa5a6b9d14a60e11d0898ade78267", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.7282608696, "max_line_length": 86, "alphanum_fraction": 0.664840477, "include": true, "reason": "import numpy,import scipy", "num_tokens": 753}
|
# google imports
# standard library imports
import sys
import copy
import pickle
import os
from collections import Counter
from io import BytesIO
from zipfile import ZipFile
from math import ceil
import importlib
import urllib.request
# math imports
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from scipy import stats
import seaborn as sns
sns.set()
# Jupyter Imports
from IPython.display import display
# from google.colab import files
# ML imports
# models
from sklearn.naive_bayes import CategoricalNB
from sklearn.tree import ExtraTreeClassifier, DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from xgboost import XGBClassifier
from sklearn.linear_model import RidgeCV, SGDRegressor
from sklearn.svm import LinearSVR
# preprocessing
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler, OneHotEncoder, KBinsDiscretizer
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.model_selection import train_test_split
# sampling
from imblearn.over_sampling import RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler, EditedNearestNeighbours, RepeatedEditedNearestNeighbours
from imblearn.combine import SMOTEENN, SMOTETomek
# metrics
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import plot_precision_recall_curve, plot_confusion_matrix, plot_roc_curve
from sklearn.metrics import f1_score, roc_auc_score, roc_curve, accuracy_score
# other
from imblearn.pipeline import make_pipeline
from sklearn.model_selection import GridSearchCV
# custom imports
import feature_utils as feat_util
def response_boxplot(df, category, verbose=False):
print('\n'+category)
fig, axs = plt.subplots(1, 3, figsize=(20, 10))
qs = ['EFL_yes_no', 'skill_low_med_high', 'enjoy_high_med_low_none']
for i, f in enumerate(['R0_quiz_response', 'R1_quiz_response', 'R2_quiz_response', ]):
if verbose:
print(qs[i])
bp = df.boxplot(column=category, by=df[f].astype(
'category'), ax=axs[i])
bp.set_xlabel('')
for choice in range(df[f].min(), df[f].max()+1):
query = f"{f}=={choice}"
cat_df = df.query(query)[category]
num_chose = len(cat_df)
mean = cat_df.mean()
std = cat_df.std()
if verbose:
print(
f'{f} # chose {choice}: {num_chose} ({round(num_chose/len(df)*100)}%). Avg {mean}, std {std}.')
plt.suptitle(f'{category} Boxplot')
fig.show()
def group_by_func(df, func, title='', show=True):
r0_groups = {0: 'native', 1: 'nonnative'}
r1_groups = {0: 'not very good skill',
1: 'okay skill', 2: 'very good skill'}
r2_groups = {0: 'really enjoy', 1: 'enjoy', 2: 'okay', 3: 'not enjoy'}
def group_string(r0, r1, r2): return ', '.join(
[r0_groups[r0], r1_groups[r1], r2_groups[r2]])
result_dfs = [pd.DataFrame(index=r1_groups.values(), columns=r2_groups.values(
)), pd.DataFrame(index=r1_groups.values(), columns=r2_groups.values())]
if show:
print(f'{"-"*6} {title} {"-"*6}')
for r0 in [0, 1]:
subtitle = "Nonnatives" if r0 else "Natives"
if show:
print(f'\n{subtitle}:')
tdf0 = df.query(f"R0_quiz_response == {r0}")
for r1 in [0, 1, 2]:
tdf1 = tdf0.query(f"R1_quiz_response == {r1}")
for r2 in [0, 1, 2, 3]:
tdf2 = tdf1.query(f"R2_quiz_response == {r2}")
result_dfs[r0].loc[r1_groups[r1], r2_groups[r2]
] = func(df, tdf0, tdf1, tdf2)
if show:
display(result_dfs[r0])
return result_dfs
def standard_group_by_func(fulldf, per_category_stats_list=None):
    per_category_stats_list = per_category_stats_list or ['sess_count_clicks',
'sess_count_hovers',
'sess_meaningful_action_count',
'sess_EventCount',
'sess_count_notebook_uses',
'sess_avg_time_between_clicks',
'sess_first_enc_words_read',
'sess_first_enc_boxes_read',
'sess_num_enc',
'sess_first_enc_duration',
'sess_first_enc_avg_wps',
'sess_first_enc_var_wps',
'sess_first_enc_avg_tbps',
'sess_first_enc_var_tbps',
'sess_start_obj',
'sess_end_obj',
'start_level',
'max_level',
'sessDuration']
dfs_list = []
title_list = []
def df_func(df, tdf0, tdf1, tdf2): return len(tdf2)
title = 'count'
dfs = group_by_func(fulldf, df_func, title)
dfs_list.append(dfs)
title_list.append(title)
def df_func(df, tdf0, tdf1, tdf2): return round(len(tdf2)/len(df)*100, 2)
title = 'percent total pop'
dfs = group_by_func(fulldf, df_func, title)
dfs_list.append(dfs)
title_list.append(title)
def df_func(df, tdf0, tdf1, tdf2): return round(len(tdf2)/len(tdf0)*100, 2)
title = 'percent native class pop'
dfs = group_by_func(fulldf, df_func, title)
dfs_list.append(dfs)
title_list.append(title)
for category in per_category_stats_list:
df_func = get_avg_std_df_func(category)
title = f'(avg, std) {category}'
dfs = group_by_func(fulldf, df_func, title)
dfs_list.append(dfs)
title_list.append(title)
return title_list, dfs_list
def get_avg_std_df_func(category_name):
def inner(df, tdf0, tdf1, tdf2):
mean = tdf2[category_name].mean()
std = tdf2[category_name].std()
if not pd.isna(mean):
mean = round(mean, 2)
if not pd.isna(std):
std = round(std, 2)
return (mean, std)
return inner
def html_stats(df):
    html_strs = ['<div class="container">', '<h3>Stats</h3>']
qs = ['EFL_yes_no', 'skill_low_med_high', 'enjoy_high_med_low_none']
html_strs.append(f'<p> Total pop {len(df)} </p>')
for i, f in enumerate(['R0_quiz_response', 'R1_quiz_response', 'R2_quiz_response', ]):
html_strs.append(f'<p> {qs[i]}</p>')
for choice in range(df[f].min(), df[f].max()+1):
query = f"{f}=={choice}"
cat_df = df.query(query)
num_chose = len(cat_df)
html_strs.append(
f'<p>{f} # chose {choice}: {num_chose} ({round(num_chose/len(df)*100)}%).</p>')
return '\n'.join(html_strs+['</div>'])
def full_html(base_df, title_list, dfs_list, suptitle=None):
HEADER = '''<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Document</title>
</head>
<body>
<style>
.flex-container {
display: flex;
flex-wrap: wrap;
}
.container {
border: thick solid black;
padding: 10px;
margin: 5px;
}
.container table:nth-of-type(2) td {
background-color: rgb(161, 161, 230);
}
.container table:nth-of-type(2) th {
background-color: rgb(20, 20, 194);
color: white;
}
.container table:nth-of-type(2n-1) td {
background-color: rgb(235, 158, 158);
}
.container table:nth-of-type(2n-1) th {
background-color: rgb(160, 11, 11);
color: white;
}
.break {
flex-basis: 100%;
height: 0;
}
</style>
<div class="flex-container">'''
FOOTER = ''' </div>
</body>
</html>'''
def table_header(title): return f''' <div class="container">
<h3>{title}</h3>'''
table_footer = ''' </div>'''
def table_html(title, dfs): return '\n'.join([table_header(
title), "<p>Natives:</p>", dfs[0].to_html(), "<p>Nonnatives:</p>", dfs[1].to_html(), table_footer])
if suptitle is not None:
suptitle = f'<h2>{suptitle}</h2>\n<div class="break"></div> <!-- break -->'
else:
suptitle = ''
return '\n'.join([HEADER, suptitle, html_stats(base_df)] +
[table_html(t, dfs) for t, dfs in zip(title_list, dfs_list)] +
[FOOTER])
def download_full_html(base_df, title_list, dfs_list, filename, suptitle=None):
    with open(filename, 'w+') as f:
        f.write(full_html(base_df, title_list, dfs_list, suptitle=suptitle))
    print("Wrote to", filename)
    # `files.download` only exists inside Google Colab (the import above is
    # commented out); skip the download elsewhere.
    try:
        from google.colab import files
        files.download(filename)
    except ImportError:
        pass
onext_int_feats = [f'obj{i}_onext_int' for i in range(80)]
onext_int_cats = [["nan", 1],
["nan", 11],
["nan", 12, 86, 111, 125],
["nan", 13, 14, 113, 116, 118],
["nan", 14, 15, 113, 114, 116, 118],
["nan", 13, 15, 113, 114, 116, 118],
["nan", 16, 86, 115, 118, 132, 161],
["nan", 17, 86, 115, 118, 128, 161],
["nan", 18, 86, 115, 118, 161],
["nan", 19, 86, 117, 118, 127, 133, 134, 161],
["nan", 20, 133, 134, 136],
["nan", 2, 80, 81, 82, 83],
["nan", 21, 86, 117, 127, 136, 137, 161],
["nan", 22, 137, 141],
["nan", 23, 24, 86, 117, 127, 136, 161],
["nan", 23, 24, 117, 127, 136, 161],
["nan", 25, 86, 117, 118, 127, 136, 140, 147, 151, 161],
["nan", 26, 142, 145],
["nan", 27, 143],
["nan", 28, 86, 117, 118, 136, 140, 150, 161],
["nan", 29, 119, 130],
["nan", 29, 30, 35, 86, 117, 118, 126, 136, 140, 149],
["nan", 3, 80, 82, 83, 86, 87, 88, 93],
["nan", 31, 38],
["nan", 32, 153],
["nan", 33, 154],
["nan", 34, 155],
["nan", 35, 156],
["nan", 36, 157],
["nan", 37, 158],
["nan", 30],
["nan", 39, 163],
["nan", 40, 160],
["nan", 3],
["nan", 41, 164, 166],
["nan", 42, 166],
["nan", 30],
["nan", 44, 85, 125],
["nan", 29, 45, 47, 84, 118, 125, 136, 140, 149, 168, 169, 184],
["nan", 45, 46, 169, 170],
["nan", 29, 45, 47, 92, 118, 136, 140, 149, 169, 184],
["nan", 29, 45, 48, 92, 118, 140, 149, 168, 184],
["nan", 46, 49, 168],
["nan", 46, 50, 168, 170],
["nan", 5, 80, 82, 83, 86, 89, 91, 95, 97, 125],
["nan", 29, 51, 92, 118, 136, 140, 149, 168, 184],
["nan", 52, 92, 118, 136, 149, 171, 184],
["nan", 53, 54, 92, 118, 136, 140, 149, 184],
["nan", 53, 54, 55, 59, 60, 90, 92, 94,
118, 136, 140, 149, 168, 184],
["nan", 53, 55, 59, 60, 90, 92, 94, 118, 136, 140, 149, 184],
["nan", 55, 56, 59, 60, 149, 174],
["nan", 57, 59, 60, 174],
["nan", 58, 59, 60, 136, 172, 174, 184],
["nan", 29, 59, 60, 61, 92, 118, 136, 149, 168, 172, 184],
["nan", 55, 56, 57, 58, 60, 61, 140, 172, 174, 184],
["nan", 6, 80, 82, 83, 86, 98, 100, 125],
["nan", 55, 56, 57, 58, 59, 61, 92, 118,
136, 140, 149, 172, 174, 184],
["nan", 59, 62, 136, 140, 149, 172, 173, 175, 184],
["nan", 63, 64, 176],
["nan", 64, 66, 149, 175, 184],
["nan", 29, 65, 66, 92, 118, 136, 140, 172, 175, 177, 184],
["nan", 66, 67, 68, 92, 118, 136, 140, 146, 175, 177, 184],
["nan", 67, 144],
["nan", 29, 64, 65, 68, 92, 118, 131, 136,
140, 148, 149, 172, 175, 177, 184],
["nan", 92, 118, 122, 123, 124, 131, 136, 140,
146, 148, 168, 172, 175, 177, 184],
["nan", 70],
["nan", 7],
["nan", 71, 178],
["nan", 72, 179],
["nan", 73, 180],
["nan", 74, 181],
["nan", 75, 182],
["nan", 69],
["nan", 77, 78, 185],
["nan", 78, 185],
["nan", 79],
[0],
["nan", 8],
["nan", 9, 103],
["nan", 104, 105, 108]]
QA_1_feats = [f'Q{i}_A1' for i in range(19)]
QA_1_cats = [['0', 'A', 'B', 'C', 'D'],
['0', 'A', 'B', 'C', 'D'],
['0', 'A', 'B', 'C', 'D'],
['0', 'A', 'B', 'C', 'D'],
['0', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q'],
['0', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
'I', 'J', 'K', 'L', 'M', 'O', 'P', 'Q'],
['0', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q'],
['0', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q'],
['0', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q'],
['0', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q'],
['0', '?', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q'],
['0', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q'],
['0', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q'],
['0', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q'],
['0', 'A', 'B', 'C', 'D', 'F', 'G', 'I', 'M', 'N', 'O', 'P', 'Q',
'R', 'S', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f'],
['0', 'Q', 'V', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f'],
['0', '?', 'X', 'Y', 'Z', 'b', 'c', 'd', 'e', 'f'],
['0', 'X', 'Y', 'b', 'c', 'd', 'e', 'f'],
['0', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f']]
def get_preprocessor(df, scaler=StandardScaler(), imputer=SimpleImputer(strategy='constant'), bool_dtype='int64'):
"""
By default has a number of steps:
1. drops columns from use in preprocessor if present:
- [f'Q{q}_answers' for q in range(19)]
- ["play_year", "play_month", "play_day", "play_hour", "play_minute", "play_second"]
- ["_continue", "continue", "save_code", "music", "hq", "fullscreen", "persistentSessionID"]
2. Creates a preprocessor for all non-y columns and non-boolean columns with the following steps:
a. Standard Scaler (0 mean, 1 std)
b. Simple Imputer(strategy='constant') (fill NaN with 0)
3. Fits the preprocessor to the given X
4. returns the unfitted preprocessor (sklearn pipeline), and the unprocessed X dataframe
:param df: jowilder dataframe
:param scaler: sklearn compatible scaler
:param imputer: sklearn compatible imputer
:return: the unfitted preprocessor (sklearn pipeline), and the unprocessed X dataframe
"""
df = df.drop(
[f'Q{q}_answers' for q in range(19)] + ["play_year", "play_month", "play_day", "play_hour", "play_minute",
"play_second",
"_continue", "continue", "save_code", "music", "hq", "fullscreen",
"persistentSessionID", ], axis=1, errors='ignore').copy()
y_cols, bool_cols, num_cols = separate_columns(df, bool_dtype=bool_dtype)
X = df.loc[:, num_cols+bool_cols]
# too complicated to allow for pipeline order
# pipeline_strings = [pipeline_order[i:i+2] for i in range(0,len(pipeline_order),2)]
# transformers = []
# num_sa, num_sc, num_im = 0,0,0
# for s in pipeline_strings:
# if s == 'Sa':
# transformer = make_pipeline(sampler)
# cols = num_cols + bool_cols
# name = f'{s}{num_sa}'
# num_sa += 1
# elif s == 'Sc':
# transformer = scaler
# name = f'{s}{num_sc}'
# cols = num_cols
# num_sc += 1
# elif s == 'Im':
# transformer = imputer
# name = f'{s}{num_im}'
# cols = num_cols
# num_im += 1
# else:
# raise ValueError("Pipeline substrings must be Sa Sc or Im")
# transformers.append((name, transformer, cols))
def col_str_to_int(col_strs): return [
X.columns.get_loc(s) for s in col_strs]
column_transformer = ColumnTransformer(
transformers=[
('num', make_pipeline(scaler, imputer), col_str_to_int(num_cols)),
('bool', 'passthrough', col_str_to_int(bool_cols))
],
remainder='drop')
return column_transformer, X
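# Minimal usage sketch for get_preprocessor (`df` stands for a hypothetical
# jowilder feature dataframe; not part of the original module):
#   preprocessor, X = get_preprocessor(df)
#   Xt = preprocessor.fit_transform(X)  # numeric cols scaled + imputed, bool cols passed through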
def get_ys(df):
"""
:rtype: dictionary of y columns (df series). keys: y0,y1,y2,y1_bin,y2_bin,y1_bin_x,y2_bin_x
"""
ys = {}
for key, y_col in [
('y0', 'R0_quiz_response'),
('y1', 'R1_quiz_response'),
('y2', 'R2_quiz_response'),
('y1_bin', 'R1_quiz_response_bin'),
('y1_bin_0v12', 'R1_quiz_response_0v12'),
('y1_bin_01v2', 'R1_quiz_response_01v2'),
('y1_bin_x', 'R1_quiz_response_bin_x'),
('y2_bin', 'R2_quiz_response_bin'),
('y2_bin_x', 'R2_quiz_response_bin_x'),
('y2_bin_0v123', 'R2_quiz_response_bin0v123'),
('y2_bin_01v23', 'R2_quiz_response_bin01v23'),
('y2_bin_012v3', 'R2_quiz_response_bin012v3'),
]:
if y_col in df.columns:
ys[key] = df.loc[:, y_col].astype('category').copy()
return ys
def separate_columns(df, bool_dtype='int64', expect_bool_cols = True) -> (list, list, list):
"""
:param df:
:param bool_dtype: Defaults to 'int64'. Should be int64 if coming from import csv otherwise could be 'uint8'
if coming from the pd dummies.
:return: tuple of lists of column names for y_columns, bool_columns, and integer_columns
"""
y_cols = [col for col in df.columns if 'quiz_response' in col]
bool_cols = [col for col in df.select_dtypes(include=[bool_dtype])
if np.isin(df[col].dropna().unique(), [0, 1]).all() and
col not in y_cols]
num_cols = [
col for col in df.columns if col not in bool_cols and col not in y_cols]
if not bool_cols and expect_bool_cols:
print('Warning! No bool columns. Consider changing bool_dtype="int_64" to "uint8"')
return y_cols, bool_cols, num_cols
end_obj_to_last_Q = {
9: 0,
10: 3,
11: 3,
12: 3,
13: 3,
14: 3,
15: 3,
16: 3,
17: 3,
18: 3,
19: 3,
20: 3,
21: 3,
22: 3,
23: 3,
24: 3,
25: 3,
26: 3,
27: 3,
28: 3,
29: 3,
30: 3,
31: 3,
32: 4,
33: 5,
34: 6,
35: 7,
36: 8,
37: 9,
38: 9,
39: 10,
40: 11,
41: 12,
42: 13,
43: 13,
44: 13,
45: 13,
46: 13,
47: 13,
48: 13,
49: 13,
50: 13,
51: 13,
52: 13,
53: 13,
54: 13,
55: 13,
56: 13,
57: 13,
58: 13,
59: 13,
60: 13,
61: 13,
62: 13,
63: 13,
64: 13,
65: 13,
66: 13,
67: 13,
68: 13,
69: 13,
70: 13,
71: 14,
72: 15,
73: 16,
74: 17,
75: 18,
76: 18,
77: 18,
78: 18,
79: 18,
}
end_obj_to_last_lvl = {
0: 0,
1: 0,
2: 0,
3: 1,
4: 2,
5: 2,
6: 3,
7: 3,
8: 4,
9: 4,
10: 4,
11: 4,
12: 5,
13: 6,
14: 6,
15: 6,
16: 6,
17: 6,
18: 6,
19: 7,
20: 7,
21: 8,
22: 8,
23: 9,
24: 9,
25: 9,
26: 10,
27: 10,
28: 11,
29: 11,
30: 12,
31: 12,
32: 12,
33: 12,
34: 12,
35: 12,
36: 12,
37: 12,
38: 12,
39: 12,
40: 12,
41: 12,
42: 12,
43: 13,
44: 13,
45: 14,
46: 15,
47: 15,
48: 16,
49: 16,
50: 17,
51: 17,
52: 18,
53: 18,
54: 18,
55: 18,
56: 18,
57: 18,
58: 18,
59: 18,
60: 18,
61: 18,
62: 19,
63: 19,
64: 19,
65: 20,
66: 20,
67: 21,
68: 21,
69: 22,
70: 22,
71: 22,
72: 22,
73: 22,
74: 22,
75: 22,
76: 22,
77: 23,
78: 23,
79: 23,
}
class GridSearcher():
    def __init__(self, csv_fpath=None, df=None, preprocessor=None, fillna=0, meta=[], expect_bool_cols=True):
        # either give csv_fpath or a non-empty df.
        assert csv_fpath is not None or (df is not None and not df.empty)
        # load df
        if df is None:
            print(f'Loading from {csv_fpath}...')
            self.df, self.meta = feat_util.open_csv_from_path_with_meta(
                csv_fpath, index_col=0)
        else:
            self.df, self.meta = df, meta
# set X and ys, and preprocessor
if not preprocessor:
self.preprocessor, self.X = get_preprocessor(self.df)
self.X = self.X.fillna(fillna)
else:
_, bool_cols, num_cols = separate_columns(self.df, expect_bool_cols=expect_bool_cols)
self.X = df[bool_cols+num_cols]
self.preprocessor = preprocessor
self.ys = get_ys(self.df)
# set object vars
self.model_dict = {}
self.cur_model = None
def split_data(self):
nonnull_X, nonnull_y = feat_util.remove_nan_labels(self.X, self.y)
X_train, X_test, y_train, y_test = train_test_split(
nonnull_X, nonnull_y, test_size=0.2, random_state=1)
self.X_train, self.X_test, self.y_train, self.y_test = X_train, X_test, y_train, y_test
def set_y(self, y_key=None, other_col=None):
if y_key:
print(f'Switching to {y_key}...')
self.y = self.ys[y_key]
elif other_col:
self.y = self.X[other_col]
self.X = self.X.drop(other_col, axis=1)
else:
print("Did not change y. Invalid inputs.")
self.split_data()
def run_fit(self, classifier, sampler=None, verbose=False, preprocess_twice=True, sampler_index=None, full_pipeline=False):
# fit self.cur_model as a pipeline of the given preprocessor, sampler, preprocessor, classifer
# if preprocess_twice is false, self.cur_model is sampler, preprocessor, classifier
# if full_pipeline and sampler index, self.cur_model is the classifier
# (must be a pipeline containing a sampler or a placeholder (None) for the sampler)
if full_pipeline:
assert sampler_index is not None
clf = classifier
elif preprocess_twice:
clf = make_pipeline(self.preprocessor, sampler,
copy.deepcopy(self.preprocessor), classifier)
sampler_index = 1
else:
clf = make_pipeline(sampler, self.preprocessor, classifier)
sampler_index = 0
self._sampling_pipeline = clf[:sampler_index+1]
self._classifying_pipeline = clf[sampler_index+1:]
if clf[sampler_index] is not None:
self.X_train_sampled, self.y_train_sampled = self._sampling_pipeline.fit_resample(
self.X_train, self.y_train)
else:
self.X_train_sampled, self.y_train_sampled = self.X_train, self.y_train
clf = self._classifying_pipeline
# model_name = f'{sampler} {classifier}'
# if verbose:
# print(f'Running {model_name}.')
self._classifying_pipeline.fit(
self.X_train_sampled, self.y_train_sampled)
self.cur_model = clf
# if verbose:
# print("model trained to: %.3f" %
# clf.score(self.X_train, self.y_train))
# print("model score: %.3f" % clf.score(self.X_test, self.y_test))
return clf
def metrics(self, graph_dir=None, graph_prefix=None, binary_classification=True):
# return list of (metric: float, metric_name: str) tuples of metrics of given classifier (default: self.cur_model)
# can only do metrics for binary classification as of right now
assert binary_classification
metric_list = []
clf = self.cur_model
# label metrics
if graph_prefix:
for flipped_labels in [False, True]:
flipped_labels_suffix = '' if not flipped_labels else '_flipped'
fig, axes = plt.subplots(3, 3, figsize=(20, 20))
for i, (yarray, Xarray, label) in enumerate([(self.y_test, self.X_test, 'test'),
(self.y_train_sampled,
self.X_train_sampled, 'train'),
(self.y_train,
self.X_train, 'train_raw'),
]):
for j, (graph_type, func) in enumerate([
('', plot_confusion_matrix),
('_PR', plot_precision_recall_curve),
('_ROC', plot_roc_curve),
]):
ax = axes[j, i]
graph_yarray = yarray.astype(bool)
if flipped_labels:
graph_yarray = ~graph_yarray
disp = func(clf, Xarray, graph_yarray, ax=ax)
title = f'{label}{graph_type}{flipped_labels_suffix}'
ax.set_title(title)
if graph_type in ['_PR', '_ROC']:
ax.set_xlim(-0.05, 1.05)
ax.set_ylim(-0.05, 1.05)
ax.set_aspect('equal', adjustable='box')
suptitle = f'{graph_prefix}{flipped_labels_suffix}'
plt.suptitle(suptitle)
savepath = os.path.join(graph_dir, f'{suptitle}.png')
fig.savefig(savepath, dpi=100)
plt.close()
for i, (yarray, Xarray, label) in enumerate([(self.y_test, self.X_test, 'test'),
(self.y_train_sampled,
self.X_train_sampled, 'train'),
(self.y_train,
self.X_train, 'train_raw'),
]):
y_pred = clf.predict(Xarray)
y_prob = clf.predict_proba(Xarray)[:, 1]
y_true = yarray
X_shape = Xarray.shape
metric_list.extend(feat_util.binary_metric_list(
y_true=y_true, y_pred=y_pred, y_prob=y_prob, X_shape=X_shape,
label_prefix=f'{label}_'
))
return metric_list
def model_stats(self, classifier=None, graph=True):
# counter, auc, and optional graph of given classifer (default: self.cur_model)
classifier = classifier or self.cur_model
y_prob = classifier.predict_proba(self.X_test)[:, 1]
print(f"dimension y_prob: {y_prob.shape}")
print(f"dimension y_test: {self.y_test.shape}")
print(f'Predicts:', Counter(list(classifier.predict(self.X_test))))
print(f'True Labels:', Counter(self.y_test))
if graph:
fpr, tpr, thres = roc_curve(self.y_test, y_prob)
plt.plot(fpr, tpr, color='green')
plt.plot([0, 1], [0, 1], color='red', linestyle='--')
plt.show()
roc_auc = roc_auc_score(self.y_test, y_prob)
print(f"ROC-AUC Score: {roc_auc}")
def classification_report(self):
# classification report on current model
y_true = self.y_test
y_pred = self.cur_model.predict(self.X_test)
print(classification_report(y_true, y_pred))
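# Minimal GridSearcher usage sketch (file name and target key are assumptions,
# not part of the original module):
#   gs = GridSearcher(csv_fpath='jowilder_feats.csv')
#   gs.set_y(y_key='y1_bin')
#   gs.run_fit(LogisticRegression(max_iter=1000),
#              sampler=RandomOverSampler(random_state=0))
#   gs.model_stats()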
class JWWindowSelector:
ycols = ['R0_quiz_response','R1_quiz_response','R2_quiz_response','R1_quiz_response_bin',
'R1_quiz_response_0v12','R1_quiz_response_01v2','R1_quiz_response_bin_x',
'R2_quiz_response_bin','R2_quiz_response_bin_x','R2_quiz_response_bin0v123',
'R2_quiz_response_bin01v23','R2_quiz_response_bin012v3']
INTERACTION = 0
LEVEL = 1
QUIZ = 2
OBJECTIVE = 3
def __init__(self, csv_fpath=None, df=None, meta=None):
assert csv_fpath is not None or df is not None
# load df
if df is None:
print(f'Loading from {csv_fpath}...')
self.df, self.meta = feat_util.open_csv_from_path_with_meta(
csv_fpath, index_col=0)
else:
self.df = df
self.meta = meta or []
self.df_cols = list(df.columns)
@staticmethod
def get_abbrev(window_type):
if window_type == JWWindowSelector.INTERACTION:
return 'int'
if window_type == JWWindowSelector.LEVEL:
return 'lvl'
if window_type == JWWindowSelector.QUIZ:
return 'q'
if window_type == JWWindowSelector.OBJECTIVE:
return 'obj'
@staticmethod
def get_prefix(n, window_type):
if window_type == JWWindowSelector.INTERACTION:
return f'int{n}_i'
if window_type == JWWindowSelector.LEVEL:
return f'lvl{n}_'
if window_type == JWWindowSelector.QUIZ:
return f'Q{n}_'
if window_type == JWWindowSelector.OBJECTIVE:
return f'obj{n}_o'
@staticmethod
def get_window_range(window_type, skip_Q23=False):
if window_type == JWWindowSelector.INTERACTION:
return range(189)
if window_type == JWWindowSelector.LEVEL:
return range(24)
if window_type == JWWindowSelector.QUIZ:
if not skip_Q23:
return range(19)
else:
return [0,1,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18]
if window_type == JWWindowSelector.OBJECTIVE:
return range(80)
def cols_startwith(self, prefix):
return [c for c in self.df_cols if c.startswith(prefix)]
def get_feats(self, n, window_type):
prefix = self.get_prefix(n, window_type)
feats = self.cols_startwith(prefix)
return feats
def get_filter_queries(self, n, window_type, max_seconds_per_word=2):
prefix = JWWindowSelector.get_prefix(n, window_type)
queries = [f"R1_quiz_response == R1_quiz_response"]
if window_type in [JWWindowSelector.INTERACTION, JWWindowSelector.LEVEL]:
queries.extend([
f"{prefix}first_enc_duration == {prefix}first_enc_duration",
f"{prefix}first_enc_duration > 0",
])
if window_type == JWWindowSelector.QUIZ:
queries.extend([
f'{prefix}A1_nan!=1'
])
elif window_type == JWWindowSelector.INTERACTION:
num_words = self.df[f"int{n}_ifirst_enc_words_read"].max()
queries.extend([
f"{prefix}first_enc_words_read == {num_words}",
f"{prefix}time_to > 0",
f"{prefix}first_enc_duration < {prefix}first_enc_words_read*{max_seconds_per_word}",
])
elif window_type == JWWindowSelector.OBJECTIVE:
if n < 79:
queries.append(f'obj{n}_onext_int_nan==0')
queries.append(f"obj{n}_otime_to_next_obj < 600")
queries.append(f"obj{n}_otime_to_next_obj > 0 ")
elif window_type == JWWindowSelector.LEVEL:
queries.append(f"{prefix}time_in_level < 1200")
queries.append(f"{prefix}time_in_level > 0")
queries.extend([f"R{i}_quiz_response == R{i}_quiz_response" for i in [0,1,2]])
return queries
def get_base_meta(self):
return self.meta
@staticmethod
def join_XY(X,Y):
return X.join(Y)
def get_X_Y_meta(self, n, window_type, max_seconds_per_word=2,nbins=0, drop_first_next_int_col = True):
meta = []
prefix = JWWindowSelector.get_prefix(n, window_type)
Xfeats = self.get_feats(n, window_type)
meta.append(f'Using feats: {Xfeats}')
if window_type==JWWindowSelector.INTERACTION:
total_words = self.df[f"int{n}_ifirst_enc_words_read"].max()
if total_words is np.nan:
return None, None, meta
elif total_words < 10:
print('Total words < 10!')
queries = self.get_filter_queries(n, window_type, max_seconds_per_word=max_seconds_per_word)
filtered_df, filtered_df_meta = feat_util.filter_df(self.df[Xfeats+JWWindowSelector.ycols], query_list=queries, verbose=True, fillna=None)
meta.extend(filtered_df_meta)
X = filtered_df[Xfeats].fillna(0).copy()
meta.append(f'Filled X with 0')
Y = filtered_df[JWWindowSelector.ycols].copy()
drop_cols = []
if window_type in [JWWindowSelector.INTERACTION, JWWindowSelector.LEVEL]:
drop_cols = [
f"{prefix}first_enc_boxes_read",
f"{prefix}first_enc_words_read",
]
if window_type==JWWindowSelector.INTERACTION:
drop_cols.extend([
f"{prefix}time_to",
f"{prefix}total_duration"
])
if window_type==JWWindowSelector.OBJECTIVE:
drop_cols.append(f"{prefix}next_int_nan")
# if window_type==JWWindowSelector.QUIZ:
# drop_cols.append(f"{prefix}answers")
X = X.drop(columns=drop_cols)
meta.append(f"Dropped drop_cols: {drop_cols}")
constant_cols = X.columns[X.nunique()==1]
X = X.drop(columns=constant_cols)
meta.append(f'Dropped constant_cols: {constant_cols}')
if not len(X):
return None, None, meta
if window_type == JWWindowSelector.OBJECTIVE and drop_first_next_int_col:
next_int_cols = [c for c in X.columns if 'next_int' in c]
if next_int_cols:
X = X.drop(columns=next_int_cols[0])
meta.append(f'Dropped onehot column {next_int_cols[0]} from {next_int_cols}')
## does not bin by default
if nbins:
est = KBinsDiscretizer(n_bins=nbins, encode='onehot-dense', strategy='quantile')
bin_feats = [f'{prefix}first_enc_avg_tbps',
f'{prefix}first_enc_avg_wps',
# f'{prefix}first_enc_duration',
f'{prefix}first_enc_var_tbps',
f'{prefix}first_enc_var_wps']
bin_feats = [c for c in bin_feats if c in X.columns]
if bin_feats:
Xt = est.fit_transform(X[bin_feats])
new_feat_names = [f'{feat}>{x:.2f}' for bins,feat in zip(est.bin_edges_,bin_feats) for x in list(bins)[:-1]]
Xt_df = pd.DataFrame(Xt, index=X.index, columns=new_feat_names)
X = X.join(Xt_df)
X = X.drop(columns=bin_feats)
meta.append(f'Quantized n_bins={nbins} feats {bin_feats} to {new_feat_names}')
return (X, Y, meta)
def get_X_Y_meta_range(self, ns, window_type, max_seconds_per_word=2,nbins=0, drop_first_next_int_col = True, verbose=True):
X, Y, meta = None, None, []
for n in ns:
tX, tY, tmeta = self.get_X_Y_meta(n, window_type, max_seconds_per_word=max_seconds_per_word, nbins=nbins, drop_first_next_int_col=drop_first_next_int_col)
X, Y, meta = JWWindowSelector.join_X_Y_meta(X, Y, meta, tX, tY, tmeta, copy=False)
            if verbose:
                print('Join Size:', X.shape)
X, Y = X.copy(), Y.copy()
return X, Y, meta
@staticmethod
def join_X_Y_meta(X1, Y1, meta1, X2, Y2, meta2, copy=True):
meta = meta1+meta2
if X1 is None:
X = X2
Y = Y2
elif X2 is None:
X = X1
Y = Y1
else:
X = X1.join(X2, how='inner')
Y = Y1.loc[X.index, :]
meta = meta1+['--Inner Join--']+meta2+[f'Resultant Join Shape: {X.shape}']
if copy and X is not None:
X, Y = X.copy(), Y.copy()
return X, Y, meta
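# Minimal JWWindowSelector sketch (`df` is a hypothetical jowilder feature
# frame loaded elsewhere):
#   sel = JWWindowSelector(df=df)
#   X, Y, meta = sel.get_X_Y_meta(3, JWWindowSelector.LEVEL)
#   X2, Y2, meta2 = sel.get_X_Y_meta_range(range(2, 5), JWWindowSelector.LEVEL)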
|
{"hexsha": "a41e0939fa40849de09879bb18a2ace11e3c95ba", "size": 37272, "ext": "py", "lang": "Python", "max_stars_repo_path": "jowilder_utils.py", "max_stars_repo_name": "fielddaylab/OGDUtils", "max_stars_repo_head_hexsha": "7f8252cafb0783f706db5adebf167d2edf069d00", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "jowilder_utils.py", "max_issues_repo_name": "fielddaylab/OGDUtils", "max_issues_repo_head_hexsha": "7f8252cafb0783f706db5adebf167d2edf069d00", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "jowilder_utils.py", "max_forks_repo_name": "fielddaylab/OGDUtils", "max_forks_repo_head_hexsha": "7f8252cafb0783f706db5adebf167d2edf069d00", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.7573964497, "max_line_length": 166, "alphanum_fraction": 0.5210345568, "include": true, "reason": "import numpy,from scipy", "num_tokens": 10745}
|
"""Read pkl files created by process_*_catalog.py and *_ML.py and make plots of chromatic biases
as functions of redshift, both before and after photometric corrections are estimated. Run
`python plot_bias.py --help` for a list of command line options.
"""
import cPickle
from argparse import ArgumentParser
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.cm
cmap = matplotlib.cm.get_cmap('jet')
fontsize = 8
# Some annotation arrow properties
arrowdict = dict(facecolor='black', shrink=0.1, width=1.5, headwidth=4, frac=0.2)
# hardcode some requirements, order is [DES, LSST]
r2sqr_gal = np.r_[0.4, 0.3]**2
r2sqr_PSF = np.r_[0.8, 0.7]**2
mean_m_req = np.r_[0.008, 0.003]
# C variance sufficient
var_c_sufficient = 1.8e-7 * np.r_[(5000/18000.)**(-0.5) * (12./30)**(-0.5) * (0.68/0.82)**(-0.6),
1.0]
mean_DeltaRbarSqr_req = mean_m_req / 2.0
var_DeltaRbarSqr_sufficient = var_c_sufficient / 1.0**2
mean_DeltaV_req = r2sqr_gal * mean_m_req
var_DeltaV_sufficient = var_c_sufficient * 4 * r2sqr_gal**2 * 0.5
# last factor of 0.5 needed to account for possible rotation of DeltaV from being purely real.
# see appendix of Meyers+Burchat15.
mean_dS_m02_req = mean_m_req * r2sqr_gal / r2sqr_PSF
epsf = 0.05
var_dS_m02_sufficient = var_c_sufficient / (epsf / 2.0 * r2sqr_PSF / r2sqr_gal)**2 * 0.5
# last factor of 0.5 needed to account for possible rotation of DeltaV from being purely real.
# see appendix of Meyers+Burchat15.
m_Euclid = 0.001
r2gal_Euclid = 0.23**2 # where did I get this from?
r2psf_Euclid = 0.2**2
mean_dS_p06_req = m_Euclid * r2gal_Euclid / r2psf_Euclid
mean_dS_p10_req = m_Euclid * r2gal_Euclid / r2psf_Euclid
print
print
print "DES reqs"
print "<m>: {}".format(mean_m_req[0])
print "<dRbarSqr>: {}".format(mean_DeltaRbarSqr_req[0])
print "<dV>: {}".format(mean_DeltaV_req[0])
print "<dS>: {}".format(mean_dS_m02_req[0])
print "var(c): {}".format(var_c_sufficient[0])
print "var(dRbarSqr): {}".format(var_DeltaRbarSqr_sufficient[0])
print "var(dV): {}".format(var_DeltaV_sufficient[0])
print "var(dS): {}".format(var_dS_m02_sufficient[0])
print
print "LSST reqs"
print "<m>: {}".format(mean_m_req[1])
print "<dRbarSqr>: {}".format(mean_DeltaRbarSqr_req[1])
print "<dV>: {}".format(mean_DeltaV_req[1])
print "<dS>: {}".format(mean_dS_m02_req[1])
print "var(c): {}".format(var_c_sufficient[1])
print "var(dRbarSqr): {}".format(var_DeltaRbarSqr_sufficient[1])
print "var(dV): {}".format(var_DeltaV_sufficient[1])
print "var(dS): {}".format(var_dS_m02_sufficient[1])
print
def hist_with_peak(x, bins=None, range=None, ax=None, orientation='vertical',
histtype=None, log=False, **kwargs):
"""Plot a histogram normalized to unit peak.
"""
if ax is None:
ax = plt.gca()
if log:
x = np.log(x)
range = [np.log(r) for r in range]
hist, bin_edges = np.histogram(x, bins=bins, range=range)
    if log:
        bin_edges = [np.exp(b) for b in bin_edges]
hist_n = hist * 1.0/hist.max()
x = np.ravel(zip(bin_edges[:-1], bin_edges[1:]))
y = np.ravel(zip(hist_n, hist_n))
x = np.concatenate([[x[0]],x])
y = np.concatenate([[0],y])
if histtype == 'step':
if orientation == 'vertical':
ax.plot(x, y, **kwargs)
elif orientation == 'horizontal':
ax.plot(y, x, **kwargs)
else:
raise ValueError
elif histtype == 'stepfilled':
if orientation == 'vertical':
ax.fill(x, y, **kwargs)
elif orientation == 'horizontal':
ax.fill(y, x, **kwargs)
else:
raise ValueError
else:
raise ValueError
def set_range(x):
""" Return a plotting range 30% larger than the interval containing 99% of the data.
"""
xs = sorted(x)
n = len(xs)
low = xs[int(0.005*n)]
high = xs[int(0.995*n)]
span = high-low
return [low - 0.3*span, high + 0.3*span]
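# Example: for x ~ N(0, 1) the central 99% interval is roughly [-2.6, 2.6],
# so set_range returns roughly [-4.1, 4.1] after the 30% padding on each side.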
def panel_getaxes(fig, grid):
inner_grid = gridspec.GridSpecFromSubplotSpec(100, 100, subplot_spec=grid,
wspace=0.0, hspace=0.0)
var_ax = plt.Subplot(fig, inner_grid[:19, 11:])
fig.add_subplot(var_ax)
scatter_ax = plt.Subplot(fig, inner_grid[19:, 11:])
fig.add_subplot(scatter_ax)
hist_ax = plt.Subplot(fig, inner_grid[19:, :11])
fig.add_subplot(hist_ax)
cbar_ax = plt.Subplot(fig, inner_grid[55:95, 80:83])
fig.add_subplot(cbar_ax)
return var_ax, scatter_ax, hist_ax, cbar_ax
def setup_scatter_panel(ax, xlim, ylim, log=False):
# Setup scatter plot limits and text properties
ax.set_xlim(xlim)
ax.set_ylim(ylim)
if log:
ax.set_yscale('log')
ax.set_xlabel("redshift", fontsize=fontsize)
# clean up tick labels
ax.yaxis.set_ticklabels([])
ax.fill_between(xlim, [ylim[0]]*2, [ylim[1]]*2, color='#BBBBBB', zorder=1)
for label in ax.get_xticklabels():
label.set_fontsize(fontsize)
def setup_variance_panel(ax, xlim, ylim):
# Setup variance plot limits and text properties
# variance axis
ax.set_xlim(xlim)
ax.xaxis.set_ticklabels([])
ax.set_ylabel("$\sqrt{\mathrm{Var}}$", fontsize=fontsize)
ax.set_ylim(ylim)
ax.fill_between(xlim, [ylim[0]]*2, [ylim[1]]*2, color='#BBBBBB', zorder=1)
for label in ax.get_yticklabels():
label.set_fontsize(fontsize)
ax.locator_params(axis='y', nbins=4, prune='lower')
def setup_histogram_panel(ax, xlim, ylim, ylabel, log=False):
ax.set_ylim(ylim)
hist_xlim = [0.0, 1.0]
ax.set_xlim(hist_xlim)
if log:
ax.set_yscale('log')
ax.xaxis.set_ticklabels([])
ax.set_ylabel(ylabel, fontsize=fontsize)
for label in ax.get_yticklabels():
label.set_fontsize(fontsize)
def fill_requirements(xlim, yreq, ax):
ax.fill_between(xlim, [-yreq[0]]*2, [yreq[0]]*2, color='#999999', zorder=2)
ax.fill_between(xlim, [-yreq[1]]*2, [yreq[1]]*2, color='#777777', zorder=2)
ax.axhline(-yreq[0], c='k', alpha=0.1, zorder=10, lw=0.5)
ax.axhline(-yreq[1], c='k', alpha=0.3, zorder=10, lw=0.5)
ax.axhline(yreq[0], c='k', alpha=0.1, zorder=10, lw=0.5)
ax.axhline(yreq[1], c='k', alpha=0.3, zorder=10, lw=0.5)
def RbarSqr_process(stars, gals, band, log=False, corrected=False):
# get *uncorrected* bias measurements in order to set ylimits, even if
# corrected measurements are requested for plot.
stardata = stars['Rbar'][band] * 180/np.pi * 3600
galdata = gals['Rbar'][band] * 180/np.pi * 3600
norm = np.mean(stardata) # normalize by the mean stellar data
stardata -= norm
galdata -= norm
stardata **= 2
galdata **= 2
ylim = set_range(np.concatenate([stardata, galdata]))
# make sure to plot at least the entire LSST region
if log:
if ylim[1] < mean_DeltaRbarSqr_req[1]*10:
ylim[1] = mean_DeltaRbarSqr_req[1]*10
ylim[0] = 1.e-7
else:
if ylim[1] < mean_DeltaRbarSqr_req[1]*1.2:
ylim[1] = mean_DeltaRbarSqr_req[1]*1.2
ylim[0] = 0.0
# same concept for variance axis: set range based on uncorrected data
nbins = int(len(galdata)**0.4)
xbins = np.linspace(0.0, np.max(gals.redshift), nbins+1)
zs = 0.5*(xbins[1:] + xbins[:-1])
sqrt_vars = [np.sqrt(np.var(galdata[(gals.redshift > xbins[i])
& (gals.redshift < xbins[i+1])]))
for i in range(nbins)]
var_ylim = [0, np.max(sqrt_vars)*1.2]
if var_ylim[1] < np.sqrt(var_DeltaRbarSqr_sufficient[1])*1.2:
var_ylim[1] = np.sqrt(var_DeltaRbarSqr_sufficient[1])*1.2
# then replace with corrected measurements if requested
if corrected:
stardata = (stars['Rbar'][band] - stars['photo_Rbar'][band]) * 180/np.pi * 3600
        ungaldata = galdata  # keep a reference to the uncorrected values (unused below)
galdata = (gals['Rbar'][band] - gals['photo_Rbar'][band]) * 180/np.pi * 3600
        # first-order propagation of the correction: d((DR)^2) = 2 * DR * d(DR)
stardata = np.abs(2 * (stars['Rbar'][band] * 180/np.pi * 3600 - norm) * stardata)
galdata = np.abs(2 * (gals['Rbar'][band] * 180/np.pi * 3600 - norm) * galdata)
# running variance
sqrt_vars = [np.sqrt(np.var(galdata[(gals.redshift > xbins[i])
& (gals.redshift < xbins[i+1])]))
for i in range(nbins)]
# running mean
means = [np.mean(galdata[(gals.redshift > xbins[i])
& (gals.redshift < xbins[i+1])])
for i in range(nbins)]
return stardata, galdata, ylim, var_ylim, zs, means, sqrt_vars
def RbarSqr_panel(gals, stars, band, cbands, fig, grid, log=False, corrected=False, **kwargs):
xlim = (0.0, 2.5) # redshift range
# Process the data
process = RbarSqr_process(stars, gals, band, log=log, corrected=corrected)
stardata, galdata, ylim, var_ylim, zs, means, sqrt_vars = process
if corrected:
ylabel = r"$|\delta((\Delta \overline{\mathrm{R}})^2)|$ (arcsec$^2$)"
else:
ylabel = r"$\left(\Delta \overline{\mathrm{R}}\right)^2$ (arcsec$^2$)"
# Get axes
var_ax, scatter_ax, hist_ax, cbar_ax = panel_getaxes(fig, grid)
# get colors and color range to store for later
c=gals['magCalc'][cbands[0]] - gals['magCalc'][cbands[1]]
clim = set_range(c)
clim[1] += 0.1 * (clim[1]-clim[0])
# Scatter plot
setup_scatter_panel(scatter_ax, xlim, ylim, log=log)
    # np.random.shuffle works in place and returns None; use permutation for indices
    rand_order = np.random.permutation(len(gals.redshift))
im = scatter_ax.scatter(gals.redshift[rand_order], galdata[rand_order], c=c[rand_order],
vmin=clim[0], vmax=clim[1], zorder=4, cmap=cmap, **kwargs)
im.set_rasterized(True)
scatter_ax.plot(zs, means, color='red', linestyle='-', linewidth=2, zorder=10)
fill_requirements(xlim, mean_DeltaRbarSqr_req, scatter_ax)
# annotate scatter plot
for i, text in enumerate(["DES", "LSST"]):
if log:
xytext = (0.18, mean_DeltaRbarSqr_req[i]/2.1)
else:
xytext = (0.18, mean_DeltaRbarSqr_req[i]-0.0001)
scatter_ax.annotate(text+" requirement",
xy=(0.1, mean_DeltaRbarSqr_req[i]),
xytext=xytext,
arrowprops=arrowdict,
zorder=10,
fontsize=fontsize)
scatter_ax.text(0.83, 0.93, band.replace('LSST_','')+' band', transform=scatter_ax.transAxes,
fontsize=fontsize)
# Variance plot
setup_variance_panel(var_ax, xlim, var_ylim)
fill_requirements(xlim, np.sqrt(var_DeltaRbarSqr_sufficient), var_ax)
var_ax.plot(zs, sqrt_vars, color='blue', linewidth=2)
# Histogram plot
hist_xlim = [0.0, 1.0]
setup_histogram_panel(hist_ax, hist_xlim, ylim, ylabel, log=log)
    # plot the histograms
hist_with_peak(stardata, bins=200, ax=hist_ax, range=ylim, orientation='horizontal',
histtype='stepfilled', log=log, color='blue')
hist_with_peak(galdata, bins=200, ax=hist_ax, range=ylim, orientation='horizontal',
histtype='step', log=log, color='red')
# annotate histogram plot
hist_ax.text(0.1, 0.93,
"stars", fontsize=fontsize, color='blue', transform=hist_ax.transAxes)
hist_ax.text(0.1, 0.88,
"gals", fontsize=fontsize, color='red', transform=hist_ax.transAxes)
# colorbar
cbar = plt.colorbar(im, cax=cbar_ax)
for label in cbar_ax.get_yticklabels():
label.set_fontsize(fontsize)
cbar_ax.set_ylabel("{} - {}".format(cbands[0].replace('LSST_',''),
cbands[1].replace('LSST_','')),
fontsize=fontsize)
def V_process(stars, gals, band, corrected=False):
# get *uncorrected* bias measurements in order to set ylimits, even if
# corrected measurements are requested for plot.
stardata = stars['V'][band] * (180/np.pi * 3600)**2
galdata = gals['V'][band] * (180/np.pi * 3600)**2
norm = np.mean(stardata)
stardata -= norm
galdata -= norm
ylim = set_range(np.concatenate([stardata, galdata]))
# make sure to plot at least the entire LSST region
if ylim[0] > -mean_DeltaV_req[1]*1.2:
ylim[0] = -mean_DeltaV_req[1]*1.2
if ylim[1] < mean_DeltaV_req[1]*1.2:
ylim[1] = mean_DeltaV_req[1]*1.2
# same concept for variance axis: set range based on uncorrected data
nbins = int(len(galdata)**0.4)
xbins = np.linspace(0.0, np.max(gals.redshift), nbins+1)
zs = 0.5*(xbins[1:] + xbins[:-1])
sqrt_vars = [np.sqrt(np.var(galdata[(gals.redshift > xbins[i])
& (gals.redshift < xbins[i+1])]))
for i in range(nbins)]
var_ylim = [0, np.max(sqrt_vars)*1.2]
if var_ylim[1] < np.sqrt(var_DeltaV_sufficient[1])*1.2:
var_ylim[1] = np.sqrt(var_DeltaV_sufficient[1])*1.2
# then replace with corrected measurements if requested
if corrected:
stardata = (stars['V'][band] - stars['photo_V'][band]) * (180/np.pi * 3600)**2
        ungaldata = galdata  # keep a reference to the uncorrected values (unused below)
galdata = (gals['V'][band] - gals['photo_V'][band]) * (180/np.pi * 3600)**2
# running variance
sqrt_vars = [np.sqrt(np.var(galdata[(gals.redshift > xbins[i])
& (gals.redshift < xbins[i+1])]))
for i in range(nbins)]
# running mean
means = [np.mean(galdata[(gals.redshift > xbins[i])
& (gals.redshift < xbins[i+1])])
for i in range(nbins)]
return stardata, galdata, ylim, var_ylim, zs, means, sqrt_vars
def V_panel(gals, stars, band, cbands, fig, grid, corrected=False, **kwargs):
xlim = (0.0, 2.5) # redshift range
# Process the data
process = V_process(stars, gals, band, corrected=corrected)
stardata, galdata, ylim, var_ylim, zs, means, sqrt_vars = process
if corrected:
ylabel = r"$\delta(\Delta V)$ (arcsec$^2$)"
else:
ylabel = r"$\Delta V$ (arcsec$^2$)"
# Get axes
var_ax, scatter_ax, hist_ax, cbar_ax = panel_getaxes(fig, grid)
# get colors and color range to store for later
c=gals['magCalc'][cbands[0]] - gals['magCalc'][cbands[1]]
clim = set_range(c)
clim[1] += 0.1 * (clim[1]-clim[0])
# Scatter plot
setup_scatter_panel(scatter_ax, xlim, ylim)
    # np.random.shuffle works in place and returns None; use permutation for indices
    rand_order = np.random.permutation(len(gals.redshift))
im = scatter_ax.scatter(gals.redshift[rand_order], galdata[rand_order], c=c[rand_order],
vmin=clim[0], vmax=clim[1], zorder=4, cmap=cmap, **kwargs)
im.set_rasterized(True)
scatter_ax.plot(zs, means, color='red', linestyle='-', linewidth=2, zorder=10)
fill_requirements(xlim, mean_DeltaV_req, scatter_ax)
# annotate scatter plot
if band == 'LSST_i':
scatter_ax.annotate("LSST requirement",
xy=(0.1, mean_DeltaV_req[1]),
xytext=(0.18, mean_DeltaV_req[1]-5.e-5),
arrowprops=arrowdict,
zorder=10,
fontsize=fontsize)
else:
scatter_ax.annotate("LSST requirement",
xy=(0.1, mean_DeltaV_req[1]),
xytext=(0.18, mean_DeltaV_req[1]+2.e-4),
arrowprops=arrowdict,
zorder=10,
fontsize=fontsize)
scatter_ax.annotate("DES requirement",
xy=(0.1, mean_DeltaV_req[0]),
xytext=(0.18, mean_DeltaV_req[0]-2.e-4),
arrowprops=arrowdict,
zorder=10,
fontsize=fontsize)
scatter_ax.text(0.83, 0.93, band.replace('LSST_','')+' band', transform=scatter_ax.transAxes,
fontsize=fontsize)
# Variance plot
setup_variance_panel(var_ax, xlim, var_ylim)
fill_requirements(xlim, np.sqrt(var_DeltaV_sufficient), var_ax)
var_ax.plot(zs, sqrt_vars, color='blue', linewidth=2)
# Histogram plot
hist_xlim = [0.0, 1.0]
setup_histogram_panel(hist_ax, hist_xlim, ylim, ylabel)
    # plot the histograms
hist_with_peak(stardata, bins=200, ax=hist_ax, range=ylim, orientation='horizontal',
histtype='stepfilled', color='blue')
hist_with_peak(galdata, bins=200, ax=hist_ax, range=ylim, orientation='horizontal',
histtype='step', color='red')
# annotate histogram plot
hist_ax.text(0.1, 0.93,
"stars", fontsize=fontsize, color='blue', transform=hist_ax.transAxes)
hist_ax.text(0.1, 0.88,
"gals", fontsize=fontsize, color='red', transform=hist_ax.transAxes)
# colorbar
cbar = plt.colorbar(im, cax=cbar_ax)
for label in cbar_ax.get_yticklabels():
label.set_fontsize(fontsize)
cbar_ax.set_ylabel("{} - {}".format(cbands[0].replace('LSST_',''),
cbands[1].replace('LSST_','')),
fontsize=fontsize)
def S_m02_process(stars, gals, band, corrected=False):
# get *uncorrected* bias measurements in order to set ylimits, even if
# corrected measurements are requested for plot.
stardata = stars['S_m02'][band]
galdata = gals['S_m02'][band]
starmean = np.mean(stardata)
stardata = (stardata - starmean)/starmean
galdata = (galdata - starmean)/starmean
ylim = set_range(np.concatenate([stardata, galdata]))
# make sure to plot at least the entire LSST region
if ylim[0] > -mean_dS_m02_req[1]*1.2:
ylim[0] = -mean_dS_m02_req[1]*1.2
if ylim[1] < mean_dS_m02_req[1]*1.2:
ylim[1] = mean_dS_m02_req[1]*1.2
# same concept for variance axis: set range based on uncorrected data
nbins = int(len(galdata)**0.4)
xbins = np.linspace(0.0, np.max(gals.redshift), nbins+1)
zs = 0.5*(xbins[1:] + xbins[:-1])
sqrt_vars = [np.sqrt(np.var(galdata[(gals.redshift > xbins[i])
& (gals.redshift < xbins[i+1])]))
for i in range(nbins)]
var_ylim = [0, np.max(sqrt_vars)*1.2]
if var_ylim[1] < np.sqrt(var_dS_m02_sufficient[1])*1.2:
var_ylim[1] = np.sqrt(var_dS_m02_sufficient[1])*1.2
# then replace with corrected measurements if requested
if corrected:
stardata = (stars['S_m02'][band] - stars['photo_S_m02'][band]) / stars['photo_S_m02'][band]
        ungaldata = galdata  # keep a reference to the uncorrected values (unused below)
galdata = (gals['S_m02'][band] - gals['photo_S_m02'][band]) / gals['photo_S_m02'][band]
ylabel = "$\delta(\Delta r^2_\mathrm{PSF}/r^2_\mathrm{PSF})$"
# running variance
sqrt_vars = [np.sqrt(np.var(galdata[(gals.redshift > xbins[i])
& (gals.redshift < xbins[i+1])]))
for i in range(nbins)]
# running mean
means = [np.mean(galdata[(gals.redshift > xbins[i])
& (gals.redshift < xbins[i+1])])
for i in range(nbins)]
return stardata, galdata, ylim, var_ylim, zs, means, sqrt_vars
def S_m02_panel(gals, stars, band, cbands, fig, grid, corrected=False, **kwargs):
xlim = (0.0, 2.5) # redshift range
# Process the data
process = S_m02_process(stars, gals, band, corrected=corrected)
stardata, galdata, ylim, var_ylim, zs, means, sqrt_vars = process
    if corrected:
        ylabel = r"$\delta(\Delta r^2_\mathrm{PSF}/r^2_\mathrm{PSF})$"
    else:
        ylabel = r"$\Delta r^2_\mathrm{PSF}/r^2_\mathrm{PSF}$"
# Get axes
var_ax, scatter_ax, hist_ax, cbar_ax = panel_getaxes(fig, grid)
# get colors and color range to store for later
c=gals['magCalc'][cbands[0]] - gals['magCalc'][cbands[1]]
clim = set_range(c)
clim[1] += 0.1 * (clim[1]-clim[0])
# Scatter plot
setup_scatter_panel(scatter_ax, xlim, ylim)
    # np.random.shuffle works in place and returns None; use permutation for indices
    rand_order = np.random.permutation(len(gals.redshift))
im = scatter_ax.scatter(gals.redshift[rand_order], galdata[rand_order], c=c[rand_order],
vmin=clim[0], vmax=clim[1], zorder=4, cmap=cmap, **kwargs)
im.set_rasterized(True)
scatter_ax.plot(zs, means, color='red', linestyle='-', linewidth=2, zorder=10)
fill_requirements(xlim, mean_dS_m02_req, scatter_ax)
# annotate scatter plot
for i, text in enumerate(["DES", "LSST"]):
scatter_ax.annotate(text+" requirement",
xy=(0.1, mean_dS_m02_req[i]),
xytext=(0.18, mean_dS_m02_req[i]+2.e-3),
arrowprops=arrowdict,
zorder=10,
fontsize=fontsize)
scatter_ax.text(0.83, 0.93, band.replace('LSST_','')+' band', transform=scatter_ax.transAxes,
fontsize=fontsize)
# Variance plot
setup_variance_panel(var_ax, xlim, var_ylim)
fill_requirements(xlim, np.sqrt(var_dS_m02_sufficient), var_ax)
var_ax.plot(zs, sqrt_vars, color='blue', linewidth=2)
# Histogram plot
hist_xlim = [0.0, 1.0]
setup_histogram_panel(hist_ax, hist_xlim, ylim, ylabel)
    # plot the histograms
hist_with_peak(stardata, bins=200, ax=hist_ax, range=ylim, orientation='horizontal',
histtype='stepfilled', color='blue')
hist_with_peak(galdata, bins=200, ax=hist_ax, range=ylim, orientation='horizontal',
histtype='step', color='red')
# annotate histogram plot
hist_ax.text(0.1, 0.93,
"stars", fontsize=fontsize, color='blue', transform=hist_ax.transAxes)
hist_ax.text(0.1, 0.88,
"gals", fontsize=fontsize, color='red', transform=hist_ax.transAxes)
# colorbar
cbar = plt.colorbar(im, cax=cbar_ax)
for label in cbar_ax.get_yticklabels():
label.set_fontsize(fontsize)
cbar_ax.set_ylabel("{} - {}".format(cbands[0].replace('LSST_',''),
cbands[1].replace('LSST_','')),
fontsize=fontsize)
def plot_bias_panel(args, **kwargs):
    gals = cPickle.load(open(args.galfile, 'rb'))
    stars = cPickle.load(open(args.starfile, 'rb'))
fig = plt.figure(figsize=(9, 10))
outer_grid = gridspec.GridSpec(len(args.bias), len(args.band),
left=0.1, right=0.95,
top = 0.93, bottom=0.07,
wspace=0.3, hspace=0.2)
for iband, band in enumerate(args.band):
for ibias, bias in enumerate(args.bias):
if bias == 'LnRbarSqr':
RbarSqr_panel(gals, stars, band, args.color, fig, outer_grid[ibias, iband],
log=True, corrected=args.corrected, **kwargs)
if bias == 'RbarSqr':
RbarSqr_panel(gals, stars, band, args.color, fig, outer_grid[ibias, iband],
log=False, corrected=args.corrected, **kwargs)
if bias == 'V':
V_panel(gals, stars, band, args.color, fig, outer_grid[ibias, iband],
corrected=args.corrected, **kwargs)
if bias == 'S_m02':
S_m02_panel(gals, stars, band, args.color, fig, outer_grid[ibias, iband],
corrected=args.corrected, **kwargs)
plt.savefig(args.outfile, dpi=400)
if __name__ == '__main__':
    s = 3  # scatter marker size, passed through to Axes.scatter
parser = ArgumentParser()
parser.add_argument('--galfile', default = "output/corrected_galaxy_data.pkl",
help="input galaxy file. Default 'output/corrected_galaxy_data.pkl'")
parser.add_argument('--starfile', default = "output/corrected_star_data.pkl",
help="input star file. Default 'output/corrected_star_data.pkl'")
parser.add_argument('--corrected', action='store_true',
help="plot learning residuals instead of G5v residuals.")
parser.add_argument('--bias', default = ['LnRbarSqr', 'V', 'S_m02'], nargs='*',
help="which biases (and their order) to include")
parser.add_argument('--band', default = ['LSST_r', 'LSST_i'], nargs='*',
help="which band (and their order) to include")
parser.add_argument('--color', default=['LSST_r', 'LSST_i'], nargs=2,
help="color to use for symbol color (Default: ['LSST_r', 'LSST_i'])")
parser.add_argument('--outfile', default="output/bias_panel.png",
help="output filename (Default: 'output/bias_panel.png')")
args = parser.parse_args()
plot_bias_panel(args, s=s)
|
{"hexsha": "d490ea85277b4207f2cd60d40b9482e8b2f3cd2c", "size": 24733, "ext": "py", "lang": "Python", "max_stars_repo_path": "bin/analytic/catalog/plot_bias_panel.py", "max_stars_repo_name": "DarkEnergyScienceCollaboration/chroma", "max_stars_repo_head_hexsha": "64fc123a065334b307654f29b3bea52885b46ec8", "max_stars_repo_licenses": ["BSD-2-Clause", "BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2015-10-22T14:57:27.000Z", "max_stars_repo_stars_event_max_datetime": "2016-08-25T08:16:02.000Z", "max_issues_repo_path": "bin/analytic/catalog/plot_bias_panel.py", "max_issues_repo_name": "DarkEnergyScienceCollaboration/chroma", "max_issues_repo_head_hexsha": "64fc123a065334b307654f29b3bea52885b46ec8", "max_issues_repo_licenses": ["BSD-2-Clause", "BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-08-28T14:42:46.000Z", "max_issues_repo_issues_event_max_datetime": "2017-08-28T16:08:37.000Z", "max_forks_repo_path": "bin/analytic/catalog/plot_bias_panel.py", "max_forks_repo_name": "DarkEnergyScienceCollaboration/chroma", "max_forks_repo_head_hexsha": "64fc123a065334b307654f29b3bea52885b46ec8", "max_forks_repo_licenses": ["BSD-2-Clause", "BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.4965635739, "max_line_length": 99, "alphanum_fraction": 0.6036469494, "include": true, "reason": "import numpy", "num_tokens": 7258}
|
program test_ewald2
! This test compares lattice energy 'eew' calculated from the Ewald summation
! against the energy calculated using the Madelung constant for various lattice
! constants L. The agreement is essentially to machine precision.
! Similar to test_ewald, but here the diagonal Na atom is moved by 3/8
! towards the Cl atom.
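! Sketch of the check performed below: the Ewald energy per ion pair,
! eew/(natom/ntypat), is compared against the Madelung expression
! E_madelung = -2*alpha/L (atomic units), with alpha given below for the
! displaced-atom geometry.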
use types, only: dp
use constants, only: ang2bohr, kJmol2Ha
use ewald_sums, only: ewald, ewald2, fred2fcart
use utils, only: assert, init_random
implicit none
integer :: natom, ntypat
! Various NaCl lattice constants in A
real(dp), parameter :: Llist(5) = [5.6402_dp, 5.5_dp, 4.5_dp, 6.5_dp, 10._dp]
! The components of the correct force and stress tensor (multiplied by L)
real(dp), parameter :: fcorrect0 = 10.180280846982683_dp
real(dp), parameter :: stress1 = -6.6903565769534099_dp
real(dp), parameter :: stress2 = -5.7797035916557675_dp
real(dp) :: ucvol
integer, allocatable :: typat(:)
real(dp) :: gmet(3, 3), rmet(3, 3), rprim(3, 3), gprim(3, 3), stress(6)
real(dp) :: eew
real(dp), allocatable :: xred(:, :), zion(:), grewtn(:, :), fcart(:, :)
real(dp) :: L, alpha, E_ewald, E_madelung
integer :: i, j
! Madelung constant for NaCl, where the diagonal Na atom is moved by 3/8
! towards the Cl atom
alpha = 2.5088837163575413_dp
! Conventional cell:
natom = 8
ntypat = 2
allocate(xred(3, natom), zion(ntypat), grewtn(3, natom), typat(natom), &
fcart(3, natom))
! Cl^-
xred(:, 1) = [0, 0, 0]
xred(:, 2) = [1, 1, 0] / 2._dp
xred(:, 3) = [1, 0, 1] / 2._dp
xred(:, 4) = [0, 1, 1] / 2._dp
! Na^+
xred(:, 5) = [1, 1, 1] / 2._dp
xred(:, 6) = [1, 0, 0] / 2._dp
xred(:, 7) = [0, 1, 0] / 2._dp
xred(:, 8) = [0, 0, 1] / 2._dp
xred(:, 5:8) = xred(:, 5:8) - spread([3, 3, 3] / 8._dp, 2, 4)
xred(:, 6:8) = xred(:, 6:8) - floor(xred(:, 6:8))
typat = [1, 1, 1, 1, 2, 2, 2, 2]
zion = [-1, +1]
do i = 1, size(Llist)
L = Llist(i) * ang2bohr
print *, L
rmet = 0
rmet(1, 1) = L**2
rmet(2, 2) = L**2
rmet(3, 3) = L**2
! gmet = inv(rmet)
gmet = 0
gmet(1, 1) = 1/L**2
gmet(2, 2) = 1/L**2
gmet(3, 3) = 1/L**2
! ucvol = sqrt(det(rmet))
ucvol = L**3
! Reciprocal primitive vectors (without 2*pi) in cartesian coordinates.
! gmet = matmul(transpose(gprim), gprim)
gprim = 0
gprim(1, 1) = 1 / L
gprim(2, 2) = 1 / L
gprim(3, 3) = 1 / L
! Real space primitive vectors
rprim = 0
rprim(1, 1) = L
rprim(2, 2) = L
rprim(3, 3) = L
call ewald(eew,gmet,grewtn,natom,ntypat,rmet,typat,ucvol,xred,zion)
E_ewald = eew / (natom/ntypat)
E_madelung = -2*alpha/L
call ewald2(gmet,natom,ntypat,rmet,rprim,gprim,stress,typat,ucvol, &
xred,zion)
print *, "a =", L/ang2bohr*100, "pm"
print *, "Madelung:", E_madelung / kJmol2Ha, "kJ/mol"
print *, "Ewald: ", E_ewald / kJmol2Ha, "kJ/mol"
print *, "error: ", abs(E_ewald - E_madelung), "a.u."
call assert(abs(E_ewald - E_madelung) < 1e-14_dp)
stress = -stress * ucvol
call assert(all(abs(stress - [stress1, stress1, stress1, &
stress2, stress2, stress2]/L) < 1e-10_dp))
call fred2fcart(fcart, grewtn, gprim)
do j = 1, 4
call assert(all(abs(fcart(:, j) - (-fcorrect0/L**2)) < 1e-10_dp))
end do
do j = 5, 8
call assert(all(abs(fcart(:, j) - (+fcorrect0/L**2)) < 1e-10_dp))
end do
end do
deallocate(xred, zion, grewtn, typat, fcart)
print *, "--------"
! Primitive cell (FCC lattice)
natom = 2
ntypat = 2
allocate(xred(3, natom), zion(ntypat), grewtn(3, natom), typat(natom), &
fcart(3, natom))
! Cl^-
xred(:, 1) = [0, 0, 0]
! Na^+
xred(:, 2) = [1, 1, 1] / 2._dp
xred(:, 2) = xred(:, 2) - [3, 3, 3] / 8._dp
typat = [1, 2]
zion = [-1._dp, 1._dp]
do i = 1, size(Llist)
L = Llist(i) * ang2bohr
rmet(1, :) = [2, 1, 1]
rmet(2, :) = [1, 2, 1]
rmet(3, :) = [1, 1, 2]
rmet = rmet * L**2 / 4
! gmet = inv(rmet)
gmet(1, :) = [ 3, -1, -1]
gmet(2, :) = [-1, 3, -1]
gmet(3, :) = [-1, -1, 3]
gmet = gmet / L**2
! ucvol = sqrt(det(rmet))
ucvol = L**3 / 4
! Reciprocal primitive vectors (without 2*pi) in cartesian coordinates.
! gmet = matmul(transpose(gprim), gprim)
gprim(:, 1) = [ 1, 1, -1] / L
gprim(:, 2) = [-1, 1, 1] / L
gprim(:, 3) = [ 1, -1, 1] / L
rprim(:, 1) = [1, 0, 1] * L / 2
rprim(:, 2) = [1, 1, 0] * L / 2
rprim(:, 3) = [0, 1, 1] * L / 2
call ewald(eew,gmet,grewtn,natom,ntypat,rmet,typat,ucvol,xred,zion)
E_ewald = eew / (natom/ntypat)
E_madelung = -2*alpha/L
call ewald2(gmet,natom,ntypat,rmet,rprim,gprim,stress,typat,ucvol, &
xred,zion)
print *, "a =", L/ang2bohr*100, "pm"
print *, "Madelung:", E_madelung / kJmol2Ha, "kJ/mol"
print *, "Ewald: ", E_ewald / kJmol2Ha, "kJ/mol"
print *, "error: ", abs(E_ewald - E_madelung), "a.u."
call assert(abs(E_ewald - E_madelung) < 1e-14_dp)
call fred2fcart(fcart, grewtn, gprim)
call assert(all(abs(fcart(:, 1) - (-fcorrect0/L**2)) < 1e-10_dp))
call assert(all(abs(fcart(:, 2) - (+fcorrect0/L**2)) < 1e-10_dp))
stress = -stress * L**3
call assert(all(abs(stress - [stress1, stress1, stress1, &
stress2, stress2, stress2]/L) < 1e-10_dp))
end do
deallocate(xred, zion, grewtn, typat, fcart)
end program
|
{"hexsha": "7e5f456ade68d8c38a7899bf7ed161c24e4a7ac1", "size": 5302, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/tests/test_ewald2.f90", "max_stars_repo_name": "certik/hfsolver", "max_stars_repo_head_hexsha": "b4c50c1979fb7e468b1852b144ba756f5a51788d", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 20, "max_stars_repo_stars_event_min_datetime": "2015-03-24T13:06:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T00:14:02.000Z", "max_issues_repo_path": "src/tests/test_ewald2.f90", "max_issues_repo_name": "certik/hfsolver", "max_issues_repo_head_hexsha": "b4c50c1979fb7e468b1852b144ba756f5a51788d", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2015-03-25T04:59:43.000Z", "max_issues_repo_issues_event_max_datetime": "2017-06-06T23:00:09.000Z", "max_forks_repo_path": "src/tests/test_ewald2.f90", "max_forks_repo_name": "certik/hfsolver", "max_forks_repo_head_hexsha": "b4c50c1979fb7e468b1852b144ba756f5a51788d", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2016-01-20T13:38:22.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-24T15:35:43.000Z", "avg_line_length": 29.7865168539, "max_line_length": 79, "alphanum_fraction": 0.5775179178, "num_tokens": 2276}
|
#!/usr/bin/python
# coding=utf-8
# Copyright 2016-2019 Angelo Ziletti
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
__author__ = "Angelo Ziletti"
__copyright__ = "Copyright 2016, The NOMAD Project"
__maintainer__ = "Angelo Ziletti"
__email__ = "ziletti@fhi-berlin.mpg.de"
__date__ = "13/03/18"
import logging
from ai4materials.descriptors.base_descriptor import Descriptor
from ai4materials.descriptors.base_descriptor import is_descriptor_consistent
import numpy as np
import os
from pymatgen.analysis.diffraction.xrd import XRDCalculator
from pymatgen.io.ase import AseAtomsAdaptor
logger = logging.getLogger('ai4materials')
class Diffraction1D(Descriptor):
def __init__(self, configs, wavelength="CuKa"):
super(Diffraction1D, self).__init__(configs=configs)
self.wavelength = wavelength
def calculate(self, structure, show=False):
c = XRDCalculator(wavelength=self.wavelength)
xrd_pattern = c.get_xrd_pattern(AseAtomsAdaptor.get_structure(structure))
if show:
c.show_xrd_plot(AseAtomsAdaptor.get_structure(structure))
descriptor_data = dict(descriptor_name=self.name, descriptor_info=str(self), xrd_pattern=xrd_pattern)
structure.info['descriptor'] = descriptor_data
return structure
def write(self, structure, tar, write_xrd_pattern=True):
desc_folder = self.configs['io']['desc_folder']
if not is_descriptor_consistent(structure, self):
raise Exception('Descriptor not consistent. Aborting.')
if write_xrd_pattern:
xrd_pattern = structure.info['descriptor']['xrd_pattern']
xrd_pattern_filename_npy = os.path.abspath(
                os.path.normpath(os.path.join(desc_folder, structure.info['label'] + self.desc_metadata.loc['xrd_pattern']['file_ending'])))
np.save(xrd_pattern_filename_npy, xrd_pattern)
structure.info['xrd_pattern_filename_npy'] = xrd_pattern_filename_npy
tar.add(structure.info['xrd_pattern_filename_npy'])
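# Illustrative usage sketch (hypothetical `configs` object and ASE `structure`;
# not part of this module):
#
#   desc = Diffraction1D(configs, wavelength="CuKa")
#   structure = desc.calculate(structure, show=False)
#   xrd_pattern = structure.info['descriptor']['xrd_pattern']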
|
{"hexsha": "edc73565f43a66ea14e152a2fcf0bf5fc3fe1881", "size": 2569, "ext": "py", "lang": "Python", "max_stars_repo_path": "ai4materials/descriptors/diffraction1d.py", "max_stars_repo_name": "hpleva/ai4materials", "max_stars_repo_head_hexsha": "5b5548f4fbfd4751cd1f9d57cedaa1e1d7ca04b2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2019-12-23T14:47:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T10:50:18.000Z", "max_issues_repo_path": "ai4materials/descriptors/diffraction1d.py", "max_issues_repo_name": "hpleva/ai4materials", "max_issues_repo_head_hexsha": "5b5548f4fbfd4751cd1f9d57cedaa1e1d7ca04b2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2019-12-16T21:08:24.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-09T23:56:46.000Z", "max_forks_repo_path": "ai4materials/descriptors/diffraction1d.py", "max_forks_repo_name": "hpleva/ai4materials", "max_forks_repo_head_hexsha": "5b5548f4fbfd4751cd1f9d57cedaa1e1d7ca04b2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2018-11-21T14:05:33.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-10T11:28:46.000Z", "avg_line_length": 37.7794117647, "max_line_length": 139, "alphanum_fraction": 0.7395873881, "include": true, "reason": "import numpy", "num_tokens": 599}
|
import numpy as np
import pandas as pd
import xarray as xr
def istat_deaths_to_pandas(path):
istat = pd.read_csv(path, encoding="8859", na_values="n.d.", dtype={"GE": str})
# make a date index from GE
def ge2month_day(x):
return f"{x[:2]}-{x[2:]}"
month_day = istat["GE"].map(ge2month_day).values
istat["month_day"] = month_day
def cl_eta2age(x):
if x <= 1:
return x
elif x <= 21:
age = x - 1
return age * 5
else:
raise ValueError(f"unknown age class {x}")
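    # ISTAT CL_ETA coding as interpreted above (illustrative): 0 -> age 0,
    # 1 -> age 1, then five-year classes: 2 -> 5, 3 -> 10, ..., 21 -> 100.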
istat["age"] = istat["CL_ETA"].apply(cl_eta2age)
return istat
def read_istat_deaths(path):
istat = istat_deaths_to_pandas(path).rename(columns={"NOME_COMUNE": "location"})
data = None
for yy in range(11, 21):
tmp = istat.groupby(["month_day", "age", "location"]).agg(
**{
"f": (f"F_{yy}", sum),
"m": (f"M_{yy}", sum),
}
)
        if yy % 4 != 0:  # drop Feb 29 in non-leap years (valid for 2011-2020)
tmp = tmp.drop(index="02-29")
tmp = tmp.reset_index()
tmp["time"] = tmp["month_day"].map(lambda x: np.datetime64(f"20{yy}-{x}"))
tmp = tmp.set_index(["time", "age", "location"]).drop(columns="month_day")
xtmp = tmp.to_xarray().to_array("sex").fillna(0)
if data is None:
data = xtmp
else:
data = xr.concat([data, xtmp], dim="time", fill_value=0)
coords = {
"region": (
"location",
"Italy / " + istat.groupby(["location"])["NOME_REGIONE"].first(),
),
"province": (
"location",
istat.groupby(["location"])["NOME_PROVINCIA"].first(),
),
}
data = data.assign_coords(coords)
return istat, data
def istat_deaths_to_italy_year(istat):
deaths_italy = istat.sum("location")
deaths_italy = deaths_italy.resample(time="Y", label="left", loffset="1D").sum()
deaths_italy = deaths_italy.assign_coords(year=deaths_italy.time.dt.year)
return deaths_italy.swap_dims(time="year").drop_vars("time")
|
{"hexsha": "08fa4f3d8c2484a9c0197f48167adb6018f2835d", "size": 2089, "ext": "py", "lang": "Python", "max_stars_repo_path": "xpop/data/italy.py", "max_stars_repo_name": "alexamici/xpop", "max_stars_repo_head_hexsha": "940f935dfd125d5d51ab7b71a281196c55b29da4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "xpop/data/italy.py", "max_issues_repo_name": "alexamici/xpop", "max_issues_repo_head_hexsha": "940f935dfd125d5d51ab7b71a281196c55b29da4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "xpop/data/italy.py", "max_forks_repo_name": "alexamici/xpop", "max_forks_repo_head_hexsha": "940f935dfd125d5d51ab7b71a281196c55b29da4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.0138888889, "max_line_length": 84, "alphanum_fraction": 0.5528961225, "include": true, "reason": "import numpy", "num_tokens": 595}
|
import Pkg; Pkg.activate(joinpath(@__DIR__, "../../../../../"))
using Distributions
using PyPlot
"""
Paper: Bayesian inference for finite mixtures of univariate and multivariate
skew-normal and skew-t distributions, Biostatistics 2010.
skew (delta): a real number in (-1, 1)
"""
function rand_skewnormal(loc, scale, skew)
z = rand(TruncatedNormal(0, 1, 0, Inf))
return loc + scale * skew * z + scale * sqrt(1 - skew ^ 2) * randn()
end
function rand_skewnormal(loc, scale, skew, dims...)
z = rand(TruncatedNormal(0, 1, 0, Inf), dims...)
return loc .+ scale * skew * z + scale * sqrt(1 - skew ^ 2) * randn(dims...)
end
x = rand_skewnormal(1, .5, -.97, 100000)
plt.hist(x, bins=100);
mean(x .< 0)
mean(x), std(x)
|
{"hexsha": "d57867463fd6192147031c96695cf7aca8c38ee7", "size": 740, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "runs/sim-study/configs/test-sim-6-7-19/notebook/scripts/sketch-data-gen.jl", "max_stars_repo_name": "luiarthur/CytofRepFAM.jl", "max_stars_repo_head_hexsha": "1f997d1620d74861c5bde5559ebdd1e6c449b9e7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "runs/sim-study/configs/test-sim-6-7-19/notebook/scripts/sketch-data-gen.jl", "max_issues_repo_name": "luiarthur/CytofRepFAM.jl", "max_issues_repo_head_hexsha": "1f997d1620d74861c5bde5559ebdd1e6c449b9e7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-02-05T01:26:53.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-16T04:13:03.000Z", "max_forks_repo_path": "runs/sim-study/configs/test-sim-6-7-19/notebook/scripts/sketch-data-gen.jl", "max_forks_repo_name": "luiarthur/CytofRepFAM.jl", "max_forks_repo_head_hexsha": "1f997d1620d74861c5bde5559ebdd1e6c449b9e7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.4285714286, "max_line_length": 80, "alphanum_fraction": 0.6432432432, "num_tokens": 233}
|
######################
##### ROSENBROOK #####
######################
function rosenbrook1()
model = Model()
@variable(model, x)
@variable(model, y)
@NLobjective(model, Min, (2.0 - x)^2 + 100 * (y - x^2)^2)
return model
end
function test_rosenbrook1(solver)
@testset "test_rosenbrook1" begin
model = rosenbrook1()
setsolver(model,solver)
@test_broken solve(model) == :Optimal
end
end
function rosenbrook2()
model = Model()
@variable(model, x >= 0.0)
@variable(model, y >= 0.0)
@NLobjective(model, Min, (2.0 - x)^2 + 100 * (y - x^2)^2)
@constraint(model, x + y >= 0.1)
return model
end
function test_rosenbrook2(solver)
@testset "test_rosenbrook2" begin
model = rosenbrook2()
setsolver(model,solver)
@test solve(model) == :Optimal
check_rosenbrook(model)
end
end
function rosenbrook3()
model = Model()
@variable(model, x >= 0.0)
@variable(model, y >= 0.0)
@NLobjective(model, Min, (2.0 - x)^2 + 100 * (y - x^2)^2)
@constraint(model, x^2 + y^2 >= 0.5)
return model
end
function test_rosenbrook3(solver)
@testset "test_rosenbrook3" begin
model = rosenbrook3()
setsolver(model,solver)
@test solve(model) == :Optimal
check_rosenbrook(model)
end
end
function rosenbrook4()
model = Model()
@variable(model, x >= 0.0)
@variable(model, y >= 0.0)
@NLobjective(model, Min, (2.0 - x)^2 + 100 * (y - x^2)^2)
return model
end
function test_rosenbrook4(solver)
@testset "test_rosenbrook4" begin
model = rosenbrook4()
setsolver(model,solver)
@test_broken solve(model) == :Optimal
end
end
function check_rosenbrook(model)
tol = 1e-3
@test abs(getvalue(model[:x]) - 2.0) < tol
@test abs(getvalue(model[:y]) - 4.0) < tol
end
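# The analytic minimum of (a - x)^2 + 100*(y - x^2)^2 lies at (x, y) = (a, a^2);
# with a = 2.0 this is (2.0, 4.0), which check_rosenbrook verifies above.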
########################
##### FEASIBLE LPS #####
########################
function toy_lp1()
model = Model()
@variable(model, x >= 0.0)
@variable(model, y >= 0.0)
@NLobjective(model, Min, -x - 100 * y)
@constraint(model, x + y <= 1.0)
return model
end
function check_toy_lp1(model)
tol = 1e-3
@test abs(getvalue(model[:x]) - 0.0) < tol
@test abs(getvalue(model[:y]) - 1.0) < tol
end
function test_toy_lp1(solver)
model = toy_lp1()
setsolver(model,solver)
status = solve(model)
@test status == :Optimal
check_toy_lp1(model)
end
function toy_lp2()
model = Model()
@variable(model, 0.0 <= x <= 1.0)
@variable(model, 0.0 <= y <= 1.0)
@NLobjective(model, Min, -x - 100 * y)
@constraint(model, x + y <= 2.0)
return model
end
function check_toy_lp2(model)
tol = 1e-3
@test abs(getvalue(model[:x]) - 1.0) < tol
@test abs(getvalue(model[:y]) - 1.0) < tol
end
function test_toy_lp2(solver)
model = toy_lp2()
setsolver(model,solver)
status = solve(model)
@test status == :Optimal
check_toy_lp2(model)
end
function toy_lp3()
model = Model()
@variable(model, 0.0 <= x <= 1.0)
@variable(model, 0.0 <= y <= 1.0)
@NLobjective(model, Min, x)
@constraint(model, 1.0 <= x + y <= 2.0)
return model
end
function check_toy_lp3(model)
tol = 1e-3
@test abs(getvalue(model[:x]) - 0.0) < tol
@test abs(getvalue(model[:y]) - 1.0) < tol
end
function test_toy_lp3(solver)
model = toy_lp3()
setsolver(model,solver)
status = solve(model)
@test status == :Optimal
check_toy_lp3(model)
end
function toy_lp4()
model = Model()
@variable(model, x, lowerbound=0.0, upperbound=1.0)
@variable(model, y, lowerbound=0.0, upperbound=1.0)
@NLobjective(model, Min, x)
@constraint(model, 1.0 <= x + y <= 2.0)
return model
end
function check_toy_lp4(model)
tol = 1e-3
@test abs(getvalue(model[:x]) - 0.0) < tol
@test abs(getvalue(model[:y]) - 1.0) < tol
end
function test_toy_lp4(solver)
model = toy_lp4()
setsolver(model,solver)
status = solve(model)
@test status == :Optimal
check_toy_lp4(model)
end
function toy_lp5()
model = Model()
@variable(model, x, lowerbound=0.0, upperbound=1.0)
@variable(model, y, lowerbound=0.0, upperbound=1.0)
@NLobjective(model, Min, x)
@constraint(model, x + y == 1.0)
@constraint(model, x * 32.5 + y * 32.5 == 32.5)
@constraint(model, 3.0 * x + 3.0 * y <= 3.0)
return model
end
function test_toy_lp5(solver)
model = toy_lp5()
setsolver(model,solver)
status = solve(model)
@test status == :Optimal
check_toy_lp4(model)
end
function toy_lp6()
model = Model()
@variable(model, x, lowerbound=0.0, upperbound=1.0)
@variable(model, y, lowerbound=0.0, upperbound=1.0)
@NLobjective(model, Min, x)
@constraint(model, x + y == 1.0)
@constraint(model, x * 5.5 + y * 5.5 == 5.5)
return model
end
function test_toy_lp6(solver)
model = toy_lp6()
setsolver(model,solver)
status = solve(model)
@test status == :Optimal
check_toy_lp4(model)
end
function toy_lp7()
model = Model()
@variable(model, x, lowerbound=0.0, upperbound=1.0)
@variable(model, y, lowerbound=0.0, upperbound=1.0)
@NLobjective(model, Min, x)
@constraint(model, 2.0 * x + y == 1.0)
return model
end
function test_toy_lp7(solver)
model = toy_lp7()
setsolver(model,solver)
status = solve(model)
@test status == :Optimal
check_toy_lp4(model)
end
function toy_lp8()
model = Model()
@variable(model, x, lowerbound=0.0, upperbound=1.0)
@variable(model, y, lowerbound=0.0, upperbound=1.0)
@NLobjective(model, Min, x)
@constraint(model, x + y >= 1.0)
@constraint(model, x * 5.5 + y * 5.5 <= 5.5)
return model
end
function test_toy_lp8(solver)
model = toy_lp8()
setsolver(model,solver)
status = solve(model)
@test status == :Optimal
check_toy_lp4(model)
end
##########################
##### INFEASIBLE LPS #####
##########################
function toy_lp_inf1()
model = Model()
@variable(model, x >= 0.0)
@variable(model, y >= 0.0)
@NLobjective(model, Min, x + 100 * y)
@constraint(model, x + 2 * y <= -1.0)
return model
end
function toy_lp_inf2()
model = Model()
@variable(model, x >= 0.0)
@variable(model, y >= 0.0)
@NLobjective(model, Min, x + 100 * y)
@constraint(model, x + 2 * y <= 2.0)
@constraint(model, x + 2 * y >= 4.0)
return model
end
#######################
##### CONVEX NLPS #####
#######################
function circle1()
model = Model()
@variable(model, x >= 0.0)
@variable(model, y >= 0.0)
@NLobjective(model, Min, x + 100 * y)
@NLconstraint(model, x^2 + y^2 <= 1.0)
@NLconstraint(model, (x-2.0)^2 + y^2 <= 1.0)
return model
end
function check_circle1(model)
tol = 1e-3
@test abs(getvalue(model[:x]) - 1.0) < tol
@test abs(getvalue(model[:y]) - 0.0) < tol
end
function circle2()
model = Model()
@variable(model, x >= 0.0)
@variable(model, y >= 0.0)
@NLobjective(model, Min, x^3 + y^3)
@NLconstraint(model, x^2 + y^2 <= 1.0)
return model
end
function check_circle2(model)
tol = 1e-2
@test abs(getvalue(model[:x]) - 0.0) < tol
@test abs(getvalue(model[:y]) - 0.0) < tol
end
function quad_opt()
model = Model()
@variable(model, x)
@variable(model, y)
@NLobjective(model, Min, y)
@NLconstraint(model, x^2 <= y)
return model
end
function check_quad_opt(model)
tol = 1e-2
@test abs(getvalue(model[:x]) - 0.0) < tol
@test abs(getvalue(model[:y]) - 0.0) < tol
end
##########################
##### NONCONVEX NLPS #####
##########################
function circle_nc1()
model = Model()
@variable(model, x >= 0.0)
@variable(model, y >= 0.0)
@NLobjective(model, Min, x + 100 * y)
@NLconstraint(model, x^2 + y^2 == 1.0)
@NLconstraint(model, (x-2.0)^2 + y^2 == 1.0)
return model
end
function check_circle_nc1(model)
tol = 1e-3
@test abs(getvalue(model[:x]) - 1.0) < tol
@test abs(getvalue(model[:y]) - 0.0) < tol
end
function circle_nc2()
model = Model()
@variable(model, x, start = 1.0)
@variable(model, y, start = 1.0)
@NLobjective(model, Min, x)
@NLconstraint(model, x^2 + y^2 == 1.0)
return model
end
function check_circle_nc2(model)
tol = 1e-3
@test abs(getvalue(model[:x]) + 1.0) < tol
@test abs(getvalue(model[:y]) - 0.0) < tol
end
function circle_nc_inf1()
model = Model()
@variable(model, x, start = 1.0)
@variable(model, y, start = 1.0)
@NLobjective(model, Min, x)
@NLconstraint(model, x^2 + y^2 == 1.0)
@NLconstraint(model, x^2 + 2 * y^2 == 4.0)
return model
end
##############################
##### UNBOUNDED PROBLEMS #####
##############################
function lp_unbd()
model = Model()
@variable(model, x >= 0.0)
@variable(model, y)
@NLobjective(model, Min, -x)
@NLconstraint(model, x - y <= 1.0)
return model
end
function circle_nc_unbd()
model = Model()
@variable(model, x)
@variable(model, y)
@NLobjective(model, Min, x + 0.1 * y)
@NLconstraint(model, x^2 + y^2 >= 1.0)
return model
end
function quad_unbd()
model = Model()
@variable(model, x)
@variable(model, y)
@NLobjective(model, Min, x)
@NLconstraint(model, x^2 <= y)
return model
end
#####################################
##### UNBOUNDED FEASIBLE REGION #####
#####################################
function unbd_feas()
model = Model()
@variable(model, x >= 0.0)
@variable(model, y >= 0.0)
@variable(model, z >= 0.0)
@NLobjective(model, Min, y)
@NLconstraint(model, x^2 <= y)
@NLconstraint(model, z >= 0.0)
return model
end
function test_unbd_feas(solver)
println("test_unbd_feas")
model = unbd_feas()
setsolver(model,solver)
status = solve(model)
@test status == :Optimal
@test getvalue(model[:z]) < 1e5
@show getvalue(model[:z])
end
###########################
##### STARTING POINTS #####
###########################
function starting_point_prob(start::Float64)
model = Model()
@variable(model, x, start = start)
@NLobjective(model, Min, -x^2)
@NLconstraint(model, -1.0 <= x <= 1.0)
return model
end
function test_starting_point(solver,starting_point::Float64)
if starting_point == 0.0
warn("don't select this as a starting point")
end
model = starting_point_prob(starting_point)
setsolver(model,solver)
status = solve(model)
@test status == :Optimal
if sign(starting_point) < 0.0
@test abs(getvalue(model[:x]) - 1.0) < 1e-4
else
@test abs(getvalue(model[:x]) + 1.0) < 1e-4
end
end
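# Note: minimizing -x^2 on [-1, 1] is nonconvex with two minimizers, x = ±1;
# the branch above encodes which of the two the solver is expected to reach
# from a given starting point.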
|
{"hexsha": "ce39a147d36cf8a2f4bbd531da89deeb74babd38", "size": 10840, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/problems.jl", "max_stars_repo_name": "ohinder/OnePhase.jl", "max_stars_repo_head_hexsha": "386f4c09ec43b3cb1d41d711bbb7b0bab097015c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 21, "max_stars_repo_stars_event_min_datetime": "2018-01-05T01:55:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-15T11:50:04.000Z", "max_issues_repo_path": "test/problems.jl", "max_issues_repo_name": "ohinder/OnePhase.jl", "max_issues_repo_head_hexsha": "386f4c09ec43b3cb1d41d711bbb7b0bab097015c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2018-01-11T17:43:57.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-15T04:08:36.000Z", "max_forks_repo_path": "test/problems.jl", "max_forks_repo_name": "ohinder/OnePhase.jl", "max_forks_repo_head_hexsha": "386f4c09ec43b3cb1d41d711bbb7b0bab097015c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2018-01-19T09:03:16.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-14T18:18:12.000Z", "avg_line_length": 23.8241758242, "max_line_length": 61, "alphanum_fraction": 0.5812730627, "num_tokens": 3577}
|
import numpy as np
from bokeh.plotting import figure, show
from bio_rtd import pdf, uo
# Define inlet profiles.
t = np.linspace(0, 10, 201) # time
c_in = np.ones([1, t.size]) # concentration (constant)
f = np.ones_like(t) * 3.5 # flow rate
# Define unit operation.
ft_uo = uo.fc_uo.FlowThrough(
t=t, uo_id="ft_example",
pdf=pdf.ExpModGaussianFixedDispersion(t, 0.3 ** 2 / 2, 1.0))
ft_uo.v_void = 2 * f[0] # set void volume (rt * flow rate)
# Simulation.
f_out, c_out = ft_uo.evaluate(f, c_in)
# Plot.
p = figure(plot_width=690, plot_height=350,
title="Unit Operation - Breakthrough",
x_axis_label="t [min]", y_axis_label="c [mg/mL]")
p.line(t, c_out[0], line_width=2, color='black',
legend_label='c [mg/mL]')
show(p)
|
{"hexsha": "622f1010f51f3369eaaef4ba1644437882692cf5", "size": 762, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/documentation/tutorial_1d.py", "max_stars_repo_name": "open-biotech/bio-rtd", "max_stars_repo_head_hexsha": "c3e2cf4d7d646bda719e5fc6f694a1cae0e412c0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-03-30T13:26:12.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-02T07:10:49.000Z", "max_issues_repo_path": "examples/documentation/tutorial_1d.py", "max_issues_repo_name": "open-biotech/bio-rtd", "max_issues_repo_head_hexsha": "c3e2cf4d7d646bda719e5fc6f694a1cae0e412c0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/documentation/tutorial_1d.py", "max_forks_repo_name": "open-biotech/bio-rtd", "max_forks_repo_head_hexsha": "c3e2cf4d7d646bda719e5fc6f694a1cae0e412c0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-06-03T07:50:56.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-03T07:50:56.000Z", "avg_line_length": 29.3076923077, "max_line_length": 64, "alphanum_fraction": 0.6587926509, "include": true, "reason": "import numpy", "num_tokens": 253}
|
struct Quadrotor2DArm{I, T} <: Model{I, T}
n::Int
m::Int
d::Int
# body
lb # length
mb # mass
Jb # inertia
# link 1
l1
lc1
m1
J1
    # link 2
l2
lc2
m2
J2
g # gravity
end
function kinematics(model::Quadrotor2DArm, q)
@SVector [q[1] + model.l1 * sin(q[3] + q[4]) + model.l2 * sin(q[3] + q[4] + q[5]),
q[2] - model.l1 * cos(q[3] + q[4]) - model.l2 * cos(q[3] + q[4] + q[5])]
end
function jacobian(model::Quadrotor2DArm, q)
a = model.l1 * cos(q[3] + q[4]) + model.l2 * cos(q[3] + q[4] + q[5])
b = model.l2 * cos(q[3] + q[4] + q[5])
c = model.l1 * sin(q[3] + q[4]) + model.l2 * sin(q[3] + q[4] + q[5])
d = model.l2 * sin(q[3] + q[4] + q[5])
@SMatrix [1.0 0.0 a a b;
0.0 1.0 c c d]
end
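# Note: `jacobian` above is the 2x5 Jacobian dk/dq of `kinematics` with respect
# to q = (x, y, body angle, joint 1, joint 2); a, b, c, d are the partial
# derivatives of the end-effector position with respect to the three angles.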
function B_func(model::Quadrotor2DArm, q)
a = model.l1 * cos(q[3] + q[4]) + model.l2 * cos(q[3] + q[4] + q[5])
b = model.l2 * cos(q[3] + q[4] + q[5])
c = model.l1 * sin(q[3] + q[4]) + model.l2 * sin(q[3] + q[4] + q[5])
d = model.l2 * sin(q[3] + q[4] + q[5])
@SMatrix [-0.5 * model.lb * sin(q[3]) -0.5 * model.lb * sin(q[3]) 0.0 0.0 1.0 0.0;
0.5 * model.lb * cos(q[3]) 0.5 * model.lb * cos(q[3]) 0.0 0.0 0.0 1.0;
0.0 0.0 0.0 0.0 a c;
0.0 0.0 1.0 0.0 a c;
0.0 0.0 0.0 1.0 b d]
end
|
{"hexsha": "19fc81e0f4ced912f09dc3e833b4ed8847fcdcd8", "size": 1370, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "models/quadrotor2D_arm.jl", "max_stars_repo_name": "jmichaux/motion_planning", "max_stars_repo_head_hexsha": "9a36f394261ff11ca8325d8a5e9d8a79f18b2744", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 35, "max_stars_repo_stars_event_min_datetime": "2021-02-07T10:46:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T05:30:20.000Z", "max_issues_repo_path": "models/quadrotor2D_arm.jl", "max_issues_repo_name": "jmichaux/motion_planning", "max_issues_repo_head_hexsha": "9a36f394261ff11ca8325d8a5e9d8a79f18b2744", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-10-07T05:36:17.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-11T17:16:28.000Z", "max_forks_repo_path": "models/quadrotor2D_arm.jl", "max_forks_repo_name": "thowell/motion_planning", "max_forks_repo_head_hexsha": "d42d80e705c1e64e45f5872917b96c6a980398cc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2021-01-25T19:23:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T06:43:01.000Z", "avg_line_length": 26.3461538462, "max_line_length": 86, "alphanum_fraction": 0.4496350365, "num_tokens": 654}
|
[STATEMENT]
lemma lift_resumption_bind: "lift_resumption (r \<bind> f) = lift_resumption r \<bind> lift_resumption \<circ> f"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. lift_resumption (r \<bind> f) = lift_resumption r \<bind> lift_resumption \<circ> f
[PROOF STEP]
by(coinduction arbitrary: r rule: gpv.coinduct_strong)
(auto simp add: lift_resumption.sel Done_bind split: resumption.split option.split del: rel_funI intro!: rel_funI)
|
{"llama_tokens": 164, "file": "CryptHOL_Generative_Probabilistic_Value", "length": 1}
|
from flask import Flask, render_template, request
import json
import plotly
import time
#import pandas as pd
import numpy as np
from plotly import graph_objects as go
import os
app = Flask(__name__)
app.debug = False
datapath = os.path.join('..',"data")
statusfile = os.path.join(datapath,"statusfile.txt")
def modifyline(graphid,fname,makefile=True,newfilename="",curtime=""):
"""go through the status file and turn on or off recording"""
statout=""
with open(fname,'r') as statfle:
for statlne in statfle:
#go line by line
statsplt = statlne.strip().split(",")
connected = statsplt[-1]
if(int(statsplt[1])==graphid):
if(connected=="0"):
raise OSError("sensor {} not connected!".format(graphid))
#this is the right row
if(statsplt[0]==""):
#in this case, the graph is not started.
#we pressed the start button, so we need to
#create a new file!
curtimestr = time.strftime("%Y%m%d%H%M")[2:]
if(curtime==""):
curtime = time.time()
if(newfilename=="" and makefile):
newfilename = os.path.join(curtimestr+"_"+str(graphid)+"_odvals.csv")
newtimestamp = curtime
statout+=','.join([newfilename,\
str(graphid),\
str(newtimestamp),\
str(connected)])+"\n"
else:
#this also happens if we are trying to delete the
#data file name from the status file
statout+=','.join([newfilename,\
str(graphid),\
"",\
str(connected)])+"\n"
#in this case, we have pressed the pause button. What happens?
#for now, nothing
else:
#for other rows, just keep going!
#save the line
statout+=statlne
with open(fname,'w') as statfle2:
statfle2.write(statout)
return True
@app.route('/')
def index():
graphs = [
dict(
data=[
dict(
x=(0,),
y=(0,)
)
],
layout=dict(
autosize=True,
title='graph {}'.format(a),
margin={'l':10,'r':10}
)
)
for a in range(8)]
# Add "ids" to each of the graphs to pass up to the client
# for templating
ids = ['graph-{}'.format(i) for i, _ in enumerate(graphs)]
graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)
return render_template('index.html',
ids=ids,
graphJSON=graphJSON)
@app.route('/background_process_test')
def background_process_test():
butid = request.args.get('butid')
butsplit = butid.split('-')
graphid = int(butsplit[-1].strip())
if(butsplit[0]=="startbut"):
#this means we pressed the start button.
#so in this case we need to load the status file
#and change the content!
modifyline(graphid,statusfile,makefile=True)
elif(butsplit[0]=="endbut"):
#this is the end button, so just remove the filename
modifyline(graphid,statusfile,makefile=False)
return( "nothing")
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
#app.run(host='127.0.0.1', port=5000)
|
{"hexsha": "68f085e7a13a697ab6e1d62131b07a4e3dc7fefd", "size": 3675, "ext": "py", "lang": "Python", "max_stars_repo_path": "flaskapp/testapp.py", "max_stars_repo_name": "dr3y/biomassSensorPy", "max_stars_repo_head_hexsha": "3b86af0b0811a39973b5da592ad19bbd35444c70", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "flaskapp/testapp.py", "max_issues_repo_name": "dr3y/biomassSensorPy", "max_issues_repo_head_hexsha": "3b86af0b0811a39973b5da592ad19bbd35444c70", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "flaskapp/testapp.py", "max_forks_repo_name": "dr3y/biomassSensorPy", "max_forks_repo_head_hexsha": "3b86af0b0811a39973b5da592ad19bbd35444c70", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.3365384615, "max_line_length": 93, "alphanum_fraction": 0.5072108844, "include": true, "reason": "import numpy", "num_tokens": 811}
|
"""
Stacked area plot for 1D arrays inspired by Douglas Y'barbo's stackoverflow
answer:
https://stackoverflow.com/q/2225995/
(https://stackoverflow.com/users/66549/doug)
"""
import numpy as np
from matplotlib import _api
__all__ = ['stackplot']
def stackplot(axes, x, *args,
labels=(), colors=None, baseline='zero',
**kwargs):
"""
Draw a stacked area plot.
Parameters
----------
x : (N,) array-like
y : (M, N) array-like
The data is assumed to be unstacked. Each of the following
calls is legal::
stackplot(x, y) # where y has shape (M, N)
            stackplot(x, y1, y2, y3)  # where y1, y2, y3 have length N
baseline : {'zero', 'sym', 'wiggle', 'weighted_wiggle'}
Method used to calculate the baseline:
- ``'zero'``: Constant zero baseline, i.e. a simple stacked plot.
- ``'sym'``: Symmetric around zero and is sometimes called
'ThemeRiver'.
- ``'wiggle'``: Minimizes the sum of the squared slopes.
- ``'weighted_wiggle'``: Does the same but weights to account for
size of each layer. It is also called 'Streamgraph'-layout. More
details can be found at http://leebyron.com/streamgraph/.
labels : list of str, optional
A sequence of labels to assign to each data series. If unspecified,
then no labels will be applied to artists.
colors : list of color, optional
A sequence of colors to be cycled through and used to color the stacked
areas. The sequence need not be exactly the same length as the number
of provided *y*, in which case the colors will repeat from the
beginning.
If not specified, the colors from the Axes property cycle will be used.
data : indexable object, optional
DATA_PARAMETER_PLACEHOLDER
**kwargs
All other keyword arguments are passed to `.Axes.fill_between`.
Returns
-------
list of `.PolyCollection`
A list of `.PolyCollection` instances, one for each element in the
stacked area plot.
"""
y = np.row_stack(args)
labels = iter(labels)
if colors is not None:
axes.set_prop_cycle(color=colors)
# Assume data passed has not been 'stacked', so stack it here.
# We'll need a float buffer for the upcoming calculations.
stack = np.cumsum(y, axis=0, dtype=np.promote_types(y.dtype, np.float32))
_api.check_in_list(['zero', 'sym', 'wiggle', 'weighted_wiggle'],
baseline=baseline)
if baseline == 'zero':
first_line = 0.
elif baseline == 'sym':
first_line = -np.sum(y, 0) * 0.5
stack += first_line[None, :]
elif baseline == 'wiggle':
m = y.shape[0]
first_line = (y * (m - 0.5 - np.arange(m)[:, None])).sum(0)
first_line /= -m
stack += first_line
elif baseline == 'weighted_wiggle':
total = np.sum(y, 0)
# multiply by 1/total (or zero) to avoid infinities in the division:
inv_total = np.zeros_like(total)
mask = total > 0
inv_total[mask] = 1.0 / total[mask]
increase = np.hstack((y[:, 0:1], np.diff(y)))
below_size = total - stack
below_size += 0.5 * y
move_up = below_size * inv_total
move_up[:, 0] = 0.5
center = (move_up - 0.5) * increase
center = np.cumsum(center.sum(0))
first_line = center - 0.5 * total
stack += first_line
# Color between x = 0 and the first array.
color = axes._get_lines.get_next_color()
coll = axes.fill_between(x, first_line, stack[0, :],
facecolor=color, label=next(labels, None),
**kwargs)
coll.sticky_edges.y[:] = [0]
r = [coll]
# Color between array i-1 and array i
for i in range(len(y) - 1):
color = axes._get_lines.get_next_color()
r.append(axes.fill_between(x, stack[i, :], stack[i + 1, :],
facecolor=color, label=next(labels, None),
**kwargs))
return r
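# Illustrative usage (not part of the library source; assumes a Matplotlib
# Axes via the `Axes.stackplot` wrapper):
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#
#   x = np.arange(4)
#   y = np.array([[1, 1, 2, 3],
#                 [0, 4, 2, 6],
#                 [1, 3, 5, 7]])
#   fig, ax = plt.subplots()
#   ax.stackplot(x, y, baseline='wiggle', labels=['A', 'B', 'C'])
#   ax.legend(loc='upper left')
#   plt.show()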
|
{"hexsha": "58c9b4fde5c0ffc23970031cef4426cd61c9ddda", "size": 4268, "ext": "py", "lang": "Python", "max_stars_repo_path": "venv/Lib/site-packages/matplotlib/stackplot.py", "max_stars_repo_name": "arnoyu-hub/COMP0016miemie", "max_stars_repo_head_hexsha": "59af664dcf190eab4f93cefb8471908717415fea", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "venv/Lib/site-packages/matplotlib/stackplot.py", "max_issues_repo_name": "arnoyu-hub/COMP0016miemie", "max_issues_repo_head_hexsha": "59af664dcf190eab4f93cefb8471908717415fea", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "venv/Lib/site-packages/matplotlib/stackplot.py", "max_forks_repo_name": "arnoyu-hub/COMP0016miemie", "max_forks_repo_head_hexsha": "59af664dcf190eab4f93cefb8471908717415fea", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.144, "max_line_length": 80, "alphanum_fraction": 0.570524836, "include": true, "reason": "import numpy", "num_tokens": 1066}
|
import tkinter as tk
import tkinter.ttk as ttk
import numpy as np
from ..functions import dp, pdd
from ..resources.language import Text
from .measurement_import import GetType
class RadCalc:
def __init__(self, filepath, parent):
self.filepath = filepath
self.parent = parent
self.filename = self.filepath.split("/")[-1][:-4]
data = np.loadtxt(self.filepath, delimiter=",", skiprows=11, unpack=True)
with open(self.filepath) as file:
head = [next(file) for x in range(11)]
for line in head:
if "Inplane" in line:
self.direction = "X"
elif "Crossplane" in line:
self.direction = "Y"
elif "PDD" in line:
self.direction = "Z"
self.axis, self.dose = np.array([i * 10 for i in data[0]]), data[1] # cm to mm
self.normpoint = max(self.dose)
self.std_dev = []
self.axis = self.axis.tolist()
self.axis = {True: self.axis[len(self.axis) // 2 :], False: self.axis}
self.dose = {True: self.dose[len(self.dose) // 2 :], False: self.dose}
        if self.std_dev is not None:
            self.std_dev = {
True: self.std_dev[len(self.std_dev) // 2 :],
False: self.std_dev,
}
else:
self.std_dev = {
True: self.std_dev,
False: self.std_dev,
}
def params(self):
if self.direction == "Z":
return pdd.calculate_parameters(
np.array(self.axis[False]),
self.dose[False] / max(self.dose[False]),
[],
)
else:
params = dp.calculate_parameters(
self.axis[False], self.dose[False] / max(self.dose[False])
)
self.cax = params[1]
return params
|
{"hexsha": "b8c8e117af7c0256e03aeebdb1fb30fbb88bbd3d", "size": 1898, "ext": "py", "lang": "Python", "max_stars_repo_path": "topasgraphsim/src/classes/radcalc_import.py", "max_stars_repo_name": "sebasj13/topasgraphsim", "max_stars_repo_head_hexsha": "6027c7c098b319159c32108dd4ec63f4b44e8676", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-02-19T13:02:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T20:04:17.000Z", "max_issues_repo_path": "topasgraphsim/src/classes/radcalc_import.py", "max_issues_repo_name": "sebasj13/topasgraphsim", "max_issues_repo_head_hexsha": "6027c7c098b319159c32108dd4ec63f4b44e8676", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 37, "max_issues_repo_issues_event_min_datetime": "2022-01-18T22:40:24.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-06T13:43:23.000Z", "max_forks_repo_path": "topasgraphsim/src/classes/radcalc_import.py", "max_forks_repo_name": "sebasj13/topasgraphsim", "max_forks_repo_head_hexsha": "6027c7c098b319159c32108dd4ec63f4b44e8676", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-09T11:34:36.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-09T11:34:36.000Z", "avg_line_length": 30.126984127, "max_line_length": 87, "alphanum_fraction": 0.5226554268, "include": true, "reason": "import numpy", "num_tokens": 446}
|
#!/usr/bin/env python
import os, re
import networkx as NX
import matplotlib.pyplot as PLT
import numpy, scipy
from numpy import random, array, triu, linalg
from scipy.sparse.linalg import eigs
from draggableNode import DraggableNode
from graph import Graph
# QtGui is used in check_directory() below but was never imported in the
# original file; assuming a PyQt4 environment here (adjust if the project
# uses PySide or another Qt binding).
from PyQt4 import QtGui
class GraphCoOccurrence(Graph):
def __init__(self, parent = None):
Graph.__init__(self, parent)
# Build dictionary and term_list for each file in current dir
step_dict = dict()
term_lists = dict()
for f in self.dirlist:
if os.path.isfile(os.path.join(self.parent.step_path, f)) and self.is_step_file(f):
step_dict, term_lists = self.build_dict(f, step_dict, term_lists)
# Create an nxn matrix containing term co-occurrence strengths
n = len(term_lists)
A = numpy.zeros((n,n))
self.A = self.find_cooccurrences(A, term_lists)
self.ind, self.mag = self.indices(self.A) # Indices and magnitudes of files
self.separators = self.best_separation(self.ind, self.mag)
cl_matrices = self.cluster(self.A, self.ind, self.separators)
Ghs = self.gen_graphs(cl_matrices) # Temporary graphs, used to gather edge lists
# Temporary nodes+edges
        nodes = list(term_lists.keys())
edges = [g.edges() for g in Ghs]
# Each graph returns edges referencing nodes 0..n from graph.edges() above.
# Thus, if there are multiple independent graphs, we need to adjust indices
# such that they are appropriate for describing the list of all nodes
edges = self.unpack_edges(edges)
# Create real nodes+edges, using draggable nodes
self.edges = [(nodes[e[0]], nodes[e[1]]) for e in edges]
self.ecolors = [A[node[0], node[1]] for node in edges]
self.nodes = [DraggableNode(self,x) for x in nodes]
# May have to engineer something here. Have 2 options:
# 1) Rename each duplicate node reference from the edge list
# and change the structure of draggable node
# 2) Draw separate graphs for each cluster
# - Tabs, Windows, Split the frame
# Create graph and add nodes+edges
[self.Gh.add_node(x, obj=n) for x,n in zip(nodes, self.nodes)]
[self.Gh.add_edge(e[0], e[1], weight = w) for e,w in zip(self.edges, self.ecolors)]
self.nodelist = self.Gh.nodes()
self.pos = NX.spring_layout(self.Gh)
try:
# Array of xy positions of every node in nodelist
xy=numpy.asarray([self.pos[v] for v in self.nodelist])
except KeyError as e:
raise NX.NetworkXError('Node %s has no position.'%e)
except ValueError:
raise NX.NetworkXError('Bad value in node positions.')
        # DraggableNode order is not guaranteed coming out of the NX.spring_layout
        # call, because it returns a hash table. Here, we make sure each node is
        # correctly numbered.
for o in self.nodes:
o.set_node_num(o, self.nodelist.index(o.name))
# Generate scatter graph, draw edges and labels
self.artist = self.axes.scatter(xy[:,0], xy[:,1], self.node_size_mult, c='b', alpha=0.5)
self.edges = NX.draw_networkx_edges(self.Gh, self.pos, ax=self.axes, width=1.0, alpha=.75, edge_color=self.ecolors,
edge_cmap=PLT.cm.Blues, edge_vmin = self.A.min(), edge_vmax = self.A.max())
if self.parent.draw_node_labels_tf:
            NX.draw_networkx_labels(self.Gh, self.pos, ax=self.parent.axes, font_size=13)
    def best_separation(self, ind, mag):
        '''Determines the best place to segment the indices into
        two clusters based on the magnitudes returned from eigs'''
        # Complex eigenvector components have no ordering relation, so the
        # sign test is done on the real parts; the sign change in the
        # Fiedler-vector components marks the split point.
        if mag[0][1].real < 0:
            for v in range(len(mag)):
                if mag[v][1].real > 0:
                    break
        else:
            for v in range(len(mag)):
                if mag[v][1].real < 0:
                    break
        return [v]
def build_dict(self, f, step_dict, term_lists):
'''Accepts a file, dictionary of terms, and dictionary of STEP files
and adds new terms and the file f to each dictionary'''
term_lists[f] = set()
for line in open(f):
line_parts = line.split("'")
for i in range(1,len(line_parts),2):
string = line_parts[i]
if string != '' and string[0] != '#':
                    tokens = re.split(r'\W+', string)
for token in tokens:
if token != '':
term_lists[f].add(token)
if token in step_dict:
step_dict[token] = step_dict[token] + 1
else:
step_dict[token] = 1
return step_dict, term_lists
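    # For example, a STEP line containing ...'STEEL BAR'... contributes the
    # tokens 'STEEL' and 'BAR' to term_lists[f] and increments their counts
    # in step_dict.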
def check_directory(self):
'''Dialog for step directory if not set'''
if self.parent.step_path == None:
filedialog = QtGui.QFileDialog()
tmp = filedialog.getExistingDirectory(None, 'Open Directory', '')
self.parent.set_step_path(str(tmp))
os.chdir(self.parent.step_path)
def cluster(self, A, ind, separators):
'''Returns a list of 1d lists, representing clustered portions of the matrix A using
the supplied separators and indices'''
clusters = len(separators) + 1
out = []
        for s in range(clusters):
tmp = []
if s == 0:
for i in ind[0:separators[s]]:
for j in ind[0:separators[s]]:
tmp.append(A[i][j])
out.append(tmp)
elif s == len(separators):
for i in ind[separators[s-1]:]:
for j in ind[separators[s-1]:]:
tmp.append(A[i][j])
out.append(tmp)
else:
for i in ind[separators[s-1]:separators[s]]:
for j in ind[separators[s-1]:separators[s]]:
tmp.append(A[i][j])
out.append(tmp)
return out
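    # Example: with ind = [0, 1, 2, 3] and separators = [2], cluster() returns
    # the flattened restriction of A to {0, 1} and to {2, 3}, i.e. one 1-d
    # list of matrix entries per cluster.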
def find_cooccurrences(self, A, term_lists):
'''Returns a matrix containing strength of term cooccurrence between every STEP file input'''
index1 = 0
for key1, value1 in term_lists.items():
#print index1, key1
index2 = 0
for key2, value2 in term_lists.items():
c = len(value1 & value2)
A[index1, index2] = c
A[index2, index1] = c
index2 = index2 + 1
index1 = index1 + 1
return A
def flatten(self, lst):
'''recursively flattens a nested list'''
return sum( ([x] if not isinstance(x, list) else self.flatten(x)
for x in lst), [] )
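    # For example, flatten([1, [2, [3, 4]], 5]) returns [1, 2, 3, 4, 5].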
def gen_graphs(self, ms):
'''Returns a list of networkx graphs, representing clusters of a larger graph'''
# Convert each matrix in ms to 2d arrays
arrays = []
        for m in ms:
            # each m is a flattened n*n cluster matrix; recover n robustly
            # (int(round(...)) avoids float truncation of the square root)
            _n = int(round(len(m) ** 0.5))
            tmp = numpy.zeros((_n, _n), float).tolist()
            y = 0
            for x in range(len(m)):
                tmp[x % _n][y] = m[x]
                if (x + 1) % _n == 0:
                    y += 1
            arrays.append(array(tmp))
return [NX.Graph(data=a) for a in arrays]
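    # Example: a flattened 4-entry cluster [a, b, c, d] is refilled
    # column-major into [[a, c], [b, d]], which NX.Graph() then treats as a
    # weighted adjacency matrix.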
    def indices(self, A):
        '''Returns the sorted indices of files in A, marking where the files
        represented in A should be split to form independent graphs. Also
        returns the list of magnitudes in order.'''
        _A = triu(A, 1)  # Upper triangle
        _A += _A.T  # += Transpose
        def laplacian(A):
            '''returns the combinatorial Laplacian matrix of an array'''
            # L = D - A, where D is the diagonal matrix of row sums (degrees)
            return numpy.diag(array(A).sum(axis=1)) - A
        L = laplacian(_A)
D,V = eigs(L, k=2, which='SR') # two smallest reals
        V2 = [x[1] for x in V]
        # complex values cannot be sorted directly; sort by real part (the
        # eigenvectors of a symmetric Laplacian are real up to roundoff)
        V_sorted = sorted(V2, key=lambda c: c.real)
        ind = [V2.index(x) for x in V_sorted]
        V_mag = [V[x] for x in ind]
        return ind, V_mag
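    # The two smallest eigenpairs of the combinatorial Laplacian give a
    # spectral bisection: sorting vertices by the components of the second
    # eigenvector (the Fiedler vector) groups strongly co-occurring files,
    # and best_separation() above locates the sign change that marks the
    # split between the two clusters.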
def redraw(self):
'''Redraws the current graph. Typically after a move or selection.'''
# Need to specify the functions for drawing artist (nodes) and edges during redraw
def artist_fn(xy, axes):
return self.axes.scatter(xy[:,0], xy[:,1], self.node_size_mult, alpha=0.5)
def edges_fn(g, pos, axes, ecolor='red'):
return NX.draw_networkx_edges(g, pos, ax=axes, width=1.0, alpha=.75, edge_color=self.ecolors,
edge_cmap=PLT.cm.Blues, edge_vmin = self.A.min(), edge_vmax = self.A.max())
super(GraphCoOccurrence, self).redraw(artist_fn, edges_fn)
def unpack_edges(self, edges, out = None):
'''Recursively organizes edges'''
if edges == []:
return out
elif out == None:
out = edges[0]
return self.unpack_edges(edges[1:], out)
else:
m = 0
for e in out:
if e[0] > m: m = e[0]
if e[1] > m: m = e[1]
tmp = []
for e in edges[0]:
tmp.append((e[0]+m+1, e[1]+m+1))
return self.unpack_edges(edges[1:], out+tmp)
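# Example: unpack_edges([[(0, 1)], [(0, 1)]]) -> [(0, 1), (2, 3)]; the second
# cluster's node indices are shifted past the maximum index of the first.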
|
{"hexsha": "e689575723f58294acf79e89836ee1e93c0aa597", "size": 9564, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/graphCoOccurrence.py", "max_stars_repo_name": "fdesjardins/nara-stepbrowser", "max_stars_repo_head_hexsha": "27703717e0e5a69fe16d3df3ba285d22b56595cb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-11-03T20:04:34.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-03T20:04:34.000Z", "max_issues_repo_path": "src/graphCoOccurrence.py", "max_issues_repo_name": "fdesjardins/nara-stepbrowser", "max_issues_repo_head_hexsha": "27703717e0e5a69fe16d3df3ba285d22b56595cb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/graphCoOccurrence.py", "max_forks_repo_name": "fdesjardins/nara-stepbrowser", "max_forks_repo_head_hexsha": "27703717e0e5a69fe16d3df3ba285d22b56595cb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.564516129, "max_line_length": 123, "alphanum_fraction": 0.5457967378, "include": true, "reason": "import numpy,from numpy,from scipy,import networkx", "num_tokens": 2283}
|
\documentclass{article}
\usepackage{epic}
\usepackage{eepic}
\title{Efficient Planning In Simple Cases:\\
Lessons From The Blocks World}
\author{Bart Massey}
\date{June 12, 1995}
\newcommand{\sbw}{{\em primitive-blocks-world}}
\newcommand{\astar}{{$\mbox{A}^{\!\mbox{\tt *}}$}}
\newcommand{\idastar}{{$\mbox{IDA}^{\!\mbox{\tt *}}$}}
\newcommand{\bigO}{{\cal O}}
\begin{document}
\maketitle
\section{Introduction}
\label{section-introduction}
In the 20 years since Winograd's seminal thesis on natural language
understanding \cite{winograd-phd,winograd-book},
AI researchers have focused a lot of attention on simplified
versions of the simple planning domain he employed, the
``blocks world''. The version of this domain with which
we will be concerned is that which
Gupta and Nau \cite{gupta-nau} refer to as \sbw{}.
The initial and final states of a planning
problem are stacks of uniquely labeled blocks on a table, and the planning
problem consists of finding a sequence of move operations which
takes the initial state to the final state. The move
operations are restricted to move only the topmost block in a
stack, and only atop another topmost block or the table.
Currently, the author has implemented a special-purpose planner
which finds shortest solutions to \sbw{} planning problems. This
is the fastest optimal \sbw{} planner known to the author, routinely
solving ``hard'' random 40 block problems. This shows that
reasonable special-purpose heuristics for \sbw{} do a very good job of
minimizing the constant factors involved in \sbw{} planning.
One could draw one of several further hypotheses:
\begin{enumerate}
\item[1)]
This speedup is uninteresting, since special-purpose planners
are {\em always} faster than general-purpose planners. The speedups
undoubtedly come entirely from problem-specific data structures and
heuristics which will utterly fail to generalize.
\item[2)]
This speedup shows that \sbw{} is not a good domain to test
general-purpose planning methods. The fact that special-purpose
techniques do so much better indicates that the domain is {\em too}
simple to give much validity to general-purpose planning results.
\item[3)]
This speedup is a consequence of the simplicity of the domain,
and thus suitable generalizations of the planner should work well
in other simple domains.
\item[4)]
This speedup is a consequence of a well-implemented good algorithm for
general-purpose planning, and thus a proper generalization of
the planner should work well in most any domain.
\end{enumerate}
Historical evidence indicates that (4) is unlikely.
(1) and (2)
are effectively null hypotheses
for the experiments the author proposes to perform, which involve
exploring (3).
The remainder of this document outlines the author's plan for
exploration of hypothesis (3) above.
\begin{description}
\item[Section \ref{section-sbw-techniques}]
describes the techniques responsible for the
good performance of the existing \sbw{}
planning engine.
\item[Section \ref{section-not-generalizable}]
outlines the argument against performing the proposed experiment.
\item[Section \ref{section-proposal}]
describes the properties of the proposed experimental system.
\item[Section \ref{section-subgoals}]
describes some specific tasks to be accomplished.
\item[Section \ref{section-conclusion}]
draws some conclusions about the proposed experiments.
\end{description}
\section{Keys To Speeding Up \sbw{}}
\label{section-sbw-techniques}
There are several techniques which combine to give such good
performance in the author's optimal \sbw{} planner. Among these
are
\begin{description}
\item[1)]
The planner is written in {\sc C}. Careful profiling and
bottleneck removal have been repeatedly applied to the inner loop
of a simple algorithm (\astar{} or \idastar{} \cite{ginsberg-astar}
on arbitrary directed graphs).
The current algorithm is capable of exploring
between 5,000 and 10,000 nodes per second in its inner loop,
which is respectable performance for a search engine.
It is notable that other successful implementations of algorithms which
were historically thought to be computationally infeasible, such
as Crawford's {\sc Tableau} \cite{tableaux} and
Selman's {\sc GSAT} \cite{GSAT}, have
applied essentially this same methodology.
\item[2)]
The state description used in the implementation was
deliberately made as simple as possible without sacrificing
performance. A minimal set of fluents necessary to describe
the state (an ``on'' relationship for each block)
was chosen, and the only redundant fluents added (a ``topmost'' block
for each tower, and a count of towers) reduced the computational
complexity of a number of heuristics without apparently adding
significant computational cost in the general case. The resulting
state description is small, which means that many states can be cached.
It is also cheap to manipulate, which helps the inner loop performance.
\item[3)]
Reasonably modern implementations of key data structures
required by the \astar{} search algorithm keep
the per-node computational complexity of the search low.
The list of visited nodes
is implemented as a balanced binary search tree keyed
on a state hash (and should be implemented
as a self-adjusting binary search tree \cite{tarjan-sabst}).
The priority queue of unvisited nodes is implemented as
a binomial heap \cite{cormen-heaps}
(and should be implemented as a Fibonacci
heap \cite{cormen-heaps}---the
potential performance gain here is considerable).
\item[4)]
The optimization problem for \sbw{} has an interesting property
noted by Gupta and Nau \cite{gupta-nau}, namely that it is never
necessary to move a block onto a tower unless that block is never
to move again after this. The \sbw{} planner automatically prunes
such moves, resulting in substantial performance savings.
\item[5)]
In \sbw{} there are distinguished
states (``deadlock states'') such that a set of moves taking one such state
to another can be made in virtually any order. Exploring
all possible orderings of these moves unsurprisingly consumes
significant amounts of time in the search, since the \astar{} algorithm
will consider them all before backtracking over the initial
distinguished state. The planner has a heuristic which selects
these moves in a canonical order via a polytime (actually $\bigO(n)$)
method, greatly speeding the search. This technique is related
to one used, e.g., in {\sc Graphplan} \cite{blum-furst}.
\item[6)]
The \astar{} scoring heuristic (underestimate of the distance from the
current state to the goal state) used in the planner is
very domain-specific and relatively accurate.
However, it does have an important
but potentially generalizable property, namely that
it is careful not to decrease the \astar{} heuristic score in situations
where no real progress has been made.
\begin{figure}
\centering
\input{false-progress-a.eepic}
\caption{The Sussman Test}
\label{false-progress-a}
\end{figure}
\begin{figure}
\centering
\input{false-progress-b.eepic}
\caption{Failing The Sussman Test}
\label{false-progress-b}
\end{figure}
Consider the ``Sussman Anomaly'' situation of Figure
\ref{false-progress-a}, where the goal is to make a single
stack of blocks in alphabetical order. The move to the
situation of Figure \ref{false-progress-b} looks superficially
promising; more fluents are correct after the move than
before. However, it is clear from inspection that, in order
to reach the goal state, this move must be undone. Thus, it
would be a mistake for the score to decrease. In general it
may sometimes be possible to identify this sort of ``local
minimum'' and score accordingly.
\end{description}
\section{A Critical View Of Generalizability}
\label{section-not-generalizable}
In considering the key features of the planner as described in
Section \ref{section-sbw-techniques}, it becomes clear that
they condense into two classes. A critic of the author's proposed
work has noted that both classes are problematic. The paraphrase
of those comments here is intended only to point up the potential
pitfalls in the proposal.
About techniques (1)--(3), the critic says
\begin{quote}
This is exactly what people were doing in 1970. It
may produce better numbers, but it will not {\em scale} any better
than it did then.
\end{quote}
These techniques are known
to be workable, and to provide very good speedups by constant
or polynomial factors. While these speedups are important,
they do not in any way address scalability. However, the author
believes these
techniques are still useful in two ways.
First, by allowing the planner to explore a larger
problem space, they provide a better picture of the
true complexity of the space and the asymptotically limiting factors.
Second, by allowing the planner to solve some realistic small problems,
they allow a demonstration of the advantages and disadvantages of the
planning approach.
About techniques (4)--(6), the critic says
\begin{quote}
There are no reasonable generalizations of these ideas,
short of solving the planning problem outright. If either
of these techniques could be extended to general planning, then
none of the rest of the proposed work would matter.
\end{quote}
These techniques are known to be very difficult to apply or
understand in general, and no one knows how to use them to
solve the planning problem. However, it seems reasonable to
expect that, when used in a heuristic fashion, they might
provide good speedups in a number of common cases, and be relatively
inexpensive. Thus, to the extent that they will generalize, they
may be very helpful in speeding up an already fast planning engine.
\section{A Proposal}
\label{section-proposal}
Having looked at the current \sbw{} planning system, at ways
in which it might be extended, and at some possible pitfalls, it is
now time to propose an actual implementation and to consider the
experiments that might be performed using it.
Consider each of the elements of Section \ref{section-sbw-techniques},
and the ways they might be implemented in a general-purpose planner:
\begin{description}
\item[1--3)]
The current implementation of \astar{} with states as vectors
of integer-valued fluents should be retained. Careful consideration
is required in choosing additional information to be cached
along with the states. Too much redundant information leads to
serious problems of state access modification speed and of correct preservation of
invariants. Too little redundant information might slow performance.
The fundamental rule here is surely to not cache things until we see
that they are needed by (4) or (5).
\item[4)]
One way to preserve the heuristic properties of the \sbw{} planner
in a general-purpose planner is to consider states achievable
by plans originating from the current state, where a state should
be viewed as a set of fluent values.
If we could efficiently compute the reflexive transitive closure of
plan steps, giving a set of
possible states reachable from a given state, or even if we could
compute
sound approximations and complete approximations to this set,
we could make use of the following
property: If two states have the
property that the set of reachable states from one
is a superset of the set of reachable states from the
other, the weaker state may be pruned.
This would account for the fact that the \sbw{} heuristic
potentially prunes some optimal plans merely because they
may be more difficult to find.
\item[5)]
The author hopes, perhaps optimistically, that some kind
of notion of interchangeability of plan steps can be
developed which will lend itself to the exploitation
of symmetry in general plan search. The
fact that sequences of moves can often be performed in an arbitrary order
suggests to the author that some sort of automatic macro operator
construction might be useful, constructing generalized plan steps which
perform common subgoals. However, this is all currently very vague.
Much more thought is needed here.
\item[6)]
The notion of Hamming distance between states ({\em i.e.},
the number of fluents which must change to transform one
state to another; see the displayed formula after this list)
is by itself probably insufficient to produce
good heuristic scoring. One way to improve this scoring would
be to plan with some automatically weakened set of plan operators,
thus producing admissible scores without as much planning effort.
Some notion of local changes in score is also probably important
to good scoring. The fact that the minimum of some globally
admissible score and some locally accurate score must be used
does not seem to always inhibit local scoring adjustments.
\end{description}
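For concreteness, the Hamming-distance score mentioned in item (6) can be
written as
\begin{displaymath}
h(s) = \left| \{\, f : s(f) \neq g(f) \,\} \right| ,
\end{displaymath}
where $s(f)$ and $g(f)$ denote the value of fluent $f$ in the current
state and in the goal state respectively; the caveats above about
admissibility and local scoring apply unchanged.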
Finally, it is worth noting that \astar{} was chosen for the
\sbw{} planner only because it was a simple algorithm which
produced optimal answers reasonably efficiently; this was important
for comparing the author's planner with another optimal \sbw{} planner.
Other search techniques, optimal and non-optimal, should
be tested as well.
\section{Some Subgoals}
\label{section-subgoals}
Having sketched the kind of experiments which should be performed,
it might pay to look at the sequence of events the author believes
is appropriate for the ensuing investigation.
\begin{enumerate}
\item
A really good version of the blocks-world planner should
be finished, if only for completeness' sake. It is sincerely believed that
this will take only a couple of days of further effort.
\item
The planner should be generalized to accept and manipulate
arbitrary ``{\sc STRIPS} style'' \cite{fikes-nilsson}
planning operator and state descriptions.
Heuristics and optimizations specific to \sbw{} should be
removed. The result should be a locally efficient ({\em i.e.}, tight
inner loop) but globally inefficient ({\em i.e.}, bad asymptotic
performance) general-purpose planner using \astar{} search.
This should take a couple of weeks.
\item
Search techniques other than \astar{} and \idastar{} should be
tried. In particular, the advantages and disadvantages of finding
non-optimal plans via more efficient search algorithms should be determined.
\item
The sorts of optimizations discussed under points (4), (5), and
(6) of Section \ref{section-proposal} should be designed and implemented.
This is deliberately open-ended, as the results of early experiments
will to a large extent determine the approach.
\item
Having settled on a set of techniques to be used, and having proved
that these techniques are effective, implement a ``production version''
of the general-purpose planner, publish the results, and distribute
the code.
\end{enumerate}
It is acknowledged that this is a vague and sketchy plan. At this
stage, little else is possible. There are many potential pitfalls,
and only time will tell which can be avoided. Indeed, it is possible
that a few days' or a few weeks' work will show that this whole
line of research should be abandoned. It is simply too early to tell.
\section{Conclusion}
\label{section-conclusion}
The author has built a fast optimal planner for \sbw{}. It is quite
possible that this will lead to fast planning in more general domains.
The reasons for this expectation, the case against it, some ideas for
how to meet it, and some steps to take to achieve it, have all been
outlined. Now, only the work remains.
\bibliographystyle{plain}
\bibliography{fastplan,tania-planning}
\end{document}
|
{"hexsha": "d9dcea60f173f22ba0c268ec310b98824e6da06f", "size": 15375, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/fastplan/fastplan.tex", "max_stars_repo_name": "BartMassey/blocks", "max_stars_repo_head_hexsha": "6dbb39186595b6e2b80c9a5dcd616056f6cb3117", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "doc/fastplan/fastplan.tex", "max_issues_repo_name": "BartMassey/blocks", "max_issues_repo_head_hexsha": "6dbb39186595b6e2b80c9a5dcd616056f6cb3117", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "doc/fastplan/fastplan.tex", "max_forks_repo_name": "BartMassey/blocks", "max_forks_repo_head_hexsha": "6dbb39186595b6e2b80c9a5dcd616056f6cb3117", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-10-15T18:45:13.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-15T18:45:13.000Z", "avg_line_length": 42.8272980501, "max_line_length": 82, "alphanum_fraction": 0.7942113821, "num_tokens": 3527}
|
# Copyright 2020 Magic Leap, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Originating Author: Zak Murez (zak.murez.com)
# last modification: Fengting Yang 03/21/2022
import argparse
import os
import numpy as np
import torch
from vPlaneRecover.data import SceneDataset, parse_splits_list
from vPlaneRecover.model import vPlaneRecNet
import vPlaneRecover.transforms as transforms
from vPlaneRecover.evaluation import project_to_mesh
import third_party.Scannet_eval.scannet_eval_util_3d as util_3d
import trimesh
from vPlaneRecover.backbone3d import build_backbone3d
# os.environ['CUDA_VISIBLE_DEVICES'] = '1'
def process(info_file, model, num_frames, save_path, total_scenes_index, total_scenes_count):
""" Run the netork on a scene and save output
Args:
info_file: path to info_json file for the scene
model: pytorch model that implemets Atlas
frames: number of frames to use in reconstruction (-1 for all)
save_path: where to save outputs
total_scenes_index: used to print which scene we are on
total_scenes_count: used to print the total number of scenes to process
"""
    # do not run inference twice if the output already exists
    cur_scene = os.path.basename(os.path.dirname(info_file))
    if cur_scene[-2:] != '00':
        return
    if os.path.isfile(os.path.join(save_path, '%s.npz' % cur_scene)):
        return
voxel_scale = model.voxel_sizes[0]
dataset = SceneDataset(info_file, voxel_sizes=[voxel_scale],
voxel_types=model.voxel_types, num_frames=num_frames)
# compute voxel origin
if 'file_name_vol_%02d'%voxel_scale in dataset.info:
# compute voxel origin from ground truth
tsdf_trgt = dataset.get_tsdf()['vol_%02d'%voxel_scale]
voxel_size = float(voxel_scale)/100
# shift by integer number of voxels for padding
shift = torch.tensor([.5, .5, .5])//voxel_size
offset = tsdf_trgt.origin - shift*voxel_size
else:
# use default origin
# assume floor is a z=0 so pad bottom a bit
offset = torch.tensor([0,0,-.5])
T = torch.eye(4)
T[:3,3] = offset
# insert transformation after dataset init
transform = transforms.Compose([
transforms.ResizeImage((640,480)),
transforms.ToTensor(),
transforms.TransformSpace(T, model.voxel_dim_val, [0,0,0]),
transforms.IntrinsicsPoseToProjection(),
])
dataset.transform = transform
dataloader = torch.utils.data.DataLoader(dataset, batch_size=None,
batch_sampler=None, num_workers=2)
scene = dataset.info['scene']
model.initialize_volume()
torch.cuda.empty_cache()
for j, d in enumerate(dataloader):
# print(d.keys()) #file_name_image, image, projection
# logging progress
if j%25==0:
print(total_scenes_index,
total_scenes_count,
dataset.info['dataset'],
scene,
j,
len(dataloader)
)
model.inference1(d['projection'].unsqueeze(0).cuda(),
image=d['image'].unsqueeze(0).cuda())
torch.cuda.empty_cache()
outputs, losses = model.inference2()
# provide gt as tsdf result for debug
if 'vol_%02d_tsdf'%voxel_scale not in outputs:
T = torch.eye(4)
T[:3, 3] = offset
transform = transforms.Compose([
transforms.ResizeImage((640, 480)),
transforms.ToTensor(),
transforms.TransformSpace(T, model.voxel_dim_val, [0, 0, 0]),
transforms.IntrinsicsPoseToProjection(),
])
dataset.transform = transform
tsdf_trgt = dataset.get_tsdf()['vol_%02d' % voxel_scale]
tsdf_vol = tsdf_trgt.tsdf_vol.detach().clone()
outputs['vol_%02d_tsdf'%voxel_scale] = tsdf_vol.unsqueeze(0).unsqueeze(0)
tsdf_pred = model.postprocess(outputs, b_val=True)[0]
# TODO: set origin in model... make consistent with offset above?
tsdf_pred.origin = offset.view(1,3).cuda()
output_meshs = []
if 'semseg' in tsdf_pred.attribute_vols:
output_meshs.append('semseg')
output_meshs.append('semseg_ent')
if 'centroid_prob' in tsdf_pred.attribute_vols:
output_meshs.append('centroid_prob')
if 'plane_ins' in tsdf_pred.attribute_vols:
output_meshs.append('plane_ins')
output_meshs.append('vert_plane')
output_meshs.append('plane_cls')
meshes = tsdf_pred.get_mesh(output_meshs)
attribute_mesh = None
if isinstance(meshes, dict):
for key in meshes:
if key == 'semseg':
meshes[key].export(os.path.join(save_path, '%s.ply' % scene)) #_semseg
                # save vertex attributes separately, since trimesh doesn't
np.savez(os.path.join(save_path, '%s_attributes.npz'%scene),
**(meshes[key]).vertex_attributes)
attribute_mesh = meshes[key]
else:
meshes[key].export(os.path.join(save_path, '%s_%s.ply' %(scene, key)))
else:
meshes.export(os.path.join(save_path, '%s.ply' %(scene)))
tsdf_pred.save(os.path.join(save_path, '%s.npz'%scene))
# transfer semantic txt and instance txt for evaluation
file_mesh_trgt = dataset.info['file_name_mesh_gt']
if attribute_mesh is not None:
# save as txt for benchmark evaluation
mesh_trgt = trimesh.load(file_mesh_trgt, process=False)
mesh_transfer = project_to_mesh(attribute_mesh, mesh_trgt, 'semseg')
semseg = mesh_transfer.vertex_attributes['semseg']
sem_save_pth = os.path.join(save_path, 'semseg')
if not os.path.isdir(sem_save_pth):
os.makedirs(sem_save_pth)
np.savetxt(os.path.join(sem_save_pth, '%s.txt' % scene), semseg, fmt='%d')
mesh_transfer.export(os.path.join(sem_save_pth, '%s_transfer.ply' % scene))
    # save plane instance labels -- note that the plane_ins attribute is only stored in the mesh; we use mesh_planeIns to provide the colors
if os.path.isfile(os.path.join(save_path, '%s_plane_ins.ply' % scene)):
mesh_planeIns_gt = trimesh.load(dataset.info['file_name_plane_mesh'], process=False)
mesh_planeIns_pred = trimesh.load(os.path.join(save_path, '%s_plane_ins.ply' % scene), process=False)
mesh_planeIns_transfer = project_to_mesh(attribute_mesh, mesh_planeIns_gt, 'plane_ins', mesh_planeIns_pred)
planeIns = mesh_planeIns_transfer.vertex_attributes['plane_ins']
plnIns_save_pth = os.path.join(save_path, 'plane_ins')
if not os.path.isdir(plnIns_save_pth):
os.makedirs(plnIns_save_pth)
mesh_planeIns_transfer.export(os.path.join(plnIns_save_pth, '%s_planeIns_transfer.ply' % scene))
util_3d.export_instance_ids_for_eval(os.path.join(plnIns_save_pth, '%s.txt' % scene), (semseg), planeIns)
def main():
parser = argparse.ArgumentParser(description="IndoorMVS Inference")
parser.add_argument("--model", default='/data/Fengting/vPlaneRecover_train/vPlaneRecover/HT_sepPartNormHT_newthre06_lr0.0005_bz4_ep150_nfrm50_resnet50/epoch=134_step=00030104.ckpt', metavar="FILE",
help="path to checkpoint")
parser.add_argument("--scenes", default='meta_file/scannet_val_demo.txt',
help="which scene(s) to run on")
parser.add_argument("--num_frames", default=-1, type=int,
help="number of frames to use (-1 for all)")
parser.add_argument("--save_path", default='val', help="path to save result")
parser.add_argument("--topk", default=int(8e6), type=int, help="number of topk center prob to be used -- ignore")
parser.add_argument("--heatmap_thres", default=0.008, type=float, help="Threshold for heatmap plane detection")
parser.add_argument("--voxel_dim", nargs=3, default=[256,256,128], type=int, help="override voxel dim")
args = parser.parse_args()
# get all the info_file.json's from the command line
# .txt files contain a list of info_file.json's
info_files = parse_splits_list(args.scenes)
model = vPlaneRecNet.load_from_checkpoint(args.model) # all hyper-param setting is in torch.load(args.model)['hyper_parameters']
model = model.cuda().eval()
torch.set_grad_enabled(False)
# overwrite default values of voxel_dim_test
if args.voxel_dim[0] != -1:
model.voxel_dim_test = args.voxel_dim
model.cfg.VOXEL_DIM_VAL = args.voxel_dim
model.backbone3d.voxel_dim_val = args.voxel_dim
# TODO: implement voxel_dim_test
model.voxel_dim_val = model.voxel_dim_test
model.cfg.MODEL.GROUPING.TOPK_PROB = args.topk # useless
model.cfg.MODEL.GROUPING.PROB_THRES = args.heatmap_thres
model_name = os.path.splitext(os.path.split(args.model)[1])[0]
if 'test' in args.scenes : # not used in our work
model.voxel_types = ['tsdf', 'semseg']
save_path = os.path.join(model.cfg.LOG_DIR, model.cfg.TRAINER.NAME,
model.cfg.TRAINER.VERSION, 'test_{}_'.format(args.heatmap_thres) + model_name)
else:
save_path = os.path.join(model.cfg.LOG_DIR, model.cfg.TRAINER.NAME,
model.cfg.TRAINER.VERSION, 'val_{}_'.format(args.heatmap_thres) + model_name) #args.save_path
if args.num_frames>-1:
save_path = '%s_%d'%(save_path, args.num_frames)
os.makedirs(save_path, exist_ok=True)
for i, info_file in enumerate(info_files):
# run model on each scene
with torch.no_grad():
process(info_file, model, args.num_frames, save_path, i, len(info_files))
if __name__ == "__main__":
main()
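# Example invocation (the paths below are illustrative, not part of the
# original file; the flags match the argparse definitions above):
#   python inference.py --model checkpoints/epoch=134.ckpt \
#       --scenes meta_file/scannet_val_demo.txt --voxel_dim 256 256 128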
|
{"hexsha": "48cf0552e77f2ed1d55c24d9b9e1f689eeed1bfc", "size": 10265, "ext": "py", "lang": "Python", "max_stars_repo_path": "inference.py", "max_stars_repo_name": "fuy34/indoorMVS", "max_stars_repo_head_hexsha": "440ba357de50d47e5868e6009bc608f8bcb9e9f6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-15T05:02:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-15T05:02:33.000Z", "max_issues_repo_path": "inference.py", "max_issues_repo_name": "fuy34/indoorMVS", "max_issues_repo_head_hexsha": "440ba357de50d47e5868e6009bc608f8bcb9e9f6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "inference.py", "max_forks_repo_name": "fuy34/indoorMVS", "max_forks_repo_head_hexsha": "440ba357de50d47e5868e6009bc608f8bcb9e9f6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.3122362869, "max_line_length": 201, "alphanum_fraction": 0.6685825621, "include": true, "reason": "import numpy", "num_tokens": 2487}
|
# -*- coding: utf-8 -*-
"""
We'd like to know a bit more about the dose we inflict on the patient.
This script is used to calculate said dose based on the x-ray spectra that we
will be able to set (see Source-Specifications).
"""
from __future__ import division # fix integer division
from optparse import OptionParser
import sys
import os
import numpy as np
from scipy import constants
# Clear command line
os.system('clear')
# Use Pythons Optionparser to define and read the options, and also
# give some help to the user
parser = OptionParser()
usage = "usage: %prog [options] arg"
parser.add_option('-v', '--kv', dest='kV',
type='float',
metavar='53',
default=90,
                  help='Tube peak voltage [kV] you would like to calculate the '
'dose for. The script only accepts voltages that are '
'in the specs (and tells you if you set others). '
'Defaults to %default kV, which is the WHO setting for '
'lumbar spine.')
parser.add_option('-m', '--mas', dest='mAs',
type='float',
metavar='1.6',
default=125,
help='mAs settings. Defaults to %default mAs, which is the '
'WHO setting for lumbar spine.')
parser.add_option('-e', '--exposuretime', dest='Exposuretime',
type='float',
metavar='100',
default=1000,
help='Exposure time [ms]. Defaults to 1 second, because we '
'assume that "-m" (mAs) is used as input. If the user '
'insists, an exposure time can be set.')
parser.add_option('-d', '--distance', dest='Distance',
type='float',
metavar='100',
default=140,
help='Source-Detector distance [cm]. Defaults to %default'
'cm')
parser.add_option('-l', '--length', dest='Length',
type='float',
metavar='15',
default=43.,
help='Length of the (square) FOV [cm]. Defaults to %default '
'cm.')
parser.add_option('-t', '--thickness', dest='Thickness',
type='float',
metavar='13',
default=15.,
help='Patient or sample thickness [cm]. Used to calculate '
'attenuation. Defaults to %default cm.')
parser.add_option('-c', '--chatty', dest='chatty',
default=False, action='store_true',
help='Be chatty. Default: Tell us only the relevant stuff.')
(options, args) = parser.parse_args()
# show the help if no parameters are given
if options.kV is None:
parser.print_help()
print 'Example:'
print 'The command below calculates the dose for a peak tube voltage of', \
'60 kV.'
print
print sys.argv[0], '-v 60'
exit(1)
# Inform the user that we only have certain values to work with
Voltage = [46, 53, 60, 70, 80, 90, 100, 120]
if options.kV not in Voltage:
print 'You can only enter one of these voltages:', \
str(Voltage).strip('[]'), 'kV'
print
print 'Try again with the nearest allowed value:'
# http://stackoverflow.com/a/9706105/323100
print sys.argv[0], '-v', Voltage[min(range(len(Voltage)),
key=lambda i: abs(Voltage[i] -
options.kV))]
exit(1)
ChosenVoltage = Voltage.index(options.kV)
# Load spectra
SpectraPath = 'Spectra'
# Construct file names, then load the data with the filenames (we could do this
# in one step, but like this it's easier to debug. 'SpectrumData' is the data
# without comments, thus we read the mean energy on line 7 in a second step
SpectrumLocation = [os.path.join(SpectraPath, 'Xray-Spectrum_' +
str("%03d" % kV) + 'kV.txt')
for kV in Voltage]
SpectrumData = [(np.loadtxt(FileName)) for FileName in SpectrumLocation]
MeanEnergy = [float(open(FileName).readlines()[5].split()[3]) for FileName in
[os.path.join(SpectraPath, 'Xray-Spectrum_' + str("%03d" % kV) +
'kV.txt') for kV in Voltage]]
if options.chatty:
for v, e in zip(Voltage, MeanEnergy):
print 'Peak tube voltage', v, 'kV = mean energy', int(round(e)), 'keV'
print 'For a peak tube voltage of', options.kV, 'kV and a current of', \
int(round(options.mAs * (options.Exposuretime / 1000.))), 'mAs (exp.', \
'time', options.Exposuretime, 'ms) we get a mean energy of', \
round(MeanEnergy[ChosenVoltage], 3), 'keV.'
print
# Calculate the number of photons emitted from the tube.
PhotonEnergy = (MeanEnergy[ChosenVoltage] * 1000) * constants.e # Joules
# DEBUG
# to get the same value as Zhentians calculation, also change eta to 1 instead
# of 1.1, that's an error in his document :)
# options.kV = 40
# PhotonEnergy = (options.kV * 1000) * constants.e # Joules
# python DoseCalculation.py -v 46 -m 25 -e 1000 -d 120 -l 5
# DEBUG
print 'At this mean energy, a single photon has an energy of', \
'%.3e' % PhotonEnergy, 'J.'
print
# Surface entrance dose
# The K-value is based on the machine. The BAG-calculator (see below) list 0.1
K = 0.1 # mGy m^2 mAs^-1
# BSF found by Arouna2000, cited by BAG2012. Gives the same SED as the
# XLS-calculator from BAG (http://is.gd/oTpniQ)
BSF = 1.35
# calculating while converting Focusdistance from m to cm
SED = K * (options.kV / 100) ** 2 * options.mAs * (100 / options.Distance) ** 2 * BSF
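# Worked example with the defaults (90 kV, 125 mAs, FOD 140 cm, BSF 1.35):
# SED = 0.1 * (90/100)**2 * 125 * (100/140)**2 * 1.35 ~= 6.97 mGy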
print 'The surface entrance dose for an x-ray pulse with'
print '\t* U =', options.kV, 'kV'
print '\t* Q =', options.mAs, 'mAs'
print '\t* FOD =', options.Distance / 100, 'm'
print '\t* A =', int(options.Length ** 2), 'cm^2'
print '\t* K =', K, 'mGy*m^2/mAs'
print '\t* BSF =', BSF
print 'is SED = K*(U/100)^2*Q*(1/FOD)^2*BSF =', round(SED, 3), 'mGy (mJ/kg).'
print
# Correspond SED to photon count
N0 = SED / PhotonEnergy
print 'A SED of', '%.3e' % (SED / 1000), 'Gy (mJ/kg) corresponds to', \
'%.3e' % N0, 'absorbed photons per kg (with a photon', \
'energy of', '%.3e' % PhotonEnergy, 'J per photon).'
# Calculate the number of photons from the tube to the sample
# N0 = (V*I/E) * eta * (A / (4*pi*r^2))
# Calculate efficiency for a Tungsten anode according to Krestel1990, chapter
# 3.1.5
eta = 1.1e-9 * 74 * options.kV * 1000
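# e.g. at the default 90 kV: eta = 1.1e-9 * 74 * 90e3 ~= 7.3e-3, i.e. roughly
# 0.7 % of the electrical energy is converted to x-rays (Krestel1990 rule)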
N0 = (options.kV * 1000 * ((options.mAs / 1000) / (options.Exposuretime / 1000)) / (PhotonEnergy)) * eta * ((options.Length / 100) ** 2 / (4 * np.pi * (options.Distance / 100) ** 2))
print 'The source emits %.3e' % N0, 'photons with a mean energy of', \
'%.3e' % PhotonEnergy, 'each'
print 'We assume these photons are all the photons that reached the ' \
'patient, and thus can calculate the photon flux from this.'
Flux = N0 / (options.Exposuretime / 1000)
print 'With an exposure time of', options.Exposuretime, \
'ms the aforementioned number of photons corresponds to a photon flux ' \
'of', '%.3e' % Flux, 'photons per second (from the source to the ' \
'patient surface).'
exit()
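# NOTE: everything below this exit() is unreachable scratch work that is kept
# for reference only; it still contains unit inconsistencies (see below) and
# reuses names such as Voltage (a list) in scalar expressions.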
# Attenuation in Patient
AttenuationCoefficient = 0.5 # For calculation we just simply assume 50%.
# We NEED to read the data from the NIST tables, but they're in shutdown now...
print 'Attenuation coefficient set to', AttenuationCoefficient, \
'cm^-1 (@' + str(Voltage[ChosenVoltage]), 'kV)'
# Number of absorbed photons
# N = N0(e^-uT)
N = N0 * (np.exp((-AttenuationCoefficient * (options.Thickness / 100))))
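# Caveat (in the unreachable region, see the note at exit() above): mu is
# printed as cm^-1, but Thickness/100 converts cm to m, so the exponent used
# is -0.5 * 0.15 = -0.075 (intensity ~ 92.8 %) rather than -0.5 * 15. The
# units should be reconciled once real NIST attenuation values are used.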
print 'Assuming an attenuation coefficient of', AttenuationCoefficient, \
'and a penetration depth of', options.Thickness, \
'cm we have (according to the Beer-Lambert law (N = N0 * e^-uT)'
print ' *', '%.3e' % N, 'photons after the xrays have passed the patient'
print ' * thus', '%.3e' % (N0 - N), 'photons were absorbed'
print ' * the intensity dropped to', round((N / N0) * 100, 2), '%'
print
print
print 'Use nist-attenuation-scraper.py to get the correct attenuation!'
# Attenuation Coefficients
# @40kV, half bone, half muscle
AttenuationCoefficient = []
AttenuationCoefficient.append(np.mean((2.685e-1, 6.655e-1)))
# @70kV (0.5*60+0.5*80), both half bone, half muscle
AttenuationCoefficient.append(np.mean((np.mean((2.048e-01, 3.148e-01)),
np.mean((1.823e-01, 2.229e-01)))))
# Skeletal muscle (http://is.gd/D88OFv)
# Energy mu/rho mu_en/rho
# (MeV) (cm2/g) (cm2/g)
# 1.00000E-02 5.356E+00 4.964E+00
# 1.50000E-02 1.693E+00 1.396E+00
# 2.00000E-02 8.205E-01 5.638E-01
# 3.00000E-02 3.783E-01 1.610E-01
# 4.00000E-02 *2.685E-01* 7.192E-02
# 5.00000E-02 2.262E-01 4.349E-02
# 6.00000E-02 *2.048E-01* 3.258E-02
# 8.00000E-02 *1.823E-01* 2.615E-02
# 1.00000E-01 1.693E-01 2.544E-02
# 1.50000E-01 1.492E-01 2.745E-02
# 2.00000E-01 1.358E-01 2.942E-02
# Cortical bone (http://is.gd/2176eQ)
# Energy mu/rho mu_en/rho
# (MeV) (cm2/g) (cm2/g)
# 1.00000E-02 2.851E+01 2.680E+01
# 1.50000E-02 9.032E+00 8.388E+00
# 2.00000E-02 4.001E+00 3.601E+00
# 3.00000E-02 1.331E+00 1.070E+00
# 4.00000E-02 *6.655E-01* 4.507E-01
# 5.00000E-02 4.242E-01 2.336E-01
# 6.00000E-02 *3.148E-01* 1.400E-01
# 8.00000E-02 *2.229E-01* 6.896E-02
# 1.00000E-01 1.855E-01 4.585E-02
# 1.50000E-01 1.480E-01 3.183E-02
# 2.00000E-01 1.309E-01 3.003E-02
r = 140 # cm, Distance from source to sample
eta = 1e-9 # *ZV
Z = 74 # Tungsten
eV = 1.602e-19 # J
QFactor = 1 # http://en.wikipedia.org/wiki/Dosimetry#Equivalent_Dose
WeightingFactor = 0.12 # http://en.wikipedia.org/wiki/Dosimetry#Effective_dose
ExposureTime = 1000e-3 # s
Current = (options.mAs / 1000) / (options.Exposuretime / 1000)
Area = (options.Length / 100) ** 2
Weight = 10 # kg
# Calculate the number of photons from the tube to the sample
# ~ N0 = (VI/E)*eta*(A/4*Pi*r^2)
N0 = (Voltage * Current) / (Voltage * eV) * eta * Z * Voltage * Area / (4 * np.pi * r ** 2)
print ' - the tube emits %.4e' % N0, 'photons per second'
# Absorbed radiation dose per second
# Da = Energy / Weight  # J/kg per second
Da = N * MeanEnergy * 1000 * eV / Weight
print ' -', round(Da * 1000, 4), 'mGy/s are absorbed by the sample,', \
' if we assume it is', Weight, 'kg'
# Effective dose per second
# De = Da * Wr, WR = Q * N
De = Da * QFactor * WeightingFactor
print ' -', round(De * 1000, 4), 'mSv/s is the effective dose'
# Total effective dose on the sample
D = De * ExposureTime
print ' -', round(D * 1000, 4), 'mSv is the effective dose on the', \
'sample for an exposure time of =', ExposureTime, 's)'
|
{"hexsha": "03fd3059180732229a1fb98f061254002823eb50", "size": 10870, "ext": "py", "lang": "Python", "max_stars_repo_path": "DoseCalculation.py", "max_stars_repo_name": "habi/GlobalDiagnostiX", "max_stars_repo_head_hexsha": "5171ccee3c8a8ccc7f0b82d52d7fdac327e8d7c7", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2018-10-20T07:29:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T07:10:47.000Z", "max_issues_repo_path": "DoseCalculation.py", "max_issues_repo_name": "guizi327832749/GlobalDiagnostiX", "max_issues_repo_head_hexsha": "5171ccee3c8a8ccc7f0b82d52d7fdac327e8d7c7", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "DoseCalculation.py", "max_forks_repo_name": "guizi327832749/GlobalDiagnostiX", "max_forks_repo_head_hexsha": "5171ccee3c8a8ccc7f0b82d52d7fdac327e8d7c7", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2018-10-27T05:16:26.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T14:48:48.000Z", "avg_line_length": 40.7116104869, "max_line_length": 182, "alphanum_fraction": 0.6045078197, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3503}
|
import mlflow
import os.path
import plotly.express as px
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from src.visualization.plot import track_plot, plot
from src.substitute_dynamic_symbols import run, lambdify
from IPython.display import display
from sklearn.metrics import r2_score
from src.models.diff_eq_to_matrix import DiffEqToMatrix
import sympy as sp
from src.symbols import *
import matplotlib.ticker as plticker
class Result:
def __init__(
self,
simulator,
solution,
df_model_test,
df_control,
ship_parameters,
parameters,
y0,
include_accelerations=True,
name="simulation",
):
self.simulator = simulator
self.solution = solution
self.df_model_test = df_model_test
self.df_control = df_control
self.ship_parameters = ship_parameters
self.parameters = parameters
self.y0 = y0
self.include_accelerations = include_accelerations
self.name = name
@property
def simulation_result(self):
columns = list(self.y0.keys())
df_result = pd.DataFrame(
data=self.solution.y.T, columns=columns, index=self.solution.t
)
for key in self.df_control:
df_result[key] = self.df_control[key].values
        try:
            df_result["beta"] = -np.arctan2(df_result["v"], df_result["u"])
        except Exception:
            # u and v are not present for every simulation
            pass
        try:
            df_result["U"] = np.sqrt(df_result["u"] ** 2 + df_result["v"] ** 2)
        except Exception:
            pass
return df_result
@property
def result(self):
df_result = self.simulation_result
if self.include_accelerations:
df_result = pd.concat([df_result, self.accelerations], axis=1)
return df_result
@property
def X_qs(self) -> pd.Series:
"""Hydrodynamic force from ship in X-direction during simulation"""
        return self._calculate_qs_force(
            function=self.simulator.X_qs_lambda, unit="force"
        )
@property
def Y_qs(self) -> pd.Series:
"""Hydrodynamic force from ship in Y-direction during simulation"""
        return self._calculate_qs_force(
            function=self.simulator.Y_qs_lambda, unit="force"
        )
@property
def N_qs(self) -> pd.Series:
"""Hydrodynamic force from ship in N-direction during simulation"""
        return self._calculate_qs_force(
            function=self.simulator.N_qs_lambda, unit="moment"
        )
    def _calculate_qs_force(self, function, unit):
df_result = self.simulation_result.copy()
if self.simulator.primed_parameters:
df_result_prime = self.simulator.prime_system.prime(
df_result, U=df_result["U"]
)
X_qs_ = run(function=function, **df_result_prime, **self.parameters)
return self.simulator.prime_system._unprime(
X_qs_, unit=unit, U=df_result["U"]
)
else:
return run(function=function, **df_result, **self.parameters)
@property
def accelerations(self):
df_result = self.simulation_result.copy()
if self.simulator.primed_parameters:
df_result_prime = self.simulator.prime_system.prime(
df_result, U=df_result["U"]
)
inputs = df_result_prime
inputs["U0"] = inputs.iloc[0]["U"]
u1d_prime, v1d_prime, r1d_prime = run(
function=self.simulator.acceleration_lambda,
X_qs=run(
function=self.simulator.X_qs_lambda,
**inputs,
**self.parameters,
),
Y_qs=run(
function=self.simulator.Y_qs_lambda,
**inputs,
**self.parameters,
),
N_qs=run(
function=self.simulator.N_qs_lambda,
**inputs,
**self.parameters,
),
**inputs,
**self.parameters,
**self.simulator.ship_parameters_prime,
)
df_accelerations_prime = pd.DataFrame(index=df_result.index)
df_accelerations_prime["u1d"] = u1d_prime[0]
df_accelerations_prime["v1d"] = v1d_prime[0]
df_accelerations_prime["r1d"] = r1d_prime[0]
df_accelerations = self.simulator.prime_system.unprime(
df_accelerations_prime, U=df_result["U"]
)
else:
inputs = df_result
inputs["U0"] = inputs.iloc[0]["U"]
            # mirror the primed branch above: expand the inputs as keyword
            # arguments rather than passing the frame as a single kwarg
            u1d, v1d, r1d = run(
                function=self.simulator.acceleration_lambda,
                X_qs=run(
                    function=self.simulator.X_qs_lambda,
                    **inputs,
                    **self.parameters,
                ),
                Y_qs=run(
                    function=self.simulator.Y_qs_lambda,
                    **inputs,
                    **self.parameters,
                ),
                N_qs=run(
                    function=self.simulator.N_qs_lambda,
                    **inputs,
                    **self.parameters,
                ),
                **inputs,
                **self.parameters,
                **self.ship_parameters,
            )
df_accelerations = pd.DataFrame(index=df_result.index)
df_accelerations["u1d"] = u1d[0]
df_accelerations["v1d"] = v1d[0]
df_accelerations["r1d"] = r1d[0]
return df_accelerations
def plot_compare(self, compare=True):
self.track_plot(compare=compare)
self.plot(compare=compare)
def track_plot(self, ax=None, compare=True):
if ax is None:
fig, ax = plt.subplots()
track_plot(
df=self.simulation_result,
lpp=self.ship_parameters["L"],
beam=self.ship_parameters["B"],
ax=ax,
label=self.name,
color="green",
)
if compare:
track_plot(
df=self.df_model_test,
lpp=self.ship_parameters["L"],
beam=self.ship_parameters["B"],
ax=ax,
label="data",
)
ax.legend()
return ax
def plot(self, subplot=True, compare=True):
if compare:
dataframes = {
self.name: self.simulation_result,
"data": self.df_model_test,
}
else:
dataframes = {
self.name: self.simulation_result,
}
return plot(dataframes=dataframes)
def plot_zigzag(self, ax=None, compare=True):
if ax is None:
fig, ax = plt.subplots()
df_result = self.simulation_result.copy()
df_result["psi_deg"] = np.rad2deg(df_result["psi"])
df_result["-delta_deg"] = -np.rad2deg(df_result["delta"])
df_result.plot(y=["psi_deg", "-delta_deg"], ax=ax)
if compare:
df_result2 = self.df_model_test.copy()
df_result2["psi_deg"] = np.rad2deg(df_result2["psi"])
df_result2["-delta_deg"] = -np.rad2deg(df_result2["delta"])
df_result2.plot(y=["psi_deg"], style="--", ax=ax)
loc = plticker.MultipleLocator(
base=1.0
) # this locator puts ticks at regular intervals
ax.yaxis.set_major_locator(loc)
ax.grid()
def save(self, path: str):
"""Save the simulation to a csv file"""
self.result.to_csv(path, index=True)
def to_mlflow(self, artifact_dir="artifacts"):
"""log this run to mlflow
Ex:
        This method is intended to be called within an mlflow.start_run with-statement:
mlflow.set_experiment(run_params['experiment'])
with mlflow.start_run(run_name='test') as run:
log_params = run_params.copy()
log_params.pop('experiment')
mlflow.log_params(run_params)
-->result.to_mlflow()
Parameters
----------
artifact_dir : str, optional
            directory where artifacts are written before being logged, by default 'artifacts'
"""
if not os.path.exists(artifact_dir):
os.mkdir(artifact_dir)
save_path = os.path.join(artifact_dir, "result.csv")
self.save(path=save_path)
mlflow.log_artifact(save_path)
fig, ax = plt.subplots()
fig.set_size_inches(15, 10)
self.track_plot(compare=True, ax=ax)
mlflow.log_figure(fig, "track_plot.png")
fig = self.plot(compare=True)
fig.set_size_inches(15, 10)
plt.tight_layout()
mlflow.log_figure(fig, "signals.png")
## R2 score
interesting = list(self.simulation_result.keys())
for key in self.df_control.keys():
interesting.remove(key)
r2s = {
f"r2_{key}": r2_score(
y_true=self.df_model_test[key], y_pred=self.result[key]
)
for key in interesting
}
r2s = pd.Series(r2s)
r2s["r2"] = r2s.mean() # Mean r2
mlflow.log_metrics(r2s)
self.plot_parameter_contributions()
def simulate_parameter_contributions(self):
model = self.simulator
df_result_prime = model.prime_system.prime(self.result, U=self.result["U"])
X_ = sp.symbols("X_")
diff_eq_X = DiffEqToMatrix(
ode=model.X_qs_eq.subs(X_D, X_), label=X_, base_features=[delta, u, v, r]
)
X = diff_eq_X.calculate_features(data=df_result_prime)
X_parameters = self.simulator.parameters[
model.get_coefficients_X(sympy_symbols=False)
]
X_forces = X * X_parameters
X_forces.index = df_result_prime.index
Y_ = sp.symbols("Y_")
diff_eq_Y = DiffEqToMatrix(
ode=model.Y_qs_eq.subs(Y_D, Y_), label=Y_, base_features=[delta, u, v, r]
)
X = diff_eq_Y.calculate_features(data=df_result_prime)
Y_parameters = model.parameters[model.get_coefficients_Y(sympy_symbols=False)]
Y_forces = X * Y_parameters
Y_forces.index = df_result_prime.index
N_ = sp.symbols("N_")
diff_eq_N = DiffEqToMatrix(
ode=model.N_qs_eq.subs(N_D, N_), label=N_, base_features=[delta, u, v, r]
)
X = diff_eq_N.calculate_features(data=df_result_prime)
N_parameters = model.parameters[model.get_coefficients_N(sympy_symbols=False)]
N_forces = X * N_parameters
N_forces.index = df_result_prime.index
return X_forces, Y_forces, N_forces
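    # Each *_forces frame returned above holds one column per hydrodynamic
    # coefficient: the feature matrix from DiffEqToMatrix multiplied
    # element-wise by the identified (primed) parameters, so rows are time
    # steps and columns are per-coefficient force contributions.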
def plot_parameter_contributions(self, to_mlflow=True):
X_forces, Y_forces, N_forces = self.simulate_parameter_contributions()
fig_X = px.line(X_forces, y=X_forces.columns, width=800, height=350, title="X")
display(fig_X)
fig_Y = px.line(Y_forces, y=Y_forces.columns, width=800, height=350, title="Y")
display(fig_Y)
fig_N = px.line(N_forces, y=N_forces.columns, width=800, height=350, title="N")
display(fig_N)
if to_mlflow:
mlflow.log_figure(fig_X, "parameter_contributions_X.html")
mlflow.log_figure(fig_Y, "parameter_contributions_Y.html")
mlflow.log_figure(fig_N, "parameter_contributions_N.html")
def score(self) -> pd.Series:
"""R2 score
Returns
-------
pd.Series
r2 score for each signal
"""
r2s = pd.Series(dtype=float)
for key in self.result.columns:
if key in self.df_control:
continue
if key in self.df_model_test:
r2s[key] = r2_score(
y_true=self.df_model_test[key], y_pred=self.result[key]
)
return r2s
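# Example usage (a sketch; the surrounding project constructs these objects
# from a simulation run, so all names below are illustrative):
#
#     result = Result(simulator, solution, df_model_test, df_control,
#                     ship_parameters, parameters, y0)
#     result.track_plot()      # compare simulated and measured tracks
#     print(result.score())    # R2 per signal vs. the model test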
|
{"hexsha": "0ec4a0e67cf9d16b198fd7a0bd705bab1db4eec1", "size": 12006, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/models/result.py", "max_stars_repo_name": "martinlarsalbert/wPCC", "max_stars_repo_head_hexsha": "16e0d4cc850d503247916c9f5bd9f0ddb07f8930", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/models/result.py", "max_issues_repo_name": "martinlarsalbert/wPCC", "max_issues_repo_head_hexsha": "16e0d4cc850d503247916c9f5bd9f0ddb07f8930", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/models/result.py", "max_forks_repo_name": "martinlarsalbert/wPCC", "max_forks_repo_head_hexsha": "16e0d4cc850d503247916c9f5bd9f0ddb07f8930", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.9432989691, "max_line_length": 87, "alphanum_fraction": 0.5702148926, "include": true, "reason": "import numpy,import sympy", "num_tokens": 2687}
|