| text (stringlengths 0–1.25M) | meta (stringlengths 47–1.89k) |
|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Script for evaluating a trained SSD7 model
"""
from tensorflow import keras
K = keras.backend
Input = keras.layers.Input
Model = keras.models.Model
Progbar = keras.utils.Progbar
Adam = keras.optimizers.Adam
CSVLogger = keras.callbacks.CSVLogger
ModelCheckpoint = keras.callbacks.ModelCheckpoint
EarlyStopping = keras.callbacks.EarlyStopping
ReduceLROnPlateau = keras.callbacks.ReduceLROnPlateau
TerminateOnNaN = keras.callbacks.TerminateOnNaN
load_model = keras.models.load_model
import argparse
from misc_utils import config_ssd7 as Config
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import os
import pprint
import numpy as np
from models.keras_ssd7 import build_model
from keras_loss_function.keras_ssd_loss import SSDLoss
from eval_utils.average_precision_evaluator import Evaluator
from data_generator.object_detection_2d_data_generator import DataGenerator
# Datasets
DATASETS = {'polyps_rcnn'}
# training settings
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Evaluate a trained SSD7 model')
# general
parser.add_argument('-d', '--dataset', type=str, default='polyps_hospital',
help="dataset, {'" + \
"', '".join(sorted(DATASETS)) + \
"'}")
parser.add_argument('-b', '--batch_size', type=int, default=1,
help='input batch size for training')
parser.add_argument('-m', '--model_name', type=str, default='ssd7_model',
help="model name to save")
parser.add_argument('-tf', '--tf_logs', type=str, default='tf_logs',
help="folder for tensorflow logging")
parser.add_argument('-lr', '--learning_rate', type=float, default=1e-4,
help='initial learning rate')
parser.add_argument('-gpu', '--gpu', type=int, default=0,
help="ID of the GPU to train on (or -1 to train on CPU)")
# parse and validate parameters
args = parser.parse_args()
for k, v in args._get_kwargs():
if isinstance(v, str):
setattr(args, k, v.strip().lower())
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu) if args.gpu > -1 else '-1'
pprint.pprint(vars(args))
def main():
model_mode = 'inference'
K.clear_session() # Clear previous models from memory.
model = build_model(image_size=(Config.img_height, Config.img_width, Config.img_channels),
n_classes=Config.n_classes, mode=model_mode, l2_regularization=Config.l2_regularization,
scales=Config.scales,
aspect_ratios_per_layer=Config.steps,
two_boxes_for_ar1=True, steps=Config.steps, offsets=Config.offsets, clip_boxes=False,
variances=Config.variances, normalize_coords=Config.normalize_coords,
subtract_mean=Config.intensity_mean,
swap_channels=[2, 1, 0], confidence_thresh=0.01, iou_threshold=0.45, top_k=200,
nms_max_output_size=400)
# 2: Load the trained weights into the model.
weights_path = os.getcwd() + '/weights/' + args.model_name + ".h5"
model.load_weights(weights_path, by_name=True)
adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
model.compile(optimizer=adam, loss=ssd_loss.compute_loss)
test_dataset = DataGenerator(load_images_into_memory=True,
hdf5_dataset_path=os.getcwd() + "/data/" + args.dataset + '/polyp_test.h5')
test_dataset_size = test_dataset.get_dataset_size()
print("Number of images in the test dataset:\t{:>6}".format(test_dataset_size))
classes = ['background', 'polyp']
evaluator = Evaluator(model=model, n_classes=Config.n_classes, data_generator=test_dataset, model_mode=model_mode)
results = evaluator(img_height=Config.img_height, img_width=Config.img_width, batch_size=args.batch_size,
data_generator_mode='resize',
round_confidences=False, matching_iou_threshold=0.5, border_pixels='include',
sorting_algorithm='quicksort',
average_precision_mode='sample', num_recall_points=11, ignore_neutral_boxes=True,
return_precisions=True, return_recalls=True, return_average_precisions=True, verbose=True)
mean_average_precision, average_precisions, precisions, recalls = results
for i in range(1, len(average_precisions)):
print("{:<14}{:<6}{}".format(classes[i], 'AP', round(average_precisions[i], 3)))
print("{:<14}{:<6}{}".format('', 'mAP', round(mean_average_precision, 3)))
m = max((Config.n_classes + 1) // 2, 2)
n = 2
fig, cells = plt.subplots(m, n, figsize=(n * 8, m * 8))
val = 0
for i in range(m):
for j in range(n):
if n * i + j + 1 > Config.n_classes: break
cells[i, j].plot(recalls[n * i + j + 1], precisions[n * i + j + 1], color='blue', linewidth=1.0)
cells[i, j].set_xlabel('recall', fontsize=14)
cells[i, j].set_ylabel('precision', fontsize=14)
cells[i, j].grid(True)
cells[i, j].set_xticks(np.linspace(0, 1, 11))
cells[i, j].set_yticks(np.linspace(0, 1, 11))
cells[i, j].set_title("{}, AP: {:.3f}".format(classes[n * i + j + 1], average_precisions[n * i + j + 1]),
fontsize=16)
image = plt.gcf()
# plt.show()
plt.draw()
image.savefig(os.getcwd() + "/test_out/test_" + str(val) + ".png", dpi=100)
val += 1
if __name__ == '__main__':
main()
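# Hedged usage sketch (not part of the original script): assuming weights/ssd7_model.h5 and
# data/polyps_rcnn/polyp_test.h5 exist, an evaluation run could look like
#   python test_ssd7.py --dataset polyps_rcnn --batch_size 8 --model_name ssd7_model --gpu 0
# which prints per-class AP and mAP and writes precision-recall plots to test_out/.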
|
{"hexsha": "1d957d9277fe8b42e0dc8899243950b6c3b13e4e", "size": 5787, "ext": "py", "lang": "Python", "max_stars_repo_path": "test_ssd7.py", "max_stars_repo_name": "stanley-king/ssd_kerasex", "max_stars_repo_head_hexsha": "54100732342076815113b48c1720898a70f6806e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test_ssd7.py", "max_issues_repo_name": "stanley-king/ssd_kerasex", "max_issues_repo_head_hexsha": "54100732342076815113b48c1720898a70f6806e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test_ssd7.py", "max_forks_repo_name": "stanley-king/ssd_kerasex", "max_forks_repo_head_hexsha": "54100732342076815113b48c1720898a70f6806e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.3357142857, "max_line_length": 118, "alphanum_fraction": 0.6402280975, "include": true, "reason": "import numpy", "num_tokens": 1389}
|
import json
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn import model_selection
df = pd.read_csv(r'/home/rahul/Workspace/Maa ji for Bachelor/functionalities/Pulse Classification/req_files/dataset.csv')
''' K - Fold validation '''
K = 5 # 5 Folds
df = df.sample(frac=1).reset_index(drop=True)
kf = model_selection.StratifiedKFold(n_splits=K)
df["k-fold"] = -1
for fold, (tra_, val_) in enumerate(kf.split(X = df, y = df.Label.values)):
df.loc[val_,"k-fold"] = fold
print(f'Fold No : {fold}, Training label count : {len(tra_)}, Validation label count : {len(val_)}')
''' Label Encoding '''
label_encoder = preprocessing.LabelEncoder()
label_encoder.fit(df['Label'])
KEY = dict(zip(label_encoder.classes_, label_encoder.transform(label_encoder.classes_)))
df['Label_enc']= label_encoder.transform(df['Label'])
df['Label_enc'].unique()
# Handle numpy int64 values when serializing to JSON
class npEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.int64):
return int(obj)
return json.JSONEncoder.default(self, obj)
with open(r'/home/rahul/Workspace/Maa ji for Bachelor/functionalities/Pulse Classification/req_files/label_key.json', 'w') as f:
json.dump(KEY, f, cls=npEncoder) # Dumping Label_Key to be referred to later
print('Label Key dumped')
df.to_csv(r'/home/rahul/Workspace/Maa ji for Bachelor/functionalities/Pulse Classification/req_files/dataset_folds.csv', index=False)
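# Hedged usage sketch (not part of the original script): read the fold assignments back and
# split into train/validation frames for one fold; the downstream training code is assumed.
folds_df = pd.read_csv(r'/home/rahul/Workspace/Maa ji for Bachelor/functionalities/Pulse Classification/req_files/dataset_folds.csv')
fold_to_validate = 0
train_df = folds_df[folds_df['k-fold'] != fold_to_validate].reset_index(drop=True)
valid_df = folds_df[folds_df['k-fold'] == fold_to_validate].reset_index(drop=True)
print(f'Fold {fold_to_validate}: {len(train_df)} training rows, {len(valid_df)} validation rows')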
|
{"hexsha": "cafb7cddbce7efe8db164e2f76c70b6415ab97ec", "size": 1476, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/get_folds.py", "max_stars_repo_name": "RsTaK/pulses-classification", "max_stars_repo_head_hexsha": "e8d2b2f753dbecf93424e5b8ac273b896a5234b5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/get_folds.py", "max_issues_repo_name": "RsTaK/pulses-classification", "max_issues_repo_head_hexsha": "e8d2b2f753dbecf93424e5b8ac273b896a5234b5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/get_folds.py", "max_forks_repo_name": "RsTaK/pulses-classification", "max_forks_repo_head_hexsha": "e8d2b2f753dbecf93424e5b8ac273b896a5234b5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.4042553191, "max_line_length": 133, "alphanum_fraction": 0.7256097561, "include": true, "reason": "import numpy", "num_tokens": 371}
|
import numpy
from math import *
#radius of Earth in meters
R = 6371000
#returns the spherical (angular) distance between two coordinates given in lat, lon (radians)
#result should be multiplied by R for the actual distance in meters
def distance(lat1, lon1, lat2, lon2):
dlat = lat2-lat1
dlon = lon2-lon1
#using haversine formula
a = sin(dlat/2.0) * sin(dlat/2.0) + \
cos(lat1) * cos(lat2) * \
sin(dlon/2.0) * sin(dlon/2)
d = 2 * atan2(sqrt(a), sqrt(1.0-a))
return d
#returns the area of the spherical triangle enclosed by three arcs of angular lengths a, b, and c
#result should be multiplied by R**2 for the actual surface area in square meters
def triangle_area(a, b, c):
#compute the semiperimeter
s = (a + b + c)/2
#compute spherical excess using L'Huilier's Theorem
tans = tan(s/2)
tana = tan((s-a)/2)
tanb = tan((s-b)/2)
tanc = tan((s-c)/2)
tanE4 = sqrt(tans * tana * tanb * tanc)
E = 4 * atan(tanE4)
return E
def triangle_area_points(lon1, lat1, lon2, lat2, lon3, lat3):
a = distance(lat1, lon1, lat2, lon2)
b = distance(lat2, lon2, lat3, lon3)
c = distance(lat3, lon3, lat1, lon1)
return triangle_area(a, b, c)
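# Hedged usage sketch (not part of the original module): inputs are in radians and the
# returned values are angular, so multiply by R for meters and by R**2 for square meters.
if __name__ == '__main__':
    from math import radians
    # octant triangle: (0, 0), (0, 90E), (90N, 0)
    lat1, lon1 = radians(0.0), radians(0.0)
    lat2, lon2 = radians(0.0), radians(90.0)
    lat3, lon3 = radians(90.0), radians(0.0)
    print('distance in meters: %s' % (distance(lat1, lon1, lat2, lon2) * R))
    print('area in square meters: %s' % (triangle_area_points(lon1, lat1, lon2, lat2, lon3, lat3) * R ** 2))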
|
{"hexsha": "cfe8b2f14b082a1a5480008202c5af0ac9b721c3", "size": 1190, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/omuse/community/cdo/spherical_geometry.py", "max_stars_repo_name": "ipelupessy/omuse", "max_stars_repo_head_hexsha": "83850925beb4b8ba6050c7fa8a1ef2371baf6fbb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2020-03-25T10:02:00.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-18T00:28:35.000Z", "max_issues_repo_path": "src/omuse/community/cdo/spherical_geometry.py", "max_issues_repo_name": "ipelupessy/omuse", "max_issues_repo_head_hexsha": "83850925beb4b8ba6050c7fa8a1ef2371baf6fbb", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 45, "max_issues_repo_issues_event_min_datetime": "2020-03-03T16:07:16.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-14T09:01:07.000Z", "max_forks_repo_path": "src/omuse/community/cdo/spherical_geometry.py", "max_forks_repo_name": "ipelupessy/omuse", "max_forks_repo_head_hexsha": "83850925beb4b8ba6050c7fa8a1ef2371baf6fbb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2020-03-03T13:28:50.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-26T09:20:02.000Z", "avg_line_length": 25.8695652174, "max_line_length": 82, "alphanum_fraction": 0.6394957983, "include": true, "reason": "import numpy", "num_tokens": 399}
|
'''
Scalar and block tridiagonal matrices.
This module also contains utilities that help build tridiagonal matrices.
A <TridMatrix> and its derivatives can be converted freely to csr/coo matrices and arrays.
Some other matrix types are also supported.
In the following description, p is the block dimension, N the matrix dimension and n = N/p.
'''
from numpy import *
from scipy.sparse import csr_matrix,block_diag,coo_matrix,bsr_matrix,issparse
from scipy.sparse import bmat as sbmat
from scipy.sparse.linalg import inv as sinv
from numpy.linalg import inv
import pdb
from futils.pywraper import ind2ptr,get_tlu_seq,ptr2ind
__all__=['TridMatrix','ScalarTridMatrix','BlockTridMatrix','arr2trid','sr2trid','get_trid','build_sr']
class TridMatrix(object):
'''
The base class for tridiagonal matrix with functionality partly realized.
diagonal:
Array of shape (n,p,p) -> block version or (n) -> scalar version,
which is the diagonal part of this tridiagonal array.
upper/lower:
Array of shape (n-1,p,p) -> block version or (n-1) -> scalar version,
which is the upper/lower part of this tridiagonal array.
Leave lower `None` if you'd like it to be hermitian.
'''
def __init__(self,diagonal,upper,lower=None):
self.diagonal=array(diagonal)
self.upper=array(upper)
assert(len(upper)==len(diagonal)-1)
if lower is None:
if self.is_scalar:
self.lower=upper.conj()
else:
self.lower=swapaxes(upper,1,2).conj()
else:
self.lower=lower
def __str__(self):
n=self.n
p=self.p
sizestr='%s'%n if self.is_scalar else '%sx%s'%(n,p)
return '''%s(%s):
upper -> %s
diagonal -> %s
lower -> %s
'''%(self.__class__,sizestr,self.upper,self.diagonal,self.lower)
@property
def p(self):
'''Block size.'''
if self.is_scalar:
return 1
return self.diagonal.shape[-1]
@property
def n(self):
'''Dimension of matrix in view of blocks'''
return self.diagonal.shape[0]
@property
def N(self):
'''The dimension of this matrix.'''
return self.p*self.n
@property
def shape(self):
'''The shape of this matrix.'''
N=self.N
return (N,N)
@property
def is_scalar(self):
'''Return true if it is a scalar tridiagonal matrix.'''
return ndim(self.diagonal)==1
@property
def dtype(self):
'''Get the data type'''
return self.upper.dtype
def tocoo(self):
'''Transform to coo_matrix.'''
raise Exception('Not Implemented!')
def tocsr(self):
'''Transform to csr_matrix.'''
raise Exception('Not Implemented!')
def toarray(self):
'''Transform to an array.'''
raise Exception('Not Implemented!')
class ScalarTridMatrix(TridMatrix):
'''
Scalar tridiagonal matrix class.
diagonal:
Array of shape (n)
which is the diagonal part of this tridiagonal array.
upper/lower:
Array of shape (n-1)
which is the upper/lower part of this tridiagonal array.
Leave lower `None` if you'd like it to be hermitian.
'''
def __init__(self,diagonal,upper,lower=None):
assert(ndim(diagonal)==1)
super(ScalarTridMatrix,self).__init__(diagonal,upper,lower)
def toarray(self):
'''Transform to array.'''
n=self.n
m=zeros((n,n),dtype=self.upper.dtype)
fill_diagonal(m,self.diagonal)
fill_diagonal(m[1:,:-1],self.lower)
fill_diagonal(m[:-1,1:],self.upper)
return m
def tocoo(self):
'''Transform to coo_matrix.'''
n=self.n
indx=concatenate([arange(n-1),arange(n),arange(1,n)])
indy=concatenate([arange(1,n),arange(n),arange(n-1)])
data=concatenate([self.upper,self.diagonal,self.lower])
return coo_matrix((data,(indx,indy)))
def tocsr(self):
'''Transform to csr_matrix.'''
return self.tocoo().tocsr()
def toblocktrid(self):
'''
Transform to block tridiagonal matrix.
'''
return BlockTridMatrix(self.diagonal[:,newaxis,newaxis],self.upper[:,newaxis,newaxis],self.lower[:,newaxis,newaxis])
class BlockTridMatrix(TridMatrix):
'''
Block tridiagonal matrix class.
diagonal:
Array of shape (n,p,p)
which is the diagonal part of this tridiagonal array.
upper/lower:
Array of shape (n-1,p,p)
which is the upper/lower part of this tridiagonal array.
Leave lower `None` if you'd like it to be hermitian.
'''
def __init__(self,diagonal,upper,lower=None):
assert(ndim(diagonal)==3)
super(BlockTridMatrix,self).__init__(diagonal,upper,lower)
def toscalartrid(self):
'''
Transform to a scalar tridiagonal matrix.
'''
p=self.p
if p!=1:
raise Exception('Can not parse BlockTridMatrix to ScalarTridMatrix for block size p = %s!'%p)
return ScalarTridMatrix(self.diagonal[:,0,0],self.upper[:,0,0],self.lower[:,0,0])
def tobsr(self):
'''Transform to bsr_matrix.'''
n=self.n
p=self.p
m=ndarray((n,n),dtype='O')
indx=concatenate([arange(n-1),arange(n),arange(1,n)])
indy=concatenate([arange(1,n),arange(n),arange(n-1)])
args=argsort(indx)
data=concatenate([self.upper,self.diagonal,self.lower])
res=bsr_matrix((data[args],indy[args],ind2ptr(indx[args],n)),blocksize=(p,p))
return res
def tocsr(self):
'''Transform to csr_matrix.'''
return self.tobsr().tocsr()
def tocoo(self):
'''Transform to coo_matrix.'''
return self.tobsr().tocoo()
def toarray(self):
'''Transform to an array.'''
return self.tobsr().toarray()
def arr2trid(arr,p=None):
'''
Parse an array to a tridiagonal matrix.
p:
the block size, leave None to make it scalar.
'''
if p is None:
return ScalarTridMatrix(arr.diagonal(),upper=arr.diagonal(1),lower=arr.diagonal(-1))
else:
N=len(arr)
n=N/p
dl=[]
ul=[]
ll=[]
for i in xrange(n):
dl.append(arr[i*p:(i+1)*p,i*p:(i+1)*p])
if i!=n-1:
ul.append(arr[i*p:(i+1)*p,(i+1)*p:(i+2)*p])
ll.append(arr[(i+1)*p:(i+2)*p,i*p:(i+1)*p])
return BlockTridMatrix(dl,upper=ul,lower=ll)
def sr2trid(mat,p=None):
'''
Parse a bsr_matrix/csr_matrix instance to a tridiagonal matrix.
mat:
An csr_matrix/bsr_matrix instance.
p:
The block size, leave None to make it scalar.
*return*:
A <ScalarTridMatrix> instance if p is None and mat is of type csr,
otherwise, a <BlockTridMatrix> instance.
'''
if issparse(mat):
if p is not None:
if not isinstance(mat,bsr_matrix):
mat=mat.tobsr((p,p))
else:
assert(mat.blocksize[0]==p)
else:
if isinstance(mat,bsr_matrix):
p=mat.blocksize[0]
elif not isinstance(mat,csr_matrix):
mat=mat.tocsr()
else:
raise Exception('Sparse Matrix is required!')
if p is not None:
n=mat.shape[0]/p
diagonal=zeros([n,p,p],dtype=mat.dtype)
upper=zeros([n-1,p,p],dtype=mat.dtype)
lower=zeros([n-1,p,p],dtype=mat.dtype)
else:
n=mat.shape[0]
diagonal=zeros(n,dtype=mat.dtype)
upper=zeros(n-1,dtype=mat.dtype)
lower=zeros(n-1,dtype=mat.dtype)
indptr=mat.indptr
yindices=mat.indices
for i in xrange(n):
x0=indptr[i]
yinds=yindices[x0:indptr[i+1]]
for jind,j in enumerate(yinds):
if j==i:
#print mat[i,i]
diagonal[i]=mat.data[x0+jind]
#print diagonal[i]
elif j==i-1:
lower[j]=mat.data[x0+jind]
elif j==i+1:
upper[i]=mat.data[x0+jind]
if p is not None:
return BlockTridMatrix(diagonal,upper=upper,lower=lower)
else:
return ScalarTridMatrix(diagonal,upper=upper,lower=lower)
def get_trid(n,p=None,fill=None,herm=False):
'''
Generate a tridiagonal matrix.
fill:
The filling value
Leave it None for random numbers.
p:
The block size.
leave it None to make it scalar.
herm:
Get a hermitian matrix if True.
*return*:
A <ScalarTridMatrix> instance if p is None, else <BlockTridMatrix> instance
'''
if fill is None:
agen=random.random
else:
agen=lambda args: fill*ones(args)
if p is None:
dl=agen(n)
ul=agen(n-1)+1j*agen(n-1)
ll=lower=None if herm else (agen(n-1)+1j*agen(n-1))
return ScalarTridMatrix(dl,upper=ul,lower=ll)
ul=agen([n-1,p,p])+1j*agen([n-1,p,p])
dl=agen([n,p,p])+1j*agen([n,p,p])
if herm:
ll=None
dl=(dl+swapaxes(dl,1,2).conj())/2.
else:
ll=agen([n-1,p,p])+1j*agen([n-1,p,p])
return BlockTridMatrix(dl,upper=ul,lower=ll)
def build_sr(ll=None,dl=None,ul=None):
'''
Build a bsr or csr matrix from the lower/diagonal/upper parts of a tridiagonal matrix.
ll/dl/ul:
The lower/diagonal/upper part of the tridiagonal matrix.
Leave any of them None to make it zero (but not all of them).
*return*:
a csr_matrix instance for scalar version, else a bsr_matrix instance.
'''
nzarr=None
for i,il in enumerate([ll,ul,dl]):
if il is not None:
nzarr=il
n=len(il)+1 if i!=2 else len(il)
break
if nzarr is None:
raise ValueError('At least one of ll,dl,ul should be nonzeros!')
is_scalar=ndim(nzarr)==1
p=1 if is_scalar else nzarr.shape[-1]
if is_scalar:
mgen=csr_matrix
nullval=zeros(0)
else:
mgen=bsr_matrix
nullval=zeros([0,p,p])
indx_d=arange(n)
indx=concatenate([[] if dl is None else indx_d,[] if ll is None else indx_d[1:],[] if ul is None else indx_d[:-1]],axis=0)
indy=concatenate([[] if dl is None else indx_d,[] if ll is None else indx_d[:-1],[] if ul is None else indx_d[1:]],axis=0)
data=concatenate([nullval if dl is None else dl,nullval if ll is None else ll,nullval if ul is None else ul],axis=0)
odl=argsort(indx)
L=mgen((data[odl],indy[odl],ind2ptr(indx[odl],n)),dtype=nzarr.dtype)
return L
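# Hedged usage sketch (not part of the original module): build a random hermitian scalar
# tridiagonal matrix and check that the dense and sparse conversions agree.
if __name__=='__main__':
    tm=get_trid(5,p=None,herm=True)   # a 5x5 <ScalarTridMatrix>
    dense=tm.toarray()
    sparse=tm.tocoo()
    print('max conversion mismatch: %s'%abs(dense-sparse.toarray()).max())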
|
{"hexsha": "214c2d9ef0d1d55a28e8a46489bcfa54515d5123", "size": 10603, "ext": "py", "lang": "Python", "max_stars_repo_path": "trid.py", "max_stars_repo_name": "GiggleLiu/tridmat", "max_stars_repo_head_hexsha": "c413a39efdd4ff5ab1dcd4da48891fcb2653e72a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "trid.py", "max_issues_repo_name": "GiggleLiu/tridmat", "max_issues_repo_head_hexsha": "c413a39efdd4ff5ab1dcd4da48891fcb2653e72a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "trid.py", "max_forks_repo_name": "GiggleLiu/tridmat", "max_forks_repo_head_hexsha": "c413a39efdd4ff5ab1dcd4da48891fcb2653e72a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.207977208, "max_line_length": 126, "alphanum_fraction": 0.5946430256, "include": true, "reason": "from numpy,from scipy", "num_tokens": 2876}
|
[STATEMENT]
lemma Mignotte_bound:
shows "of_int \<bar>coeff g k\<bar> \<le> (degree g choose k) * mahler_measure g"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. real_of_int \<bar>poly.coeff g k\<bar> \<le> real (degree g choose k) * mahler_measure g
[PROOF STEP]
proof (cases "k \<le> degree g \<and> g \<noteq> 0")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. k \<le> degree g \<and> g \<noteq> 0 \<Longrightarrow> real_of_int \<bar>poly.coeff g k\<bar> \<le> real (degree g choose k) * mahler_measure g
2. \<not> (k \<le> degree g \<and> g \<noteq> 0) \<Longrightarrow> real_of_int \<bar>poly.coeff g k\<bar> \<le> real (degree g choose k) * mahler_measure g
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
\<not> (k \<le> degree g \<and> g \<noteq> 0)
goal (2 subgoals):
1. k \<le> degree g \<and> g \<noteq> 0 \<Longrightarrow> real_of_int \<bar>poly.coeff g k\<bar> \<le> real (degree g choose k) * mahler_measure g
2. \<not> (k \<le> degree g \<and> g \<noteq> 0) \<Longrightarrow> real_of_int \<bar>poly.coeff g k\<bar> \<le> real (degree g choose k) * mahler_measure g
[PROOF STEP]
hence "coeff g k = 0"
[PROOF STATE]
proof (prove)
using this:
\<not> (k \<le> degree g \<and> g \<noteq> 0)
goal (1 subgoal):
1. poly.coeff g k = 0
[PROOF STEP]
using le_degree
[PROOF STATE]
proof (prove)
using this:
\<not> (k \<le> degree g \<and> g \<noteq> 0)
poly.coeff ?p ?n \<noteq> (0::?'a) \<Longrightarrow> ?n \<le> degree ?p
goal (1 subgoal):
1. poly.coeff g k = 0
[PROOF STEP]
by (cases "g = 0", auto)
[PROOF STATE]
proof (state)
this:
poly.coeff g k = 0
goal (2 subgoals):
1. k \<le> degree g \<and> g \<noteq> 0 \<Longrightarrow> real_of_int \<bar>poly.coeff g k\<bar> \<le> real (degree g choose k) * mahler_measure g
2. \<not> (k \<le> degree g \<and> g \<noteq> 0) \<Longrightarrow> real_of_int \<bar>poly.coeff g k\<bar> \<le> real (degree g choose k) * mahler_measure g
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
poly.coeff g k = 0
goal (1 subgoal):
1. real_of_int \<bar>poly.coeff g k\<bar> \<le> real (degree g choose k) * mahler_measure g
[PROOF STEP]
using mahler_measure_ge_0[of g]
[PROOF STATE]
proof (prove)
using this:
poly.coeff g k = 0
0 \<le> mahler_measure g
goal (1 subgoal):
1. real_of_int \<bar>poly.coeff g k\<bar> \<le> real (degree g choose k) * mahler_measure g
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
real_of_int \<bar>poly.coeff g k\<bar> \<le> real (degree g choose k) * mahler_measure g
goal (1 subgoal):
1. k \<le> degree g \<and> g \<noteq> 0 \<Longrightarrow> real_of_int \<bar>poly.coeff g k\<bar> \<le> real (degree g choose k) * mahler_measure g
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. k \<le> degree g \<and> g \<noteq> 0 \<Longrightarrow> real_of_int \<bar>poly.coeff g k\<bar> \<le> real (degree g choose k) * mahler_measure g
[PROOF STEP]
case kg: True
[PROOF STATE]
proof (state)
this:
k \<le> degree g \<and> g \<noteq> 0
goal (1 subgoal):
1. k \<le> degree g \<and> g \<noteq> 0 \<Longrightarrow> real_of_int \<bar>poly.coeff g k\<bar> \<le> real (degree g choose k) * mahler_measure g
[PROOF STEP]
hence g: "g \<noteq> 0" "g dvd g"
[PROOF STATE]
proof (prove)
using this:
k \<le> degree g \<and> g \<noteq> 0
goal (1 subgoal):
1. g \<noteq> 0 &&& g dvd g
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
g \<noteq> 0
g dvd g
goal (1 subgoal):
1. k \<le> degree g \<and> g \<noteq> 0 \<Longrightarrow> real_of_int \<bar>poly.coeff g k\<bar> \<le> real (degree g choose k) * mahler_measure g
[PROOF STEP]
from mignotte_bound_main[OF g le_refl, of k]
[PROOF STATE]
proof (chain)
picking this:
\<bar>poly.coeff g k\<bar> \<le> \<lfloor>real (degree g - 1 choose k) * mahler_measure g\<rfloor> + int (min k 1 * (degree g - 1 choose (k - 1))) * \<bar>lead_coeff g\<bar>
[PROOF STEP]
have "real_of_int \<bar>coeff g k\<bar>
\<le> of_int \<lfloor>real (degree g - 1 choose k) * mahler_measure g\<rfloor> +
of_int (int (min k 1 * (degree g - 1 choose (k - 1))) * \<bar>lead_coeff g\<bar>)"
[PROOF STATE]
proof (prove)
using this:
\<bar>poly.coeff g k\<bar> \<le> \<lfloor>real (degree g - 1 choose k) * mahler_measure g\<rfloor> + int (min k 1 * (degree g - 1 choose (k - 1))) * \<bar>lead_coeff g\<bar>
goal (1 subgoal):
1. real_of_int \<bar>poly.coeff g k\<bar> \<le> real_of_int \<lfloor>real (degree g - 1 choose k) * mahler_measure g\<rfloor> + real_of_int (int (min k 1 * (degree g - 1 choose (k - 1))) * \<bar>lead_coeff g\<bar>)
[PROOF STEP]
by linarith
[PROOF STATE]
proof (state)
this:
real_of_int \<bar>poly.coeff g k\<bar> \<le> real_of_int \<lfloor>real (degree g - 1 choose k) * mahler_measure g\<rfloor> + real_of_int (int (min k 1 * (degree g - 1 choose (k - 1))) * \<bar>lead_coeff g\<bar>)
goal (1 subgoal):
1. k \<le> degree g \<and> g \<noteq> 0 \<Longrightarrow> real_of_int \<bar>poly.coeff g k\<bar> \<le> real (degree g choose k) * mahler_measure g
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
real_of_int \<bar>poly.coeff g k\<bar> \<le> real_of_int \<lfloor>real (degree g - 1 choose k) * mahler_measure g\<rfloor> + real_of_int (int (min k 1 * (degree g - 1 choose (k - 1))) * \<bar>lead_coeff g\<bar>)
goal (1 subgoal):
1. k \<le> degree g \<and> g \<noteq> 0 \<Longrightarrow> real_of_int \<bar>poly.coeff g k\<bar> \<le> real (degree g choose k) * mahler_measure g
[PROOF STEP]
have "\<dots> \<le> real (degree g - 1 choose k) * mahler_measure g
+ real (min k 1 * (degree g - 1 choose (k - 1))) * (of_int \<bar>lead_coeff g\<bar> * 1)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. real_of_int \<lfloor>real (degree g - 1 choose k) * mahler_measure g\<rfloor> + real_of_int (int (min k 1 * (degree g - 1 choose (k - 1))) * \<bar>lead_coeff g\<bar>) \<le> real (degree g - 1 choose k) * mahler_measure g + real (min k 1 * (degree g - 1 choose (k - 1))) * (real_of_int \<bar>lead_coeff g\<bar> * 1)
[PROOF STEP]
by (rule add_mono, force, auto)
[PROOF STATE]
proof (state)
this:
real_of_int \<lfloor>real (degree g - 1 choose k) * mahler_measure g\<rfloor> + real_of_int (int (min k 1 * (degree g - 1 choose (k - 1))) * \<bar>lead_coeff g\<bar>) \<le> real (degree g - 1 choose k) * mahler_measure g + real (min k 1 * (degree g - 1 choose (k - 1))) * (real_of_int \<bar>lead_coeff g\<bar> * 1)
goal (1 subgoal):
1. k \<le> degree g \<and> g \<noteq> 0 \<Longrightarrow> real_of_int \<bar>poly.coeff g k\<bar> \<le> real (degree g choose k) * mahler_measure g
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
real_of_int \<lfloor>real (degree g - 1 choose k) * mahler_measure g\<rfloor> + real_of_int (int (min k 1 * (degree g - 1 choose (k - 1))) * \<bar>lead_coeff g\<bar>) \<le> real (degree g - 1 choose k) * mahler_measure g + real (min k 1 * (degree g - 1 choose (k - 1))) * (real_of_int \<bar>lead_coeff g\<bar> * 1)
goal (1 subgoal):
1. k \<le> degree g \<and> g \<noteq> 0 \<Longrightarrow> real_of_int \<bar>poly.coeff g k\<bar> \<le> real (degree g choose k) * mahler_measure g
[PROOF STEP]
have "\<dots> \<le> real (degree g - 1 choose k) * mahler_measure g
+ real (min k 1 * (degree g - 1 choose (k - 1))) * mahler_measure g"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. real (degree g - 1 choose k) * mahler_measure g + real (min k 1 * (degree g - 1 choose (k - 1))) * (real_of_int \<bar>lead_coeff g\<bar> * 1) \<le> real (degree g - 1 choose k) * mahler_measure g + real (min k 1 * (degree g - 1 choose (k - 1))) * mahler_measure g
[PROOF STEP]
by (rule add_left_mono[OF mult_left_mono],
unfold mahler_measure_def mahler_measure_poly_def,
rule mult_mono, auto intro!: prod_list_ge1)
[PROOF STATE]
proof (state)
this:
real (degree g - 1 choose k) * mahler_measure g + real (min k 1 * (degree g - 1 choose (k - 1))) * (real_of_int \<bar>lead_coeff g\<bar> * 1) \<le> real (degree g - 1 choose k) * mahler_measure g + real (min k 1 * (degree g - 1 choose (k - 1))) * mahler_measure g
goal (1 subgoal):
1. k \<le> degree g \<and> g \<noteq> 0 \<Longrightarrow> real_of_int \<bar>poly.coeff g k\<bar> \<le> real (degree g choose k) * mahler_measure g
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
real (degree g - 1 choose k) * mahler_measure g + real (min k 1 * (degree g - 1 choose (k - 1))) * (real_of_int \<bar>lead_coeff g\<bar> * 1) \<le> real (degree g - 1 choose k) * mahler_measure g + real (min k 1 * (degree g - 1 choose (k - 1))) * mahler_measure g
goal (1 subgoal):
1. k \<le> degree g \<and> g \<noteq> 0 \<Longrightarrow> real_of_int \<bar>poly.coeff g k\<bar> \<le> real (degree g choose k) * mahler_measure g
[PROOF STEP]
have "\<dots> =
(real ((degree g - 1 choose k) + (min k 1 * (degree g - 1 choose (k - 1))))) * mahler_measure g"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. real (degree g - 1 choose k) * mahler_measure g + real (min k 1 * (degree g - 1 choose (k - 1))) * mahler_measure g = real (degree g - 1 choose k + min k 1 * (degree g - 1 choose (k - 1))) * mahler_measure g
[PROOF STEP]
by (auto simp: field_simps)
[PROOF STATE]
proof (state)
this:
real (degree g - 1 choose k) * mahler_measure g + real (min k 1 * (degree g - 1 choose (k - 1))) * mahler_measure g = real (degree g - 1 choose k + min k 1 * (degree g - 1 choose (k - 1))) * mahler_measure g
goal (1 subgoal):
1. k \<le> degree g \<and> g \<noteq> 0 \<Longrightarrow> real_of_int \<bar>poly.coeff g k\<bar> \<le> real (degree g choose k) * mahler_measure g
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
real (degree g - 1 choose k) * mahler_measure g + real (min k 1 * (degree g - 1 choose (k - 1))) * mahler_measure g = real (degree g - 1 choose k + min k 1 * (degree g - 1 choose (k - 1))) * mahler_measure g
goal (1 subgoal):
1. k \<le> degree g \<and> g \<noteq> 0 \<Longrightarrow> real_of_int \<bar>poly.coeff g k\<bar> \<le> real (degree g choose k) * mahler_measure g
[PROOF STEP]
have "(degree g - 1 choose k) + (min k 1 * (degree g - 1 choose (k - 1))) = degree g choose k"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. degree g - 1 choose k + min k 1 * (degree g - 1 choose (k - 1)) = degree g choose k
[PROOF STEP]
proof (cases "k = 0")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. k = 0 \<Longrightarrow> degree g - 1 choose k + min k 1 * (degree g - 1 choose (k - 1)) = degree g choose k
2. k \<noteq> 0 \<Longrightarrow> degree g - 1 choose k + min k 1 * (degree g - 1 choose (k - 1)) = degree g choose k
[PROOF STEP]
case False
[PROOF STATE]
proof (state)
this:
k \<noteq> 0
goal (2 subgoals):
1. k = 0 \<Longrightarrow> degree g - 1 choose k + min k 1 * (degree g - 1 choose (k - 1)) = degree g choose k
2. k \<noteq> 0 \<Longrightarrow> degree g - 1 choose k + min k 1 * (degree g - 1 choose (k - 1)) = degree g choose k
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
k \<noteq> 0
[PROOF STEP]
obtain kk where k: "k = Suc kk"
[PROOF STATE]
proof (prove)
using this:
k \<noteq> 0
goal (1 subgoal):
1. (\<And>kk. k = Suc kk \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (cases k, auto)
[PROOF STATE]
proof (state)
this:
k = Suc kk
goal (2 subgoals):
1. k = 0 \<Longrightarrow> degree g - 1 choose k + min k 1 * (degree g - 1 choose (k - 1)) = degree g choose k
2. k \<noteq> 0 \<Longrightarrow> degree g - 1 choose k + min k 1 * (degree g - 1 choose (k - 1)) = degree g choose k
[PROOF STEP]
with kg
[PROOF STATE]
proof (chain)
picking this:
k \<le> degree g \<and> g \<noteq> 0
k = Suc kk
[PROOF STEP]
obtain gg where g: "degree g = Suc gg"
[PROOF STATE]
proof (prove)
using this:
k \<le> degree g \<and> g \<noteq> 0
k = Suc kk
goal (1 subgoal):
1. (\<And>gg. degree g = Suc gg \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (cases "degree g", auto)
[PROOF STATE]
proof (state)
this:
degree g = Suc gg
goal (2 subgoals):
1. k = 0 \<Longrightarrow> degree g - 1 choose k + min k 1 * (degree g - 1 choose (k - 1)) = degree g choose k
2. k \<noteq> 0 \<Longrightarrow> degree g - 1 choose k + min k 1 * (degree g - 1 choose (k - 1)) = degree g choose k
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. degree g - 1 choose k + min k 1 * (degree g - 1 choose (k - 1)) = degree g choose k
[PROOF STEP]
unfolding k g
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Suc gg - 1 choose Suc kk + min (Suc kk) 1 * (Suc gg - 1 choose (Suc kk - 1)) = Suc gg choose Suc kk
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
degree g - 1 choose k + min k 1 * (degree g - 1 choose (k - 1)) = degree g choose k
goal (1 subgoal):
1. k = 0 \<Longrightarrow> degree g - 1 choose k + min k 1 * (degree g - 1 choose (k - 1)) = degree g choose k
[PROOF STEP]
qed auto
[PROOF STATE]
proof (state)
this:
degree g - 1 choose k + min k 1 * (degree g - 1 choose (k - 1)) = degree g choose k
goal (1 subgoal):
1. k \<le> degree g \<and> g \<noteq> 0 \<Longrightarrow> real_of_int \<bar>poly.coeff g k\<bar> \<le> real (degree g choose k) * mahler_measure g
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
real_of_int \<bar>poly.coeff g k\<bar> \<le> real (degree g choose k) * mahler_measure g
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
real_of_int \<bar>poly.coeff g k\<bar> \<le> real (degree g choose k) * mahler_measure g
goal (1 subgoal):
1. real_of_int \<bar>poly.coeff g k\<bar> \<le> real (degree g choose k) * mahler_measure g
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
real_of_int \<bar>poly.coeff g k\<bar> \<le> real (degree g choose k) * mahler_measure g
goal:
No subgoals!
[PROOF STEP]
qed
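Informal restatement (not part of the Isabelle sources): the lemma above is the Mignotte
coefficient bound. Writing $g = \sum_i a_i x^i$ with $n = \deg g$ and Mahler measure $M(g)$,
it says
\[
  |a_k| \;\le\; \binom{n}{k}\, M(g).
\]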
|
{"llama_tokens": 5792, "file": "Berlekamp_Zassenhaus_Factor_Bound", "length": 42}
|
#!/usr/bin/env python3
"""
Recipe for training a Voice Activity Detection (VAD) model on LibriParty.
This code heavily relies on data augmentation with external datasets
(e.g., open_rir and musan; CommonLanguage is used as well).
Make sure you download all the datasets before starting the experiment:
- LibriParty: https://drive.google.com/file/d/1--cAS5ePojMwNY5fewioXAv9YlYAWzIJ/view?usp=sharing
- Musan: https://www.openslr.org/resources/17/musan.tar.gz
- CommonLanguage: https://zenodo.org/record/5036977/files/CommonLanguage.tar.gz?download=1
To run an experiment:
python train.py hparams/train.yaml\
--data_folder=/path/to/LibriParty \
--musan_folder=/path/to/musan/\
--commonlanguage_folder=/path/to/commonlang
Authors
* Mohamed Kleit 2021
* Arjun V 2021
* Mirco Ravanelli 2021
"""
import sys
import torch
import logging
import numpy as np
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
from data_augment import augment_data
logger = logging.getLogger(__name__)
class VADBrain(sb.Brain):
def compute_forward(self, batch, stage):
"""Given an input batch it computes the binary probability.
In training phase, we create on-the-fly augmentation data.
"""
batch = batch.to(self.device)
wavs, lens = batch.signal
targets, lens_targ = batch.target
self.targets = targets
if stage == sb.Stage.TRAIN:
wavs, targets, lens = augment_data(
self.noise_datasets,
self.speech_datasets,
wavs,
targets,
lens_targ,
)
self.lens = lens
self.targets = targets
# From wav input to output binary prediction
feats = self.hparams.compute_features(wavs)
feats = self.modules.mean_var_norm(feats, lens)
feats = feats.detach()
outputs = self.modules.cnn(feats)
outputs = outputs.reshape(
outputs.shape[0],
outputs.shape[1],
outputs.shape[2] * outputs.shape[3],
)
outputs, h = self.modules.rnn(outputs)
outputs = self.modules.dnn(outputs)
return outputs, lens
def compute_objectives(self, predictions, batch, stage):
"Given the network predictions and targets computed the binary CE"
predictions, lens = predictions
targets = self.targets
predictions = predictions[:, : targets.shape[-1], 0]
loss = self.hparams.compute_BCE_cost(predictions, targets, lens)
self.train_metrics.append(batch.id, torch.sigmoid(predictions), targets)
if stage != sb.Stage.TRAIN:
self.valid_metrics.append(
batch.id, torch.sigmoid(predictions), targets
)
return loss
def on_stage_start(self, stage, epoch=None):
"Gets called when a stage (either training, validation, test) starts."
self.train_metrics = self.hparams.train_stats()
self.noise_datasets = [
self.hparams.add_noise,
self.hparams.add_noise_musan,
self.hparams.add_music_musan,
]
self.speech_datasets = [
self.hparams.add_speech_musan,
self.hparams.add_speech_musan,
self.hparams.add_speech_musan,
]
if stage != sb.Stage.TRAIN:
self.valid_metrics = self.hparams.test_stats()
def on_stage_end(self, stage, stage_loss, epoch=None):
"""Gets called at the end of a stage."""
if stage == sb.Stage.TRAIN:
self.train_loss = stage_loss
else:
summary = self.valid_metrics.summarize(threshold=0.5)
if stage == sb.Stage.VALID:
old_lr, new_lr = self.hparams.lr_annealing(epoch)
sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
self.hparams.train_logger.log_stats(
stats_meta={"epoch": epoch, "lr": old_lr},
train_stats={"loss": self.train_loss},
valid_stats={"loss": stage_loss, "summary": summary},
)
self.checkpointer.save_and_keep_only(
meta={"loss": stage_loss, "summary": summary},
num_to_keep=1,
min_keys=["loss"],
name="epoch_{}".format(epoch),
)
elif stage == sb.Stage.TEST:
self.hparams.train_logger.log_stats(
stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
test_stats={"loss": stage_loss, "summary": summary},
)
def dataio_prep(hparams):
"Creates the datasets and their data processing pipelines."
# 1. Declarations:
data_folder = hparams["data_folder"]
train = sb.dataio.dataset.DynamicItemDataset.from_json(
json_path=hparams["annotation_train"],
replacements={"data_root": data_folder},
)
validation = sb.dataio.dataset.DynamicItemDataset.from_json(
json_path=hparams["annotation_valid"],
replacements={"data_root": data_folder},
)
test = sb.dataio.dataset.DynamicItemDataset.from_json(
json_path=hparams["annotation_test"],
replacements={"data_root": data_folder},
)
# 2. Define audio pipeline:
@sb.utils.data_pipeline.takes("wav")
@sb.utils.data_pipeline.provides("signal")
def audio_pipeline(wav):
sig = sb.dataio.dataio.read_audio(wav)
return sig
# 3. Define text pipeline:
@sb.utils.data_pipeline.takes("speech")
@sb.utils.data_pipeline.provides("target")
def vad_targets(speech, hparams=hparams):
boundaries = (
[
(
int(interval[0] / hparams["time_resolution"]),
int(interval[1] / hparams["time_resolution"]),
)
for interval in speech
]
if len(speech) > 0
else []
)
gt = torch.zeros(
int(
np.ceil(
hparams["example_length"] * (1 / hparams["time_resolution"])
)
)
)
for indxs in boundaries:
start, stop = indxs
gt[start:stop] = 1
return gt
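    # Worked example (illustrative values, not from the original recipe): with
    # time_resolution = 0.01 s and example_length = 5.0 s, vad_targets returns a vector of
    # roughly 500 frames, and a speech interval of about (1.0 s, 2.0 s) sets roughly
    # frames 100..200 to 1 while all other frames stay 0.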
# Create dataset
datasets = [train, validation, test]
sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
sb.dataio.dataset.add_dynamic_item(datasets, vad_targets)
sb.dataio.dataset.set_output_keys(
datasets, ["id", "signal", "target", "speech"]
)
# Split dataset
train_data, valid_data, test_data = datasets
return train_data, valid_data, test_data
# Begin Recipe!
if __name__ == "__main__":
# CLI:
hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
# Load hyperparameters file with command-line overrides
with open(hparams_file) as fin:
hparams = load_hyperpyyaml(fin, overrides)
# Initialize ddp (useful only for multi-GPU DDP training)
sb.utils.distributed.ddp_init_group(run_opts)
# Create experiment directory
sb.create_experiment_directory(
experiment_directory=hparams["output_folder"],
hyperparams_to_save=hparams_file,
overrides=overrides,
)
from libriparty_prepare import prepare_libriparty
# LibriParty preparation
run_on_main(
prepare_libriparty,
kwargs={
"data_folder": hparams["data_folder"],
"save_json_folder": hparams["save_folder"],
"sample_rate": hparams["sample_rate"],
"window_size": hparams["example_length"],
"skip_prep": hparams["skip_prep"],
},
)
# Prepare Musan
from musan_prepare import prepare_musan
run_on_main(
prepare_musan,
kwargs={
"folder": hparams["musan_folder"],
"music_csv": hparams["music_csv"],
"noise_csv": hparams["noise_csv"],
"speech_csv": hparams["speech_csv"],
"max_noise_len": hparams["example_length"],
},
)
# Prepare common
from commonlanguage_prepare import prepare_commonlanguage
run_on_main(
prepare_commonlanguage,
kwargs={
"folder": hparams["commonlanguage_folder"],
"csv_file": hparams["multilang_speech_csv"],
},
)
# Dataset IO prep: creating Dataset objects
train_data, valid_data, test_data = dataio_prep(hparams)
# Trainer initialization
vad_brain = VADBrain(
modules=hparams["modules"],
opt_class=hparams["opt_class"],
hparams=hparams,
run_opts=run_opts,
checkpointer=hparams["checkpointer"],
)
# Training/validation loop
vad_brain.fit(
vad_brain.hparams.epoch_counter,
train_data,
valid_data,
train_loader_kwargs=hparams["train_dataloader_opts"],
valid_loader_kwargs=hparams["valid_dataloader_opts"],
)
# Test
vad_brain.evaluate(
test_data,
min_key="loss",
test_loader_kwargs=hparams["test_dataloader_opts"],
)
|
{"hexsha": "ea16333d1a91da85fde178bd384f3fadd47c7c3b", "size": 9125, "ext": "py", "lang": "Python", "max_stars_repo_path": "recipes/LibriParty/VAD/train.py", "max_stars_repo_name": "JasonSWFu/speechbrain", "max_stars_repo_head_hexsha": "cb78ba2b33fceba273b055dc471535344c3053f0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3913, "max_stars_repo_stars_event_min_datetime": "2021-03-14T13:54:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T05:09:55.000Z", "max_issues_repo_path": "recipes/LibriParty/VAD/train.py", "max_issues_repo_name": "JasonSWFu/speechbrain", "max_issues_repo_head_hexsha": "cb78ba2b33fceba273b055dc471535344c3053f0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 667, "max_issues_repo_issues_event_min_datetime": "2021-03-14T20:11:17.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T04:07:17.000Z", "max_forks_repo_path": "recipes/LibriParty/VAD/train.py", "max_forks_repo_name": "JasonSWFu/speechbrain", "max_forks_repo_head_hexsha": "cb78ba2b33fceba273b055dc471535344c3053f0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 785, "max_forks_repo_forks_event_min_datetime": "2021-03-14T13:20:57.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T03:26:03.000Z", "avg_line_length": 31.6840277778, "max_line_length": 96, "alphanum_fraction": 0.6229041096, "include": true, "reason": "import numpy", "num_tokens": 2015}
|
from tensorflow.python.keras.layers import Input, AveragePooling2D, Dense, Conv2D, Flatten
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.datasets import cifar10
from tensorflow.python.keras.utils.np_utils import to_categorical
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.regularizers import l2
from tensorflow.python.keras.callbacks import TensorBoard
import numpy as np
import os
from time import time
from CustomKerasLayers import DenseBlock2D
def evaluate_on_cifar10():
total_depth = 100
n_blocks = 3
depth = (total_depth - 4) // n_blocks
growth_rate = 12
filters = growth_rate * 2
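    # e.g. total_depth=100, n_blocks=3 -> depth = (100 - 4) // 3 = 32 layers per dense block,
    # and with growth_rate=12 the stem convolution starts with filters = 24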
# region Model
input_layer = Input(shape=[32, 32, 3])
layer = input_layer
layer = Conv2D(filters=filters, kernel_size=3, strides=1, padding="same")(layer)
for k in range(n_blocks):
layer = DenseBlock2D(kernel_size=3, growth_rate=growth_rate, depth=depth,
use_batch_normalization=True)(layer)
if k < (n_blocks - 1):
filters += growth_rate * depth // 4
layer = transition_block(layer, filters)
else:
layer = AveragePooling2D(pool_size=8)(layer)
layer = Flatten()(layer)
layer = Dense(units=10, activation="softmax")(layer)
model = Model(inputs=input_layer, outputs=layer)
model.summary()
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["acc"])
# endregion
# region Data
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.astype(np.float32) / 255.0
x_test = x_test.astype(np.float32) / 255.0
y_train = to_categorical(y_train, num_classes=10)
y_test = to_categorical(y_test, num_classes=10)
generator = ImageDataGenerator(rotation_range=15,
width_shift_range=5. / 32,
height_shift_range=5. / 32,
horizontal_flip=True)
generator.fit(x_train, seed=0)
# endregion
log_dir = "../tests/dense_block_cifar10/{}".format(int(time()))
log_dir = os.path.normpath(log_dir)
tensorboard = TensorBoard(log_dir=log_dir, profile_batch=0)
model.fit_generator(generator.flow(x_train, y_train, batch_size=64),
steps_per_epoch=100, epochs=300, validation_data=(x_test, y_test),
validation_steps=100, verbose=1, callbacks=[tensorboard])
def transition_block(layer, filters):
layer = Conv2D(filters=filters, kernel_size=1, kernel_initializer="he_normal", use_bias=False,
kernel_regularizer=l2(1e-4))(layer)
layer = AveragePooling2D(pool_size=2, strides=2)(layer)
return layer
if __name__ == "__main__":
evaluate_on_cifar10()
|
{"hexsha": "4b3a2a842307aa6a7cda94cbde5a977e948fb70f", "size": 2837, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/CustomKerasLayers/tests/DenseBlockCifar10.py", "max_stars_repo_name": "Zelgunn/Video-Latent-Lerp", "max_stars_repo_head_hexsha": "c479a26c0be5174543268667bde09f1154d2ff79", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-04-28T20:31:08.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-18T14:16:45.000Z", "max_issues_repo_path": "code/CustomKerasLayers/tests/DenseBlockCifar10.py", "max_issues_repo_name": "Zelgunn/Video-Latent-Lerp", "max_issues_repo_head_hexsha": "c479a26c0be5174543268667bde09f1154d2ff79", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-06-05T06:55:00.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-09T12:42:01.000Z", "max_forks_repo_path": "code/CustomKerasLayers/tests/DenseBlockCifar10.py", "max_forks_repo_name": "Zelgunn/Video-Latent-Lerp", "max_forks_repo_head_hexsha": "c479a26c0be5174543268667bde09f1154d2ff79", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.3717948718, "max_line_length": 98, "alphanum_fraction": 0.6771237222, "include": true, "reason": "import numpy", "num_tokens": 673}
|
import numpy as np
from baseline.remote import RemoteModelREST, RemoteModelGRPC, register_remote
def _convert(data):
if isinstance(data, np.ndarray):
return data
return np.array(data)
@register_remote('http')
class RemoteModelRESTPytorch(RemoteModelREST):
"""JSON schema:
{
"signature_name": "name",
"inputs": {
"data": [
[...],
[...],
...
],
"shapes": [
[...],
[...],
...
]
"lengths": [...]
}
}
data should be flattened (np.ravel)
shapes should be the shapes needed to rebuild each flattened tensor (one entry per entry of data)
"""
def predict(self, examples, **kwargs):
"""The pytorch server can only handle batch size of 1 because the JIT'd
`pack_padded_sequence jits that batch size. So we send a request per
example.
"""
results = []
example_input = examples[self.input_keys[0]]
batch_size = len(example_input)
for i in range(batch_size):
example = {k: np.array([v[i]]) for k, v in examples.items()}
example_output = super(RemoteModelRESTPytorch, self).predict(example, **kwargs)
results.append(example_output[0])
return results
def create_request(self, examples):
request = {}
request['signature_name'] = self.signature
request['inputs'] = {}
request['inputs']['data'] = [_convert(examples[x]).ravel().tolist() for x in self.input_keys]
request['inputs']['shapes'] = [list(examples[x].shape) for x in self.input_keys]
request['inputs']['lengths'] = examples[self.lengths_key].tolist()
return request
@register_remote('grpc')
class RemoteModelGRPCPytorch(RemoteModelGRPC):
def __init__(self, *args, **kwargs):
raise NotImplementedError('Pytorch GRPC service is not implemented.')
@register_remote('grpc-preproc')
class RemoteModelGRPCPytorchPreproc(RemoteModelGRPCPytorch):
def __init__(self, *args, **kwargs):
raise NotImplementedError('Pytorch does not support string tensors so Server side preproc is not supported.')
@register_remote('http-preproc')
class RemoteModelHTTPPytorchPreproc(RemoteModelRESTPytorch):
def __init__(self, *args, **kwargs):
raise NotImplementedError('Pytorch does not support string tensors so Server side preproc is not supported.')
|
{"hexsha": "0230c7ab3a1a2751a5314bd941b127e18760c92f", "size": 2572, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/baseline/pytorch/remote.py", "max_stars_repo_name": "domyounglee/baseline", "max_stars_repo_head_hexsha": "2261abfb7e770cc6f3d63a7f6e0015238d0e11f8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/baseline/pytorch/remote.py", "max_issues_repo_name": "domyounglee/baseline", "max_issues_repo_head_hexsha": "2261abfb7e770cc6f3d63a7f6e0015238d0e11f8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/baseline/pytorch/remote.py", "max_forks_repo_name": "domyounglee/baseline", "max_forks_repo_head_hexsha": "2261abfb7e770cc6f3d63a7f6e0015238d0e11f8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-05-27T04:52:21.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-15T00:22:53.000Z", "avg_line_length": 32.5569620253, "max_line_length": 117, "alphanum_fraction": 0.600311042, "include": true, "reason": "import numpy", "num_tokens": 549}
|
[STATEMENT]
lemma PO_m1_step5_refines_ir_a0i_running:
"{R_a0im1_ir}
(a0i_running [A, B] (Kab, Nb)), (m1_step5 Rb A B Nb Kab)
{> R_a0im1_ir}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {R_a0im1_ir} a0i_running [A, B] (Kab, Nb), m1_step5 Rb A B Nb Kab {> R_a0im1_ir}
[PROOF STEP]
by (simp add: PO_rhoare_defs R_a0im1_ir_defs a0i_defs m1_defs, safe, auto)
|
{"llama_tokens": 208, "file": "Security_Protocol_Refinement_Key_establish_m1_nssk", "length": 1}
|
[STATEMENT]
lemma rem_cycles_subs: "set (rem_cycles i j xs) \<subseteq> set xs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. set (rem_cycles i j xs) \<subseteq> set xs
[PROOF STEP]
by (meson order_trans remove_all_cycles_subs remove_all_subs remove_all_rev_subs)
|
{"llama_tokens": 110, "file": "Floyd_Warshall_Floyd_Warshall", "length": 1}
|
import pickle
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from mc_lib.observable import RealObservable, block_stats
def test_simple():
r = RealObservable()
lst = list(range(4096))
for val in lst:
r.add_measurement(val)
assert_allclose(r.mean,
np.sum(lst)/len(lst), atol=1e-14)
def test_gaussian_noise():
rndm = np.random.RandomState(1234)
arr = rndm.normal(loc=1., scale=2, size=1000000)
r = RealObservable()
for j in range(arr.size):
r.add_measurement(arr[j])
assert_allclose(r.mean, 1.0, rtol=1e-3)
assert_allclose(r.errorbar, 2./np.sqrt(arr.size), rtol=3e-2)
def test_block_stats():
rndm = np.random.RandomState(1234)
arr = rndm.normal(loc=1., scale=2, size=100)
r = RealObservable()
for j in range(arr.size):
r.add_measurement(arr[j])
expected = np.array([(1.07022457, 0.19913697, 100),
(1.07022457, 0.17666925, 50),
(1.07022457, 0.14712066, 25),
(1.10226201, 0.15669437, 12),
(1.10226201, 0.06177727, 6)],
dtype=[('mean', '<f8'), ('errorbar', '<f8'), ('num_blocks', '<i8')])
stats = block_stats(r)
# this test might be brittle: it depends on the exact random stream,
# also on the internal max block size handling of RealObservable
assert_allclose(stats["mean"], expected["mean"], atol=1e-14)
assert_allclose(stats["errorbar"], expected["errorbar"], atol=1e-14)
assert_equal(stats["num_blocks"], expected["num_blocks"])
def test_pickling():
r = RealObservable()
for j in range(20):
r.add_measurement(j)
pickled = pickle.dumps(r)
unpickled = pickle.loads(pickled)
assert_allclose(r.mean, unpickled.mean, atol=1e-14)
for j in range(20):
r.add_measurement(j+20)
unpickled.add_measurement(j+20)
assert_allclose(r.mean, unpickled.mean, atol=1e-14)
|
{"hexsha": "d9db95e946d77eb85b3a2b667444a442d44c8533", "size": 2027, "ext": "py", "lang": "Python", "max_stars_repo_path": "mc_lib/tests/test_observable.py", "max_stars_repo_name": "MoskalenkoRomanBorisovich/mc_lib", "max_stars_repo_head_hexsha": "024e82cdeb214a76d8b2157de1de6537cd0277ab", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-02-13T08:36:16.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-19T22:01:55.000Z", "max_issues_repo_path": "mc_lib/tests/test_observable.py", "max_issues_repo_name": "MoskalenkoRomanBorisovich/mc_lib", "max_issues_repo_head_hexsha": "024e82cdeb214a76d8b2157de1de6537cd0277ab", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 51, "max_issues_repo_issues_event_min_datetime": "2020-12-09T22:28:13.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-21T15:54:22.000Z", "max_forks_repo_path": "mc_lib/tests/test_observable.py", "max_forks_repo_name": "MoskalenkoRomanBorisovich/mc_lib", "max_forks_repo_head_hexsha": "024e82cdeb214a76d8b2157de1de6537cd0277ab", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2021-01-25T21:01:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-25T22:44:08.000Z", "avg_line_length": 29.3768115942, "max_line_length": 80, "alphanum_fraction": 0.6132215096, "include": true, "reason": "import numpy,from numpy", "num_tokens": 569}
|
#Daniel Sand
import numpy as np
from sklearn import metrics
from sklearn.metrics import classification_report, roc_curve, precision_recall_curve, roc_auc_score, auc, make_scorer, recall_score, accuracy_score, precision_score, confusion_matrix
import pandas as pd
def ROC_LOO_binary(ylabels, scores):
from sklearn.model_selection import LeaveOneOut
from sklearn.metrics import confusion_matrix
pos_label_array=[0,1]
for i in range(0,2):
pos_label = pos_label_array[i]
# initialization
results = type('', (), {})()
results.train_roc_auc=np.array([])
results.train_precision = np.array([])
results.train_recall= np.array([])
results.train_accuracy= np.array([])
results.train_specificity= np.array([])
results.train_senstivity= np.array([])
results.test_TrueLabel=np.array([])
results.test_Pred=np.array([])
index=-1
loo = LeaveOneOut()
loo.get_n_splits(scores)
# loop on LOO
for train_index, test_index in loo.split(scores):
index=index+1
X_train, X_test = scores[train_index], scores[test_index]
y_train, y_test = ylabels[train_index], ylabels[test_index]
fpr, tpr, thresholds = metrics.roc_curve(y_train, X_train,pos_label=pos_label)
results.train_roc_auc= np.append(results.train_roc_auc, auc(fpr, tpr))
optimal_idx = np.argmax(tpr + (1-fpr))  # choose the threshold that maximizes Youden's J (sensitivity + specificity)
optimal_threshold = thresholds[optimal_idx]
indxABoveTH=X_train>=optimal_threshold
trainPred=np.empty(indxABoveTH.shape)
trainPred[:]=np.nan
trainPred[indxABoveTH==True]=pos_label
trainPred[indxABoveTH==False]=1 - pos_label
train_tn, train_fp, train_fn, train_tp = confusion_matrix(y_train, trainPred).ravel()
results.train_senstivity=np.append(results.train_senstivity,tpr[optimal_idx])
results.train_specificity=np.append(results.train_specificity,1-fpr[optimal_idx])
results.train_precision = np.append(results.train_precision,train_tp / (train_tp + train_fp))
results.train_recall = np.append(results.train_recall,(train_tp / (train_tp + train_fn)))
results.train_accuracy = np.append(results.train_accuracy,(train_tp + train_tn) / (train_tp + train_tn + train_fp + train_fn))
test_PosOrNeg = pos_label if X_test >= optimal_threshold else (1 - pos_label)
results.test_TrueLabel =np.append(results.test_TrueLabel,y_test)
results.test_Pred=np.append(results.test_Pred,test_PosOrNeg)
#Test measurement calculation
test_tn, test_fp, test_fn, test_tp = confusion_matrix(results.test_TrueLabel, results.test_Pred).ravel()
results.test_precision = test_tp / (test_tp + test_fp)
results.test_recall = test_tp / (test_tp + test_fn)
results.test_accuracy=(test_tp+test_tn)/(test_tp+test_tn+test_fp+test_fn)
print("AUC mean:"+str(results.train_roc_auc.mean()))
if sum(results.train_roc_auc >= 0.5) >= results.train_roc_auc.size/2:
print('No need to re-run the function with a different pos_label because most iterations classified in the right direction')
break # stop here: the positive-label direction was correct (AUC > 0.5)
else:
print('Warning: more iterations had a train AUC under 0.5; the function will switch pos_label from 0 to 1')
print('number of opposite-direction iterations (AUC less than 0.5): '+str(sum(results.train_roc_auc < 0.5)))
return results
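# Illustrative sketch: the threshold selection used above picks the cut-off that
# maximises tpr + (1 - fpr) (a Youden-style criterion). A standalone toy example
# on synthetic scores; the sample sizes and distributions are arbitrary.
def _example_optimal_threshold():
    rng = np.random.RandomState(0)
    y = np.array([0] * 20 + [1] * 20)
    s = np.concatenate([rng.normal(0.3, 0.1, 20), rng.normal(0.7, 0.1, 20)])
    fpr, tpr, thresholds = metrics.roc_curve(y, s, pos_label=1)
    return thresholds[np.argmax(tpr + (1 - fpr))]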
def ROC_CV_binary(ylabels, scores,ylabels_cleanVal, scores_cleanVal):
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import StratifiedKFold
cleanVal_flag=1
ylabels[ylabels==-1]=0
if cleanVal_flag:
ylabels_cleanVal[ylabels_cleanVal == -1] = 0
pos_label_array=[0,1]
for i in range(0,2):
pos_label = pos_label_array[i]
#initialization
results = type('', (), {})()
results.optimal_threshold=np.array([])
results.train_roc_auc=np.array([])
results.train_precision = np.array([])
results.train_recall= np.array([])
results.train_accuracy= np.array([])
results.train_specificity= np.array([])
results.train_senstivity= np.array([])
results.test_precision=np.array([])
results.test_recall=np.array([])
results.test_accuracy=np.array([])
results.test_auc=np.array([])
index=-1
skf = StratifiedKFold(n_splits=5,random_state=None)
skf.get_n_splits(scores, ylabels)
for train_index, test_index in skf.split(scores, ylabels):
index=index+1
X_train, X_test = scores[train_index], scores[test_index]
y_train, y_test = ylabels[train_index], ylabels[test_index]
fpr, tpr, thresholds = metrics.roc_curve(y_train, X_train,pos_label=pos_label)
results.train_roc_auc= np.append(results.train_roc_auc, auc(fpr, tpr))#adding auc for this train iteration
optimal_idx = np.argmax(tpr + (1-fpr))
optimal_threshold = thresholds[optimal_idx]
results.optimal_threshold=np.append(results.optimal_threshold,optimal_threshold)
#Train measurement calculation
indxABoveTH=X_train>=optimal_threshold
trainPred=np.empty(indxABoveTH.shape)
trainPred[:]=np.nan
trainPred[indxABoveTH==True]=pos_label
trainPred[indxABoveTH==False]=1 - pos_label
train_tn, train_fp, train_fn, train_tp = confusion_matrix(y_train, trainPred).ravel()
results.train_senstivity=np.append(results.train_senstivity,tpr[optimal_idx])
results.train_specificity=np.append(results.train_specificity,1-fpr[optimal_idx])
results.train_precision = np.append(results.train_precision,train_tp / (train_tp + train_fp))
results.train_recall = np.append(results.train_recall,(train_tp / (train_tp + train_fn)))
results.train_accuracy = np.append(results.train_accuracy,(train_tp + train_tn) / (train_tp + train_tn + train_fp + train_fn))
#Test - classify the test fold using the threshold learned on the training fold
test_PosOrNeg = np.zeros((X_test.size, 1))
test_PosOrNeg[:] = np.nan
test_PosOrNeg_temp = np.array(X_test >= optimal_threshold )
test_PosOrNeg[test_PosOrNeg_temp==True]=pos_label
test_PosOrNeg[test_PosOrNeg_temp==False]=1 - pos_label
#Test measurement calculation
results.test_precision=np.append(results.test_precision,precision_score(y_test,test_PosOrNeg))
results.test_recall=np.append(results.test_recall,recall_score(y_test,test_PosOrNeg))
results.test_accuracy=np.append(results.test_accuracy,accuracy_score(y_test,test_PosOrNeg))
results.test_auc=np.append(results.test_auc,roc_auc_score(y_test,test_PosOrNeg))
print("AUC mean:"+str(results.train_roc_auc.mean()))
if sum(results.train_roc_auc >= 0.5) >= results.train_roc_auc.size/2:
print('No need to re-run the function with a different pos_label because most iterations classified in the right direction')
break # stop here: the positive-label direction was correct (AUC > 0.5)
else:
print('Warning: more iterations had a train AUC under 0.5; the function will switch pos_label from 0 to 1')
print('number of opposite-direction iterations (AUC less than 0.5): '+str(sum(results.train_roc_auc < 0.5)))
'''Clean-validation measurement calculation'''
if cleanVal_flag:
# initialize
cleanVal_PosOrNeg = np.zeros((scores_cleanVal.size, 1))
cleanVal_PosOrNeg[:] = np.nan
# classify using the mean of the per-fold optimal thresholds
cleanVal_PosOrNeg_temp = np.array(
scores_cleanVal >= results.optimal_threshold.mean()) #
cleanVal_PosOrNeg[cleanVal_PosOrNeg_temp == True] = pos_label
cleanVal_PosOrNeg[cleanVal_PosOrNeg_temp == False] = 1 - pos_label
# calculating measurements
results.cleanVal_precision = precision_score(ylabels_cleanVal, cleanVal_PosOrNeg)
results.cleanVal_recall = recall_score(ylabels_cleanVal, cleanVal_PosOrNeg)
results.cleanVal_accuracy = accuracy_score(ylabels_cleanVal, cleanVal_PosOrNeg)
return results
|
{"hexsha": "8cc7d11ae68ce6b597f7fdd46f761e437c06f36a", "size": 8445, "ext": "py", "lang": "Python", "max_stars_repo_path": "Python/Functions_base/Functions/AUC_func.py", "max_stars_repo_name": "DanielHuji-RB/RB-article", "max_stars_repo_head_hexsha": "e5a9ba30edfb030db1cd3bcf562c6abff3f9d48e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Python/Functions_base/Functions/AUC_func.py", "max_issues_repo_name": "DanielHuji-RB/RB-article", "max_issues_repo_head_hexsha": "e5a9ba30edfb030db1cd3bcf562c6abff3f9d48e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Python/Functions_base/Functions/AUC_func.py", "max_forks_repo_name": "DanielHuji-RB/RB-article", "max_forks_repo_head_hexsha": "e5a9ba30edfb030db1cd3bcf562c6abff3f9d48e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.9951456311, "max_line_length": 182, "alphanum_fraction": 0.6764949674, "include": true, "reason": "import numpy", "num_tokens": 1973}
|
# -*- coding: utf-8 -*-
import numpy as np, arviz as az, matplotlib.pyplot as plt
from cmdstanpy import CmdStanModel
rng = np.random.default_rng(seed = 123) # newly introduced type of random generator
pA, N = .05, 1500
occurrences = rng.binomial(N, pA)
mdl_data = {"N": N, "occur": occurrences}
modelfile = "ABtesting.stan"
with open(modelfile, "w") as file: file.write("""
data {
int<lower=0> N;
int<lower=0, upper=N> occur;
}
parameters { // discrete parameters impossible
real<lower=0, upper=1> probA;
}
model {
occur ~ binomial(N, probA);
}
""")
sm = CmdStanModel(stan_file = modelfile)
# maximum likelihood estimation
optim = sm.optimize(data = mdl_data).optimized_params_pd
optim[optim.columns[~optim.columns.str.startswith("lp")]]
# variational inference
vb = sm.variational(data = mdl_data)
vb.variational_sample.columns = vb.variational_params_dict.keys()
vb_name = vb.variational_params_pd.columns[~vb.variational_params_pd.columns.str.startswith(("lp", "log_"))]
vb.variational_params_pd[vb_name]
vb.variational_sample[vb_name]
# Markov chain Monte Carlo
fit = sm.sample(
data = mdl_data, show_progress = True, chains = 4,
iter_sampling = 50000, iter_warmup = 10000, thin = 5
)
fit.draws().shape # iterations, chains, parameters
fit.summary().loc[vb_name] # pandas DataFrame
print(fit.diagnose())
posterior = fit.stan_variables()
az_trace = az.from_cmdstanpy(fit)
az.summary(az_trace).loc[vb_name] # pandas DataFrame
az.plot_trace(az_trace)
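# Illustrative follow-up: summarise the posterior draws for probA and compare
# them with the simulated rate pA (a quick sketch using only objects defined above).
probA_draws = posterior["probA"]
print("posterior mean:", probA_draws.mean())
print("95% credible interval:", np.percentile(probA_draws, [2.5, 97.5]))
print("true pA:", pA)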
|
{"hexsha": "389d4186082a8d4f993aa725a8f6130a42571165", "size": 1534, "ext": "py", "lang": "Python", "max_stars_repo_path": "STANchap2ex2.py", "max_stars_repo_name": "phineas-pta/Bayesian-Methods-for-Hackers-using-PyStan", "max_stars_repo_head_hexsha": "d708faab0fdd43800e8726e2c6dd99452c8dcedb", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-18T08:01:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-18T08:01:32.000Z", "max_issues_repo_path": "STANchap2ex2.py", "max_issues_repo_name": "phineas-pta/Bayesian-Methods-for-Hackers-using-PyStan", "max_issues_repo_head_hexsha": "d708faab0fdd43800e8726e2c6dd99452c8dcedb", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "STANchap2ex2.py", "max_forks_repo_name": "phineas-pta/Bayesian-Methods-for-Hackers-using-PyStan", "max_forks_repo_head_hexsha": "d708faab0fdd43800e8726e2c6dd99452c8dcedb", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.8909090909, "max_line_length": 109, "alphanum_fraction": 0.7105606258, "include": true, "reason": "import numpy", "num_tokens": 427}
|
# David R Thompson
import numpy as np
# OSF seam channels
osf_seam_positions = ((187,189),)
# Number of basis vectors used to describe EMIT nonlinearity
linearity_nbasis = 2
# The columns on either side of the FPA are masked.
last_masked_col_left, first_masked_col_right = 9, 1272
# The first and last represent the extrema of the rows
# containing good data (inclusive, zero-indexed)
first_valid_row, last_valid_row = 6, 333
# Subframes have 328 rows
valid_rows = last_valid_row - first_valid_row + 1
# Rows that are not masked, and columns that are not significantly vignetted
first_illuminated_row, last_illuminated_row = 20, 320
first_illuminated_column, last_illuminated_column = 24, 1265
# The range of elements that are distributed
first_distributed_column, last_distributed_column = 24, 1265
first_distributed_row, last_distributed_row = 26, 313
# EMIT FPA size
native_rows, native_columns = 480, 1280
# Define masked rows and columns
masked_rows = np.concatenate((np.arange(first_valid_row, first_illuminated_row, dtype=int),
np.arange(last_illuminated_row+1, last_valid_row+1, dtype=int)),
axis=0)
masked_cols = np.concatenate((np.arange(0, last_masked_col_left+1, dtype=int),
np.arange(first_masked_col_right, native_columns, dtype=int)),
axis=0)
# These columns used for stray light checks
vignetted_cols = np.concatenate((np.arange(last_masked_col_left+1, first_illuminated_column, dtype=int),
np.arange(last_illuminated_column+1, first_masked_col_right, dtype=int)),axis=0)
# EMIT frames can be in native format or in subframe (328 row) format.
# This function extracts a subframe from a native format frame
def frame_extract(frame, clip_columns = False):
if frame.shape[1] != native_columns:
raise IndexError('All frames should have '+str(native_columns)+' columns')
if frame.shape[0] != native_rows:
raise IndexError('Native frames should have '+str(native_rows)+' rows')
frame = frame[first_valid_row:(last_valid_row+1),:]
if clip_columns:
frame = frame[:,first_illuminated_column:(last_illuminated_column+1)]
return frame
# EMIT frames can be in native format or in subframe (328 row) format.
# This function makes sure that all frames have native format by
# embedding subframes inside some padding.
def frame_embed(frame):
if frame.shape[1] != native_columns:
raise IndexError('All frames should have '+str(native_columns)+' columns')
if frame.shape[0] == native_rows:
return frame
if frame.shape[0] != valid_rows:
raise IndexError('Invalid number of rows')
embedded = np.zeros((native_rows, native_columns))
embedded[first_valid_row:last_valid_row+1, :] = frame
return embedded
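# Illustrative round trip: extract a subframe from an all-zero native frame and
# embed it back. The synthetic frame is an arbitrary example.
if __name__ == '__main__':
    demo_native = np.zeros((native_rows, native_columns))
    demo_sub = frame_extract(demo_native)   # (328, 1280) subframe
    demo_back = frame_embed(demo_sub)       # padded back to (480, 1280)
    print(demo_sub.shape, demo_back.shape)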
|
{"hexsha": "b52ee0751f6e317aea029594162bb46b7271a66a", "size": 2724, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/emit_fpa.py", "max_stars_repo_name": "emit-sds/emit-sds-l1b", "max_stars_repo_head_hexsha": "be5307fe6821a043971becdd33609b4cf89b1974", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils/emit_fpa.py", "max_issues_repo_name": "emit-sds/emit-sds-l1b", "max_issues_repo_head_hexsha": "be5307fe6821a043971becdd33609b4cf89b1974", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/emit_fpa.py", "max_forks_repo_name": "emit-sds/emit-sds-l1b", "max_forks_repo_head_hexsha": "be5307fe6821a043971becdd33609b4cf89b1974", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.3661971831, "max_line_length": 104, "alphanum_fraction": 0.753671072, "include": true, "reason": "import numpy", "num_tokens": 701}
|
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <boost/asio.hpp>
#include <chrono>
using boost::asio::ip::tcp;
int numChannelsToTest = 1;
int sendCommand(const char* ip, const char* port, const char* command, size_t command_length)
{
try
{
boost::asio::io_service io_service;
tcp::socket s(io_service);
tcp::resolver resolver(io_service);
boost::asio::connect(s, resolver.resolve({ip, port}));
boost::asio::write(s, boost::asio::buffer(command, command_length));
}
catch (std::exception& e)
{
std::cerr << "Exception: " << e.what() << "\n";
}
return 0;
}
int main()
{
const char* ipTable[] = {"192.168.0.2", "192.168.0.3", "192.168.0.4", "192.168.0.5", "192.168.0.6"};
std::cout << "NETS UI V1.0\n";
std::cout << "Press Enter to begin test...";
std::cin.get();
sendCommand("127.0.0.1", "13", "timestamp", 10); // send message to tcp server to hack timestamp
// cycle through ip addresses and initiate test
for (int i = 0; i < numChannelsToTest; i++)
{
const char* ip = ipTable[i];
char port[] = "13";
char command[] = {'t'};
size_t command_length = std::strlen(command);
sendCommand(ip, port, command, command_length);
}
return 0;
}
|
{"hexsha": "dec3609aa2d17aadd4145e1816fc0546c392386f", "size": 1223, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "ui.cpp", "max_stars_repo_name": "mikeepiazza/NETS", "max_stars_repo_head_hexsha": "107ddfb11747605380c3cb1caaafc8fc4ba476b4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2018-10-19T09:16:29.000Z", "max_stars_repo_stars_event_max_datetime": "2018-10-19T09:16:32.000Z", "max_issues_repo_path": "ui.cpp", "max_issues_repo_name": "mikeepiazza/NETS", "max_issues_repo_head_hexsha": "107ddfb11747605380c3cb1caaafc8fc4ba476b4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ui.cpp", "max_forks_repo_name": "mikeepiazza/NETS", "max_forks_repo_head_hexsha": "107ddfb11747605380c3cb1caaafc8fc4ba476b4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.9803921569, "max_line_length": 101, "alphanum_fraction": 0.6353229763, "num_tokens": 368}
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
# @author : biao chen
# @Email : chenbiao@sleepace.net
# @Project : Python_Files
# @File : utils.py
# @Software: PyCharm
# @Time : 2021/5/20 7:42 PM
"""
import os
import struct
import sys
import time
import traceback
from datetime import datetime
from pathlib import Path
import matplotlib as mpl
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore")
pd.set_option("display.max_columns", None)
# likewise, set the maximum number of rows to display
pd.set_option("display.max_rows", None)
# function: byte2int
def byte2int(data, mode="u16"):
dbyte = bytearray(data)
darray = []
i = 0
while i < len(dbyte):
if "u8" == mode:
darray.append(dbyte[i])
i = i + 1
elif "u16" == mode:
darray.append(dbyte[i] | dbyte[i + 1] << 8)
i = i + 2
return darray
# end: byte2int
# function: byte2float
def byte2float(data, mode="float"):
darray = []
i = 0
if "float" == mode:
while i < len(data):
fx = struct.unpack("f", data[i : i + 4])
darray.append(fx)
i = i + 4
elif "double" == mode:
while i < len(data):
dx = struct.unpack("d", data[i : i + 8])
darray.append(dx)
i = i + 8
return darray
# end: byte2float
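# Illustrative sketch of the byte decoders above on hand-built byte strings;
# the values are arbitrary examples.
def _example_byte_decoding():
    assert byte2int(b"\x01\x00\xff\x00", mode="u16") == [1, 255]
    assert byte2int(b"\x01\x02", mode="u8") == [1, 2]
    # struct.unpack returns 1-tuples, so the result is [(1.5,), (2.5,)]
    return byte2float(struct.pack("ff", 1.5, 2.5), mode="float")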
def read_bytefile(path, folder, file, mode="u8"):
fname = path + folder + file
f = open(fname, "rb")
dtmp = f.read()
global rslt
if "u8" == mode:
rslt = byte2int(dtmp, mode="u8")
if "u16" == mode:
rslt = byte2int(dtmp, mode="u16")
if "float" == mode:
rslt = byte2float(dtmp, mode="float")
if "double" == mode:
rslt = byte2float(dtmp, mode="double")
return rslt
# append one row of data to the worksheet
def insertOne(value, sheet):
sheet.append(value)
def read_raw(src_dir, fname):
bcg, gain = [], []
fname = src_dir + fname
f = open(fname, "rb")
dtmp = f.read()
dbyte = bytearray(dtmp)
i = 0
while i < len(dbyte):
bcg.append(dbyte[i] | dbyte[i + 1] << 8)
gain.append(dbyte[i + 2])
i = i + 3
return bcg, gain
def read_wgt(src_dir, fname):
wgt = []
fname = src_dir + fname
f = open(fname, "rb")
dtmp = f.read()
dbyte = bytearray(dtmp)
i = 0
while i < len(dbyte):
wgt.append(dbyte[i + 1] | dbyte[i] << 8)
i = i + 2
return wgt
def time2stamp(cmnttime): # convert a "%Y-%m-%d %H:%M:%S" string to a Unix timestamp
# parse into a time struct
timeArray = time.strptime(cmnttime, "%Y-%m-%d %H:%M:%S")
# convert to a timestamp
timeStamp = int(time.mktime(timeArray))
return timeStamp
def stamp2time(timeStamp):
timeArray = time.localtime(timeStamp)
otherStyleTime = time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
return otherStyleTime
def day2stamp(cmnttime): # convert a "%Y-%m-%d" string to a Unix timestamp
# parse into a time struct
timeArray = time.strptime(cmnttime, "%Y-%m-%d")
# convert to a timestamp
timeStamp = int(time.mktime(timeArray))
return timeStamp
def stamp2day(timeStamp):
timeArray = time.localtime(timeStamp)
otherStyleTime = time.strftime("%Y-%m-%d", timeArray)
return otherStyleTime
def hour2stamp(cmnttime): # convert a "%Y-%m-%d %H:%M" string to a Unix timestamp
# parse into a time struct
timeArray = time.strptime(cmnttime, "%Y-%m-%d %H:%M")
# convert to a timestamp
timeStamp = int(time.mktime(timeArray))
return timeStamp
def stamp2hour(timeStamp):
timeArray = time.localtime(timeStamp)
otherStyleTime = time.strftime("%Y-%m-%d %H:%M", timeArray)
return otherStyleTime
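# Illustrative round trip for the timestamp helpers above; the date is an
# arbitrary example (interpreted in local time).
def _example_time_roundtrip():
    ts = time2stamp("2021-05-20 19:42:00")
    assert stamp2time(ts) == "2021-05-20 19:42:00"
    assert stamp2day(ts) == "2021-05-20"
    return ts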
def time2datetime(tranTime, pList):
tdelta, startstamp = 60, int(time2stamp(tranTime))
t = [datetime.fromtimestamp(startstamp + t * tdelta) for t in range(len(pList))]
return t
def time_formattime(pList):
famTime = [datetime.fromisoformat(t) for t in pList]
return famTime
def quest_time_extract(num_spl, quest_outbed, slp_awTim):
num_slp0 = num_spl[0]
num_slp2 = num_spl[:2]
aslp_day = stamp2day(day2stamp(slp_awTim) - 86400)
awak_day = slp_awTim
if len(num_spl) == 6:
outbed_stamp = "0" + num_spl[0] + ":" + num_spl[1:3] + ":00"
if int(num_slp0) >= 19 and int(num_slp0) <= 23:
outbed_stamp = aslp_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif int(num_slp0) >= 0 and int(num_slp0) <= 8:
outbed_stamp = awak_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif len(num_spl) == 4:
outbed_stamp = num_spl[:2] + ":" + num_spl[2:] + ":00"
if int(num_slp2) >= 19 and int(num_slp2) <= 23:
outbed_stamp = aslp_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif int(num_slp2) >= 0 and int(num_slp2) <= 8:
outbed_stamp = awak_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif len(num_spl) == 3:
outbed_stamp = "0" + num_spl[0] + ":" + num_spl[1:] + ":00"
if int(num_slp0) >= 19 and int(num_slp0) <= 23:
outbed_stamp = aslp_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif int(num_slp0) >= 0 and int(num_slp0) <= 8:
outbed_stamp = awak_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif len(num_spl) == 2:
outbed_stamp = "0" + num_spl[0] + ":" + "00" + ":00"
if int(num_slp0) >= 19 and int(num_slp0) <= 23:
outbed_stamp = aslp_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif int(num_slp0) >= 0 and int(num_slp0) <= 8:
outbed_stamp = awak_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif len(num_spl) == 1:
outbed_stamp = "0" + num_spl + ":" + "00" + ":00"
if int(num_spl) >= 19 and int(num_spl) <= 23:
outbed_stamp = aslp_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
elif int(num_spl) >= 0 and int(num_spl) <= 8:
outbed_stamp = awak_day + " " + outbed_stamp
quest_outbed.append(outbed_stamp)
def diff_acl(slpList, psgList):
fslp_diff = int(abs(time2stamp(str(psgList)) - time2stamp(str(slpList))) / 60)
return fslp_diff
def num_pop(num1: list, num2: list):
if len(num1) > len(num2):
lenDiff = len(num1) - len(num2)
for i in range(lenDiff):
num1.pop()
elif len(num2) > len(num1):
lenDiff = len(num2) - len(num1)
for i in range(lenDiff):
num2.pop()
def num3_pop(num1: list, num2: list, num3: list):
num2 = [str(i) for i in range(len(num2))]
num3 = [str(i) for i in range(len(num3))]
maxLen = max(len(num1), len(num2), len(num3))
minLen = min(len(num1), len(num2), len(num3))
plen = maxLen - minLen
new_num1, new_num2, new_num3 = 0, 0, 0
for i in range(maxLen):
if len(num1) == maxLen:
new_num1 = num1[:-plen]
elif len(num2) == maxLen:
new_num2 = num2[:-plen]
elif len(num3) == maxLen:
new_num3 = num3[:-plen]
return new_num1, new_num2, new_num3
def len_compare(pr_list: list, rr_list: list):
if len(pr_list) > len(rr_list):
return len(rr_list)
elif len(pr_list) < len(rr_list):
return len(pr_list)
def path_concat(sub_dir, pathName):
_path = str(sub_dir.joinpath(pathName)) + "/"
return _path
def is_empty_file_3(file_path: str):
assert isinstance(file_path, str), f"file_path is not a string: {type(file_path)}"
p = Path(file_path)
assert p.is_file(), f"file_path is not a file: {file_path}"
return p.stat().st_size == 0
def dir_empty(dir_path):
try:
next(os.scandir(dir_path))
return False
except StopIteration:
return True
def select_num(df1, df2):
# num_requried = 0
hr_lower_limit = df1["hr"].map(lambda x: x != 0)
hr_upper_limit = df1["hr"].map(lambda x: x != 255)
br_lower_limit = df1["br"].map(lambda x: x != 0)
br_upper_limit = df1["br"].map(lambda x: x != 255)
pr_lower_limit = df2["pr"].map(lambda x: x != 0)
pr_upper_limit = df2["pr"].map(lambda x: x != 255)
rr_lower_limit = df2["rr"].map(lambda x: x != 0)
rr_upper_limit = df2["rr"].map(lambda x: x != 255)
df1 = df1[
(hr_lower_limit & hr_upper_limit & br_lower_limit & br_upper_limit)
& (pr_lower_limit & pr_upper_limit & rr_lower_limit & rr_upper_limit)
]
df2 = df2[
(hr_lower_limit & hr_upper_limit & br_lower_limit & br_upper_limit)
& (pr_lower_limit & pr_upper_limit & rr_lower_limit & rr_upper_limit)
]
df1 = df1.reset_index(drop=True) # re-index from zero
df2 = df2.reset_index(drop=True) # re-index from zero
return df1, df2
def minute_mean(df, cname, stime):
# compute the per-minute mean of the SLP heart rate / breathing rate
hr_min_list = []
slp_time_min_list = []
df_min = int(len(df[cname]) / 60) # total number of whole minutes in the data
for i in range(df_min):
hr_min_len = (i + 1) * 60
num = 0
temp = 0
slp_time_min = stime + hr_min_len
for j in df[cname][hr_min_len - 60 : hr_min_len]:
if j != 0 and j != 255:
num += 1
temp += j
if num > 0:
res = int(temp / num)
hr_min_list.append(res)
if num == 0:
hr_min_list.append(0)
slp_time_min_list.append(slp_time_min)
# rslt = {'time':slp_time_min_list,'hr':hr_min_list,'br':br_min_list}
# df_clean = pd.DataFrame(data=rslt)
return slp_time_min_list, hr_min_list
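# Illustrative sketch of minute_mean() on two minutes of synthetic 1 Hz data;
# the rates and the start timestamp are arbitrary examples.
def _example_minute_mean():
    demo = pd.DataFrame({"hr": [60] * 60 + [72] * 60})
    times, means = minute_mean(demo, "hr", stime=1621500000)
    return times, means  # per-minute timestamps and means [60, 72]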
def file_exist(my_file):
txt_list = []
if Path(my_file).is_file() is False:
Path(my_file).touch()
return txt_list
def Heart_rate_accuracy_calculat(PR, HR, src_txt, fcsv):
PR = PR[PR.map(lambda x: x > 0)]
HR = HR[HR.map(lambda x: x > 0)]
PR = PR.reset_index(drop=True) # re-index from zero
HR = HR.reset_index(drop=True) # re-index from zero
diff_hr = PR - HR
diff_hr_cnt = 0
try:
diff_hr_pre = abs(diff_hr) / PR
diff_hr_pre = diff_hr_pre.dropna()
diff_hr_pre = diff_hr_pre * 100
for i, val in enumerate(diff_hr):
if i <= len(PR):
if abs(val) <= PR[i] * 0.1 or abs(val) <= 5:
diff_hr_cnt += 1
hr_mean = round(np.mean(abs(diff_hr)), 2)
hr_std = round(np.std(abs(diff_hr), ddof=1), 2)
if len(diff_hr_pre) == 0:
print(traceback.print_exc())
else:
acc_hr = diff_hr_cnt / len(diff_hr_pre)
txt_content = (
fcsv
+ " 心率准确性[%d / %d]: %.2f %%"
% (
diff_hr_cnt,
len(diff_hr_pre),
round(acc_hr * 100, 2),
)
+ " 心率误差:",
str(hr_mean) + "±" + str(hr_std),
)
f = open(src_txt + "accuracy.txt", "a")
f.write((str(txt_content) + "\r"))
f.close()
return acc_hr
except Exception as exc:
print(exc)
print(traceback.print_exc())
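# The heart-rate criterion above counts a minute as accurate when the device
# value is within 10% of the reference value or within 5 bpm of it. A
# standalone sketch of that rule on made-up values:
def _example_hr_agreement():
    PR = pd.Series([60, 80, 100])  # reference (PSG) values
    HR = pd.Series([63, 95, 101])  # device (SLP) values
    ok = [abs(p - h) <= p * 0.1 or abs(p - h) <= 5 for p, h in zip(PR, HR)]
    return sum(ok) / len(ok)  # 2/3 of the minutes agree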
def Respiration_rate_accuracy_calculat(RR, br, src_txt, fcsv):
RR = RR[RR.map(lambda x: x > 0)]
br = br[br.map(lambda x: x > 0)]
RR = RR.reset_index(drop=True) # re-index from zero
br = br.reset_index(drop=True) # re-index from zero
try:
# compute breathing-rate accuracy
diff_br_pre = abs(RR - br)
diff_br_pre = diff_br_pre.dropna()
diff_br_cnt = 0
for i in diff_br_pre:
if i <= 2:
diff_br_cnt += 1
br_mean = round(np.mean(abs(diff_br_pre)), 2)
br_std = round(np.std(abs(diff_br_pre), ddof=1), 2)
if len(diff_br_pre) == 0:
print(traceback.print_exc())
else:
acc_br = diff_br_cnt / len(diff_br_pre)
txt_content = (
fcsv
+ " 呼吸率准确性[%d / %d]: %.2f %%"
% (
diff_br_cnt,
len(diff_br_pre),
round(acc_br * 100, 2),
)
+ " 呼吸率误差:",
str(br_mean) + "±" + str(br_std),
)
f = open(src_txt + "accuracy.txt", "a")
f.write((str(txt_content) + "\r"))
f.close()
return acc_br
except Exception as exc:
print(exc)
print(traceback.print_exc())
def draw_PR_save(PR, slp_hr, time_offset, img_dir, fcsv, acc_flag):
# plotting
mpl.rcParams["font.sans-serif"] = ["SimHei"]
mpl.rcParams["axes.unicode_minus"] = False
# configure the x-axis date display: format and tick interval
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter("%Y%m/%d %H:%M:%S"))
plt.gca().xaxis.set_major_locator(mdates.MinuteLocator(interval=15))
if len(PR) > len(time_offset):
PR = PR[:-1]
ax1 = plt.subplot(412)
plt.plot(time_offset, PR, "r-", label="PSG")
plt.plot(time_offset, slp_hr, "b-", label="smart pillow")
plt.title("Heart rate comparison (bpm)", fontsize=9)
plt.legend(loc="upper right")
plt.setp(ax1.get_xticklabels(), visible=False, fontsize=9)
# plt.xlim(time_offset[0], time_offset[-1])
plt.ylim(40, 100)
f = plt.gcf() # get the current figure
if acc_flag == 1:
f.savefig(img_dir + "err_img/" + fcsv + ".png", bbox_inches="tight")
elif acc_flag == 0:
f.savefig(img_dir + "nor_img/" + fcsv + ".png", bbox_inches="tight")
f.clear() # free the figure's memory
def draw_PR_RR_save(PR, RR, slp_hr, slp_br, time_offset, img_dir, fcsv, acc_flag):
# plotting
mpl.rcParams["font.sans-serif"] = ["SimHei"]
mpl.rcParams["axes.unicode_minus"] = False
# fig.suptitle(fname)
# configure the x-axis date display: format and tick interval
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter("%Y%m/%d %H:%M:%S"))
plt.gca().xaxis.set_major_locator(mdates.MinuteLocator(interval=15))
if len(PR) > len(time_offset):
PR = PR[:-1]
if len(RR) > len(time_offset):
RR = RR[:-1]
print(len(time_offset), len(PR))
print(time_offset)
ax1 = plt.subplot(412)
plt.plot(time_offset, PR, "r-", label="PSG")
plt.plot(time_offset, slp_hr, "b-", label="smart pillow")
plt.title("Heart rate comparison (bpm)", fontsize=9)
plt.legend(loc="upper right")
plt.setp(ax1.get_xticklabels(), visible=False, fontsize=9)
# plt.xlim(time_offset[0], time_offset[-1])
plt.ylim(40, 100)
ax2 = plt.subplot(413, sharex=ax1)
plt.plot(time_offset, RR, "r-", label="PSG")
plt.plot(time_offset, slp_br, "b-", label="smart pillow")
plt.title("Breathing rate comparison (rpm)", fontsize=9)
plt.legend(loc="upper right")
plt.setp(ax2.get_xticklabels(), visible=True, fontsize=9)
plt.xticks()
# plt.xlim(time_offset[0], time_offset[-1])
plt.ylim(5, 35)
f = plt.gcf() # get the current figure
if acc_flag == 1:
f.savefig(img_dir + "err_img/" + fcsv + ".png", bbox_inches="tight")
elif acc_flag == 0:
f.savefig(img_dir + "nor_img/" + fcsv + ".png", bbox_inches="tight")
# f.figlegend()
f.clear() # free the figure's memory
def slp_hr_br_transfrom(cat_dir, save_dir, flag):
# convert batched SLP simulation output into csv files
flist = os.listdir(cat_dir + "hr_sec/")
for fcsv in flist[:]:
fname = fcsv.split(".")[0]
hr_list = read_bytefile(cat_dir, "hr_sec/", fcsv, mode="u8")
br_list = read_bytefile(cat_dir, "br_sec/", fcsv, mode="u8")
startstamp = int(fcsv.split("_")[-1].split(".")[0])
time_list = [startstamp + t for t in range(len(hr_list))]
if flag == 0:
rslt = {"time": time_list, "heart_rate": hr_list}
df = pd.DataFrame(data=rslt)
df.to_csv(
(save_dir + fname + ".csv"), index=False, header=["time", "heart_rate"]
)
elif flag == 1:
rslt = {"time": time_list, "breath_rate": br_list}
df = pd.DataFrame(data=rslt)
df.to_csv(
(save_dir + fname + ".csv"), index=False, header=["time", "breath_rate"]
)
elif flag == 2:
rslt = {"time": time_list, "heart_rate": hr_list, "breath_rate": br_list}
df = pd.DataFrame(data=rslt)
df.to_csv(
(save_dir + fname + ".csv"),
index=False,
header=["time", "heart_rate", "breath_rate"],
)
def psg_slp_heart_cal(src_slp, src_psg, src_txt, src_img):
"""心率准确性脚本计算"""
slp_flist = os.listdir(src_slp)
psg_flist = os.listdir(src_psg)
txt_list = []
my_file = src_txt + "setime.txt"
for i, fcsv in enumerate(slp_flist):
simg_name = fcsv.split(".")[0]
data_slp = pd.read_csv(src_slp + fcsv)
print(fcsv, psg_flist[i])
data_psg = pd.read_csv(src_psg + psg_flist[i])
data_slp.columns = ["time", "hr"]
data_psg.columns = ["time", "pr"]
time_set = [
data_slp["time"].tolist()[0],
time2stamp(data_psg["time"].tolist()[0]),
data_slp["time"].tolist()[-1],
time2stamp(data_psg["time"].tolist()[-1]),
]
start_time = time_set[0] - time_set[1]
end_time = time_set[2] - time_set[3]
if start_time < 0:
file_start = time_set[1]
else:
file_start = time_set[0]
if end_time < 0:
file_end = time_set[2]
else:
file_end = time_set[3]
data_psg["timestamp"] = time2stamp(data_psg["time"])
print(
"开始区间:", file_start, "结束区间:", file_end, "公共区间长度:", (file_end - file_start)
)
slp_sind = data_slp[data_slp["time"] == file_start].index.tolist()[0]
slp_eind = data_slp[data_slp["time"] == file_end].index.tolist()[0]
slp_clist = data_slp[slp_sind : slp_eind + 1]
psg_sind = data_psg[data_psg["timestamp"] == file_start].index.tolist()[0]
psg_eind = data_psg[data_psg["timestamp"] == file_end].index.tolist()[0]
psg_clist = data_psg[psg_sind : psg_eind + 1]
hr_time, hr_list = minute_mean(slp_clist, "hr", file_start)
pr_time, pr_list = minute_mean(psg_clist, "pr", file_start)
rslt_slp = {"time": hr_time, "hr": hr_list}
clean_slp = pd.DataFrame(data=rslt_slp)
rslt_psg = {"time": pr_time, "pr": pr_list}
clean_psg = pd.DataFrame(data=rslt_psg)
time = clean_slp["time"]
HR = clean_slp["hr"]
PR = clean_psg["pr"]
acc_hr = Heart_rate_accuracy_calculat(PR, HR, src_txt, fcsv)
time_offset = [datetime.fromtimestamp(i) for i in time]
# prepare the raw SLP heart-rate / breathing series
slp_hr = pd.Series(list(HR), index=time_offset)
if len(time_offset) > 0:
acc_flag = 0
if acc_hr < 0.9:
acc_flag = 1
draw_PR_save(PR, slp_hr, time_offset, src_img, simg_name, acc_flag)
else:
draw_PR_save(PR, slp_hr, time_offset, src_img, simg_name, acc_flag)
if Path(my_file).is_file() is False:
Path(my_file).touch()
if Path(my_file).exists():
size = os.path.getsize(my_file)
if size > 100:
os.remove(my_file)
Path(my_file).touch()
elif size == 0:
time_diff = file_end - file_start
txt_content = (
fcsv
+ " 起始时间:"
+ str(file_start)
+ " 结束时间:"
+ str(file_end)
+ " 时间长度:"
+ str(time_diff)
)
txt_list.append(txt_content)
for i, val in enumerate(txt_list):
f = open(my_file, "a")
f.write((str(val) + "\r"))
f.close()
def psg_slp_heart_breath_cal(src_slp, src_psg, src_txt, src_img, flag):
"""心率、呼吸率准确性计算脚本"""
if flag == 0:
slp_flist = os.listdir(src_slp)
psg_flist = os.listdir(src_psg)
slp_idList = [i.split(".")[0].split("_")[0] for i in slp_flist]
psg_idList = [i.split(".")[0].split("_")[0] for i in psg_flist]
txt_list = []
my_file = src_txt + "setime.txt"
for i, fcsv in enumerate(slp_flist):
# print(slp_idList[i],psg_idList[i])
j = psg_idList.index(slp_idList[i])
simg_name = fcsv.split(".")[0]
data_slp = pd.read_csv(src_slp + fcsv)
data_psg = pd.read_csv(src_psg + psg_flist[j])
data_slp.columns = ["time", "hr", "br"]
data_psg.columns = ["time", "pr", "rr"]
time_set = [
data_slp["time"].tolist()[0],
time2stamp(data_psg["time"].tolist()[0]),
data_slp["time"].tolist()[-1],
time2stamp(data_psg["time"].tolist()[-1]),
]
start_time = time_set[0] - time_set[1]
end_time = time_set[2] - time_set[3]
if start_time < 0:
file_start = time_set[1]
else:
file_start = time_set[0]
if end_time < 0:
file_end = time_set[2]
else:
file_end = time_set[3]
data_psg["timestamp"] = time2stamp(data_psg["time"])
print(
"开始区间:",
file_start,
"结束区间:",
file_end,
"公共区间长度:",
(file_end - file_start),
)
slp_sind = data_slp[data_slp["time"] == file_start].index.tolist()[0]
slp_eind = data_slp[data_slp["time"] == file_end].index.tolist()[0]
slp_clist = data_slp[slp_sind : slp_eind + 1]
psg_sind = data_psg[data_psg["timestamp"] == file_start].index.tolist()[0]
psg_eind = data_psg[data_psg["timestamp"] == file_end].index.tolist()[0]
psg_clist = data_psg[psg_sind : psg_eind + 1]
hr_time, hr_list = minute_mean(slp_clist, "hr", file_start)
br_time, br_list = minute_mean(slp_clist, "br", file_start)
pr_time, pr_list = minute_mean(psg_clist, "pr", file_start)
rr_time, rr_list = minute_mean(psg_clist, "rr", file_start)
rslt_slp = {"time": hr_time, "hr": hr_list, "br": br_list}
clean_slp = pd.DataFrame(data=rslt_slp)
rslt_psg = {"time": pr_time, "pr": pr_list, "rr": rr_list}
clean_psg = pd.DataFrame(data=rslt_psg)
time = clean_slp["time"]
HR = clean_slp["hr"]
PR = clean_psg["pr"]
BR = clean_slp["br"]
RR = clean_psg["rr"]
acc_hr = Heart_rate_accuracy_calculat(PR, HR, src_txt, fcsv)
acc_br = Respiration_rate_accuracy_calculat(RR, BR, src_txt, fcsv)
time_offset = [datetime.fromtimestamp(i) for i in time]
# prepare the raw SLP heart-rate / breathing series
slp_hr = pd.Series(list(HR), index=time_offset)
slp_br = pd.Series(list(BR), index=time_offset)
if len(time_offset) > 0:
acc_flag = 0
if acc_hr is not None and acc_br is not None:
if acc_hr < 0.9 or acc_br < 0.9:
acc_flag = 1
draw_PR_RR_save(
PR,
RR,
slp_hr,
slp_br,
time_offset,
src_img,
simg_name,
acc_flag,
)
else:
draw_PR_RR_save(
PR,
RR,
slp_hr,
slp_br,
time_offset,
src_img,
simg_name,
acc_flag,
)
if Path(my_file).is_file() is False:
Path(my_file).touch()
if Path(my_file).exists():
size = os.path.getsize(my_file)
if size > 100:
os.remove(my_file)
Path(my_file).touch()
elif size == 0:
time_diff = file_end - file_start
txt_content = (
fcsv
+ " 起始时间:"
+ str(file_start)
+ " 结束时间:"
+ str(file_end)
+ " 时间长度:"
+ str(time_diff)
)
txt_list.append(txt_content)
for i, val in enumerate(txt_list):
f = open(my_file, "a")
f.write((str(val) + "\r"))
f.close()
elif flag == 1:
slp_flist = os.listdir(src_slp)
psg_flist = os.listdir(src_psg)
slp_idList = [i.split(".")[0].split("_")[0] for i in slp_flist]
psg_idList = [i.split(".")[0].split("_")[0].lstrip("0") for i in psg_flist]
txt_list = []
my_file = src_txt + "setime.txt"
for i, fcsv in enumerate(slp_flist):
j = psg_idList.index(slp_idList[i])
simg_name = fcsv.split(".")[0]
data_slp = pd.read_csv(src_slp + fcsv)
data_psg = pd.read_csv(src_psg + psg_flist[j])
data_slp.columns = ["time", "hr", "br"]
data_psg.columns = ["time", "pr", "rr"]
time_set = [
data_slp["time"].tolist()[0],
hour2stamp(data_psg["time"].tolist()[0]),
data_slp["time"].tolist()[-1],
hour2stamp(data_psg["time"].tolist()[-1]),
]
start_time = time_set[0] - time_set[1]
end_time = time_set[2] - time_set[3]
if start_time < 0:
file_start = time_set[1]
else:
file_start = time_set[0]
if end_time < 0:
file_end = time_set[2]
else:
file_end = time_set[3]
print(time_set[1], time_set[0])
# data_psg["timestamp"] = data_psg["time"].apply(lambda x: hour2stamp(x))
data_psg["timestamp"] = hour2stamp(data_psg["time"])
print(
"开始区间:",
file_start,
"结束区间:",
file_end,
"公共区间长度:",
(file_end - file_start),
)
slp_sind = data_slp[data_slp["time"] == file_start].index.tolist()[0]
slp_eind = data_slp[data_slp["time"] == file_end].index.tolist()[0]
slp_clist = data_slp[slp_sind : slp_eind + 1]
psg_sind = data_psg[data_psg["timestamp"] == file_start].index.tolist()[0]
psg_eind = data_psg[data_psg["timestamp"] == file_end].index.tolist()[0]
psg_clist = data_psg[psg_sind : psg_eind + 1]
hr_time, hr_list = minute_mean(slp_clist, "hr", file_start)
br_time, br_list = minute_mean(slp_clist, "br", file_start)
pr_time, pr_list = minute_mean(psg_clist, "pr", file_start)
rr_time, rr_list = minute_mean(psg_clist, "rr", file_start)
rslt_slp = {"time": hr_time, "hr": hr_list, "br": br_list}
clean_slp = pd.DataFrame(data=rslt_slp)
rslt_psg = {"time": pr_time, "pr": pr_list, "rr": rr_list}
clean_psg = pd.DataFrame(data=rslt_psg)
time = clean_slp["time"]
HR = clean_slp["hr"]
PR = clean_psg["pr"]
BR = clean_slp["br"]
RR = clean_psg["rr"]
acc_hr = Heart_rate_accuracy_calculat(PR, HR, src_txt, fcsv)
acc_br = Respiration_rate_accuracy_calculat(RR, BR, src_txt, fcsv)
time_offset = [datetime.fromtimestamp(i) for i in time]
# prepare the raw SLP heart-rate / breathing series
slp_hr = pd.Series(list(HR), index=time_offset)
slp_br = pd.Series(list(BR), index=time_offset)
if len(time_offset) > 0:
acc_flag = 0
if acc_hr < 0.9 or acc_br < 0.9:
acc_flag = 1
draw_PR_RR_save(
PR,
RR,
slp_hr,
slp_br,
time_offset,
src_img,
simg_name,
acc_flag,
)
else:
draw_PR_RR_save(
PR,
RR,
slp_hr,
slp_br,
time_offset,
src_img,
simg_name,
acc_flag,
)
if Path(my_file).is_file() is False:
Path(my_file).touch()
if Path(my_file).exists():
size = os.path.getsize(my_file)
if size > 100:
os.remove(my_file)
Path(my_file).touch()
elif size == 0:
time_diff = file_end - file_start
txt_content = (
fcsv
+ " 起始时间:"
+ str(file_start)
+ " 结束时间:"
+ str(file_end)
+ " 时间长度:"
+ str(time_diff)
)
txt_list.append(txt_content)
for i, val in enumerate(txt_list):
f = open(my_file, "a")
f.write((str(val) + "\r"))
f.close()
def psg_rr_transfrom(cat_dir, save_dir):
# convert batched PSG simulation output into csv files
flist = os.listdir(cat_dir + "br_sec/")
for fcsv in flist[:]:
fname = fcsv.split(".")[0]
br_list = read_bytefile(cat_dir, "br_sec/", fcsv, mode="u8")
startstamp = int(fcsv.split("_")[-1].split(".")[0])
time_list = [startstamp + t for t in range(len(br_list))]
rslt = {"time": time_list, "breath_rate": br_list}
df = pd.DataFrame(data=rslt)
df.to_csv(
(save_dir + fname + ".csv"), index=False, header=["time", "breath_rate"]
)
def read_summary(path, folder, file):
fname = path + folder + file
f = open(fname, "rb")
dtmp = f.read()
dtmp = bytearray(dtmp)
mean_hrate = dtmp[0] | dtmp[1] << 8 # mean heart rate
mean_brate = dtmp[2] | dtmp[3] << 8 # mean breathing rate
fallasleeptime = dtmp[4] | dtmp[5] << 8 # time of falling asleep
wakeuptime = dtmp[6] | dtmp[7] << 8 # time of waking up
offbed_cnt = dtmp[8] | dtmp[9] << 8 # number of times out of bed
turnover_cnt = dtmp[10] | dtmp[11] << 8 # number of turn-overs
bodymove_cnt = dtmp[12] | dtmp[13] << 8 # number of body movements
heartstop_cnt = dtmp[14] | dtmp[15] << 8 # number of heart pauses
respstop_cnt = dtmp[16] | dtmp[17] << 8 # number of apnea events
deepsleep_per = dtmp[18] | dtmp[19] << 8 # deep-sleep percentage
remsleep_per = dtmp[20] | dtmp[21] << 8 # medium-sleep percentage
lightsleep_per = dtmp[22] | dtmp[23] << 8 # light-sleep percentage
wakesleep_per = dtmp[24] | dtmp[25] << 8 # awake percentage
wakesleep_time = dtmp[26] | dtmp[27] << 8 # awake duration
lightsleep_time = dtmp[28] | dtmp[29] << 8 # light-sleep duration
remsleep_time = dtmp[30] | dtmp[31] << 8 # medium-sleep duration
deepsleep_time = dtmp[32] | dtmp[33] << 8 # deep-sleep duration
wake_off_cnt = dtmp[34] | dtmp[35] << 8 # number of awakenings (including out of bed)
hrate_max = dtmp[36] | dtmp[37] << 8 # maximum heart rate
brate_max = dtmp[38] | dtmp[39] << 8 # maximum breathing rate
hrate_min = dtmp[40] | dtmp[41] << 8 # minimum heart rate
brate_min = dtmp[42] | dtmp[43] << 8 # minimum breathing rate
hrate_high_time = dtmp[44] | dtmp[45] << 8 # tachycardia duration
hrate_low_time = dtmp[46] | dtmp[47] << 8 # bradycardia duration
brate_high_time = dtmp[48] | dtmp[49] << 8 # rapid-breathing duration
brate_low_time = dtmp[50] | dtmp[51] << 8 # slow-breathing duration
allsleep_time = dtmp[52] | dtmp[53] << 8 # total sleep duration
body_move = dtmp[54] | dtmp[55] << 8 # restlessness deduction
off_bed = dtmp[56] | dtmp[57] << 8 # out-of-bed deduction
wake_cnt = dtmp[58] | dtmp[59] << 8 # easily-awakened deduction
start_time = dtmp[60] | dtmp[61] << 8 # went-to-bed-late deduction
fall_asleep = dtmp[62] | dtmp[63] << 8 # difficulty-falling-asleep deduction
perc_deep = dtmp[64] | dtmp[65] << 8 # insufficient-deep-sleep deduction
sleep_long = dtmp[66] | dtmp[67] << 8 # slept-too-long deduction
sleep_less = dtmp[68] | dtmp[69] << 8 # slept-too-short deduction
breath_stop = dtmp[70] | dtmp[71] << 8 # apnea deduction
heart_stop = dtmp[72] | dtmp[73] << 8 # heart-pause deduction
hrate_low = dtmp[74] | dtmp[75] << 8 # bradycardia deduction
hrate_high = dtmp[76] | dtmp[77] << 8 # tachycardia deduction
brate_low = dtmp[78] | dtmp[79] << 8 # slow-breathing deduction
brate_high = dtmp[80] | dtmp[81] << 8 # rapid-breathing deduction
benign_sleep = dtmp[82] | dtmp[83] << 8 # benign-sleep-distribution deduction
offset = dtmp[84] | dtmp[85] << 8
data_len = dtmp[86] | dtmp[87] << 8
start_stamp = dtmp[88] | dtmp[89] << 8 | dtmp[90] << 16 | dtmp[91] << 24
print(start_stamp, start_stamp + fallasleeptime * 60)
diff = (
body_move
+ off_bed
+ wake_cnt
+ start_time
+ fall_asleep
+ perc_deep
+ sleep_long
+ sleep_less
+ breath_stop
+ heart_stop
+ hrate_low
+ hrate_high
+ brate_low
+ brate_high
+ benign_sleep
)
score = 100 - diff
rslt = {"offset": offset, "len": data_len, "start_time": start_stamp}
print("-----睡眠报告-----")
print(">>> 睡眠比例")
print(
"睡眠时长:%d H %d min (入睡:%d, 清醒:%d)"
% (allsleep_time / 60, allsleep_time % 60, fallasleeptime, wakeuptime)
)
print(
"深睡时长:%d H %d min (%d%%) | 中睡时长:%d H %d min (%d%%) "
"| 浅睡时长:%d H %d min (%d%%) | 清醒时长:%d H %d min (%d%%)"
% (
deepsleep_time / 60,
deepsleep_time % 60,
deepsleep_per,
remsleep_time / 60,
remsleep_time % 60,
remsleep_per,
lightsleep_time / 60,
lightsleep_time % 60,
lightsleep_per,
wakesleep_time / 60,
wakesleep_time % 60,
wakesleep_per,
)
)
print(">>> 呼吸心率")
print("平均呼吸:%d bpm (min: %d, max: %d)" % (mean_brate, brate_min, brate_max))
print("呼吸暂停:%d 次" % respstop_cnt)
print(
"呼吸过速:%d H %d min | 呼吸过缓:%d H %d min "
% (
brate_high_time / 60,
brate_high_time % 60,
brate_low_time / 60,
brate_low_time % 60,
)
)
print("平均心率:%d bpm (min: %d, max: %d)" % (mean_hrate, hrate_min, hrate_max))
print(
"心率过速:%d H %d min | 心率过缓:%d H %d min "
% (
hrate_high_time / 60,
hrate_high_time % 60,
hrate_low_time / 60,
hrate_low_time % 60,
)
)
print("心跳暂停:%d 次" % heartstop_cnt)
print(">>> 体动翻身")
print(
"体动次数:%d | 翻身次数:%d | 离床次数:%d | 清醒次数:%d "
% (bodymove_cnt, turnover_cnt, offbed_cnt, wake_off_cnt)
)
print(">>> 睡眠分数")
print("整晚睡眠得分:", score)
print("躁动不安扣分:", body_move)
print("离床过多扣分:", off_bed)
print("睡觉易醒扣分:", wake_cnt)
print("睡觉太晚扣分:", start_time)
print("难于入睡扣分:", fall_asleep)
print("深睡不足扣分:", perc_deep)
print("睡眠过长扣分:", sleep_long)
print("睡眠过短扣分:", sleep_less)
print("呼吸暂停扣分:", breath_stop)
print("心跳暂停扣分:", heart_stop)
print("心跳过缓扣分:", hrate_low)
print("心跳过速扣分:", hrate_high)
print("呼吸过缓扣分:", brate_low)
print("呼吸过速扣分:", brate_high)
print("良性睡眠扣分:", benign_sleep)
print("----------------")
return rslt
|
{"hexsha": "79f107653e5a7204965621d15204397792defbbd", "size": 35471, "ext": "py", "lang": "Python", "max_stars_repo_path": "slp_utils/utils.py", "max_stars_repo_name": "66chenbiao/sleepace_verification_tool", "max_stars_repo_head_hexsha": "6271312d9d78ee50703e27a75787510cab4c7f4d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "slp_utils/utils.py", "max_issues_repo_name": "66chenbiao/sleepace_verification_tool", "max_issues_repo_head_hexsha": "6271312d9d78ee50703e27a75787510cab4c7f4d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "slp_utils/utils.py", "max_forks_repo_name": "66chenbiao/sleepace_verification_tool", "max_forks_repo_head_hexsha": "6271312d9d78ee50703e27a75787510cab4c7f4d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.1295289855, "max_line_length": 88, "alphanum_fraction": 0.5205942883, "include": true, "reason": "import numpy", "num_tokens": 10965}
|
import pandas as pd
import seaborn as sns
import numpy as np
from skforecast.ForecasterAutoreg import ForecasterAutoreg
from skforecast.ForecasterAutoregCustom import ForecasterAutoregCustom
from skforecast.ForecasterAutoregMultiOutput import ForecasterAutoregMultiOutput
from skforecast.model_selection import grid_search_forecaster
from skforecast.model_selection import time_series_spliter
from skforecast.model_selection import cv_forecaster
from skforecast.model_selection import backtesting_forecaster
from skforecast.model_selection import backtesting_forecaster_intervals
from sklearn.experimental import enable_hist_gradient_boosting # noqa
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPRegressor
import datetime
from datetime import date
import joblib
import matplotlib.pyplot as plt
from modelos import *
def periodo(inicio, fin):
d0 = date(*(int(s) for s in inicio.split('-')))
d1 = date(*(int(s) for s in fin.split('-')))
delta = d1 - d0
if delta.days < 0:
return print("Fecha inicio mayor que fecha fin")
neg = delta.days
else:
c = delta.days
return c
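# Illustrative check of periodo(); the dates below are arbitrary examples.
def _example_periodo():
    assert periodo("2021-01-01", "2021-01-10") == 9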
def table_predict(city, finicio, ffin):
'''
finicio, ffin : dates in "YYYY-MM-DD" format
'''
df2 = pd.read_csv('data/'+str(city)+'.csv')
df2['Date'] = pd.to_datetime(df2['Date'],errors='coerce')
df2 = df2[['Date', 'y']]
df2.columns = ['ds', 'y']
df2 = df2[(df2['ds'].dt.hour>=6) & (df2['ds'].dt.hour<=18)]
# create datetime index passing the datetime series
datetime_index = pd.DatetimeIndex(df2.ds.values)
df2=df2.set_index(datetime_index)
df2.drop('ds',axis=1,inplace=True)
# Create future empty values
c = periodo(finicio, ffin)
idx = pd.date_range(df2.index[-1] + pd.Timedelta(hours=7), periods=24*c, freq='h')[1:]
table = df2.append(pd.DataFrame(pd.Series(np.repeat(0, len(idx)), index=idx), columns= ['y']))
table = table[(table.index.hour>=6) & (table.index.hour<=18)]
return table, c
def calculate_lags(df2):
# Lag for the time: day, week, month, quarter, semester, annual
serie2 =pd.concat([df2,df2.y.shift(91),df2.y.shift(104),df2.y.shift(117),df2.y.shift(130),df2.y.shift(143)
,df2.y.shift(156),df2.y.shift(169),df2.y.shift(182),df2.y.shift(390)
,df2.y.shift(403),df2.y.shift(1170), df2.y.shift(1183),df2.y.shift(1196)
,df2.y.shift(1209),df2.y.shift(2340), df2.y.shift(2353), df2.y.shift(2366)
,df2.y.shift(2379),df2.y.shift(3900),df2.y.shift(4745)],
axis=1)
# Columns
columns_name2 = ['y','t_7','t_8','t_9','t_10','t_11','t_12','t_13','t_14',
't_30','t_31','t_90','t_91','t_92','t_93','t_180',
't_181','t_182','t_183','t_300','t_365']
serie2.columns = columns_name2
serie2 = serie2.dropna()
return serie2
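# Illustrative sketch of calculate_lags() on a synthetic hourly series. The
# length (6000 points) is an arbitrary choice, just larger than the biggest
# shift (4745) so that dropna() leaves some rows.
def _example_calculate_lags():
    idx = pd.date_range("2020-01-01", periods=6000, freq="h")
    demo = pd.DataFrame({"y": np.arange(6000.0)}, index=idx)
    lagged = calculate_lags(demo)
    return lagged.columns.tolist()  # ['y', 't_7', ..., 't_365']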
def forecast_values(serie, days):
c = days * 13
serie = serie[-c:]
X_pred = serie.drop(['y'], axis=1)
return X_pred
def table_show(table, inicio, forecast):
inicio = date(*(int(s) for s in inicio.split('-')))
inicio += datetime.timedelta(days=1)
inicio = inicio.strftime('%Y/%m/%d')
salida = table[table.index > inicio]
salida['y'] = 0
temp = pd.DataFrame(forecast)
temp = round(temp,1)
name = ['y']
temp.columns= name
salida = salida.assign(y=temp['y'].values)
name2 = ['DHI_Forecast']
salida.columns = name2
return salida
# source data: per-city average temperatures used by the solar-power estimate below
ciudad_temp = pd.read_csv('cities_prom.csv')
def consumo_solar(city, irrad):
if irrad <= 0:
irrad = irrad + 0.1
temp = ciudad_temp[ciudad_temp['name']==city]['temperature']
pot_sol= potencia_solar(temp, irrad,36)
if pot_sol < 0:
pot_sol = 0
return pot_sol
def final_table_solar(city, pred_MLP_1, p_tabla):
column_generation = pd.DataFrame([consumo_solar(city,x) for x in pred_MLP_1])
name_temp = ['G2']
column_generation.columns = name_temp
p_tabla['G'] = 0
final_table = p_tabla.assign(G=column_generation['G2'].values)
name_cg = ['DHI_Forecast','Generated Power (W)']
final_table.columns = name_cg
return final_table
|
{"hexsha": "e06c988db6714a9b2c1fc8ad0ac22bdc5d1ba77f", "size": 4539, "ext": "py", "lang": "Python", "max_stars_repo_path": "EApp/predictions.py", "max_stars_repo_name": "eljimenezj/Team_51_DS4A_2021", "max_stars_repo_head_hexsha": "6f8e1fca0962e1698e4b533fee6eabd36abea1cf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "EApp/predictions.py", "max_issues_repo_name": "eljimenezj/Team_51_DS4A_2021", "max_issues_repo_head_hexsha": "6f8e1fca0962e1698e4b533fee6eabd36abea1cf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "EApp/predictions.py", "max_forks_repo_name": "eljimenezj/Team_51_DS4A_2021", "max_forks_repo_head_hexsha": "6f8e1fca0962e1698e4b533fee6eabd36abea1cf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.7401574803, "max_line_length": 111, "alphanum_fraction": 0.6461775722, "include": true, "reason": "import numpy", "num_tokens": 1260}
|
#%% Necessary Dependencies
import numpy as np
import logging
import yaml
try:
import matplotlib.pyplot as plt
matplot=True
except(ImportError):
logging.warning(f'no matplotlib, debug plotting disabled')
matplot=False
from hexrd.grainmap import nfutil
from hexrd.grainmap import tomoutil
from hexrd import instrument
def load_instrument(yml):
with open(yml, 'r') as f:
icfg = yaml.load(f, Loader=yaml.FullLoader)
return instrument.HEDMInstrument(instrument_config=icfg)
# %% FILES TO LOAD -CAN BE EDITED
#==============================================================================
#These files are attached, retiga.yml is a detector configuration file
#The near field detector was already calibrated
#A materials file, is a cPickle file which contains material information like lattice
#parameters necessary for the reconstruction
main_dir = '/INSERT/WORK/DIR/'
det_file = main_dir + 'tomo_det.yml'
#==============================================================================
# %% OUTPUT INFO -CAN BE EDITED
#==============================================================================
output_dir = main_dir
output_stem='tomo_out'
#==============================================================================
# %% TOMOGRAPHY DATA FILES -CAN BE EDITED
#==============================================================================
stem='nf_'
#Locations of tomography dark field images
tdf_data_folder='/LOC/nf/'
tdf_img_start=52 #for this rate, this is the 6th file in the folder
tdf_num_imgs=10
#Locations of tomography bright field images
tbf_data_folder='/LOC/nf/'
tbf_img_start=68 #for this rate, this is the 6th file in the folder
tbf_num_imgs=10
#Locations of tomography images
tomo_data_folder='/LOC/nf/'
tomo_img_start=84#for this rate, this is the 6th file in the folder
tomo_num_imgs=360
#==============================================================================
# %% USER OPTIONS -CAN BE EDITED
#==============================================================================
ome_range_deg=[(0.,359.75)] #degrees
#tomography options
recon_thresh=0.0002#usually varies between 0.0001 and 0.0005
#Don't change these unless you know what you are doing, this will close small holes
#and remove noise
noise_obj_size=500
min_hole_size=500
erosion_iter=1
dilation_iter=1
project_single_layer=False #projects the center layers through the volume, faster but not recommended, included for completion / historical purposes
#reconstruction volume options
cross_sectional_dim=1.35 #cross-sectional extent to reconstruct (should be at least 20%-30% larger than the sample width)
voxel_spacing=0.005#in mm
v_bnds=[-0.4,0.4]
#==============================================================================
# %% LOAD INSTRUMENT DATA
#==============================================================================
instr=load_instrument(det_file)
panel = next(iter(instr.detectors.values()))
nrows=panel.rows
ncols=panel.cols
pixel_size=panel.pixel_size_row
rot_axis_pos=panel.tvec[0] #should match t_vec_d[0] from nf_detector_parameter_file
vert_beam_center=panel.tvec[1]
# need to do a few calculations because not every row will be reconstructed
# depending on sampling
vert_points=np.arange(v_bnds[0]+voxel_spacing/2.,v_bnds[1],voxel_spacing)
center_layer_row=nrows/2.+vert_beam_center/pixel_size
rows_to_recon=np.round(center_layer_row-vert_points/pixel_size).astype(int)
center_layer_row=int(center_layer_row)
#==============================================================================
# %% TOMO PROCESSING - GENERATE DARK AND BRIGHT FIELD
#==============================================================================
tdf=tomoutil.gen_median_image(tdf_data_folder,tdf_img_start,tdf_num_imgs,nrows,ncols,stem=stem,num_digits=6)
tbf=tomoutil.gen_median_image(tbf_data_folder,tbf_img_start,tbf_num_imgs,nrows,ncols,stem=stem,num_digits=6)
#==============================================================================
# %% TOMO PROCESSING - BUILD RADIOGRAPHS
#==============================================================================
rad_stack=tomoutil.gen_attenuation_rads(tomo_data_folder,tbf,tomo_img_start,tomo_num_imgs,nrows,ncols,stem=stem,num_digits=6,tdf=tdf)
#==============================================================================
# %% TOMO PROCESSING - INVERT SINOGRAM
#==============================================================================
# center = 0.0
test_fbp=tomoutil.tomo_reconstruct_layer(rad_stack,cross_sectional_dim,layer_row=center_layer_row,\
start_tomo_ang=ome_range_deg[0][0],end_tomo_ang=ome_range_deg[0][1],\
tomo_num_imgs=tomo_num_imgs, center=rot_axis_pos,pixel_size=pixel_size)
test_binary_recon=tomoutil.threshold_and_clean_tomo_layer(test_fbp,recon_thresh, \
noise_obj_size,min_hole_size, erosion_iter=erosion_iter, \
dilation_iter=dilation_iter)
tomo_mask_center=tomoutil.crop_and_rebin_tomo_layer(test_binary_recon,recon_thresh,voxel_spacing,pixel_size,cross_sectional_dim)
#==============================================================================
# %% TOMO PROCESSING - VIEW RAW FILTERED BACK PROJECTION
#==============================================================================
if matplot:
plt.figure(1)
plt.imshow(test_fbp,vmin=recon_thresh,vmax=recon_thresh*2)
plt.title('Check Thresholding')
#Use this image to view the raw reconstruction, estimate threshold levels, and
#figure out whether the rotation axis position needs to be corrected
plt.figure(2)
plt.imshow(tomo_mask_center,interpolation='none')
plt.title('Check Center Mask')
#==============================================================================
# %% PROCESS REMAINING LAYERS
#==============================================================================
full_mask=np.zeros([len(rows_to_recon),tomo_mask_center.shape[0],tomo_mask_center.shape[1]])
for ii in np.arange(len(rows_to_recon)):
print('Layer: ' + str(ii) + ' of ' + str(len(rows_to_recon)))
if project_single_layer: #not recommended option
full_mask[ii]=tomo_mask_center
else:
reconstruction_fbp=tomoutil.tomo_reconstruct_layer(rad_stack,cross_sectional_dim,layer_row=rows_to_recon[ii],\
start_tomo_ang=ome_range_deg[0][0],end_tomo_ang=ome_range_deg[0][1],\
tomo_num_imgs=tomo_num_imgs, center=rot_axis_pos,pixel_size=pixel_size)
binary_recon=tomoutil.threshold_and_clean_tomo_layer(reconstruction_fbp,recon_thresh, \
noise_obj_size,min_hole_size,erosion_iter=erosion_iter, \
dilation_iter=dilation_iter)
tomo_mask=tomoutil.crop_and_rebin_tomo_layer(binary_recon,recon_thresh,voxel_spacing,pixel_size,cross_sectional_dim)
full_mask[ii]=tomo_mask
#==============================================================================
# %% TOMO PROCESSING - VIEW LAST TOMO_MASK FOR SAMPLE BOUNDS
#==============================================================================
if matplot:
plt.figure(3)
plt.imshow(tomo_mask,interpolation='none')
plt.title('Check Center Mask')
#==============================================================================
# %% TOMO PROCESSING - CONSTRUCT DATA GRID
#==============================================================================
test_crds, n_crds, Xs, Ys, Zs = nfutil.gen_nf_test_grid_tomo(full_mask.shape[2], full_mask.shape[1], v_bnds, voxel_spacing)
#%%
np.savez('tomo_mask.npz',mask=full_mask,Xs=Xs,Ys=Ys,Zs=Zs,voxel_spacing=voxel_spacing)
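#==============================================================================
# %% RELOAD SAVED MASK (ILLUSTRATIVE)
#==============================================================================
# A minimal sketch of reloading the mask written by np.savez above in a later
# session; only the file name used in the savez call is assumed.
reloaded = np.load('tomo_mask.npz')
print(reloaded['mask'].shape, reloaded['voxel_spacing'])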
|
{"hexsha": "8f2bcdb813ed6cd614d62c685c35c1f2a8bb3dcb", "size": 7979, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/preprocess_tomo_mask.py", "max_stars_repo_name": "cjh1/hexrd", "max_stars_repo_head_hexsha": "057deee3e9d9beb09a30aac8ed263eff3febf3ec", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 27, "max_stars_repo_stars_event_min_datetime": "2020-02-18T12:15:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T17:53:46.000Z", "max_issues_repo_path": "scripts/preprocess_tomo_mask.py", "max_issues_repo_name": "cjh1/hexrd", "max_issues_repo_head_hexsha": "057deee3e9d9beb09a30aac8ed263eff3febf3ec", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 259, "max_issues_repo_issues_event_min_datetime": "2020-02-02T22:18:29.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T19:59:58.000Z", "max_forks_repo_path": "scripts/preprocess_tomo_mask.py", "max_forks_repo_name": "cjh1/hexrd", "max_forks_repo_head_hexsha": "057deee3e9d9beb09a30aac8ed263eff3febf3ec", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2020-02-18T12:14:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-04T16:19:11.000Z", "avg_line_length": 37.111627907, "max_line_length": 148, "alphanum_fraction": 0.5584659732, "include": true, "reason": "import numpy", "num_tokens": 1635}
|
@testset "Unification" begin
# Test unification of constant with zero-arity compound
subst = unify(@julog(constant), @julog(constant()))
@test subst == @varsub {}
# Test unification of nested terms
subst = unify(@julog(f(g(X, h(X, b)), Z)), @julog(f(g(a, Z), Y)))
@test subst == @varsub {X => a, Z => h(a, b), Y => h(a, b)}
# Test occurs check during unification
@test unify(@julog(A), @julog(functor(A)), false) == @varsub {A => functor(A)}
@test unify(@julog(A), @julog(functor(A)), true) === nothing
# Test extended unification of arithmetic functions (Prolog can't do this!)
# X unifies to 4, Y unifies to 5, so *(X, Y) unifies by evaluation to 20
@test unify(@julog(f(X, X*Y, Y)), @julog(f(4, 20, 5))) == @varsub {Y => 5, X => 4}
# X unifies to 4, Y unifies to /(20,4), so *(X, Y) unifies by evaluation to 20
@test unify(@julog(f(X, X*Y, Y)), @julog(f(4, 20, 20/4))) == @varsub {Y => /(20, 4), X => 4}
# X unifies to 4, Y unifies to 5, Z unifies to *(X, Y) === *(4, 5) post substitution
@test unify(@julog(f(X, X*Y, Y)), @julog(f(4, Z, 5))) == @varsub {Y => 5, X => 4, Z => *(4, 5)}
# X unifies to X, Y unifies to 5, X*Y cannot be evaluated and so fails to unify with 20
@test unify(@julog(f(X, X*Y, Y)), @julog(f(X, 20, 5))) === nothing
# Test subterm detection
@test has_subterm(@julog(atom), @julog(Var)) == true
@test has_subterm(@julog(functor(functor(atom))), @julog(functor)) == false
@test has_subterm(@julog(functor(functor(atom))), @julog(atom)) == true
@test has_subterm(@julog(functor(functor(atom))), @julog(functor(Var))) == true
@test has_subterm(@julog(list[a, b, c, d]), @julog(b)) == true
@test has_subterm(@julog(list[a, b, c, d]), @julog(list[a, b])) == false
@test has_subterm(@julog(list[a, b, c, d]), @julog(list[c, d])) == true
@test has_subterm(@julog(f(g(X, h(X, b)), Z)), @julog(h(X, Y))) == true
subterms = find_subterms(@julog(foo(bar(1), bar(bar(2)))), @julog(bar(X)))
@test Set(subterms) == Set(@julog(Term[bar(1), bar(bar(2)), bar(2)]))
end
|
{"hexsha": "5f946e604c9ef72ca3b53333fa97eaad5a1583c7", "size": 1988, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/unify.jl", "max_stars_repo_name": "Herb-AI/Julog.jl", "max_stars_repo_head_hexsha": "490646ca15ec3dd93fb69443d003b988576b5259", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-12-28T07:15:24.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-07T02:07:12.000Z", "max_issues_repo_path": "test/unify.jl", "max_issues_repo_name": "Herb-AI/Julog.jl", "max_issues_repo_head_hexsha": "490646ca15ec3dd93fb69443d003b988576b5259", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/unify.jl", "max_forks_repo_name": "Herb-AI/Julog.jl", "max_forks_repo_head_hexsha": "490646ca15ec3dd93fb69443d003b988576b5259", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.3333333333, "max_line_length": 95, "alphanum_fraction": 0.6217303823, "num_tokens": 749}
|
"""
Copyright 2013 Steven Diamond and Xinyue Shen.
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from cvxpy.atoms import reshape, vec
from cvxpy.expressions.constants import Constant
def linearize(expr):
"""Returns the tangent approximation to the expression.
Gives an elementwise lower (upper) bound for convex (concave)
expressions. No guarantees for non-DCP expressions.
Returns None if cannot be linearized.
Args:
expr: An expression.
Returns:
An affine expression or None.
"""
expr = Constant.cast_to_const(expr)
if expr.is_affine():
return expr
else:
tangent = expr.value
if tangent is None:
raise ValueError(
"Cannot linearize non-affine expression with missing variable values."
)
grad_map = expr.grad
for var in expr.variables():
if grad_map[var] is None:
return None
elif var.is_matrix():
flattened = Constant(grad_map[var]).T*vec(var - var.value)
tangent = tangent + reshape(flattened, *expr.size)
else:
tangent = tangent + Constant(grad_map[var]).T*(var - var.value)
return tangent
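# Rough usage sketch (illustrative only; assumes the usual top-level cvxpy API,
# which is not imported by this module):
#
#     import cvxpy
#     x = cvxpy.Variable()
#     x.value = 2
#     approx = linearize(cvxpy.square(x))
#     # tangent of x**2 at x = 2, i.e. 4 + 4*(x - 2); a global lower bound
#     # because square is convex.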
|
{"hexsha": "3f0786fc627760047e109583953e81527e9c2991", "size": 1824, "ext": "py", "lang": "Python", "max_stars_repo_path": "cvxpy/transforms/linearize.py", "max_stars_repo_name": "quantopian/cvxpy", "max_stars_repo_head_hexsha": "7deee4d172470aa8f629dab7fead50467afa75ff", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2017-08-31T01:37:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T04:23:09.000Z", "max_issues_repo_path": "cvxpy/transforms/linearize.py", "max_issues_repo_name": "quantopian/cvxpy", "max_issues_repo_head_hexsha": "7deee4d172470aa8f629dab7fead50467afa75ff", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cvxpy/transforms/linearize.py", "max_forks_repo_name": "quantopian/cvxpy", "max_forks_repo_head_hexsha": "7deee4d172470aa8f629dab7fead50467afa75ff", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2017-02-09T19:37:07.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-07T00:17:54.000Z", "avg_line_length": 32.0, "max_line_length": 86, "alphanum_fraction": 0.6710526316, "include": true, "reason": "from cvxpy", "num_tokens": 389}
|
import networkx as nx
import numpy
import pytest
from nereid.src.land_surface.tasks import land_surface_loading
from nereid.tests.utils import generate_random_land_surface_request
@pytest.fixture
def watershed_graph():
g = nx.gnr_graph(n=13, p=0.0, seed=0)
nx.relabel_nodes(g, lambda x: str(x), copy=False)
return g
@pytest.fixture
def initial_node_data(contexts, watershed_graph, land_surface_permutations):
context = contexts["default"]
numpy.random.seed(42)
ls_req = generate_random_land_surface_request(
watershed_graph.nodes(), land_surface_permutations
)
ls_attrs = land_surface_loading(ls_req, details=False, context=context)["summary"]
return {dct["node_id"]: dct for dct in ls_attrs}
|
{"hexsha": "f6d6caa1befad239b44df4f155ab80f6a7295c7a", "size": 745, "ext": "py", "lang": "Python", "max_stars_repo_path": "nereid/nereid/tests/test_src/test_watershed/conftest.py", "max_stars_repo_name": "Geosyntec/nereid", "max_stars_repo_head_hexsha": "3399b616ae19dfc75f5b6ba83d598495db9b09fb", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-16T22:10:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T22:10:24.000Z", "max_issues_repo_path": "nereid/nereid/tests/test_src/test_watershed/conftest.py", "max_issues_repo_name": "Geosyntec/nereid", "max_issues_repo_head_hexsha": "3399b616ae19dfc75f5b6ba83d598495db9b09fb", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 99, "max_issues_repo_issues_event_min_datetime": "2019-11-18T20:06:53.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-01T04:01:51.000Z", "max_forks_repo_path": "nereid/nereid/tests/test_src/test_watershed/conftest.py", "max_forks_repo_name": "Geosyntec/nereid", "max_forks_repo_head_hexsha": "3399b616ae19dfc75f5b6ba83d598495db9b09fb", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-02-28T21:06:39.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-13T23:12:17.000Z", "avg_line_length": 26.6071428571, "max_line_length": 86, "alphanum_fraction": 0.7583892617, "include": true, "reason": "import numpy,import networkx", "num_tokens": 174}
|
# Represents a graph with extended lincomb functionality:
# It can add more than two matrices. Only used in code generation.
struct MultiLincombCompgraph{T}
operations::Dict{Symbol,Symbol}
parents::Dict{Symbol,NTuple{<:Any,Symbol}}
coeffs::Dict{Symbol,NTuple{<:Any,T}}
outputs::Vector{Symbol}
end
function MultiLincombCompgraph(g::Compgraph)
T = eltype(g)
extgraph = MultiLincombCompgraph(
Dict{Symbol,Symbol}(),
Dict{Symbol,NTuple{<:Any,Symbol}}(),
Dict{Symbol,NTuple{<:Any,T}}(),
Vector{Symbol}(),
)
Z = extract_sums(g)
for k in keys(g.operations)
if (g.operations[k] == :mult)
add_mult!(extgraph, k, g.parents[k][1], g.parents[k][2])
end
if (g.operations[k] == :ldiv)
add_ldiv!(extgraph, k, g.parents[k][1], g.parents[k][2])
end
end
for s in Z
coeff_list = s[1]
symbol_list = s[2]
key = s[3][end]
p = size(coeff_list, 1)
extgraph.operations[key] = :lincomb
extgraph.coeffs[key] = NTuple{p,T}(coeff_list)
extgraph.parents[key] = NTuple{p,Symbol}(symbol_list)
end
for k in g.outputs
push!(extgraph.outputs, k)
end
return extgraph
end
"""
sums=extract_sums(graph::Compgraph{T})
`sums::Vector{Tuple{Vector{T},Vector{Symbol},Vector{Symbol}}}`
Returns a representation of the sums in `graph` that can potentially be merged in a dot-fusion.
The vector `sums` contains a tuple for each of these sums. The three entries of
the tuple are:
- a vector of `T` values that represent the coefficients of the summands;
- a vector of `Symbol`s that correspond to the summands; and
- a vector of intermediate `Symbol`s (i.e., nodes) that can be merged.
The first two vectors have the same number of entries, one for each element that
can be merged in the sum.
"""
function extract_sums(graph)
coeff, nodes, merged, sums =
find_mergeable_sums(graph, graph.outputs[1], [])
return sums
end
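# Illustrative example (constructed by hand, not produced by the package): for a
# fused sum 2.0*A + 3.0*B + 0.5*C in which the intermediate lincomb nodes :T1 and
# :T2 get merged, the corresponding entry of `sums` has the form
#     ([2.0, 3.0, 0.5], [:A, :B, :C], [:T1, :T2])
# i.e. the coefficient and summand vectors have equal length, and the last vector
# lists the intermediate nodes that disappear after the merge.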
# Return true if `node` is the parent of more than one node. Such nodes cannot be freed.
function has_multiple_parents(graph, node)
return sum(map(x -> any(x .== node), values(graph.parents))) > 1
end
function find_mergeable_sums(graph, node, processed, curr_coeff = 1)
# Extract sums in the subgraph of `graph` with root `node`.
#
# The function accepts four parameters:
# * `graph`: current graph.
# * `node`: current node.
    # * `processed`: set of nodes the algorithm has already processed.
    # * `curr_coeff`: coefficient of `node` if its parent is a lincomb, 1 otherwise.
#
# The functions returns four vectors:
# * `pcoeffs`: coefficients of sum currently being constructed.
# * `pnodes`: corresponding nodes of sum currently being constructed.
# * `pmerged`: nodes merged in the current sum (these will disappear).
# * `sums`: completely extracted sums.
#
# The algorithm starts from the first output node, which is seen as the root
# of a spanning tree with edges defined in `graph.parents`. The graph may
# have cycles, but the vector `processed` ensures that each node is
# processed only once, the first time it is visited.
#
# If `node` is an input node, that is, a node without parents, then the
# algorithm returns four empty vectors, as 1) the subtree rooted at `node`,
# being empty, does not have extracted sums, and 2) no sum is being
# constructed.
#
# If the node is not a leaf, the function is called recursively on the
# two parents, and three cases are possible:
#
# 1) If `node` is not a `:lincomb`, then the function returns the union of
# the sums extracted in the two subgraphs rooted at the parents. If either
# parent is a `:lincomb` the sum that parent was constructing is added to
# the vector of extracted sums. The three other output vectors are empty.
#
# 2) If `node` is a `:lincomb`, is parent to only one node, and is not an
# output node, then the function merges the two sums being constructed by
# the parents, if any, adds `node` to it, and returns the data accordingly.
# The union of the vectors of extracted sums is also returned.
#
    # 3) Otherwise, `node` is added to the union of the (possibly empty) sums
    # being constructed by the parents. In particular, the algorithm will add
    # `node` to the list of nodes to be merged, will update the coefficients of
    # the constructed sum accordingly, and will add the current sum to the
    # vector of extracted sums, which will also include the union of the sums
    # extracted in the subgraphs rooted at the parents.
if !(node in keys(graph.operations)) || (node in processed)
# Nothing to do for leaf nodes and nodes already processed.
return Float64[], Symbol[], Symbol[], []
else
# Call function recursively on both parents.
push!(processed, node)
(parent1, parent2) = graph.parents[node]
curr_lincomb = graph.operations[node] == :lincomb
(coeff1, coeff2) = curr_lincomb ? graph.coeffs[node] : (1, 1)
pcoeffs1, pnodes1, pmerged1, sums1 =
find_mergeable_sums(graph, parent1, processed, coeff1)
pcoeffs2, pnodes2, pmerged2, sums2 =
find_mergeable_sums(graph, parent2, processed, coeff2)
if curr_lincomb
# Grow the sum by adjoining terms coming from parents, if any.
has_multiple_parents(graph, node) && (curr_coeff = 1)
new_coeffs = vcat(curr_coeff * pcoeffs1, curr_coeff * pcoeffs2)
new_nodes = vcat(pnodes1, pnodes2)
new_merged = vcat(pmerged1, pmerged2)
# Lincomb parents are added to the mergeable nodes.
            # Non-lincomb parents are added to the sum, and their
            # coefficients are added to the vector of coefficients.
if haskey(graph.operations, parent1) &&
graph.operations[parent1] == :lincomb &&
!has_multiple_parents(graph, parent1)
new_merged = vcat(new_merged, parent1)
else
new_coeffs = vcat(new_coeffs, curr_coeff * coeff1)
new_nodes = vcat(new_nodes, parent1)
end
if haskey(graph.operations, parent2) &&
graph.operations[parent2] == :lincomb &&
!has_multiple_parents(graph, parent2)
new_merged = vcat(new_merged, parent2)
else
new_coeffs = vcat(new_coeffs, curr_coeff * coeff2)
new_nodes = vcat(new_nodes, parent2)
end
if node in graph.outputs || has_multiple_parents(graph, node)
return Float64[],
Symbol[],
Symbol[],
vcat(
sums1,
sums2,
(new_coeffs, new_nodes, vcat(new_merged, node)),
)
else
return new_coeffs, new_nodes, new_merged, vcat(sums1, sums2)
end
else
# Current node is not a lincomb node.
sums = vcat(sums1, sums2)
# Add sum of lincomb parents, if any, to sums.
sums =
!isempty(pcoeffs1) ?
vcat(sums, (pcoeffs1, pnodes1, vcat(pmerged1, parent1))) : sums
sums =
!isempty(pcoeffs2) ?
vcat(sums, (pcoeffs2, pnodes2, vcat(pmerged2, parent2))) : sums
return Float64[], Symbol[], Symbol[], sums
end
end
end
|
{"hexsha": "7369096951e5fc285e66c6d9ba598087049726aa", "size": 7604, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/code_gen/multilincomb.jl", "max_stars_repo_name": "matrixfunctions/GraphMatFun.jl", "max_stars_repo_head_hexsha": "1fac14aa849e7f050ae5281bf6414b4356807199", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-07-09T07:33:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T22:57:51.000Z", "max_issues_repo_path": "src/code_gen/multilincomb.jl", "max_issues_repo_name": "matrixfunctions/GraphMatFun.jl", "max_issues_repo_head_hexsha": "1fac14aa849e7f050ae5281bf6414b4356807199", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2021-07-09T17:53:54.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-14T14:48:01.000Z", "max_forks_repo_path": "src/code_gen/multilincomb.jl", "max_forks_repo_name": "matrixfunctions/GraphMatFun.jl", "max_forks_repo_head_hexsha": "1fac14aa849e7f050ae5281bf6414b4356807199", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.4804469274, "max_line_length": 93, "alphanum_fraction": 0.6246712257, "num_tokens": 1893}
|
#!/usr/bin/env python
import ldac, getopt, sys, os, glob
def make_eazy_filter_file(filterlist):
f = open('test.RES','w')
f_info = open('test.RES.info','w')
f_translate = open('zphot.translate','w')
line_c = 1
i = 0
for filter_name in filterlist:
i += 1
f_translate.write('f_' + filter_name + ' F' + str(i) + '\n')
f_translate.write('e_' + filter_name + ' E' + str(i) + '\n')
o = open(os.environ['BPZPATH'] + '/FILTER/' + filter_name + '.res','r').readlines()
f_info.write(str(i) + ' ' + str(line_c) + ': ' + str(len(o)) + ' ' + filter_name + '\n')
line_c += (1 + len(o))
f.write(str(len(o)) + ' ' + filter_name + ' total system response (should be!)\n')
f.write(reduce(lambda x,y: x + y,[' ' + str(q[0]) + ' ' + str(q[1]) for q in zip(range(1,len(o)+1),o)]))
f.close()
f_info.close()
f_translate.close()
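# Note on the files written above (summary of the code, for reference):
#   test.RES        - EAZY filter response file; each block starts with
#                     'N filter_name ...' followed by N rows of
#                     'index wavelength transmission' copied from
#                     $BPZPATH/FILTER/<filter_name>.res
#   test.RES.info   - one line per filter: 'i start_line: N filter_name'
#   zphot.translate - maps catalog columns f_<filter>/e_<filter> to F<i>/E<i>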
def run(command,to_delete=[]):
for file in to_delete:
if glob.glob(file):
os.system('rm ' + file)
print command
os.system(command)
def conditions(object,filterlist):
for filter_name in filterlist:
if object['Flag_'+filter_name + '_data'] !=0:
return 0
elif object['IMAFLAGS_ISO_'+filter_name + '_data'] !=0:
return 0
return 1
class file_iter:
def __init__(self,name):
self.name = name
self.suffix = 1
self.file = self.name + str(self.suffix)
def next(self):
self.suffix += 1
self.file = self.name + str(self.suffix)
return self.file
    def __iter__(self):
        self.file = self.name + str(self.suffix)
        return self
''' select a random subsample of objects to adjust training band zeropoints '''
def select_random(filterlist, fulltable,train_filters, magtype):
print len(fulltable)
''' first need to keep only galaxies with decent S/N '''
import scipy, random
#print fulltable.columns#.has_key('MAGERR_ISO-'+train_filters[1]+'_data')
''' removed this b/c HDFN has two B-band which don't always overlap -- if just training on u-band OK '''
''' hopefully have fixed this problem -- union not intersection '''
if True:
length = len(fulltable.field('SeqNr'))
randvec = scipy.array([random.random() for ww in range(length)])
has_a_good_measurement = scipy.zeros(len(fulltable),dtype=int)
for f in filterlist:
for f2 in train_filters:
if f == f2:
backup_array = fulltable.field('MAGERR_' + magtype + '-'+f2+'')[:]
mask = backup_array < 0.1
temptable = fulltable[mask]
goodnum = len(temptable)
masktot = (backup_array < 0.1) * (randvec < 10000./goodnum)
print f2, masktot.sum(), goodnum
has_a_good_measurement[masktot] = 1
fulltable = fulltable[has_a_good_measurement==1]
#length = len(fulltable.field('SeqNr'))
#randvec = scipy.array([random.random() for ww in range(length)])
#mask = randvec < (10000./length)
#fulltable = fulltable[mask]
print len(fulltable)
return fulltable
def doit(cluster,DETECT_FILTER,filterlist,inputcat,speccat,outspeccat,outfullcat,spec,varname,errvarname,magtype,train_filters=[],correction_dict={},randsample=False, magflux='MAG',quickHDFN=True,inputcat_zlist=None):
make_eazy_filter_file(filterlist,)
print filterlist,inputcat,speccat,outspeccat,outfullcat,varname,errvarname
matchedcat = "tmp_matched.cat" + cluster
scale=3631.0e-23
import os
import astropy.io.fits as pyfits
print inputcat
core = pyfits.open(inputcat)['OBJECTS'] # ???
fulltable = core.data
print len(core.data)
print spec
run_list = [['all',core,outfullcat,'',inputcat]]
print run_list
print inputcat
#spec = False
if spec:
print speccat
specfile = file_iter(speccat+'spec')
from glob import glob
if not glob(speccat):
print 'NO SPECTRA FILE'
raise Exception
os.system('rm ' + specfile.file[:-1] + '*')
os.system('cp '+ speccat +' '+specfile.file)
run("ldacrentab -i " + specfile.file + " -t OBJECTS STDTAB FIELDS NULL -o " + specfile.next(),[specfile.file])
run("ldacrenkey -i " + specfile.file + " -t STDTAB -k Ra ALPHA_J2000 Dec DELTA_J2000 Z z -o " + specfile.next(),[specfile.file])
run("ldaccalc -i " + specfile.file + " -t STDTAB -c '(Nr);' -k LONG -n SeqNr '' -o " + specfile.next(),[specfile.file] )
print specfile.file
# inputtable = ldac.openObjectFile(inputcat)
run("ldacrentab -i " + inputcat + " -t OBJECTS STDTAB -o " + inputcat+str(1),\
[inputcat+str(1)])
if os.environ['USER'] == 'dapple':
os.chdir('/a/wain001/g.ki.ki02/dapple/pipeline/wtgpipeline/')
print os.environ['USER'], os.system('pwd')
command = "./match_neighbor.sh " + matchedcat + " STDTAB " + specfile.file + " spec " + inputcat+str(1) + " data "
else:
os.chdir('/u/ki/pkelly/pipeline/wtgpipeline/')
print os.environ['USER'], os.system('pwd')
command = "/u/ki/pkelly/pipeline/wtgpipeline//match_neighbor.sh " + matchedcat + " STDTAB " + specfile.file + " spec " + inputcat+str(1) + " data "
print command
os.system('pwd')
run(command, [matchedcat])
print matchedcat, specfile.file
import astropy.io.fits as pyfits
spectable = pyfits.open(matchedcat)['STDTAB']
print "looking at "+varname+'-'+filterlist[0]+'_data'
print spectable
print matchedcat
run_list.append(['spectra',spectable,outspeccat,'_data',matchedcat])
#print pyfits.open(inputcat)['STDTAB'].columns# ???
naper=len(fulltable.field(magflux + '_' + magtype + '-'+filterlist[0]+''))
print naper
print inputcat+str(1)
eazy_write=True
bpz_write=True
bpz_cols_info = open(outspeccat + '.columns','w')
eazy_cols = ''
import scipy
#for type,table,file,appendix in [['spectra',spectable,outspeccat,'_data'],['all',fulltable,outfullcat,'']]:
for type,alltable,file,appendix,tablefile in run_list:
table = alltable.data
print file, appendix, type
print len(table)
''' select subsample '''
if randsample:
table = select_random(filterlist, table, train_filters, magtype)
#if quickHDFN:
for i in [1]: #range(naper):
prior_cols = []
bpz_cols = []
eazy_cols = []
eazy_head = ''
cols = []
colnum = 1
name = 'SeqNr'+appendix
#prior_cols.append(pyfits.Column(name='SeqNr', format='D', array=table.field('SeqNr')))
#cols.append(pyfits.Column(name=name, format='D', array=table.field(name)))
ID_COL = pyfits.Column(name=name, format='D', array=table.field(name))
#eazy_cols.append(pyfits.Column(name=name, format='D', array=table.field(name)))
eazy_head += ('# id ')
print filterlist
from glob import glob
file2 = os.environ['subdir'] + '/' + cluster + '/PHOTOMETRY/pat_slr.calib.pickle'
if glob(file2):
import pickle
f2 = open(file2,'r')
m = pickle.Unpickler(f2)
a2 = m.load()
results = a2['results']
zpcorr = results['full']
print zpcorr
def short_filter(f2):
a_short = f2.replace('+','').replace('C','')[-1]
print filt, a_short
import string
ok = True
if string.find(f2,'MEGAPRIME') != -1:
a_short = 'MP' + a_short.upper() + 'SUBARU'
elif string.find(f2,'SUBARU') != -1:
if string.find(f2,"W-S-") != -1:
a_short = 'WS' + a_short.upper() + 'SUBARU'
else:
a_short = a_short.upper() + 'JOHN'
if string.find(f2,"-1-") == -1:
ok = False
return a_short
good_filts = scipy.zeros(len(table))
cols_names = [x.name for x in alltable.columns]
import string as stringlib
coaddlist = filter(lambda x: stringlib.find(x,'COADD') != -1 and stringlib.find(x,'APER1') != -1, cols_names)
import re
coadd_filts = list(set([re.split('APER1',x)[1] for x in coaddlist]))
print coaddlist
print coadd_filts
''' need to check that these filters are also in filterlist '''
''' place filters into GROUPS based on coadd column '''
use_filts = []
for filt in coadd_filts:
coadd_filt_list = []
for filt_good in filterlist:
if stringlib.find(filt_good, filt.split('COADD')[-1][2:]) != -1:
coadd_filt_list.append(filt_good)
if coadd_filt_list:
use_filts.append(coadd_filt_list)
print use_filts, 'use_filts'
print filterlist, 'filterlist'
for filts in use_filts:
good_filt = scipy.zeros(len(table))
for filt in filts:
fluxmag_name = magflux + '_' + magtype + '-' + filt
error_name = magflux + 'ERR_' + magtype + '-' + filt
error_cat = table.field(error_name)[:]
fluxmag_cat = table.field(fluxmag_name)[:]
print len(good_filt)
if magflux == 'FLUX':
fluxmag = scipy.array(fluxmag_cat) #*scale)
error = scipy.array(error_cat) #*scale)
fluxmag[fluxmag_cat==-99]=0
error[fluxmag_cat==-99]=0
good_filt[error!=0]=1
elif magflux == 'MAG':
fluxmag = scipy.array(fluxmag_cat)
error = scipy.array(error_cat)
good_filt[(fluxmag!=-99)*(error!=99)] = 1
good_filts += good_filt
print good_filts[:1000]
print filterlist, len(filterlist)
for filt in filterlist:
if bpz_write:
colnum += 1
import string
fix = filter(lambda x:x is True, [filt==f for f in train_filters])
print fix, correction_dict.has_key(filt), len(fix), filt
if correction_dict.has_key(filt) and len(fix) > 0:
bpz_cols_info.write(filt + '\t' + str(colnum) + ',' + str(colnum+1) + '\tAB\t0.02\t' + str(correction_dict[filt]) + '\n')
#print correction_dict[filt], filt, 'fixed'
else:
#print zpcorr.keys(), short_filter(filt), filt
if False: #zpcorr.has_key(short_filter(filt)):
bpz_cols_info.write(filt + '\t' + str(colnum) + ',' + str(colnum+1) + '\tAB\t0.02\t' + str(zpcorr[short_filter(filt)]) + '\n')
else:
bpz_cols_info.write(filt + '\t' + str(colnum) + ',' + str(colnum+1) + '\tAB\t0.02\t0.0\n')
colnum += 1
print fix, len(fix) > 0
print [filt==f for f in train_filters]
print train_filters
if eazy_write:
eazy_head += ('f_' + filt + ' e_' + filt + ' ')
fluxmag_name = magflux + '_' + magtype + '-'+filt+appendix
error_name = magflux + 'ERR_' + magtype + '-'+filt+appendix
error_cat = table.field(error_name)[:]
fluxmag_cat = table.field(fluxmag_name)[:]
if magflux == 'FLUX':
#flag_name = 'Flags_' + magtype + '-'+filt+appendix
#flag_cat = table.field(flag_name)[:]
#imaflags_name = 'IMAFLAGS_ISO_' + magtype + '-'+filt+appendix
#imaflags_cat = table.field(fluxmag_name)[:]
fluxmag = scipy.array(fluxmag_cat) #*scale)
fluxmag_eazy = scipy.array(fluxmag_cat) #*scale)
error = scipy.array(error_cat) #*scale)
fluxmag[fluxmag_cat==-99]=0
fluxmag_eazy[fluxmag_cat==-99]=-100
fluxmag_eazy[fluxmag_cat==0]=-100
error[fluxmag_cat==-99]=0
''' mark bad measurements '''
#fluxmag[flag_cat!=0]=0
#error[flag_cat!=0]=0
#fluxmag[imaflags_cat!=0]=0
#error[imaflags_cat!=0]=0
#good_filt[error==0]=0
''' expand errors for training bands '''
for e in train_filters:
if string.find(filt,e) != -1 and randsample:
error[(error/fluxmag<0.15)*(error>0)*(fluxmag!=0)] = 0.15 * abs(fluxmag)
#print error
elif magflux == 'MAG':
fluxmag = scipy.array(fluxmag_cat)
fluxmag_eazy = scipy.array(fluxmag_cat) #*scale)
fluxmag_eazy[fluxmag_cat==-99]=-100
fluxmag_eazy[fluxmag_cat==0]=-100
error = scipy.array(error_cat)
''' for the COSMOS catalog '''
flag_dict = {'SUBARU-10_2-1-W-J-B':'B_mask',
'SUBARU-10_2-1-W-J-V':'V_mask',
'SUBARU-10_2-1-W-S-I+':'I_mask',
'SUBARU-10_2-1-W-S-Z+':'z_mask'}
if filt in flag_dict:
flag_cat = table.field(flag_dict[filt])[:]
fluxmag[flag_cat!=0]=-99
error[flag_cat!=0]=-99
''' expand errors for training bands '''
for e in train_filters:
if string.find(filt,e) != -1 and randsample:
error[(error<0.15)*(error>0)] = 0.15
''' remember that not all columns have a mask (i.e. R-band) '''
#good_filt[fluxmag==-99]=0
#good_filt[error==99]=0
''' BPZ: If an object is not detected in a filter, write m=99. and its error as m_lim, where m_lim is the 1-sigma detection limit.
For fluxes, write flux=0 and an error equal to the 1-sigma detection limit.
If an object is not observed in a filter, write m=-99., error=0. For fluxes, just write both flux and error as 0.
http://acs.pha.jhu.edu/~txitxo/bpzdoc.html
                COSMOS CATALOG: A magnitude of -99 indicates a photometric measurement was not possible due to lack of data, a large number of bad pixels, or saturation. A magnitude of 99.0 indicates no detection. In the case of no detection the error given for the object is the 1 sigma limiting magnitude at the position of the source.
http://irsa.ipac.caltech.edu/data/COSMOS/gator_docs/cosmos_photom_colDescriptions.html#masks
'''
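                # Illustrative summary of the conventions quoted above (comments only,
                # not executed logic):
                #   detected                -> mag = m,     err = sigma_m
                #   observed, not detected  -> mag = 99.0,  err = 1-sigma limiting mag
                #   not observed            -> mag = -99.0, err = 0.0
                # (fluxes: not detected -> flux = 0, err = limit; not observed -> 0, 0)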
from copy import copy
bpz_cols.append(pyfits.Column(name=fluxmag_name, format='D', array=copy(fluxmag)))
bpz_cols.append(pyfits.Column(name=error_name, format='D', array=copy(error)))
eazy_cols.append(pyfits.Column(name=fluxmag_name, format='D', array=copy(fluxmag_eazy)))
eazy_cols.append(pyfits.Column(name=error_name, format='D', array=copy(error)))
#good_filts += good_filt
if cluster == 'COSMOS_PHOTOZ':
filterprior = 'SUBARU-10_2-1-W-S-R+'
else:
filterprior = 'SUBARU-COADD-1-W-C-RC'
import itertools
import scipy
truth = scipy.array([x.name == magflux + '_' + magtype + '-'+filterprior for x in alltable.columns])
truth = truth[truth]
print 'truth', truth, filterprior, filterlist
if len(truth) == 0:
truth = scipy.array([x.name == magflux + '_' + magtype + '-'+filterprior for x in alltable.columns])
filterprior = 'SUBARU-10_1-1-W-C-RC'
truth = truth[truth]
if len(truth) == 0:
truth = scipy.array([x.name == magflux + '_' + magtype + '-'+filterprior for x in alltable.columns])
filterprior = 'SUBARU-10_2-1-W-C-RC'
truth = truth[truth]
if len(truth) == 0:
truth = scipy.array([x.name == magflux + '_' + magtype + '-'+filterprior for x in alltable.columns])
truth = scipy.array([x == filterprior for x in filterlist])
truth = truth[truth]
if len(truth) == 0:
filterprior = 'SUBARU-COADD-1-W-S-R+'
truth = scipy.array([x.name == magflux + '_' + magtype + '-'+filterprior for x in alltable.columns])
truth = truth[truth]
if len(truth) == 0:
filterprior = 'MEGAPRIME-COADD-1-r'
truth = scipy.array([x.name == magflux + '_' + magtype + '-'+filterprior for x in alltable.columns])
truth = truth[truth]
if len(truth) == 0:
filterprior = 'SUBARU-10_2-1-W-S-R+'
truth = scipy.array([x.name == magflux + '_' + magtype + '-'+filterprior for x in alltable.columns])
truth = truth[truth]
if len(truth) == 0:
filterprior = 'SUBARU-10_2-1-W-S-I+'
truth = scipy.array([x.name == magflux + '_' + magtype + '-'+filterprior for x in alltable.columns])
truth = truth[truth]
print truth, filterprior
filter_name = filterprior
print filter_name
#print table.field('FLUX_ISO-'+filter+appendix)[:,1], scale
#print -2.5 * scipy.log10(table.field('FLUX_ISO-'+filter+appendix)[:,1])
#print len(table.field('FLUX_ISO-'+filter+appendix)[:,1])
''' need to make a prior magnitude column that does not have only negative values?? '''
prior_array = table.field(magflux + '_' + magtype + '-'+filter_name+appendix)[:]
print len(prior_array[prior_array!=-99])
import string
if string.find(filter_name,'-1-') != -1:
backfilter = filter_name.replace('-1-','-2-')
truth = scipy.array([x == backfilter for x in filterlist])
truth = truth[truth]
if len(truth) != 0:
backup_array = table.field(magflux + '_' + magtype + '-'+backfilter+appendix)[:]
prior_array[prior_array == -99] = backup_array[prior_array == -99]
if string.find(filter_name,'-2-') != -1:
backfilter = filter_name.replace('-2-','-1-')
truth = scipy.array([x == backfilter for x in filterlist])
truth = truth[truth]
if len(truth) != 0:
backup_array = table.field(magflux + '_' + magtype + '-'+backfilter+appendix)[:]
prior_array[prior_array == -99] = backup_array[prior_array == -99]
print len(prior_array[prior_array!=-99])
print prior_array
bpz_cols = [ID_COL] + bpz_cols
eazy_cols = [ID_COL] + eazy_cols
''' Add up number of good filter measurements for each object NFILT '''
if type=='spectra':
print type
cols.append(pyfits.Column(name='PatID', format='D', array=table.field('z_spec')*0))
cols.append(pyfits.Column(name='zspec', format='D', array=table.field('z_spec')))
cols.append(pyfits.Column(name='priormag', format='D', array=prior_array) )
a = scipy.arange(1,len(table)+1)
bpz_cols.append(pyfits.Column(name='PatID', format='D', array=a))
bpz_cols.append(pyfits.Column(name='zspec', format='D', array=table.field('z_spec')))
#bpz_cols.append(pyfits.Column(name='priormag', format='D', array=table.field('FLUX_ISO-'+filter+appendix)[:,1]))
if magflux == 'FLUX':
bpz_cols.append(pyfits.Column(name='priormag', format='D', array=-2.5*scipy.log10(prior_array)) )
else:
bpz_cols.append(pyfits.Column(name='priormag', format='D', array=prior_array))
bpz_cols.append(pyfits.Column(name='NFILT', format='D', array=copy(good_filts)))
#bpz_cols.append(pyfits.Column(name='randsample', format='D', array=scipy.array([random.random() for ww in range(len(prior_array))])) )
#bpz_cols.append(pyfits.Column(name='RA', format='D', array=(table.field('ALPHA_J2000'))))
#bpz_cols.append(pyfits.Column(name='DEC', format='D', array=(table.field('DELTA_J2000'))))
prior_cols.append(pyfits.Column(name='priorflux', format='D', array=prior_array))
prior_cols.append(pyfits.Column(name='priormag', format='D', array=-2.5*scipy.log10(prior_array)) )
eazy_cols.append(pyfits.Column(name='zspec', format='D', array=table.field('z_spec')))
else:
#bpz_cols.append(pyfits.Column(name='0', format='D', array=table.field('SeqNr')*0))
a = scipy.arange(1,len(table)+1)
bpz_cols.append(pyfits.Column(name='PatID', format='D', array=a))
if inputcat_zlist:
f = open(inputcat_zlist,'r').readlines()
zs_array = scipy.array(f)
bpz_cols.append(pyfits.Column(name='zspec', format='D', array=zs_array))
else:
bpz_cols.append(pyfits.Column(name='zspec', format='D', array=table.field('SeqNr')*0))
if magflux == 'FLUX':
bpz_cols.append(pyfits.Column(name='priormag', format='D', array=-2.5*scipy.log10(prior_array)))
else:
bpz_cols.append(pyfits.Column(name='priormag', format='D', array=prior_array))
bpz_cols.append(pyfits.Column(name='NFILT', format='D', array=copy(good_filts)))
#bpz_cols.append(pyfits.Column(name='randsample', format='D', array=scipy.array([random.random() for ww in range(len(prior_array))])))
#bpz_cols.append(pyfits.Column(name='RA', format='D', array=(table.field('ALPHA_J2000'))))
#bpz_cols.append(pyfits.Column(name='DEC', format='D', array=(table.field('DELTA_J2000'))))
prior_cols.append(pyfits.Column(name='priorflux', format='D', array=prior_array))
prior_cols.append(pyfits.Column(name='priormag', format='D', array=-2.5*scipy.log10(prior_array)) )
eazy_cols.append(pyfits.Column(name='zspec', format='D', array=-1.*scipy.ones(len(table.field('SeqNr')))))
if bpz_write:
bpz_cols_info.write('ID\t' + str(1) + '\n')
colnum += 2
bpz_cols_info.write('Z_S\t' + str(colnum) + '\n')
colnum += 1
bpz_cols_info.write('M_0\t' + str(colnum) + '\n')
#colnum += 1
#bpz_cols_info.write('NFILT\t' + str(colnum) + '\n')
#colnum += 1
#bpz_cols_info.write('RANDOM\t' + str(colnum) + '\n')
#colnum += 1
#bpz_cols_info.write('RA\t' + str(colnum) + '\n')
#colnum += 1
#bpz_cols_info.write('DEC\t' + str(colnum) + '\n')
bpz_write = False
bpz_cols_info.close()
print outspeccat + '.columns'
if eazy_write:
eazy_head += 'z_spec\n# id ' + reduce(lambda x,y: x + ' ' + y, ['F' + str(g) + ' E' + str(g) for g in range(1,len(filterlist)+1)]) + ' z_spec\n'
print eazy_head
lphfile = file+'.lph'+str(i)
bpzfile = file+'.bpz'+str(i)
eazyfile = file+'.eazy'+str(i)
print eazyfile
eazyheader= file+'.eazyheader'
f = open(eazyheader,'w')
f.write(eazy_head)
f.close()
''' write out prior columns '''
f = file+'.prior'+str(i)
hdu = pyfits.PrimaryHDU()
hdulist = pyfits.HDUList([hdu])
print cols
tbhu = pyfits.BinTableHDU.from_columns(prior_cols)
hdulist.append(tbhu)
hdulist[1].header['EXTNAME']='STDTAB'
outcat = '/tmp/test' #path + 'PHOTOMETRY/' + type + '.cat'
os.system('rm ' + f + '.tab')
hdulist.writeto(f + '.tab')
for c,f in [[bpz_cols,bpzfile],[eazy_cols,eazyfile]]: #[cols,lphfile],
hdu = pyfits.PrimaryHDU()
hdulist = pyfits.HDUList([hdu])
print cols
tbhu = pyfits.BinTableHDU.from_columns(c)
hdulist.append(tbhu)
hdulist[1].header['EXTNAME']='STDTAB'
outcat = '/tmp/test' #path + 'PHOTOMETRY/' + type + '.cat'
os.system('rm ' + f + '.tab')
hdulist.writeto(f + '.tab')
os.system('rm test')
print 'writing'
#pyfits.tdump(outcat,datafile='test',ext=1)
os.system('ldactoasc -b -i ' + f + '.tab -t STDTAB > ' + f)
print 'finished'
print f
print "Finished aper "+str(i)
print "Type " + type
if __name__ == "__main__":
import sys
usage = '''./make_lephare_cats.py -i,--inputcat=STRING # full calibrated cat
-s,--speccat=STRING # spectra catalog
-o,--outspeccat=STRING # spectra catalog for lph
-c,--outfullcat=STRING # full cat for lph
-f,--filterlist=STRING # filterlist
'''
try:
opts, args = getopt.getopt(sys.argv[1:],
"i:s:o:c:f:",
["inputcat=", "speccat=", "outspeccat=",\
"outfullcat=","filterlist="])
except getopt.GetoptError:
print usage
sys.exit(2)
filterlist=''
inputcat=''
speccat=''
outspeccat=''
outfullcat=''
varname=''
errvarname=''
for o, a in opts:
if o in ("-i", "--inputcat"):
inputcat = a
elif o in ("-s","--speccat"):
speccat = a
elif o in ("-o", "--outspeccat"):
outspeccat = a
elif o in ("-c", "--outfullcat"):
outfullcat = a
elif o in ("-f","--filterlist"):
print a
a=a.replace(","," ")
filterlist = a.split()
else:
print "option:", o, " unknown"
print usage
sys.exit(2)
counter = 0
for item in filterlist,inputcat,speccat,outspeccat,outfullcat:
counter = counter +1
if item == '':
            print str(counter) + " unknown: need all options"
print usage
sys.exit(2)
#doit(cluster,DETECT_FILTER,filterlist,inputcat,speccat,outspeccat,outfullcat,varname,errvarname)
|
{"hexsha": "cf1b9dae35520ac4fa1f9438529e50a3cfdff042", "size": 31891, "ext": "py", "lang": "Python", "max_stars_repo_path": "make_lephare_cats.py", "max_stars_repo_name": "deapplegate/wtgpipeline", "max_stars_repo_head_hexsha": "9693e8562022cc97bf5a96427e22965e1a5e8497", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-03-15T04:01:19.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-15T04:01:19.000Z", "max_issues_repo_path": "make_lephare_cats.py", "max_issues_repo_name": "deapplegate/wtgpipeline", "max_issues_repo_head_hexsha": "9693e8562022cc97bf5a96427e22965e1a5e8497", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2017-12-11T00:11:39.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-09T17:05:16.000Z", "max_forks_repo_path": "make_lephare_cats.py", "max_forks_repo_name": "deapplegate/wtgpipeline", "max_forks_repo_head_hexsha": "9693e8562022cc97bf5a96427e22965e1a5e8497", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2017-08-15T21:19:11.000Z", "max_forks_repo_forks_event_max_datetime": "2017-10-12T00:36:35.000Z", "avg_line_length": 43.5669398907, "max_line_length": 337, "alphanum_fraction": 0.4571822771, "include": true, "reason": "import scipy,import astropy", "num_tokens": 7180}
|
# Base class for "other" scalar (time only) quantities
#
import matplotlib.pyplot as plt
import numpy as np
from . OutputException import OutputException
from . ScalarQuantity import ScalarQuantity
class OtherScalarQuantity(ScalarQuantity):
def __init__(self, name, data, description, grid, output):
"""
Constructor.
"""
attr = {'description': description}
super(OtherScalarQuantity, self).__init__(name=name, data=data, grid=grid, attr=attr, output=output)
self.time = grid.t[1:]
def __repr__(self):
"""
Convert this object to an "official" string.
"""
#s = self.__str__()
return self.__str__()
def __str__(self):
"""
Convert this object to a string.
"""
return '({}) Other scalar quantity of size NT = {}'.format(self.name, self.data.shape[0])
def __getitem__(self, index):
"""
Direct access to data.
"""
return self.data[index]
|
{"hexsha": "28e4e527bec2800729808b15cdf8f96f565b2f0b", "size": 1035, "ext": "py", "lang": "Python", "max_stars_repo_path": "py/DREAM/Output/OtherScalarQuantity.py", "max_stars_repo_name": "chalmersplasmatheory/DREAM", "max_stars_repo_head_hexsha": "715637ada94f5e35db16f23c2fd49bb7401f4a27", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2020-09-07T11:19:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-17T17:40:19.000Z", "max_issues_repo_path": "py/DREAM/Output/OtherScalarQuantity.py", "max_issues_repo_name": "chalmersplasmatheory/DREAM", "max_issues_repo_head_hexsha": "715637ada94f5e35db16f23c2fd49bb7401f4a27", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 110, "max_issues_repo_issues_event_min_datetime": "2020-09-02T15:29:24.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-09T09:50:01.000Z", "max_forks_repo_path": "py/DREAM/Output/OtherScalarQuantity.py", "max_forks_repo_name": "chalmersplasmatheory/DREAM", "max_forks_repo_head_hexsha": "715637ada94f5e35db16f23c2fd49bb7401f4a27", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-05-21T13:24:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-11T14:43:12.000Z", "avg_line_length": 22.5, "max_line_length": 108, "alphanum_fraction": 0.6038647343, "include": true, "reason": "import numpy", "num_tokens": 229}
|
"""
`InitialGuessODE`: Initial guess for ordinary differential equations
### Fields
* `int`: interpolation structure
* `v`: vector field
* `Δt`: time step
* `s`: number of extrapolation stages (for initialisation)
"""
mutable struct InitialGuessODE{DT, TT, VT, IT <: Interpolator}
int::IT
v::VT
Δt::TT
s::Int
function InitialGuessODE{DT,TT,VT,IT}(interp, v, Δt) where {DT,TT,VT,IT}
new(interp, v, Δt, get_config(:ig_extrapolation_stages))
end
end
function InitialGuessODE(interp, equation::ODE{DT,TT,VT}, Δt::TT) where {DT,TT,VT}
int = interp(zero(DT), one(DT), Δt, ndims(equation))
InitialGuessODE{DT, TT, VT, interp}(int, equation.v, Δt)
end
function InitialGuessODE(interp, equation::IODE{DT,TT,ΘT,FT,GT,HT,VT}, Δt::TT) where {DT,TT,ΘT,FT,GT,HT,VT}
int = interp(zero(DT), one(DT), Δt, ndims(equation))
InitialGuessODE{DT, TT, VT, interp}(int, equation.v, Δt)
end
function InitialGuessODE(interp, equation::VODE{DT,TT,AT,FT,GT,VT}, Δt::TT) where {DT,TT,AT,FT,GT,VT}
int = interp(zero(DT), one(DT), Δt, ndims(equation))
InitialGuessODE{DT, TT, VT, interp}(int, equation.v, Δt)
end
"Initialise initial guess, i.e., given t₀, t₁, q₁ compute q₀, v₀, v₁."
function initialize!(ig::InitialGuessODE{DT,TT,VT,IT},
t₁::TT,
q₁::SolutionVector{DT},
v₁::SolutionVector{DT},
t₀::TT,
q₀::SolutionVector{DT},
v₀::SolutionVector{DT}) where {DT,TT,VT,IT}
midpoint_extrapolation(ig.v, t₁, t₀, q₁, q₀, ig.s)
ig.v(t₀, q₀, v₀)
ig.v(t₁, q₁, v₁)
end
"compute vector field of new solution"
function update!(ig::InitialGuessODE{DT,TT}, t₁::TT, q₁::SolutionVector{DT}, v₁::Vector{DT}) where {DT,TT}
ig.v(t₁, q₁, v₁)
end
function CommonFunctions.evaluate!(ig::InitialGuessODE{DT,TT},
q₀::SolutionVector{DT},
v₀::SolutionVector{DT},
q₁::SolutionVector{DT},
v₁::SolutionVector{DT},
guess::SolutionVector{DT},
c::TT=one(TT)) where {DT,TT}
if q₀ == q₁
@warn "q₀ and q₁ in initial guess are identical! Setting q=q₁."
guess .= q₁
else
evaluate!(ig.int, q₀, q₁, v₀, v₁, one(TT)+c, guess)
end
end
function CommonFunctions.evaluate!(ig::InitialGuessODE{DT,TT},
q₀::SolutionVector{DT},
v₀::SolutionVector{DT},
q₁::SolutionVector{DT},
v₁::SolutionVector{DT},
guess_q::SolutionVector{DT},
guess_v::SolutionVector{DT},
c::TT=one(TT)) where {DT,TT}
@assert length(guess_q) == length(guess_v)
if q₀ == q₁
@warn "q₀ and q₁ in initial guess are identical! Setting q=q₁ and v=0."
guess_q .= q₁
guess_v .= 0
else
evaluate!(ig.int, q₀, q₁, v₀, v₁, one(TT)+c, guess_q, guess_v)
end
end
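# Rough usage sketch (names illustrative; `SomeInterpolator` stands for any
# concrete subtype of `Interpolator`, e.g. a Hermite interpolation):
#     ig = InitialGuessODE(SomeInterpolator, equation, Δt)
#     initialize!(ig, t₁, q₁, v₁, t₀, q₀, v₀)             # back-fill q₀, v₀, v₁
#     evaluate!(ig, q₀, v₀, q₁, v₁, guess_q, guess_v, c)   # guess at t = t₁ + c*Δt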
|
{"hexsha": "5845f27430d216576993793c8b6ffbd790190b83", "size": 2933, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/integrators/initial_guess/initial_guess_ode.jl", "max_stars_repo_name": "TomaszTyranowski/GeometricIntegrators.jl", "max_stars_repo_head_hexsha": "8f514c18548754186d14ae2ef49ae956561ca529", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-02-04T11:52:47.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-04T11:52:47.000Z", "max_issues_repo_path": "src/integrators/initial_guess/initial_guess_ode.jl", "max_issues_repo_name": "TomaszTyranowski/GeometricIntegrators.jl", "max_issues_repo_head_hexsha": "8f514c18548754186d14ae2ef49ae956561ca529", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/integrators/initial_guess/initial_guess_ode.jl", "max_forks_repo_name": "TomaszTyranowski/GeometricIntegrators.jl", "max_forks_repo_head_hexsha": "8f514c18548754186d14ae2ef49ae956561ca529", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.8804347826, "max_line_length": 107, "alphanum_fraction": 0.5946130242, "num_tokens": 945}
|
"""Utility functions for managing model paths and the hparams dict."""
import os
import pickle
import numpy as np
from behavenet.data.utils import get_data_generator_inputs
def get_subdirs(path):
"""Get all first-level subdirectories in a given path (no recursion).
Parameters
----------
path : :obj:`str`
absolute path
Returns
-------
:obj:`list`
first-level subdirectories in :obj:`path`
"""
if not os.path.exists(path):
raise ValueError('%s is not a path' % path)
try:
return next(os.walk(path))[1]
except StopIteration:
raise StopIteration('%s does not contain any subdirectories' % path)
def _get_multisession_paths(base_dir, lab='', expt='', animal=''):
"""Returns all paths in `base_dir` that start with `multi`.
The absolute paths returned are determined by `base_dir`, `lab`, `expt`, `animal`, and
`session` as follows: :obj:`base_dir/lab/expt/animal/session/sub_dir`
Use empty strings to ignore one of the session id components.
Parameters
----------
base_dir : :obj:`str`
lab : :obj:`str`, optional
expt : :obj:`str`, optional
animal : :obj:`str`, optional
Returns
-------
:obj:`list`
list of absolute paths
"""
sub_dirs = get_subdirs(os.path.join(base_dir, lab, expt, animal))
multi_paths = []
for sub_dir in sub_dirs:
if sub_dir[:5] == 'multi':
# record top-level multi-session directory
multi_paths.append(os.path.join(base_dir, lab, expt, animal, sub_dir))
return multi_paths
def _get_single_sessions(base_dir, depth, curr_depth):
"""Recursively search through non-multisession directories for all single sessions.
Parameters
----------
base_dir : :obj:`str`
depth : :obj:`int`
depth of recursion
curr_depth : :obj:`int`
current depth in recursion
Returns
-------
:obj:`list` of :obj:`dict`
session ids for all single sessions in :obj:`base_dir`
"""
session_list = []
if curr_depth < depth:
curr_depth += 1
sub_dirs = get_subdirs(base_dir)
for sub_dir in sub_dirs:
if sub_dir[:12] != 'multisession':
session_list += _get_single_sessions(
os.path.join(base_dir, sub_dir), depth=depth, curr_depth=curr_depth)
elif curr_depth == depth:
# take previous 4 directories (lab/expt/animal/session)
sess_path = base_dir.split(os.sep)
session_list = [{
'lab': sess_path[-4],
'expt': sess_path[-3],
'animal': sess_path[-2],
'session': sess_path[-1]}]
return session_list
def get_session_dir(hparams, path_type='save'):
"""Get session-level directory for saving model outputs from hparams dict.
Relies on hparams keys 'sessions_csv', 'multisession', 'lab', 'expt', 'animal' and 'session'.
The :obj:`sessions_csv` key takes precedence. The value for this key is a non-empty string of
the pattern :obj:`/path/to/session_info.csv`, where `session_info.csv` has 4 columns for lab,
expt, animal and session.
If `sessions_csv` is an empty string or the key is not in `hparams`, the following occurs:
- if :obj:`'lab' == 'all'`, an error is thrown since multiple-lab runs are not currently
supported
- if :obj:`'expt' == 'all'`, all sessions from all animals from all expts from the specified
lab are used; the session_dir will then be :obj:`save_dir/lab/multisession-xx`
- if :obj:`'animal' == 'all'`, all sessions from all animals in the specified expt are used;
the session_dir will then be :obj:`save_dir/lab/expt/multisession-xx`
- if :obj:`'session' == 'all'`, all sessions from the specified animal are used; the
session_dir will then be :obj:`save_dir/lab/expt/animal/multisession-xx`
- if none of 'lab', 'expt', 'animal' or 'session' is 'all', session_dir is
:obj:`save_dir/lab/expt/animal/session`
The :obj:`multisession-xx` directory will contain a file :obj:`session_info.csv` which will
contain information about the sessions that comprise the multisession; this file is used to
determine whether or not a new multisession directory needs to be created.
Parameters
----------
hparams : :obj:`dict`
requires `sessions_csv`, `multisession`, `lab`, `expt`, `animal` and `session`
path_type : :obj:`str`, optional
'save' to use hparams['save_dir'], 'data' to use hparams['data_dir'] as base directory;
note that using :obj:`path_type='data'` will not return multisession directories
Returns
-------
:obj:`tuple`
- session_dir (:obj:`str`)
- sessions_single (:obj:`list`)
"""
if path_type == 'save':
base_dir = hparams['save_dir']
elif path_type == 'data':
base_dir = hparams['data_dir']
else:
raise ValueError('"%s" is an invalid path_type' % path_type)
if len(hparams.get('sessions_csv', [])) > 0:
# collect all single sessions from csv
sessions_single = read_session_info_from_csv(hparams['sessions_csv'])
labs, expts, animals, sessions = [], [], [], []
for sess in sessions_single:
sess.pop('save_dir', None)
labs.append(sess['lab'])
expts.append(sess['expt'])
animals.append(sess['animal'])
sessions.append(sess['session'])
# find appropriate session directory
labs, expts, animals, sessions = \
np.array(labs), np.array(expts), np.array(animals), np.array(sessions)
lab, expt, animal, session = '', '', '', ''
if len(np.unique(sessions)) == 1:
# get single session from one animal
lab, expt, animal, session = labs[0], expts[0], animals[0], sessions[0]
session_dir_base = os.path.join(base_dir, lab, expt, animal, session)
elif len(np.unique(animals)) == 1:
# get all sessions from one animal
lab, expt, animal = labs[0], expts[0], animals[0]
session_dir_base = os.path.join(base_dir, lab, expt, animal)
elif len(np.unique(expts)) == 1:
lab, expt = labs[0], expts[0]
# get all animals from one experiment
session_dir_base = os.path.join(base_dir, lab, expt)
elif len(np.unique(labs)) == 1:
# get all experiments from one lab
lab = labs[0]
session_dir_base = os.path.join(base_dir, lab)
else:
raise NotImplementedError('multiple labs not currently supported')
# find corresponding multisession (ok if they don't exist)
multisession_paths = _get_multisession_paths(base_dir, lab=lab, expt=expt, animal=animal)
else:
# get session dirs (can include multiple sessions)
lab = hparams['lab']
if lab == 'all':
raise NotImplementedError('multiple labs not currently supported')
elif hparams['expt'] == 'all':
# get all experiments from one lab
multisession_paths = _get_multisession_paths(base_dir, lab=lab)
sessions_single = _get_single_sessions(
os.path.join(base_dir, lab), depth=3, curr_depth=0)
session_dir_base = os.path.join(base_dir, lab)
elif hparams['animal'] == 'all':
# get all animals from one experiment
expt = hparams['expt']
multisession_paths = _get_multisession_paths(base_dir, lab=lab, expt=expt)
sessions_single = _get_single_sessions(
os.path.join(base_dir, lab, expt), depth=2, curr_depth=0)
session_dir_base = os.path.join(base_dir, lab, expt)
elif hparams['session'] == 'all':
# get all sessions from one animal
expt = hparams['expt']
animal = hparams['animal']
multisession_paths = _get_multisession_paths(
base_dir, lab=lab, expt=expt, animal=animal)
sessions_single = _get_single_sessions(
os.path.join(base_dir, lab, expt, animal), depth=1, curr_depth=0)
session_dir_base = os.path.join(base_dir, lab, expt, animal)
else:
multisession_paths = []
sessions_single = [{
'lab': hparams['lab'], 'expt': hparams['expt'], 'animal': hparams['animal'],
'session': hparams['session']}]
session_dir_base = os.path.join(
base_dir, hparams['lab'], hparams['expt'], hparams['animal'], hparams['session'])
# construct session_dir
if hparams.get('multisession', None) is not None and len(hparams.get('sessions_csv', [])) == 0:
session_dir = os.path.join(session_dir_base, 'multisession-%02i' % hparams['multisession'])
# overwrite sessions_single with whatever is in requested multisession
sessions_single = read_session_info_from_csv(os.path.join(session_dir, 'session_info.csv'))
for sess in sessions_single:
sess.pop('save_dir', None)
elif len(sessions_single) > 1:
# check if this combo of experiments exists in previous multi-sessions
found_match = False
multi_idx = None
for session_multi in multisession_paths:
csv_file = os.path.join(session_multi, 'session_info.csv')
sessions_multi = read_session_info_from_csv(csv_file)
for d in sessions_multi:
# save path doesn't matter for comparison
d.pop('save_dir', None)
# compare to collection of single sessions above
set_l1 = set(tuple(sorted(d.items())) for d in sessions_single)
set_l2 = set(tuple(sorted(d.items())) for d in sessions_multi)
set_diff = set_l1.symmetric_difference(set_l2)
if len(set_diff) == 0:
# found match; record index
found_match = True
multi_idx = int(session_multi.split('-')[-1])
break
# create new multisession if match was not found
if not found_match:
multi_idxs = [
int(session_multi.split('-')[-1]) for session_multi in multisession_paths]
if len(multi_idxs) == 0:
multi_idx = 0
else:
multi_idx = max(multi_idxs) + 1
else:
pass
session_dir = os.path.join(session_dir_base, 'multisession-%02i' % multi_idx)
else:
session_dir = session_dir_base
return session_dir, sessions_single
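# Rough usage sketch (paths and session ids are illustrative, not from the docs):
#
#     hparams = {
#         'save_dir': '/results', 'data_dir': '/data',
#         'lab': 'lab0', 'expt': 'expt0', 'animal': 'mouse01', 'session': 'all',
#         'multisession': None, 'sessions_csv': '',
#     }
#     session_dir, sessions = get_session_dir(hparams, path_type='save')
#     # session_dir -> '/results/lab0/expt0/mouse01/multisession-00'
#     # (or an existing multisession-xx whose session_info.csv matches)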
def get_expt_dir(hparams, model_class=None, model_type=None, expt_name=None):
"""Get output directories associated with a particular model class/type/testtube expt name.
Examples
--------
* autoencoder: :obj:`session_dir/ae/conv/08_latents/expt_name`
* arhmm: :obj:`session_dir/arhmm/08_latents/16_states/0e+00_kappa/gaussian/expt_name`
* arhmm-labels: :obj:`session_dir/arhmm-labels/16_states/0e+00_kappa/gaussian/expt_name`
* neural->ae decoder: :obj:`session_dir/neural-ae/08_latents/ff/mctx/expt_name`
* neural->arhmm decoder:
:obj:`session_dir/neural-ae/08_latents/16_states/0e+00_kappa/ff/mctx/expt_name`
* bayesian decoder:
:obj:`session_dir/arhmm-decoding/08_latents/16_states/0e+00_kappa/gaussian/mctx/expt_name`
Parameters
----------
hparams : :obj:`dict`
specify model hyperparameters
model_class : :obj:`str`, optional
will search :obj:`hparams` if not present
model_type : :obj:`str`, optional
will search :obj:`hparams` if not present
expt_name : :obj:`str`, optional
will search :obj:`hparams` if not present
Returns
-------
:obj:`str`
contains data info (lab/expt/animal/session) as well as model info (e.g. n_ae_latents) and
expt_name
"""
import copy
if model_class is None:
model_class = hparams['model_class']
if model_type is None:
model_type = hparams['model_type']
if expt_name is None:
expt_name = hparams['experiment_name']
# get results dir
if model_class == 'ae':
model_path = os.path.join('ae', model_type, '%02i_latents' % hparams['n_ae_latents'])
if hparams.get('ae_multisession', None) is not None:
            # using a multisession autoencoder; assumes multisession is at animal level
# (rather than experiment level), i.e.
# - latent session dir: lab/expt/animal/multisession-xx
# - en/decoding session dir: lab/expt/animal/session
hparams_ = copy.deepcopy(hparams)
hparams_['session'] = 'all'
hparams_['multisession'] = hparams['ae_multisession']
session_dir, _ = get_session_dir(hparams_)
else:
session_dir = hparams['session_dir']
elif model_class == 'neural-ae' or model_class == 'ae-neural':
brain_region = get_region_dir(hparams)
model_path = os.path.join(
model_class, '%02i_latents' % hparams['n_ae_latents'], model_type, brain_region)
session_dir = hparams['session_dir']
elif model_class == 'neural-arhmm' or model_class == 'arhmm-neural':
brain_region = get_region_dir(hparams)
model_path = os.path.join(
model_class, '%02i_latents' % hparams['n_ae_latents'],
'%02i_states' % hparams['n_arhmm_states'],
'%.0e_kappa' % hparams['kappa'], model_type, brain_region)
session_dir = hparams['session_dir']
elif model_class == 'arhmm' or model_class == 'hmm':
model_path = os.path.join(
model_class, '%02i_latents' % hparams['n_ae_latents'],
'%02i_states' % hparams['n_arhmm_states'],
'%.0e_kappa' % hparams['kappa'], hparams['noise_type'])
if hparams.get('arhmm_multisession', None) is not None:
# using a multisession autoencoder with single session arhmm; assumes multisession
# is at animal level (rather than experiment level), i.e.
# - latent session dir: lab/expt/animal/multisession-xx
# - arhmm session dir: lab/expt/animal/session
hparams_ = copy.deepcopy(hparams)
hparams_['session'] = 'all'
hparams_['multisession'] = hparams['arhmm_multisession']
session_dir, _ = get_session_dir(hparams_)
else:
session_dir = hparams['session_dir']
elif model_class == 'arhmm-labels' or model_class == 'hmm-labels':
model_path = os.path.join(
model_class, '%02i_states' % hparams['n_arhmm_states'],
'%.0e_kappa' % hparams['kappa'], hparams['noise_type'])
if hparams.get('arhmm_multisession', None) is not None:
# using a multisession autoencoder with single session arhmm; assumes multisession
# is at animal level (rather than experiment level), i.e.
# - latent session dir: lab/expt/animal/multisession-xx
# - arhmm session dir: lab/expt/animal/session
hparams_ = copy.deepcopy(hparams)
hparams_['session'] = 'all'
hparams_['multisession'] = hparams['arhmm_multisession']
session_dir, _ = get_session_dir(hparams_)
else:
session_dir = hparams['session_dir']
elif model_class == 'bayesian-decoding':
brain_region = get_region_dir(hparams)
model_path = os.path.join(
'bayesian-decoding', '%02i_latents' % hparams['n_ae_latents'],
'%02i_states' % hparams['n_arhmm_states'],
'%.0e_kappa' % hparams['kappa'], hparams['noise_type'], brain_region)
session_dir, _ = get_session_dir(hparams)
else:
raise ValueError('"%s" is an invalid model class' % model_class)
expt_dir = os.path.join(session_dir, model_path, expt_name)
return expt_dir
def read_session_info_from_csv(session_file):
"""Read csv file that contains lab/expt/animal/session info.
Parameters
----------
session_file : :obj:`str`
/full/path/to/session_info.csv
Returns
-------
:obj:`list` of :obj:`dict`
dict for each session which contains lab/expt/animal/session
"""
import csv
sessions_multi = []
# load and parse csv file that contains single session info
with open(session_file) as csv_file:
csv_reader = csv.DictReader(csv_file)
for row in csv_reader:
sessions_multi.append(dict(row))
return sessions_multi
def export_session_info_to_csv(session_dir, ids_list):
"""Export list of sessions to csv file.
Parameters
----------
session_dir : :obj:`str`
absolute path for where to save :obj:`session_info.csv` file
ids_list : :obj:`list` of :obj:`dict`
dict for each session which contains lab/expt/animal/session
"""
import csv
session_file = os.path.join(session_dir, 'session_info.csv')
if not os.path.isdir(session_dir):
os.makedirs(session_dir)
with open(session_file, mode='w') as f:
session_writer = csv.DictWriter(f, fieldnames=list(ids_list[0].keys()))
session_writer.writeheader()
for ids in ids_list:
session_writer.writerow(ids)
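# Illustrative round trip (hedged sketch; the directory and ids below are hypothetical):
# export_session_info_to_csv writes a session_info.csv file that read_session_info_from_csv
# recovers as the same list of string-valued dicts.
#     ids_list = [{'lab': 'lab00', 'expt': 'expt00', 'animal': 'animal00', 'session': 'session00'}]
#     export_session_info_to_csv('/tmp/demo_session_dir', ids_list)
#     assert read_session_info_from_csv('/tmp/demo_session_dir/session_info.csv') == ids_list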
def contains_session(session_dir, session_id):
"""Determine if session defined by `session_id` dict is in the multi-session `session_dir`.
Parameters
----------
session_dir : :obj:`str`
absolute path to multi-session directory that contains a :obj:`session_info.csv` file
session_id : :obj:`dict`
must contain keys 'lab', 'expt', 'animal' and 'session'
Returns
-------
:obj:`bool`
"""
session_ids = read_session_info_from_csv(os.path.join(session_dir, 'session_info.csv'))
contains_sess = False
for sess_id in session_ids:
sess_id.pop('save_dir', None)
if sess_id == session_id:
contains_sess = True
break
return contains_sess
def find_session_dirs(hparams):
"""Find all session dirs (single- and multi-session) that contain the session in hparams.
Parameters
----------
hparams : :obj:`dict`
must contain keys 'lab', 'expt', 'animal' and 'session'
Returns
-------
:obj:`list` of :obj:`str`
list of session directories containing session defined in :obj:`hparams`
"""
# TODO: refactor like get_session_dir?
ids = {s: hparams[s] for s in ['lab', 'expt', 'animal', 'session']}
lab = hparams['lab']
expts = get_subdirs(os.path.join(hparams['save_dir'], lab))
# need to grab all multi-sessions as well as the single session
session_dirs = [] # full paths
session_ids = [] # dict of lab/expt/animal/session
for expt in expts:
if expt[:5] == 'multi':
session_dir = os.path.join(hparams['save_dir'], lab, expt)
if contains_session(session_dir, ids):
session_dirs.append(session_dir)
session_ids.append({
'lab': lab, 'expt': 'all', 'animal': '', 'session': '',
'multisession': int(expt[-2:])})
continue
else:
animals = get_subdirs(os.path.join(
hparams['save_dir'], lab, expt))
for animal in animals:
if animal[:5] == 'multi':
session_dir = os.path.join(hparams['save_dir'], lab, expt, animal)
if contains_session(session_dir, ids):
session_dirs.append(session_dir)
session_ids.append({
'lab': lab, 'expt': expt, 'animal': 'all', 'session': '',
'multisession': int(animal[-2:])})
continue
else:
sessions = get_subdirs(os.path.join(
hparams['save_dir'], lab, expt, animal))
for session in sessions:
session_dir = os.path.join(
hparams['save_dir'], lab, expt, animal, session)
if session[:5] == 'multi':
if contains_session(session_dir, ids):
session_dirs.append(session_dir)
session_ids.append({
'lab': lab, 'expt': expt, 'animal': animal, 'session': 'all',
'multisession': int(session[-2:])})
else:
tmp_ids = {'lab': lab, 'expt': expt, 'animal': animal, 'session': session}
if tmp_ids == ids:
session_dirs.append(session_dir)
session_ids.append({
'lab': lab, 'expt': expt, 'animal': animal, 'session': session,
'multisession': None})
return session_dirs, session_ids
def experiment_exists(hparams, which_version=False):
"""Search testtube versions to find if experiment with the same hyperparameters has been fit.
Parameters
----------
hparams : :obj:`dict`
needs to contain enough information to specify a test tube experiment (model + training
parameters)
which_version : :obj:`bool`, optional
:obj:`True` to return version number
Returns
-------
variable
- :obj:`bool` if :obj:`which_version=False`
- :obj:`tuple` (:obj:`bool`, :obj:`int`) if :obj:`which_version=True`
"""
import pickle
try:
tt_versions = get_subdirs(hparams['expt_dir'])
except StopIteration:
# no versions yet
if which_version:
return False, None
else:
return False
# get model-specific params
hparams_less = get_model_params(hparams)
found_match = False
version = None
for version in tt_versions:
# load hparams
version_file = os.path.join(hparams['expt_dir'], version, 'meta_tags.pkl')
try:
with open(version_file, 'rb') as f:
hparams_ = pickle.load(f)
if all([hparams_[key] == hparams_less[key] for key in hparams_less.keys()]):
# found match - did it finish training?
if hparams_['training_completed']:
found_match = True
break
except IOError:
continue
if which_version and found_match:
return found_match, int(version.split('_')[-1])
elif which_version and not found_match:
return found_match, None
else:
return found_match
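# Illustrative usage (hedged; keys and values are hypothetical): with hparams['expt_dir']
# pointing at a test-tube experiment directory,
#     exists, version = experiment_exists(hparams, which_version=True)
# returns e.g. (True, 2) when version_2 holds a completed fit whose model params match hparams.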
def get_model_params(hparams):
"""Returns dict containing all params considered essential for defining a model in that class.
Parameters
----------
hparams : :obj:`dict`
all relevant hparams for the given model class will be pulled from this dict
Returns
-------
:obj:`dict`
hparams dict
"""
model_class = hparams['model_class']
# start with general params
hparams_less = {
'rng_seed_data': hparams['rng_seed_data'],
'trial_splits': hparams['trial_splits'],
'train_frac': hparams['train_frac'],
'rng_seed_model': hparams['rng_seed_model'],
'model_class': hparams['model_class'],
'model_type': hparams['model_type'],
}
if model_class == 'ae' or model_class == 'vae':
hparams_less['n_ae_latents'] = hparams['n_ae_latents']
hparams_less['fit_sess_io_layers'] = hparams['fit_sess_io_layers']
hparams_less['learning_rate'] = hparams['learning_rate']
hparams_less['l2_reg'] = hparams['l2_reg']
elif model_class == 'arhmm' or model_class == 'hmm':
hparams_less['n_arhmm_lags'] = hparams['n_arhmm_lags']
hparams_less['noise_type'] = hparams['noise_type']
hparams_less['kappa'] = hparams['kappa']
hparams_less['ae_experiment_name'] = hparams['ae_experiment_name']
hparams_less['ae_version'] = hparams['ae_version']
hparams_less['ae_model_type'] = hparams['ae_model_type']
hparams_less['n_ae_latents'] = hparams['n_ae_latents']
elif model_class == 'arhmm-labels' or model_class == 'hmm-labels':
hparams_less['n_arhmm_lags'] = hparams['n_arhmm_lags']
hparams_less['noise_type'] = hparams['noise_type']
hparams_less['kappa'] = hparams['kappa']
elif model_class == 'neural-ae' or model_class == 'ae-neural':
hparams_less['ae_experiment_name'] = hparams['ae_experiment_name']
hparams_less['ae_version'] = hparams['ae_version']
hparams_less['ae_model_type'] = hparams['ae_model_type']
hparams_less['n_ae_latents'] = hparams['n_ae_latents']
elif model_class == 'neural-arhmm' or model_class == 'arhmm-neural':
hparams_less['arhmm_experiment_name'] = hparams['arhmm_experiment_name']
hparams_less['arhmm_version'] = hparams['arhmm_version']
hparams_less['n_arhmm_states'] = hparams['n_arhmm_states']
hparams_less['n_arhmm_lags'] = hparams['n_arhmm_lags']
hparams_less['noise_type'] = hparams['noise_type']
hparams_less['kappa'] = hparams['kappa']
hparams_less['ae_model_type'] = hparams['ae_model_type']
hparams_less['n_ae_latents'] = hparams['n_ae_latents']
elif model_class == 'bayesian-decoding':
raise NotImplementedError
else:
raise NotImplementedError('"%s" is not a valid model class' % model_class)
# decoder arch params
if model_class == 'neural-ae' or model_class == 'ae-neural' \
or model_class == 'neural-arhmm' or model_class == 'arhmm-neural':
hparams_less['n_lags'] = hparams['n_lags']
hparams_less['l2_reg'] = hparams['l2_reg']
hparams_less['model_type'] = hparams['model_type']
hparams_less['n_hid_layers'] = hparams['n_hid_layers']
if hparams['n_hid_layers'] != 0:
hparams_less['n_hid_units'] = hparams['n_hid_units']
hparams_less['activation'] = hparams['activation']
return hparams_less
def export_hparams(hparams, exp):
"""Export hyperparameter dictionary.
    The dict is exported once as a csv file (for easy human reading) and again as a pickled dict
(for easy python loading/parsing).
Parameters
----------
hparams : :obj:`dict`
hyperparameter dict to export
exp : :obj:`test_tube.Experiment` object
defines where parameters are saved
"""
import pickle
# save out as pickle
meta_file = os.path.join(hparams['expt_dir'], 'version_%i' % exp.version, 'meta_tags.pkl')
with open(meta_file, 'wb') as f:
pickle.dump(hparams, f)
# save out as csv
exp.tag(hparams)
exp.save()
def get_lab_example(hparams, lab, expt):
"""Helper function to load data-specific hyperparameters and update hparams.
These values are loaded from the json file defined by :obj:`lab` and :obj:`expt` in the
:obj:`.behavenet` user directory. See
https://behavenet.readthedocs.io/en/latest/source/installation.html#adding-a-new-dataset
for more information.
Parameters
----------
hparams : :obj:`dict`
        hyperparameter dict to update
lab : :obj:`str`
lab id
expt : :obj:`str`
expt id
"""
import json
from behavenet import _get_params_dir
params_file = os.path.join(_get_params_dir(), str('%s_%s_params.json' % (lab, expt)))
with open(params_file, 'r') as f:
dparams = json.load(f)
hparams.update(dparams)
def get_region_dir(hparams):
"""Return brain region string that combines region name and inclusion info.
If not subsampling regions, will return :obj:`'all'`
If using neural activity from *only* specified region, will return e.g. :obj:`'mctx-single'`
If using neural activity from all *but* specified region (leave-one-out), will return e.g.
:obj:`'mctx-loo'`
Parameters
----------
hparams : :obj:`dict`
must contain the key 'subsample_regions', else function assumes no subsampling
Returns
-------
:obj:`str`
region directory name
"""
if hparams.get('subsample_regions', 'none') == 'none':
region_dir = 'all'
elif hparams['subsample_regions'] == 'single':
region_dir = str('%s-single' % hparams['region'])
elif hparams['subsample_regions'] == 'loo':
region_dir = str('%s-loo' % hparams['region'])
else:
        raise ValueError('"%s" is an invalid region sampling type' % hparams['subsample_regions'])
return region_dir
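# Illustrative mapping (hedged; 'mctx' is just an example region name consistent with the
# docstring above):
#     get_region_dir({'subsample_regions': 'none'})                      -> 'all'
#     get_region_dir({'subsample_regions': 'single', 'region': 'mctx'})  -> 'mctx-single'
#     get_region_dir({'subsample_regions': 'loo', 'region': 'mctx'})     -> 'mctx-loo'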
def create_tt_experiment(hparams):
"""Create test-tube experiment for logging training and storing models.
Parameters
----------
hparams : :obj:`dict`
dictionary of hyperparameters defining experiment that will be saved as a csv file
Returns
-------
:obj:`tuple`
- if experiment defined by hparams already exists, returns :obj:`(None, None, None)`
- if experiment does not exist, returns :obj:`(hparams, sess_ids, exp)`
"""
from test_tube import Experiment
# get session_dir
hparams['session_dir'], sess_ids = get_session_dir(hparams)
if not os.path.isdir(hparams['session_dir']):
os.makedirs(hparams['session_dir'])
export_session_info_to_csv(hparams['session_dir'], sess_ids)
hparams['expt_dir'] = get_expt_dir(hparams)
if not os.path.isdir(hparams['expt_dir']):
os.makedirs(hparams['expt_dir'])
# check to see if experiment already exists
if experiment_exists(hparams):
return None, None, None
exp = Experiment(
name=hparams['experiment_name'],
debug=False,
save_dir=os.path.dirname(hparams['expt_dir']))
exp.save()
hparams['version'] = exp.version
return hparams, sess_ids, exp
def build_data_generator(hparams, sess_ids, export_csv=True):
"""Helper function to build data generator from hparams dict.
Parameters
----------
hparams : :obj:`dict`
needs to contain information specifying data inputs to model
sess_ids : :obj:`list` of :obj:`dict`
each entry is a session dict with keys 'lab', 'expt', 'animal', 'session'
export_csv : :obj:`bool`, optional
export csv file containing session info (useful when fitting multi-sessions)
Returns
-------
:obj:`ConcatSessionsGenerator` object
data generator
"""
from behavenet.data.data_generator import ConcatSessionsGenerator
from behavenet.data.utils import get_data_generator_inputs
print('using data from following sessions:')
for ids in sess_ids:
print('%s' % os.path.join(
hparams['save_dir'], ids['lab'], ids['expt'], ids['animal'], ids['session']))
hparams, signals, transforms, paths = get_data_generator_inputs(hparams, sess_ids)
if hparams.get('trial_splits', None) is not None:
# assumes string of form 'train;val;test;gap'
trs = [int(tr) for tr in hparams['trial_splits'].split(';')]
trial_splits = {'train_tr': trs[0], 'val_tr': trs[1], 'test_tr': trs[2], 'gap_tr': trs[3]}
else:
trial_splits = None
print('constructing data generator...', end='')
data_generator = ConcatSessionsGenerator(
hparams['data_dir'], sess_ids,
signals_list=signals, transforms_list=transforms, paths_list=paths,
device=hparams['device'], as_numpy=hparams['as_numpy'], batch_load=hparams['batch_load'],
rng_seed=hparams['rng_seed_data'], trial_splits=trial_splits,
train_frac=hparams['train_frac'])
# csv order will reflect dataset order in data generator
if export_csv:
export_session_info_to_csv(os.path.join(
hparams['expt_dir'], str('version_%i' % hparams['version'])), sess_ids)
print('done')
print(data_generator)
return data_generator
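# Illustrative note (hypothetical value, not from the original source): a trial_splits
# string such as '8;1;1;0' is parsed in build_data_generator above into
# {'train_tr': 8, 'val_tr': 1, 'test_tr': 1, 'gap_tr': 0}.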
def get_best_model_version(expt_dir, measure='val_loss', best_def='min', n_best=1):
"""Get best model version from a test tube experiment.
Parameters
----------
expt_dir : :obj:`str`
test tube experiment directory containing version_%i subdirectories
measure : :obj:`str`, optional
heading in csv file that is used to determine which model is best
best_def : :obj:`str`, optional
how :obj:`measure` should be parsed; 'min' | 'max'
n_best : :obj:`int`, optional
top `n_best` models are returned
Returns
-------
:obj:`list`
list of best models, with best first
"""
import pickle
import pandas as pd
# gather all versions
versions = get_subdirs(expt_dir)
# load csv files with model metrics (saved out from test tube)
metrics = []
for i, version in enumerate(versions):
# make sure training has been completed
meta_file = os.path.join(expt_dir, version, 'meta_tags.pkl')
if not os.path.exists(meta_file):
continue
with open(meta_file, 'rb') as f:
meta_tags = pickle.load(f)
if not meta_tags['training_completed']:
continue
# read metrics csv file
metric = pd.read_csv(os.path.join(expt_dir, version, 'metrics.csv'))
# get validation loss of best model
if best_def == 'min':
val_loss = metric[measure].min()
elif best_def == 'max':
val_loss = metric[measure].max()
metrics.append(pd.DataFrame({'loss': val_loss, 'version': version}, index=[i]))
# put everything in pandas dataframe
metrics_df = pd.concat(metrics, sort=False)
# get version with smallest loss
if n_best == 1:
if best_def == 'min':
best_versions = [metrics_df['version'][metrics_df['loss'].idxmin()]]
elif best_def == 'max':
best_versions = [metrics_df['version'][metrics_df['loss'].idxmax()]]
else:
if best_def == 'min':
best_versions = np.asarray(
metrics_df['version'][metrics_df['loss'].nsmallest(n_best, 'all').index])
elif best_def == 'max':
raise NotImplementedError
if best_versions.shape[0] != n_best:
print('More versions than specified due to same validation loss')
# convert string to integer
best_versions = [int(version.split('_')[-1]) for version in best_versions]
return best_versions
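# Illustrative usage (hypothetical path, not from the original source): given an experiment
# directory containing test-tube version_* subdirectories with completed training runs,
#     best = get_best_model_version('/storage/lab/expt/animal/session/ae/conv/08_latents/expt_name')
# returns a one-element list such as [3] for the version with the lowest 'val_loss'.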
def get_best_model_and_data(hparams, Model, load_data=True, version='best', data_kwargs=None):
"""Load the best model (and data) defined by hparams out of all available test-tube versions.
Parameters
----------
hparams : :obj:`dict`
needs to contain enough information to specify both a model and the associated data
Model : :obj:`behavenet.models` object
model type
load_data : :obj:`bool`, optional
version : :obj:`str` or :obj:`int`, optional
can be 'best' to load best model
data_kwargs : :obj:`dict`, optional
additional kwargs for data generator
Returns
-------
:obj:`tuple`
- model (:obj:`behavenet.models` object)
- data generator (:obj:`ConcatSessionsGenerator` object or :obj:`NoneType`)
"""
    import pickle
    import torch
    from behavenet.data.data_generator import ConcatSessionsGenerator
    from behavenet.data.utils import get_data_generator_inputs
# get session_dir
hparams['session_dir'], sess_ids = get_session_dir(hparams)
expt_dir = get_expt_dir(hparams)
# get best model version
if version == 'best':
best_version_int = get_best_model_version(expt_dir)[0]
best_version = str('version_{}'.format(best_version_int))
else:
if isinstance(version, str) and version[0] == 'v':
# assume we got a string of the form 'version_{%i}'
best_version = version
else:
best_version = str('version_{}'.format(version))
# get int representation as well
version_dir = os.path.join(expt_dir, best_version)
arch_file = os.path.join(version_dir, 'meta_tags.pkl')
model_file = os.path.join(version_dir, 'best_val_model.pt')
if not os.path.exists(model_file) and not os.path.exists(model_file + '.meta'):
model_file = os.path.join(version_dir, 'best_val_model.ckpt')
print('Loading model defined in %s' % arch_file)
with open(arch_file, 'rb') as f:
hparams_new = pickle.load(f)
# update paths if performing analysis on a different machine
hparams_new['data_dir'] = hparams['data_dir']
hparams_new['session_dir'] = hparams['session_dir']
hparams_new['expt_dir'] = expt_dir
hparams_new['use_output_mask'] = hparams.get('use_output_mask', False)
hparams_new['device'] = 'cpu'
# build data generator
hparams_new, signals, transforms, paths = get_data_generator_inputs(hparams_new, sess_ids)
if load_data:
# sometimes we want a single data_generator for multiple models
if data_kwargs is None:
data_kwargs = {}
data_generator = ConcatSessionsGenerator(
hparams_new['data_dir'], sess_ids,
signals_list=signals, transforms_list=transforms, paths_list=paths,
device=hparams_new['device'], as_numpy=hparams_new['as_numpy'],
batch_load=hparams_new['batch_load'], rng_seed=hparams_new['rng_seed_data'],
**data_kwargs)
else:
data_generator = None
# build models
model = Model(hparams_new)
model.version = int(best_version.split('_')[1])
model.load_state_dict(torch.load(model_file, map_location=lambda storage, loc: storage))
model.to(hparams_new['device'])
model.eval()
return model, data_generator
def _clean_tt_dir(hparams):
    """Delete all (unnecessary) subdirectories in the model directory (created by test-tube)."""
import shutil
# get subdirs
version_dir = os.path.join(hparams['expt_dir'], 'version_%i' % hparams['version'])
subdirs = get_subdirs(version_dir)
for subdir in subdirs:
shutil.rmtree(os.path.join(version_dir, subdir))
def _print_hparams(hparams):
"""Pretty print hparams to console."""
import commentjson
config_files = ['data', 'compute', 'training', 'model']
for config_file in config_files:
print('\n%s CONFIG:' % config_file.upper())
config_json = commentjson.load(open(hparams['%s_config' % config_file], 'r'))
for key in config_json.keys():
print(' {}: {}'.format(key, hparams[key]))
print('')
|
{"hexsha": "9ad4383d7a1710ee29829bd83e8b114847333911", "size": 38297, "ext": "py", "lang": "Python", "max_stars_repo_path": "behavenet/fitting/utils.py", "max_stars_repo_name": "cxrodgers/behavenet", "max_stars_repo_head_hexsha": "061b0b30f5d03b9d5be0dd965d81dc37b7409070", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "behavenet/fitting/utils.py", "max_issues_repo_name": "cxrodgers/behavenet", "max_issues_repo_head_hexsha": "061b0b30f5d03b9d5be0dd965d81dc37b7409070", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "behavenet/fitting/utils.py", "max_forks_repo_name": "cxrodgers/behavenet", "max_forks_repo_head_hexsha": "061b0b30f5d03b9d5be0dd965d81dc37b7409070", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.9989816701, "max_line_length": 99, "alphanum_fraction": 0.6267592762, "include": true, "reason": "import numpy", "num_tokens": 9329}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
import numpy as np
from geoedfframework.utils.GeoEDFError import GeoEDFError
""" Helper module for converting numeric values to colors
"""
def val2color(value,min_value,max_value):
try:
if (math.isnan(value)):
return '#000000'
clip_value = np.clip(value,min_value,max_value)
hue = int((clip_value-min_value)/(max_value-min_value)*255)
red = hue
green = 255 - 2*abs(hue-128)
blue = 255 - hue
except:
raise GeoEDFError('Could not convert value to a color')
return '#{0:02X}{1:02X}{2:02X}'.format(red,green,blue)
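# Minimal demonstration (hedged usage sketch added for illustration; the value range below
# is arbitrary and not part of the original module).
if __name__ == '__main__':
    for v in (float('nan'), 2.5, 5.0, 7.5):
        # NaN maps to '#000000'; low values lean blue, high values lean red
        print(val2color(v, 0.0, 10.0))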
|
{"hexsha": "4fd39a7bb8b595f5ff8112ff2616d433f39b19d1", "size": 658, "ext": "py", "lang": "Python", "max_stars_repo_path": "wqpmap/GeoEDF/processor/helper/ColorHelper.py", "max_stars_repo_name": "rkalyanapurdue/processors", "max_stars_repo_head_hexsha": "e420f28b9fdf395af18389aa6f457cf8b44c0ca1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "wqpmap/GeoEDF/processor/helper/ColorHelper.py", "max_issues_repo_name": "rkalyanapurdue/processors", "max_issues_repo_head_hexsha": "e420f28b9fdf395af18389aa6f457cf8b44c0ca1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "wqpmap/GeoEDF/processor/helper/ColorHelper.py", "max_forks_repo_name": "rkalyanapurdue/processors", "max_forks_repo_head_hexsha": "e420f28b9fdf395af18389aa6f457cf8b44c0ca1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.32, "max_line_length": 67, "alphanum_fraction": 0.6367781155, "include": true, "reason": "import numpy", "num_tokens": 178}
|
import numpy as np
import theano
import theano.tensor as T
import time
x = T.tensor4('x')
x = theano.shared(
np.random.rand(32, 128, 256, 256).astype(theano.config.floatX),
'x')
filters = theano.shared(
np.random.rand(256, 128, 3, 3).astype(theano.config.floatX),
'filters')
# B x 1 x 1 x T
y = theano.gpuarray.dnn.dnn_conv(
img=x,
kerns=filters,
border_mode='half',
precision='float32')
f = theano.function([], y)
y_ = f()
#f.sync_shared()
t0 = time.time()
for i in range(50):
    print(i)
y_ = f()
# f.sync_shared()
t1 = time.time()
print(t1 - t0)
|
{"hexsha": "6f3e4c6271d1f534b002653e273cb8c7e09f74ed", "size": 589, "ext": "py", "lang": "Python", "max_stars_repo_path": "theano_conv.py", "max_stars_repo_name": "ReyhaneAskari/theano_experiments", "max_stars_repo_head_hexsha": "f03b57fc2347557f0761d102e7bac8e095dc7291", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-06T08:50:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-06T08:50:13.000Z", "max_issues_repo_path": "theano_conv.py", "max_issues_repo_name": "ReyhaneAskari/theano_experiments", "max_issues_repo_head_hexsha": "f03b57fc2347557f0761d102e7bac8e095dc7291", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "theano_conv.py", "max_forks_repo_name": "ReyhaneAskari/theano_experiments", "max_forks_repo_head_hexsha": "f03b57fc2347557f0761d102e7bac8e095dc7291", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.6333333333, "max_line_length": 67, "alphanum_fraction": 0.6332767402, "include": true, "reason": "import numpy,import theano", "num_tokens": 192}
|
/*
* TmxJ2735.hpp
*
* Created on: Apr 27, 2016
* Author: ivp
*/
#ifndef TMX_MESSAGES_TMXJ2735_HPP_
#define TMX_MESSAGES_TMXJ2735_HPP_
#include <cerrno>
#include <cstring>
#include <memory>
#include <sstream>
#include <stdexcept>
#include <stdio.h>
#include <string>
#include <asn_application.h>
#include <boost/any.hpp>
#include <tmx/TmxApiMessages.h>
#include <tmx/messages/J2735Exception.hpp>
#include <tmx/messages/SaeJ2735Traits.hpp>
#include <tmx/messages/routeable_message.hpp>
namespace tmx {
namespace messages {
namespace j2735 {
/*
* Return a unique key for the given message, which by default is 0. If a message can
* be identified by a unique key, then it should specialize this function.
*
* @return An integer unique identifier for the supplied J2735 message, or 0 if no key can be identified.
*/
template <typename MsgType>
int get_j2735_message_key(std::shared_ptr<typename MsgType::message_type> message) { return 0; }
} /* End namespace j2735 */
/**
* A template class for all J2735 messages. This class is a decoded version of the
* specific J2735 data type structure built with the ASN.1 compiler, but represented
* in a boost::property_tree. This type can be used in a handler if you want the
* decode version of the message, which would make the most sense.
*/
template <typename DataType>
class TmxJ2735Message: public tmx::xml_message
{
public:
/// The J2735 data type
typedef j2735::SaeJ2735Traits<DataType> traits_type;
typedef typename traits_type::message_type message_type;
typedef typename traits_type::asn_type asn_type;
typedef TmxJ2735Message<DataType> type;
/**
* @return The ASN.1 descriptor for the J2735 data type
*/
static asn_type *get_descriptor()
{
static constexpr const asn_type *descriptor = j2735::get_descriptor<traits_type>();
asn_type *ret = (asn_type *)descriptor;
if (!ret)
BOOST_THROW_EXCEPTION(J2735Exception("Null ASN descriptor type discovered for " +
battelle::attributes::type_id_name<message_type>() + "."));
return ret;
}
static constexpr const int get_default_messageId() {
return j2735::get_default_messageId<traits_type>();
}
static constexpr const char *get_messageTag() {
return j2735::get_messageTag<traits_type>();
}
static constexpr const char *get_messageType() {
return j2735::get_messageType<traits_type>();
}
static constexpr const char *MessageType = tmx::messages::api::MSGSUBTYPE_J2735_STRING;
static constexpr const char *MessageSubType = get_messageType();
/**
* Create a J2735 Message of this type, optionally using a pointer to the J2735
* data type structure. If no pointer is given, then the message is empty until
* contents can be loaded in some other manner.
* @param data Pointer to the J2735 data
*/
TmxJ2735Message(message_type *data = 0):
tmx::xml_message(),
_j2735_data(data, [](message_type *p) { j2735::j2735_destroy<traits_type>(p); } ) { }
/**
* Copy constructor
*/
TmxJ2735Message(const type& msg):
tmx::xml_message(msg), _j2735_data(msg._j2735_data) { }
/**
* Copy constructor from a different XML message
*/
TmxJ2735Message(const tmx::xml_message &msg):
tmx::xml_message(msg),
_j2735_data(NULL, [](message_type *p) { j2735::j2735_destroy<traits_type>(p); } ) { }
/**
* Copy from existing shared pointer of same type. Current ownership is still
* maintained in the existing shared pointer, but reference count is increased.
*/
TmxJ2735Message(std::shared_ptr<message_type> other):
tmx::xml_message(), _j2735_data(other) { }
/**
* Destructor
*/
virtual ~TmxJ2735Message() { }
/*
* Define assignment operator to keep compiler from creating a default which
* will copy the _j2735_data pointer which can cause exceptions
*/
type& operator=(const type &msg)
{
if (this != &msg)
{
_j2735_data.reset();
_j2735_data = msg._j2735_data;
tmx::xml_message::operator=(msg);
}
return *this;
}
message_container_type get_container() const
{
if (!_j2735_data)
return xml_message::get_container();
// Make a copy of the current container and serialize to it
message_container_type copy(xml_message::get_container());
copy.load<XML>(as_string(*_j2735_data));
return copy;
}
/**
* Returns a pointer to a filled in J2735 data structure, taken from an XML serialization of the property tree.
* @return The pointer to the structure
*/
std::shared_ptr<message_type> get_j2735_data()
{
if (!_j2735_data && !is_empty())
{
message_type *tmp = 0;
std::string myData = this->to_string();
asn_dec_rval_t rval;
rval = xer_decode(NULL, get_descriptor(), (void **)&tmp, myData.c_str(), myData.size());
if (rval.code != RC_OK)
{
std::stringstream err;
err << "Unable to decode " << MessageSubType << " from " << myData <<
"\nFailed after " << rval.consumed << " bytes.";
BOOST_THROW_EXCEPTION(J2735Exception(err.str()));
}
_j2735_data.reset(tmp, [](message_type *p) { j2735::j2735_destroy<traits_type>(p); } );
}
return _j2735_data;
}
/**
* Sets the J2735 data structure by serializing the given data pointer into the XML container,
* and then re-constructing a new copy of the data pointer. The supplied pointer is <b>not</b>
* managed by this object.
*
* @param data A pointer to a filled in J2735 data structure
*/
void set_j2735_data(const message_type *data)
{
_j2735_data.reset();
clear();
if (data)
set_contents(as_string<message_type>(*data));
}
using xml_message::flush;
int get_messageKey() {
return j2735::get_j2735_message_key<type>(get_j2735_data());
}
protected:
/**
* Populates the property tree from an XML serialization of the supplied J2735 data structure
*/
virtual void flush(message_container_type &container) const
{
if (_j2735_data)
{
std::stringstream ss;
ss << as_string(*_j2735_data);
container.load<XML>(ss);
}
}
int print_xml(FILE *stream, asn_type *descr, const void *data) const
{
return xer_fprint(stream, descr, (void *)data);
}
std::shared_ptr<message_type> _j2735_data;
private:
template <typename T = message_type>
std::string as_string(const T &data, asn_type *descr = get_descriptor()) const
{
char *buffer;
size_t bufSize;
FILE *mStream = open_memstream(&buffer, &bufSize);
if (mStream == NULL)
{
std::string errMsg(strerror(errno));
BOOST_THROW_EXCEPTION(J2735Exception("Unable to open stream in memory: " + errMsg));
}
if (print_xml(mStream, descr, &data) < 0)
BOOST_THROW_EXCEPTION(J2735Exception("Unable to stream XML contents in memory: Unknown error"));
fclose(mStream);
std::string xml(buffer, bufSize);
free(buffer);
buffer = NULL;
return xml;
}
};
} /* End namespace messages */
} /* End namespace tmx */
// Automatically include the encoded and decoded messages
#include <tmx/messages/TmxJ2735Codec.hpp>
#endif /* TMX_MESSAGES_TMXJ2735_HPP_ */
|
{"hexsha": "70b3f4076a6dfc87792ffddef7990d36d8f7a785", "size": 7099, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/tmx/TmxApi/tmx/messages/TmxJ2735.hpp", "max_stars_repo_name": "gbaumgardner/V2I-Hub", "max_stars_repo_head_hexsha": "447eb51d70059540919c72d8076809a58c807ef1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/tmx/TmxApi/tmx/messages/TmxJ2735.hpp", "max_issues_repo_name": "gbaumgardner/V2I-Hub", "max_issues_repo_head_hexsha": "447eb51d70059540919c72d8076809a58c807ef1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/tmx/TmxApi/tmx/messages/TmxJ2735.hpp", "max_forks_repo_name": "gbaumgardner/V2I-Hub", "max_forks_repo_head_hexsha": "447eb51d70059540919c72d8076809a58c807ef1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2021-04-30T21:46:34.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-01T19:05:34.000Z", "avg_line_length": 28.8577235772, "max_line_length": 113, "alphanum_fraction": 0.6902380617, "num_tokens": 1864}
|
import logging
from collections import OrderedDict
import numpy as np
import pandas as pd
from ceteris_paribus.utils import transform_into_Series
def individual_variable_profile(explainer, new_observation, y=None, variables=None, grid_points=101,
variable_splits=None):
"""
Calculate ceteris paribus profile
:param explainer: a model to be explained
:param new_observation: a new observation for which the profiles are calculated
:param y: y true labels for `new_observation`. If specified then will be added to ceteris paribus plots
:param variables: collection of variables selected for calculating profiles
:param grid_points: number of points for profile
    :param variable_splits: dictionary of splits for variables, in most cases created with `_calculate_variable_splits()`. If None then it will be calculated based on validation data available in the `explainer`.
:return: instance of CeterisParibus class
"""
variables = _get_variables(variables, explainer)
if not isinstance(new_observation, pd.core.frame.DataFrame):
new_observation = np.array(new_observation)
if new_observation.ndim == 1:
# make 1D array 2D
new_observation = new_observation.reshape((1, -1))
new_observation = pd.DataFrame(new_observation, columns=explainer.var_names)
else:
try:
new_observation.columns = explainer.var_names
except ValueError as e:
raise ValueError("Mismatched number of variables {} instead of {}".format(len(new_observation.columns),
len(explainer.var_names)))
if y is not None:
y = transform_into_Series(y)
cp_profile = CeterisParibus(explainer, new_observation, y, variables, grid_points, variable_splits)
return cp_profile
def _get_variables(variables, explainer):
"""
Get valid variables for the profile
:param variables: collection of variables
:param explainer: Explainer object
:return: collection of variables
"""
if variables:
if not set(variables).issubset(explainer.var_names):
raise ValueError('Invalid variable names')
else:
variables = explainer.var_names
return variables
def _valid_variable_splits(variable_splits, variables):
"""
Validate variable splits
"""
if set(variable_splits.keys()) == set(variables):
return True
else:
logging.warning("Variable splits are incorrect - wrong set of variables supplied. Parameter is ignored")
return False
class CeterisParibus:
def __init__(self, explainer, new_observation, y, selected_variables, grid_points, variable_splits):
"""
Creates Ceteris Paribus object
:param explainer: explainer wrapping the model
:param new_observation: DataFrame with observations for which the profiles will be calculated
:param y: pandas Series with labels for the observations
:param selected_variables: variables for which the profiles are calculated
:param grid_points: number of points in a single variable split if calculated automatically
:param variable_splits: mapping of variables into points the profile will be calculated, if None then calculate with the function `_calculate_variable_splits`
"""
self._data = explainer.data
self._predict_function = explainer.predict_fun
self._grid_points = grid_points
self._label = explainer.label
self.all_variable_names = explainer.var_names
self.new_observation = new_observation
self.selected_variables = list(selected_variables)
variable_splits = self._get_variable_splits(variable_splits)
self.profile = self._calculate_profile(variable_splits)
self.new_observation_values = self.new_observation[self.selected_variables]
self.new_observation_predictions = self._predict_function(self.new_observation)
self.new_observation_true = y
def _get_variable_splits(self, variable_splits):
"""
Helper function for calculating variable splits
"""
if variable_splits is None or not _valid_variable_splits(variable_splits, self.selected_variables):
variables_dict = self._data.to_dict(orient='series')
chosen_variables_dict = dict((var, variables_dict[var]) for var in self.selected_variables)
variable_splits = self._calculate_variable_splits(chosen_variables_dict)
return variable_splits
def _calculate_profile(self, variable_splits):
"""
Calculate DataFrame profile
"""
profiles_list = [self._single_variable_df(var_name, var_split)
for var_name, var_split in variable_splits.items()]
profile = pd.concat(profiles_list, ignore_index=True)
return profile
def _calculate_single_split(self, X_var):
"""
Calculate the split for a single variable
:param X_var: variable data - pandas Series
:return: selected subset of values for the variable
"""
if np.issubdtype(X_var.dtype, np.floating):
# grid points might be larger than the number of unique values
quantiles = np.linspace(0, 1, self._grid_points)
return np.quantile(X_var, quantiles)
else:
return np.unique(X_var)
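    # Illustrative behavior (hedged sketch, not part of the original class): with
    # self._grid_points == 5,
    #     _calculate_single_split(pd.Series([1.0, 2.0, 3.0, 4.0]))  -> array([1., 1.75, 2.5, 3.25, 4.])
    #     _calculate_single_split(pd.Series(['a', 'b', 'a']))       -> array(['a', 'b'], dtype=object)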
def _calculate_variable_splits(self, chosen_variables_dict):
"""
Calculate splits for the given variables
:param chosen_variables_dict: mapping of variables into the values
:return: mapping of variables into selected subsets of values
"""
return dict(
(var, self._calculate_single_split(X_var))
for (var, X_var) in chosen_variables_dict.items()
)
def _single_variable_df(self, var_name, var_split):
"""
Calculate profiles for a given variable
:param var_name: variable name
:param var_split: split values for the variable
:return: DataFrame with profiles for a given variable
"""
return pd.concat([self._single_observation_df(observation, var_name, var_split, profile_id)
for profile_id, observation in self.new_observation.iterrows()], ignore_index=True)
def _single_observation_df(self, observation, var_name, var_split, profile_id):
"""
Calculates the single profile
:param observation: observation for which the profile is calculated
:param var_name: variable name
:param var_split: split values for the variable
:param profile_id: profile id
:return: DataFrame with the calculated profile values
"""
        # grid_points and self._grid_points might differ for categorical variables
grid_points = len(var_split)
X = np.tile(observation, (grid_points, 1))
X_dict = OrderedDict(zip(self.all_variable_names, X.T))
df = pd.DataFrame.from_dict(X_dict)
df[var_name] = var_split
df['_yhat_'] = self._predict_function(df)
df['_vname_'] = np.repeat(var_name, grid_points)
df['_label_'] = self._label
df['_ids_'] = profile_id
return df
def split_by(self, column):
"""
Split cp profile data frame by values of a given column
:return: sorted mapping of values to dataframes
"""
return OrderedDict(sorted(list(self.profile.groupby(column, sort=False))))
def set_label(self, label):
self._label = label
def print_profile(self):
print('Selected variables: {}'.format(self.selected_variables))
print('Training data size: {}'.format(self._data.shape[0]))
print(self.profile)
|
{"hexsha": "61596a3fa6f7b65128bad8db7fd3f36820a333ed", "size": 7863, "ext": "py", "lang": "Python", "max_stars_repo_path": "ceteris_paribus/profiles.py", "max_stars_repo_name": "vittot/pyCeterisParibus", "max_stars_repo_head_hexsha": "efe5835574026fe6b1a6993cc08cc34e67b8e018", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 22, "max_stars_repo_stars_event_min_datetime": "2019-04-06T17:33:12.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-13T21:46:47.000Z", "max_issues_repo_path": "ceteris_paribus/profiles.py", "max_issues_repo_name": "vittot/pyCeterisParibus", "max_issues_repo_head_hexsha": "efe5835574026fe6b1a6993cc08cc34e67b8e018", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2018-11-27T17:50:16.000Z", "max_issues_repo_issues_event_max_datetime": "2019-04-23T17:07:43.000Z", "max_forks_repo_path": "ceteris_paribus/profiles.py", "max_forks_repo_name": "vittot/pyCeterisParibus", "max_forks_repo_head_hexsha": "efe5835574026fe6b1a6993cc08cc34e67b8e018", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2018-12-12T12:24:21.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-06T21:09:55.000Z", "avg_line_length": 41.167539267, "max_line_length": 212, "alphanum_fraction": 0.6811649498, "include": true, "reason": "import numpy", "num_tokens": 1604}
|
"""
oldpred.py
This file contains the functions to analyze the old OpenAPS prediction algorithms from the devicestatus.json files.
It examines entries spaced out by 5 minutes (DATA_SPACING).
The data must live in the data folder, inside a subfolder whose name is the ID only,
and the file must be named devicestatus.json.
Main Functions: analyze_old_pred_data(bg_df, old_pred_algorithm_array, start_test_index, end_test_index, pred_minutes, show_pred_plot, save_pred_plot, show_clarke_plot, save_clarke_plot, id_str)
MedicineX OpenAPS
2017-7-26
"""
import numpy as np
from collections import namedtuple
import math
from sklearn import metrics
import ClarkeErrorGrid
import matplotlib.pyplot as plt
#The number of minutes that each time point is spaced out in the data (e.g. data is taken every 5 minutes)
DATA_SPACING = 5
#Defines the range such that any actual BG within this range will be compared to the predBG.
#(e.g. if a predBG made at 0 min targets 30 min and there is no actual BG at exactly 30 min, ACTUAL_BG_RANGE
#accepts an actualBG whose time satisfies 30 - ACTUAL_BG_RANGE < x < 30 + ACTUAL_BG_RANGE, i.e. 25 < x < 35)
ACTUAL_BG_RANGE = 5
#prediction horizon for eventualBG
EVENTUALBG_PRED_MINUTES = 30
#Function to create the new prediction array, prediction time array, curr, and number of missed
def _new_pred_array(start_index, end_index, total_len):
pred_array = np.zeros(total_len)
time_array = np.zeros(total_len)
curr = 0
miss = 0
return pred_array, time_array, curr, miss
#This function checks if the current value is nan and fixes the curr and miss values to reflect it
def _check_pred_nan(pred_array, curr, miss):
if not np.isnan(pred_array[curr]):
curr += 1
else:
miss += 1
return curr, miss
#Function to get the eventualBG and actual BG. Looks at the enacted directory before the suggested directory.
#If there is no data, then it increases the miss count by 1.
def _get_other_bg(bg_df, pred_array, pred_time_array, curr, miss, start_index, data_index, bg_str):
pred_time_array[curr] = (bg_df.iloc[data_index]['created_at'] - bg_df.iloc[start_index]['created_at']) / np.timedelta64(1, 'm')
try:
pred_array[curr] = bg_df.iloc[data_index]['openaps']['enacted'][bg_str]
curr, miss = _check_pred_nan(pred_array, curr, miss)
except:
try:
pred_array[curr] = bg_df.iloc[data_index]['openaps']['suggested'][bg_str]
curr, miss = _check_pred_nan(pred_array, curr, miss)
except:
miss += 1
return pred_array, pred_time_array, curr, miss
#Function to get the predicted bg for the IOB, COB, and aCOB predictions
#Looks at enacted directory first before looking at suggested directory.
#If there is no data, then it increases the miss count by 1.
def _get_named_pred(bg_df, pred_array, pred_time_array, curr, miss, start_index, data_index, pred_str, pred_array_index):
pred_time_array[curr] = (bg_df.iloc[data_index]['created_at'] - bg_df.iloc[start_index]['created_at']) / np.timedelta64(1, 'm')
try:
pred_array[curr] = bg_df.iloc[data_index]['openaps']['enacted']['predBGs'][pred_str][pred_array_index]
curr, miss = _check_pred_nan(pred_array, curr, miss)
except:
try:
pred_array[curr] = bg_df.iloc[data_index]['openaps']['suggested']['predBGs'][pred_str][pred_array_index]
curr, miss = _check_pred_nan(pred_array, curr, miss)
except:
miss += 1
return pred_array, pred_time_array, curr, miss
#Function to get the raw actual bg array and the old prediction arrays (eventualBG, IOB, COB, aCOB)
#directly from the dataframe given the dataframe, the start_index, end_index, and the index of the pred array (eg 30 min prediction has 6 as the index of pred array)
def _get_raw_pred_array(bg_df, start_index, end_index, pred_array_index):
total_len = start_index - end_index + 1
actual_bg_array, actual_bg_time_array, actual_curr, actual_miss = _new_pred_array(start_index, end_index, total_len) #Create new arrays
eventual_pred_array, eventual_pred_time_array, eventual_curr, eventual_miss = _new_pred_array(start_index, end_index, total_len)
iob_pred_array, iob_pred_time_array, iob_curr, iob_miss = _new_pred_array(start_index, end_index, total_len)
cob_pred_array, cob_pred_time_array, cob_curr, cob_miss = _new_pred_array(start_index, end_index, total_len)
acob_pred_array, acob_pred_time_array, acob_curr, acob_miss = _new_pred_array(start_index, end_index, total_len)
for data_index in range(start_index, end_index - 1, -1):
#Fill arrays
actual_bg_array, actual_bg_time_array, actual_curr, actual_miss= _get_other_bg(bg_df, actual_bg_array, actual_bg_time_array, actual_curr, actual_miss, start_index, data_index, 'bg')
eventual_pred_array, eventual_pred_time_array, eventual_curr, eventual_miss = _get_other_bg(bg_df, eventual_pred_array, eventual_pred_time_array,
eventual_curr, eventual_miss, start_index, data_index, 'eventualBG')
iob_pred_array, iob_pred_time_array, iob_curr, iob_miss = _get_named_pred(bg_df, iob_pred_array, iob_pred_time_array,
iob_curr, iob_miss, start_index, data_index, 'IOB', pred_array_index)
cob_pred_array, cob_pred_time_array, cob_curr, cob_miss = _get_named_pred(bg_df, cob_pred_array, cob_pred_time_array,
cob_curr, cob_miss, start_index, data_index, 'COB', pred_array_index)
acob_pred_array, acob_pred_time_array, acob_curr, acob_miss = _get_named_pred(bg_df, acob_pred_array, acob_pred_time_array,
acob_curr, acob_miss, start_index, data_index, 'aCOB', pred_array_index)
#Resize arrays to remove missed data points
actual_bg_array = np.resize(actual_bg_array, total_len - actual_miss)
actual_bg_time_array = np.resize(actual_bg_time_array, total_len - actual_miss)
eventual_pred_array = np.resize(eventual_pred_array, total_len - eventual_miss)
eventual_pred_time_array = np.resize(eventual_pred_time_array, total_len - eventual_miss)
iob_pred_array = np.resize(iob_pred_array, total_len - iob_miss)
iob_pred_time_array = np.resize(iob_pred_time_array, total_len - iob_miss)
cob_pred_array = np.resize(cob_pred_array, total_len - cob_miss)
cob_pred_time_array = np.resize(cob_pred_time_array, total_len - cob_miss)
acob_pred_array = np.resize(acob_pred_array, total_len - acob_miss)
acob_pred_time_array = np.resize(acob_pred_time_array, total_len - acob_miss)
return actual_bg_array, actual_bg_time_array, eventual_pred_array, eventual_pred_time_array, iob_pred_array, iob_pred_time_array, cob_pred_array, cob_pred_time_array, acob_pred_array, acob_pred_time_array
#Finds the nearest value in time to the given values in the given array.
#The nearest value in time must be within ACTUAL_BG_RANGE of the given value (for example, if ACTUAL_BG_RANGE = 5
#and the time is 30, then the nearest index must correspond to a time 25 < x < 35)
#If no value lies within that range, -1 is returned
def _find_nearest_index(array, value):
nearest_index = (np.abs(array-value)).argmin() #finds the index of the time value closest to the input value
if (int(np.abs(array[nearest_index] - value)) < ACTUAL_BG_RANGE):
#If inside the ACTUAL_BG_RANGE, then return the nearest index
return nearest_index
else:
return -1
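#Illustrative sketch (hypothetical numbers, not part of the original file): with ACTUAL_BG_RANGE = 5,
#a prediction made for minute 30 is matched to the closest actual reading strictly within (25, 35):
#    _find_nearest_index(np.array([0., 5., 28., 60.]), 30)  returns 2   since |28 - 30| < 5
#    _find_nearest_index(np.array([0., 5., 60.]), 30)       returns -1  since the closest time, 60, is too far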
#Given the actual_bg_array, actual_bg_time_array, pred_array, pred_time_array, and num_pred_minutes,
#this function finds the nearest actual bg value to compare to the prediction value.
#If there is one, then it adds all the values to the result arrays, which are returned as a namedtuple.
#Returns the arrays such that the predBG corresponds to the actualBG in NUM_PRED_MINUTES in the future
def _find_compare_array(actual_bg_array, actual_bg_time_array, pred_array, pred_time_array, num_pred_minutes):
array_len = len(pred_array)
result_actual_bg_array = np.zeros(array_len)
result_actual_bg_time_array = np.zeros(array_len)
result_pred_array = np.zeros(array_len)
result_pred_time_array = np.zeros(array_len)
curr = 0
miss = 0
for array_index in range(array_len):
#The time that the prediction is predicting for
future_time = int(pred_time_array[array_index]) + num_pred_minutes
nearest_index = _find_nearest_index(actual_bg_time_array, future_time)
if nearest_index == -1:
miss += 1 #No corresponding bg to prediction
else:
result_actual_bg_array[curr] = actual_bg_array[nearest_index]
result_actual_bg_time_array[curr] = actual_bg_time_array[nearest_index]
result_pred_array[curr] = pred_array[array_index]
result_pred_time_array[curr] = future_time
curr += 1 #update index
result_actual_bg_array = np.resize(result_actual_bg_array, array_len - miss) #resize arrays
result_actual_bg_time_array = np.resize(result_actual_bg_time_array, array_len - miss)
result_pred_array = np.resize(result_pred_array, array_len - miss)
result_pred_time_array = np.resize(result_pred_time_array, array_len - miss)
#Created namedtuple to hold the data
OldPredData = namedtuple('OldPredData', ['result_actual_bg_array', 'result_actual_bg_time_array', 'result_pred_array', 'result_pred_time_array'])
return OldPredData(result_actual_bg_array, result_actual_bg_time_array, result_pred_array, result_pred_time_array)
#This function takes in the bg dataframe, the start and end indices, and the number of minutes in the future
#that you want to make a prediction for AKA prediction horizon (e.g. make a prediction for 30 minutes in the future).
#It returns the namedtuple with the following attributes ['result_actual_bg_array', 'result_actual_bg_time_array', 'result_pred_array', 'result_pred_time_array']
def _get_old_pred(bg_df, start_index, end_index, num_pred_minutes):
#The number of 5 minute sections until the prediction (e.g. 30 minutes = 6 sections)
pred_array_index = num_pred_minutes / DATA_SPACING
actual_bg_array, actual_bg_time_array, eventual_pred_array, eventual_pred_time_array, iob_pred_array, iob_pred_time_array, cob_pred_array, cob_pred_time_array, acob_pred_array, acob_pred_time_array = _get_raw_pred_array(bg_df, start_index, end_index, pred_array_index)
eventual_pred_data = _find_compare_array(actual_bg_array, actual_bg_time_array, eventual_pred_array, eventual_pred_time_array, 30)
iob_pred_data = _find_compare_array(actual_bg_array, actual_bg_time_array, iob_pred_array, iob_pred_time_array, num_pred_minutes)
cob_pred_data= _find_compare_array(actual_bg_array, actual_bg_time_array, cob_pred_array, cob_pred_time_array, num_pred_minutes)
acob_pred_data = _find_compare_array(actual_bg_array, actual_bg_time_array, acob_pred_array, acob_pred_time_array, num_pred_minutes)
return eventual_pred_data, iob_pred_data, cob_pred_data, acob_pred_data
#Plots old pred data given namedtuple of old data (eventualBG, acob, cob, or iob).
#Can show or save prediction plot based on show_pred_plot or save_pred_plot, respectively.
#Same goes for the Clarke Error grid with show_clarke_plot or save_clarke_plot, respectively.
#id_str, algorithm_str, minutes_str are strings of the ID, the prediction algorithm and the number of prediction minutes used for the title.
def _plot_old_pred_data(old_pred_data, show_pred_plot, save_pred_plot, show_clarke_plot, save_clarke_plot, id_str, algorithm_str, minutes_str):
actual_bg_array = old_pred_data.result_actual_bg_array
actual_bg_time_array = old_pred_data.result_actual_bg_time_array
pred_array = old_pred_data.result_pred_array
pred_time_array = old_pred_data.result_pred_time_array
#Root mean squared error
rms = math.sqrt(metrics.mean_squared_error(actual_bg_array, pred_array))
print " Root Mean Squared Error: " + str(rms)
print " Mean Absolute Error: " + str(metrics.mean_absolute_error(actual_bg_array, pred_array))
print " R^2 Coefficient of Determination: " + str(metrics.r2_score(actual_bg_array, pred_array))
plot, zone = ClarkeErrorGrid.clarke_error_grid(actual_bg_array, pred_array, id_str + " " + algorithm_str + " " + minutes_str)
print " Percent A:{}".format(float(zone[0]) / (zone[0] + zone[1] + zone[2] + zone[3] + zone[4]))
print " Percent C, D, E:{}".format(float(zone[2] + zone[3] + zone[4])/ (zone[0] + zone[1] + zone[2] + zone[3] + zone[4]))
print " Zones are A:{}, B:{}, C:{}, D:{}, E:{}\n".format(zone[0],zone[1],zone[2],zone[3],zone[4])
if save_clarke_plot: plt.savefig(id_str + algorithm_str.replace(" ", "") + minutes_str + "clarke.png")
if show_clarke_plot: plot.show()
plt.clf()
plt.plot(actual_bg_time_array, actual_bg_array, label="Actual BG", color='black', linestyle='-')
plt.plot(pred_time_array, pred_array, label="BG Prediction", color='black', linestyle=':')
plt.title(id_str + " " + algorithm_str + " " + minutes_str + " BG Analysis")
plt.ylabel("Blood Glucose Level (mg/dl)")
plt.xlabel("Time (minutes)")
plt.legend(loc='upper left')
# SHOW/SAVE PLOT DEPENDING ON THE BOOLEAN PARAMETER
if save_pred_plot: plt.savefig(id_str + algorithm_str.replace(" ","") + minutes_str + "plot.png")
if show_pred_plot: plt.show()
#Function to analyze the old OpenAPS data
def analyze_old_pred_data(bg_df, old_pred_algorithm_array, start_test_index, end_test_index, pred_minutes, show_pred_plot, save_pred_plot, show_clarke_plot, save_clarke_plot, id_str):
"""
Function that analyzes the old OpenAPS prediction models (eventualBG, aCOB, COB, and IOB)
based on what is put in the old_pred_algorithm_array. If it is empty, nothing will be plotted.
Since all the algorithms are calculated every 5 minutes, pred_minutes must be a multiple of 5.
eventualBG is only calculated by 30 minutes, so it will always be 30 minutes.
It will save the prediction plot if save_pred_plot is True and the clarke plot if save_clarke_plot is True.
It will show the prediction plot if show_pred_plot is true and the clarke plot if show_clarke_plot is True.
Input: bg_df Pandas dataframe of all of the data from ./data/[id_str]/devicestatus.json
old_pred_algorithm_array The array of the original OpenAPS prediction algorithms that you want to receive
data from. It can contain any/none of the following: "eventualBG", "acob", "cob", "iob"
start_test_index The starting index of the testing data
end_test_index The ending index of the testing data
pred_minutes The number of minutes in the future the prediction is for (predicion horizon). Must be a multiple of 5
show_pred_plot Boolean to show the prediction plot
save_pred_plot Boolean to save the prediction plot
show_clarke_plot Boolean to show the Clarke Error Grid Plot
save_clarke_plot Boolean to save the Clarke Error Grid Plot
id_str The ID of the person as a string. Used for the title
Output: None
Usage: analyze_old_pred_data(bg_df, ['iob', 'cob'], 1500, 0, 30, True, False, True, False, "00000001")
"""
if pred_minutes % 5 != 0: raise Exception("The prediction minutes is not a multiple of 5.")
eventual_pred_data, iob_pred_data, cob_pred_data, acob_pred_data = _get_old_pred(bg_df, start_test_index, end_test_index, pred_minutes)
if 'eventualBG' in old_pred_algorithm_array and pred_minutes == EVENTUALBG_PRED_MINUTES:
print(" eventualBG")
_plot_old_pred_data(eventual_pred_data, show_pred_plot, save_pred_plot, show_clarke_plot, save_clarke_plot, id_str, "eventualBG", "Pred" + str(30))
if 'iob' in old_pred_algorithm_array:
print(" iob")
_plot_old_pred_data(iob_pred_data, show_pred_plot, save_pred_plot, show_clarke_plot, save_clarke_plot, id_str, "IOB", "Pred" + str(pred_minutes))
if 'cob' in old_pred_algorithm_array:
print(" cob")
_plot_old_pred_data(cob_pred_data, show_pred_plot, save_pred_plot, show_clarke_plot, save_clarke_plot, id_str, "COB", "Pred" + str(pred_minutes))
if 'acob' in old_pred_algorithm_array:
print(" acob")
_plot_old_pred_data(acob_pred_data, show_pred_plot, save_pred_plot, show_clarke_plot, save_clarke_plot, id_str, "aCOB", "Pred" + str(pred_minutes))
|
{"hexsha": "5610a36c9fdffb6b962cf5801885a40bc8822c2c", "size": 17073, "ext": "py", "lang": "Python", "max_stars_repo_path": "oldpred.py", "max_stars_repo_name": "medicinexlab/openAPS", "max_stars_repo_head_hexsha": "76ff91a92adcf2815f97a3cf905ce3b2b6d6dfba", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2017-07-25T23:29:39.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-02T23:36:16.000Z", "max_issues_repo_path": "oldpred.py", "max_issues_repo_name": "medicinexlab/openAPS", "max_issues_repo_head_hexsha": "76ff91a92adcf2815f97a3cf905ce3b2b6d6dfba", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "oldpred.py", "max_forks_repo_name": "medicinexlab/openAPS", "max_forks_repo_head_hexsha": "76ff91a92adcf2815f97a3cf905ce3b2b6d6dfba", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2017-07-25T23:29:47.000Z", "max_forks_repo_forks_event_max_datetime": "2018-10-01T21:38:45.000Z", "avg_line_length": 60.1161971831, "max_line_length": 272, "alphanum_fraction": 0.7257072571, "include": true, "reason": "import numpy", "num_tokens": 4134}
|
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
from numpy import zeros, power  # the built-in complex is used for dtypes; numpy.complex is deprecated
import multiprocessing
from PySide2.QtCore import QThread, Signal
from GridCal.Engine.basic_structures import Logger
from GridCal.Engine.Simulations.PowerFlow.power_flow_results import PowerFlowResults
from GridCal.Engine.Simulations.Stochastic.monte_carlo_results import MonteCarloResults
from GridCal.Engine.Simulations.Stochastic.monte_carlo_input import MonteCarloInput
from GridCal.Engine.Core.time_series_pf_data import TimeCircuit
from GridCal.Engine.Core.multi_circuit import MultiCircuit
from GridCal.Engine.basic_structures import CDF
from GridCal.Engine.Simulations.PowerFlow.power_flow_worker import PowerFlowOptions, single_island_pf, \
power_flow_worker_args, power_flow_post_process
from GridCal.Engine.Core.time_series_pf_data import compile_time_circuit, split_time_circuit_into_islands, BranchImpedanceMode
########################################################################################################################
# Monte Carlo classes
########################################################################################################################
def make_monte_carlo_input(numerical_input_island: TimeCircuit):
"""
    Generate a Monte Carlo input instance
    :param numerical_input_island: TimeCircuit island providing the per-bus Sbus, Ibus and Yshunt_from_devices profiles
    :return: MonteCarloInput built from the per-bus CDFs
"""
n = numerical_input_island.nbus
Scdf = [None] * n
Icdf = [None] * n
Ycdf = [None] * n
for i in range(n):
Scdf[i] = CDF(numerical_input_island.Sbus[i, :])
Icdf[i] = CDF(numerical_input_island.Ibus[i, :])
Ycdf[i] = CDF(numerical_input_island.Yshunt_from_devices[i, :])
return MonteCarloInput(n, Scdf, Icdf, Ycdf)
class MonteCarlo(QThread):
progress_signal = Signal(float)
progress_text = Signal(str)
done_signal = Signal()
name = 'Monte Carlo'
def __init__(self, grid: MultiCircuit, options: PowerFlowOptions, mc_tol=1e-3, batch_size=100, max_mc_iter=10000,
opf_time_series_results=None):
"""
Monte Carlo simulation constructor
        :param grid: MultiCircuit instance
        :param options: Power flow options
        :param mc_tol: Monte Carlo standard-deviation tolerance
        :param batch_size: size of the batch
        :param max_mc_iter: maximum number of Monte Carlo iterations in case the tolerance is not reached
        :param opf_time_series_results: optional OPF time series results used when compiling the circuit
"""
QThread.__init__(self)
self.circuit = grid
self.options = options
self.opf_time_series_results = opf_time_series_results
self.mc_tol = mc_tol
self.batch_size = batch_size
self.max_mc_iter = max_mc_iter
self.results = None
self.logger = Logger()
self.pool = None
self.returned_results = list()
self.__cancel__ = False
def get_steps(self):
"""
Get time steps list of strings
"""
p = self.results.points_number
return ['point:' + str(l) for l in range(p)]
def update_progress_mt(self, res):
"""
"""
t, _ = res
progress = (t + 1) / self.max_mc_iter * 100
self.progress_signal.emit(progress)
self.returned_results.append(res)
def run_multi_thread(self):
"""
Run the monte carlo simulation
@return: MonteCarloResults instance
"""
self.__cancel__ = False
# initialize the grid time series results
# we will append the island results with another function
# self.circuit.time_series_results = TimeSeriesResults(0, 0, [])
self.pool = multiprocessing.Pool()
it = 0
variance_sum = 0.0
std_dev_progress = 0
v_variance = 0
n = len(self.circuit.buses)
m = self.circuit.get_branch_number()
mc_results = MonteCarloResults(n, m, name='Monte Carlo')
avg_res = PowerFlowResults()
avg_res.initialize(n, m)
# compile circuits
numerical_circuit = self.circuit.compile_time_series()
# perform the topological computation
calc_inputs_dict = numerical_circuit.compute(branch_tolerance_mode=self.options.branch_impedance_tolerance_mode,
ignore_single_node_islands=self.options.ignore_single_node_islands)
mc_results.bus_types = numerical_circuit.bus_types
v_sum = zeros(n, dtype=complex)
self.progress_signal.emit(0.0)
while (std_dev_progress < 100.0) and (it < self.max_mc_iter) and not self.__cancel__:
self.progress_text.emit('Running Monte Carlo: Variance: ' + str(v_variance))
mc_results = MonteCarloResults(n, m, self.batch_size, name='Monte Carlo')
# for each partition of the profiles...
for t_key, calc_inputs in calc_inputs_dict.items():
# For every island, run the time series
for island_index, numerical_island in enumerate(calc_inputs):
# set the time series as sampled
monte_carlo_input = make_monte_carlo_input(numerical_island)
mc_time_series = monte_carlo_input(self.batch_size, use_latin_hypercube=False)
Vbus = numerical_island.Vbus
branch_rates = numerical_island.branch_rates
# short cut the indices
b_idx = numerical_island.original_bus_idx
br_idx = numerical_island.original_branch_idx
self.returned_results = list()
t = 0
while t < self.batch_size and not self.__cancel__:
Ysh, Ibus, Sbus = mc_time_series.get_at(t)
args = (t, self.options, numerical_island, Vbus, Sbus, Ibus, branch_rates)
self.pool.apply_async(power_flow_worker_args, (args,), callback=self.update_progress_mt)
# wait for all jobs to complete
self.pool.close()
self.pool.join()
# collect results
self.progress_text.emit('Collecting batch results...')
for t, res in self.returned_results:
# store circuit results at the time index 't'
mc_results.S_points[t, numerical_island.original_bus_idx] = res.Sbus
mc_results.V_points[t, numerical_island.original_bus_idx] = res.voltage
mc_results.Sbr_points[t, numerical_island.original_branch_idx] = res.Sbranch
mc_results.loading_points[t, numerical_island.original_branch_idx] = res.loading
mc_results.losses_points[t, numerical_island.original_branch_idx] = res.losses
# compile MC results
self.progress_text.emit('Compiling results...')
mc_results.compile()
# compute the island branch results
island_avg_res = numerical_island.compute_branch_results(mc_results.voltage[b_idx])
# apply the island averaged results
avg_res.apply_from_island(island_avg_res, b_idx=b_idx, br_idx=br_idx)
# Compute the Monte Carlo values
it += self.batch_size
mc_results.append_batch(mc_results)
v_sum += mc_results.get_voltage_sum()
v_avg = v_sum / it
v_variance = abs((power(mc_results.V_points - v_avg, 2.0) / (it - 1)).min())
# progress
variance_sum += v_variance
err = variance_sum / it
if err == 0:
err = 1e-200 # to avoid division by zeros
mc_results.error_series.append(err)
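            # convergence metric: std_dev_progress reaches 100 once the running error estimate
            # err drops to mc_tol, which (together with max_mc_iter) terminates the sampling loop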
            # emit the progress signal
std_dev_progress = 100 * self.mc_tol / err
if std_dev_progress > 100:
std_dev_progress = 100
self.progress_signal.emit(max((std_dev_progress, it / self.max_mc_iter * 100)))
# compute the averaged branch magnitudes
mc_results.sbranch = avg_res.Sbranch
mc_results.losses = avg_res.losses
# print('V mc: ', mc_results.voltage)
        # send the finish signal
self.progress_signal.emit(0.0)
self.progress_text.emit('Done!')
self.done_signal.emit()
return mc_results
def run_single_thread(self):
"""
Run the monte carlo simulation
@return:
"""
self.__cancel__ = False
# initialize the grid time series results
# we will append the island results with another function
# self.circuit.time_series_results = TimeSeriesResults(0, 0, [])
# Sbase = self.circuit.Sbase
it = 0
variance_sum = 0.0
std_dev_progress = 0
v_variance = 0
# n = len(self.circuit.buses)
# m = self.circuit.get_branch_number()
#
# # compile circuits
# numerical_circuit = self.circuit.compile_time_series()
#
# # perform the topological computation
# calc_inputs_dict = numerical_circuit.compute(branch_tolerance_mode=self.options.branch_impedance_tolerance_mode,
# ignore_single_node_islands=self.options.ignore_single_node_islands)
#
# mc_results = MonteCarloResults(n, m, name='Monte Carlo')
# avg_res = PowerFlowResults()
# avg_res.initialize(n, m)
# compile the multi-circuit
numerical_circuit = compile_time_circuit(circuit=self.circuit,
apply_temperature=False,
branch_tolerance_mode=BranchImpedanceMode.Specified,
opf_results=self.opf_time_series_results)
# do the topological computation
calculation_inputs = split_time_circuit_into_islands(numeric_circuit=numerical_circuit,
ignore_single_node_islands=self.options.ignore_single_node_islands)
mc_results_master = MonteCarloResults(n=numerical_circuit.nbus,
m=numerical_circuit.nbr,
p=self.max_mc_iter,
bus_names=numerical_circuit.bus_names,
branch_names=numerical_circuit.branch_names,
bus_types=numerical_circuit.bus_types,
name='Monte Carlo')
avg_res = PowerFlowResults(n=numerical_circuit.nbus,
m=numerical_circuit.nbr,
n_tr=numerical_circuit.ntr,
n_hvdc=numerical_circuit.nhvdc,
bus_names=numerical_circuit.bus_names,
branch_names=numerical_circuit.branch_names,
transformer_names=numerical_circuit.tr_names,
hvdc_names=numerical_circuit.hvdc_names,
bus_types=numerical_circuit.bus_types)
n = numerical_circuit.nbus
m = numerical_circuit.nbr
v_sum = zeros(n, dtype=complex)
self.progress_signal.emit(0.0)
while (std_dev_progress < 100.0) and (it < self.max_mc_iter) and not self.__cancel__:
self.progress_text.emit('Running Monte Carlo: Variance: ' + str(v_variance))
batch_results = MonteCarloResults(n=numerical_circuit.nbus,
m=numerical_circuit.nbr,
p=self.max_mc_iter,
bus_names=numerical_circuit.bus_names,
branch_names=numerical_circuit.branch_names,
bus_types=numerical_circuit.bus_types,
name='Monte Carlo')
# For every island, run the time series
for island_index, numerical_island in enumerate(calculation_inputs):
# short cut the indices
bus_idx = numerical_island.original_bus_idx
br_idx = numerical_island.original_branch_idx
# set the time series as sampled
monte_carlo_input = make_monte_carlo_input(numerical_island)
mc_time_series = monte_carlo_input(self.batch_size, use_latin_hypercube=False)
Vbus = numerical_island.Vbus[0, :]
# run the time series
for t in range(self.batch_size):
# set the power values
Y, I, S = mc_time_series.get_at(t)
res = single_island_pf(circuit=numerical_island,
Vbus=Vbus,
Sbus=S,
Ibus=I,
branch_rates=numerical_island.branch_rates[0, :],
options=self.options,
logger=self.logger)
batch_results.S_points[t, bus_idx] = res.Sbus
batch_results.V_points[t, bus_idx] = res.voltage
batch_results.Sbr_points[t, br_idx] = res.Sbranch
batch_results.loading_points[t, br_idx] = res.loading
batch_results.losses_points[t, br_idx] = res.losses
self.progress_text.emit('Compiling results...')
batch_results.compile()
# compute the island branch results
Sbranch, Ibranch, Vbranch, loading, \
losses, flow_direction, Sbus = power_flow_post_process(numerical_island,
Sbus=batch_results.S_points.mean(axis=0)[bus_idx],
V=batch_results.V_points.mean(axis=0)[bus_idx],
branch_rates=numerical_island.branch_rates[0, :])
# apply the island averaged results
avg_res.Sbus[bus_idx] = Sbus
avg_res.voltage[bus_idx] = batch_results.voltage[bus_idx]
avg_res.Sbranch[br_idx] = Sbranch
avg_res.Ibranch[br_idx] = Ibranch
avg_res.Vbranch[br_idx] = Vbranch
avg_res.loading[br_idx] = loading
avg_res.losses[br_idx] = losses
avg_res.flow_direction[br_idx] = flow_direction
# Compute the Monte Carlo values
it += self.batch_size
mc_results_master.append_batch(batch_results)
v_sum += mc_results_master.get_voltage_sum()
v_avg = v_sum / it
v_variance = abs((power(mc_results_master.V_points - v_avg, 2.0) / (it - 1)).min())
# progress
variance_sum += v_variance
err = variance_sum / it
if err == 0:
err = 1e-200 # to avoid division by zeros
mc_results_master.error_series.append(err)
            # emit the progress signal
std_dev_progress = 100 * self.mc_tol / err
if std_dev_progress > 100:
std_dev_progress = 100
self.progress_signal.emit(max((std_dev_progress, it / self.max_mc_iter * 100)))
# compile results
mc_results_master.bus_types = numerical_circuit.bus_types
        # send the finish signal
self.progress_signal.emit(0.0)
self.progress_text.emit('Done!')
self.done_signal.emit()
return mc_results_master
def run(self):
"""
Run the monte carlo simulation
@return:
"""
# print('LHS run')
self.__cancel__ = False
if self.options.multi_thread:
self.results = self.run_multi_thread()
else:
self.results = self.run_single_thread()
        # send the finish signal
self.progress_signal.emit(0.0)
self.progress_text.emit('Done!')
self.done_signal.emit()
def cancel(self):
"""
Cancel the simulation
:return:
"""
self.__cancel__ = True
self.progress_signal.emit(0.0)
self.progress_text.emit('Cancelled')
self.done_signal.emit()
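# Minimal usage sketch (assuming a MultiCircuit `grid` and a PowerFlowOptions `options` built elsewhere):
#     mc = MonteCarlo(grid, options, mc_tol=1e-3, batch_size=100, max_mc_iter=10000)
#     mc.run()
#     mc_results = mc.results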
|
{"hexsha": "d1a559dbfdbea5d020fad791f9a09d9b2b41a761", "size": 17376, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/GridCal/Engine/Simulations/Stochastic/monte_carlo_driver.py", "max_stars_repo_name": "vineetjnair9/GridCal", "max_stars_repo_head_hexsha": "5b63cbae45cbe176b015e5e99164a593f450fe71", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/GridCal/Engine/Simulations/Stochastic/monte_carlo_driver.py", "max_issues_repo_name": "vineetjnair9/GridCal", "max_issues_repo_head_hexsha": "5b63cbae45cbe176b015e5e99164a593f450fe71", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/GridCal/Engine/Simulations/Stochastic/monte_carlo_driver.py", "max_forks_repo_name": "vineetjnair9/GridCal", "max_forks_repo_head_hexsha": "5b63cbae45cbe176b015e5e99164a593f450fe71", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.9811320755, "max_line_length": 128, "alphanum_fraction": 0.5735497238, "include": true, "reason": "from numpy", "num_tokens": 3501}
|
#!/usr/bin/python
from __future__ import division
import copy
import numpy as np
from matplotlib import animation
from matplotlib import pyplot as plt
import constants
from utils import cart2pol, pol2cart
def animate(frame):
global positions
update_boids()
scatter.set_offsets(positions.transpose())
def init_positions(lower_limits, upper_limits):
"""
    Return a (2, BOIDS_COUNT) array of positions drawn uniformly between lower_limits and upper_limits
"""
width = upper_limits - lower_limits
return lower_limits[:, np.newaxis] + np.random.rand(2, constants.BOIDS_COUNT) * width[:, np.newaxis]
def calculate_centers(list_ind_1, list_ind_2, square_distances, distance):
"""
    Calculate, for each boid in list_ind_1, the centre of its neighbours from list_ind_2 lying within `distance`
"""
global positions
list_neighbors = np.where(square_distances < distance, 1, 0)
list_neighbors -= np.identity(list_neighbors.shape[0], dtype="int64")
list_neighbors = list_neighbors[list_ind_1, :]
list_neighbors = list_neighbors[:, list_ind_2]
# matrix n_1 * n_2
nb_neighbors = np.sum(list_neighbors, axis=1, dtype="float64")
nb_neighbors = nb_neighbors[:, np.newaxis]
# matrix n_1 * 1
ind_has_neighbors = tuple(np.where(nb_neighbors != 0)[0])
nb_neighbors[ind_has_neighbors, 0] = np.divide(np.ones((len(ind_has_neighbors)), dtype="float64"),
np.array(nb_neighbors[ind_has_neighbors, 0], dtype="float64"))
centers = (np.array(list_neighbors, dtype=float) # (n_1, n_2) @ (n_2, 1) = (n_1, 1)
@ positions[:, list_ind_2].T) * nb_neighbors # * (n_1, 1)
return copy.deepcopy(centers)
def get_ind(species):
"""
Get the list of indices for one species.
"""
    assert 0 <= species <= constants.RELATIONS.shape[0], 'this species does not exist'
    # particular cases:
    # if there is only one species or it is the first species
if species == 0:
return list(np.arange(0, constants.IND[0]))
# other species
else:
return list(
np.arange(np.sum(constants.IND[:species]), np.sum(constants.IND[:species]) + constants.IND[species]))
def cohesion_separation_alignment(species, square_distances, velocity_differences):
"""
Apply cohesion-separation-alignment for a species
"""
global velocities
ind_species = get_ind(species)
# cohesion:
middle = calculate_centers(ind_species, ind_species, square_distances, constants.ATTRACTION_DISTANCE[species])
# we get a n_i*2 Matrix with middles
has_neighbors = np.where(middle[:, 0] != 0, 1, 0)
has_neighbors = has_neighbors[np.newaxis, :]
# For boids that have neighbors near, we apply cohesion
direction_to_middle = (middle.T - positions[:, ind_species]) * has_neighbors
velocities[:, ind_species] += direction_to_middle * constants.MOVE_TO_MIDDLE_STRENGTH[species]
# separation
middle = calculate_centers(ind_species, ind_species, square_distances, constants.ALERT_DISTANCE[species])
has_neighbors = np.where(middle[:, 0] != 0, 1, 0)
has_neighbors = has_neighbors[np.newaxis, :]
direction_to_middle = (middle.T - positions[:, ind_species]) * has_neighbors
velocities[:, ind_species] -= direction_to_middle * constants.SEPARATION_STRENGTH[species]
# alignment
dist = square_distances[ind_species, :]
dist = dist[:, ind_species]
very_far = dist > constants.FORMATION_FLYING_DISTANCE[species]
    # N * N matrix, True where the neighbour is farther than FORMATION_FLYING_DISTANCE, False otherwise
velocity_differences_if_close = np.copy(velocity_differences[:, :, ind_species])
velocity_differences_if_close = velocity_differences_if_close[:, ind_species, :]
# matrix (2, n_i, n_i)
velocity_differences_if_close[0, :, :][very_far] = 0
velocity_differences_if_close[1, :, :][very_far] = 0
# (2,n_i) (2, n_i)
velocities[:, ind_species] -= np.mean(velocity_differences_if_close, 1) \
* constants.FORMATION_FLYING_STRENGTH[species]
def update_boids():
"""
Update the boids each frame
"""
global positions, velocities
# 2*N*N separation matrix
separations = positions[:, np.newaxis, :] - positions[:, :, np.newaxis]
# we look at the separation distances
squared_displacements = separations * separations
# N*N matrix with x^2 + y^2
square_distances = np.sum(squared_displacements, 0)
velocity_differences = velocities[:, np.newaxis, :] - velocities[:, :, np.newaxis]
for i in range(constants.RELATIONS.shape[0]):
# for species n°i, we apply cohesion, separation, alignment
ind_i = get_ind(i)
cohesion_separation_alignment(i, square_distances, velocity_differences)
flee = np.where(constants.RELATIONS[i, :] == -1)[0]
chase = np.where(constants.RELATIONS[i, :] == 1)[0]
ind_flee = []
ind_chase = []
for ind in flee:
ind_flee += get_ind(ind)
for ind in chase:
ind_chase += get_ind(ind)
        # ATTRACTION_DISTANCE to be replaced here (temporarily) by a dedicated parameter
chase_boids(ind_i, ind_flee, square_distances, constants.FLEE_DISTANCE[i], constants.FLEE_STRENGTH[i],
chase=False)
chase_boids(ind_i, ind_chase, square_distances, constants.CHASE_DISTANCE[i], constants.CHASE_STRENGTH[i],
chase=True)
limit_vel()
positions = positions + velocities
wrap()
def chase_boids(ind_chase, ind_prey, square_distances, distance, strength, chase):
"""
    Boids in ind_chase chase boids in ind_prey when chase is True, or flee from them when chase is False
"""
global velocities
middle = calculate_centers(ind_chase, ind_prey, square_distances, distance)
# we get a n_i*2 Matrix with middles
has_neighbors = np.where(middle[:, 0] != 0, 1, 0)
has_neighbors = has_neighbors[np.newaxis, :]
# For boids that have neighbors near, we apply cohesion
direction_to_middle = (middle.T - positions[:, ind_chase]) * has_neighbors
if chase:
velocities[:, ind_chase] += direction_to_middle * strength
else:
velocities[:, ind_chase] -= direction_to_middle * strength
def limit_vel():  # cart2pol and pol2cart are expensive; candidate for optimisation
"""
Limit the velocity
"""
global velocities
rho, phy = cart2pol(velocities[0, :], velocities[1, :])
rho = np.where(rho > constants.MAX_FORCE, constants.MAX_FORCE, rho)
rho = np.where(rho < constants.MIN_FORCE, constants.MIN_FORCE, rho)
velocities[0, :], velocities[1, :] = pol2cart(rho, phy)
def wrap():
"""
Make sure that the boids stay into the simulation
"""
global positions
# we update the x positions to stay in the simulation
pos_x = np.where(positions[0, :] < 0, positions[0, :] + constants.X_LIMIT, positions[0, :])
pos_x = np.where(pos_x > constants.X_LIMIT, pos_x - constants.X_LIMIT, pos_x)
pos_x = pos_x[np.newaxis, :]
# we update the y positions to stay in the simulation
pos_y = np.where(positions[1, :] < 0, positions[1, :] + constants.Y_LIMIT, positions[1, :])
pos_y = np.where(pos_y > constants.Y_LIMIT, pos_y - constants.Y_LIMIT, pos_y)
pos_y = pos_y[np.newaxis, :]
positions = np.concatenate((pos_x, pos_y), axis=0)
# initial positions and velocities
positions = init_positions(np.array([10, 10]), np.array([constants.X_LIMIT - 10, constants.Y_LIMIT - 10]))
velocities = init_positions(np.array([5, -50]), np.array([12, 50]))
# animation
figure = plt.figure(figsize=[35, 35])
axes = plt.axes(xlim=(0, constants.LIMITS[0]), ylim=(0, constants.LIMITS[1]))
scatter = axes.scatter(positions[0, :], positions[1, :], c=constants.LIST_COLOR,
marker='o', lw=0.5)
anim = animation.FuncAnimation(figure, animate, frames=200, interval=30)
plt.show()
|
{"hexsha": "8ebd4a3d095eb0eceb20f0fabd9f38038647fd1b", "size": 7804, "ext": "py", "lang": "Python", "max_stars_repo_path": "simple_boids.py", "max_stars_repo_name": "Dradoue/Simple_boids", "max_stars_repo_head_hexsha": "b96cdd45d32444ac2acedaac3cac4bed3922305e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "simple_boids.py", "max_issues_repo_name": "Dradoue/Simple_boids", "max_issues_repo_head_hexsha": "b96cdd45d32444ac2acedaac3cac4bed3922305e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "simple_boids.py", "max_forks_repo_name": "Dradoue/Simple_boids", "max_forks_repo_head_hexsha": "b96cdd45d32444ac2acedaac3cac4bed3922305e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2280701754, "max_line_length": 114, "alphanum_fraction": 0.6736289083, "include": true, "reason": "import numpy", "num_tokens": 1956}
|
! This Software is property of University College London. The Licensee shall reproduce a copyright notice on every
! copy of the Software (including partial copies) and on any accompanying manuals and documentation in the form
! "Copyright (c) University College London, All rights reserved". Trademark and other proprietary notices must also
! be reproduced but the Licensee has no other right to use the name, arms, trademark, logo or other designation
! of University College London.
module output_handle_module
use lattice_setup_module
use simulation_setup_module
use mechanism_setup_module
use energetics_setup_module, only: nclusters, clusterOcc, clustergraphmultipl
use kmc_simulation_handle_module
use energetics_handle_module, only: globalenergy
use lattice_handle_module
use sampling_handle_module
implicit none
contains
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
subroutine write_history_header()
implicit none
integer i
if (.not.output_snapshots) return
write(ihistory,'(' // int2str(ngasspecs+1) // '(a,2x))') &
'Gas_Species: ', (trim(gasspecsnames(i)),i=1,ngasspecs)
write(ihistory,'(' // int2str(nsurfspecs+1) // '(a,2x))') &
'Surface_Species: ', (trim(surfspecsnames(i)),i=1,nsurfspecs)
if (allocated(cellsiteneighsf)) then ! If periodic lattice has been specified save the simulation box
write(ihistory,'(a)') 'Simulation_Box: '
do i = 1,2
write(ihistory,'(2x,2(ES32.16E3,2x))') ncellsrep(i)*vcell(1,i), ncellsrep(i)*vcell(2,i)
enddo
endif
write(ihistory,'(' // int2str(nsitetypes+1) // '(a,2x))') &
'Site_Types: ', (trim(sitetypenames(i)),i=1,nsitetypes)
end subroutine write_history_header
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
subroutine save_snaphshot()
implicit none
integer i, j
if (.not.output_snapshots) return
if (snap_on_event) then
if (mod(curstep-1_8,dkeventsnap) == 0_8) then
snapshnum = snapshnum + 1_8
! *** Write sample to history file
write(ihistory,'(a,I20,1x,I20,1x,ES30.16,1x,ES30.16,1x,ES30.16)') 'configuration ', snapshnum, curstep-1_8, prevtime, temp+tramp*curtime, globalenergy
do i = 1,nsites
write(ihistory,'(4(I10,1x))') i,(latticestate(i,j), j = 1,3)
enddo
write(ihistory,'('//int2str(ngasspecs)//'(I20,1x))') (gasspecsnums(j), j = 1,ngasspecs)
endif
else
if (snap_on_logtime) then
do while (snaptime <= min(curtime,maxtime) + 2*tiny(dtsnap))
snapshnum = snapshnum + 1_8
! *** Write sample to history file
write(ihistory,'(a,I20,1x,I20,1x,ES30.16,1x,ES30.16,1x,ES30.16)') 'configuration ', snapshnum, curstep-1_8, snaptime, temp+tramp*snaptime, globalenergy
do i = 1,nsites
write(ihistory,'(4(I10,1x))') i,(latticestate(i,j), j = 1,3)
enddo
write(ihistory,'('//int2str(ngasspecs)//'(I20,1x))') (gasspecsnums(j), j = 1,ngasspecs)
snaptime = snaptime*dtsnap
enddo
else
do while (snaptime <= min(curtime,maxtime) + 2*tiny(dtsnap))
snapshnum = snapshnum + 1_8
! *** Write sample to history file
write(ihistory,'(a,I20,1x,I20,1x,ES30.16,1x,ES30.16,1x,ES30.16)') 'configuration ', snapshnum, curstep-1_8, snaptime, temp+tramp*snaptime, globalenergy
do i = 1,nsites
write(ihistory,'(4(I10,1x))') i,(latticestate(i,j), j = 1,3)
!
!write(Histwrite) i
!write(Histwrite) latticestate(i,1)
!write(Histwrite) latticestate(i,2)
!write(Histwrite) latticestate(i,3)
enddo
write(ihistory,'('//int2str(ngasspecs)//'(I20,1x))') (gasspecsnums(j), j = 1,ngasspecs)
snaptime = snaptime + dtsnap
enddo
endif
endif
return
end subroutine save_snaphshot
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
subroutine write_procstat_header()
implicit none
integer i
if (.not.output_procstat) return
    ! If process statistics saving is requested, write the names of the elementary
    ! steps in the header of the corresponding file
write(iprocstat,'(' // int2str(nelemsteps+1) // '(a30,1x))') 'Overall', &
(trim(elemstepnames(i)),i=1,nelemsteps)
end subroutine write_procstat_header
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
subroutine save_procstatistics()
implicit none
integer i
if (.not.output_procstat) return
if (procstat_on_event) then
if (mod(curstep-1_8,dkeventprocstat) == 0_8) then
procstatnum = procstatnum + 1_8
! *** Write process statistics info
write(iprocstat,'(a,I20,1x,I20,1x,ES30.16)') 'configuration ', procstatnum, curstep-1_8, prevtime
write(iprocstat,'(' // int2str(nelemsteps+1) // '(ES30.16,1x))') (elemstep_avgtime(i), i = 0,nelemsteps)
write(iprocstat,'(' // int2str(nelemsteps+1) // '(I20,1x))') (elemstep_noccur(i), i = 0,nelemsteps)
endif
else
if (procstat_on_logtime) then
do while (procstattime <= min(curtime,maxtime) + 2*tiny(dtprocstat))
procstatnum = procstatnum + 1_8
! *** Write process statistics info
write(iprocstat,'(a,I20,1x,I20,1x,ES30.16)') 'configuration ', procstatnum, curstep-1_8, procstattime
write(iprocstat,'(' // int2str(nelemsteps+1) // '(ES30.16,1x))') (elemstep_avgtime(i), i = 0,nelemsteps)
write(iprocstat,'(' // int2str(nelemsteps+1) // '(I20,1x))') (elemstep_noccur(i), i = 0,nelemsteps)
procstattime = procstattime*dtprocstat
enddo
else
do while (procstattime <= min(curtime,maxtime) + 2*tiny(dtprocstat))
procstatnum = procstatnum + 1_8
! *** Write process statistics info
write(iprocstat,'(a,I20,1x,I20,1x,ES30.16)') 'configuration ', procstatnum, curstep-1_8, procstattime
write(iprocstat,'(' // int2str(nelemsteps+1) // '(ES30.16,1x))') (elemstep_avgtime(i), i = 0,nelemsteps)
write(iprocstat,'(' // int2str(nelemsteps+1) // '(I30,1x))') (elemstep_noccur(i), i = 0,nelemsteps)
procstattime = procstattime + dtprocstat
enddo
endif
endif
return
end subroutine save_procstatistics
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
subroutine write_specnums_header()
implicit none
integer i
if (.not.output_specnum) return
    ! If species number reporting is requested, write the names of the surface and
    ! gas species in the header of the corresponding file
write(ispecnum,'(a20,1x,a20,1x,a30,1x,a30,1x,a30,' // trim(int2str(nsurfspecs+ngasspecs)) // '(a20,1x))') &
'Entry','Nevents','Time','Temperature','Energy',(trim(surfspecsnames(i)),i=1,nsurfspecs), &
(trim(gasspecsnames(i)),i=1,ngasspecs)
end subroutine write_specnums_header
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
subroutine save_specnums(mproc)
use state_setup_module, only: nadsorb
implicit none
integer i, mproc, i2
if (.not.output_specnum) return
if (specnum_on_event) then
if (specnum_on_eleventoccur) then
if (proctypesites(mproc,0) == ieleventoccur) then
specnumnum = specnumnum + 1_8
! *** Write species numbers info
write(ispecnum,'(I20,1x,I20,1x,ES30.16,1x,ES30.16,1x,ES30.16,' // trim(int2str(nsurfspecs+ngasspecs)) // '(I20,1x))') &
specnumnum, curstep-1_8, prevtime,temp+tramp*prevtime,globalenergy, &
(sum(adsorbspecposi(1:nadsorb,0),mask = adsorbspecposi(1:nadsorb,0) == i)/i,i=1,nsurfspecs), &
(gasspecsnums(i),i=1,ngasspecs)
! Extra output files
write(Propfnum,'(ES30.16,1x)', advance='no') (propvec(i), i=1, nSAparams)
write(Propfnum,*) ' '
write(PropCountfnum,'(ES30.16,1x)', advance='no') (propCountvec(i) , i=1, nSAparams) ! Include truncation term ! Write process statistics info
write(PropCountfnum,*) ' '
write(SAfnum,'(ES30.16,1x)', advance='no') (elemstep_noccur(i) - propCountvec(i) , i=1, nSAparams) ! Record W for sensitivity analysis, Include truncation term
write(SAfnum,*) ' '
write(IntegSpecfnum,'(ES30.16,1x)', advance='no') (spec_cum(i) , i=1, nsurfspecs)
write(IntegSpecfnum,*) ' '
endif
else
if (mod(curstep-1_8,dkeventspecnum) == 0_8) then
specnumnum = specnumnum + 1_8
! *** Write species numbers info
write(ispecnum,'(I20,1x,I20,1x,ES30.16,1x,ES30.16,1x,ES30.16,' // trim(int2str(nsurfspecs+ngasspecs)) // '(I20,1x))') &
specnumnum, curstep-1_8, prevtime,temp+tramp*prevtime,globalenergy, &
(sum(adsorbspecposi(1:nadsorb,0),mask = adsorbspecposi(1:nadsorb,0) == i)/i,i=1,nsurfspecs), &
(gasspecsnums(i),i=1,ngasspecs)
! Extra output files
write(Propfnum,'(ES30.16,1x)', advance='no') (propvec(i), i=1, nSAparams)
write(Propfnum,*) ' '
write(PropCountfnum,'(ES30.16,1x)', advance='no') (propCountvec(i) , i=1, nSAparams) ! Include truncation term ! Write process statistics info
write(PropCountfnum,*) ' '
write(SAfnum,'(ES30.16,1x)', advance='no') (elemstep_noccur(i) - propCountvec(i) , i=1, nSAparams) ! Record W for sensitivity analysis, Include truncation term
write(SAfnum,*) ' '
write(IntegSpecfnum,'(ES30.16,1x)', advance='no') (spec_cum(i) , i=1, nsurfspecs)
write(IntegSpecfnum,*) ' '
endif
endif
else
if (specnum_on_logtime) then
do while (specnumtime <= min(curtime,maxtime) + 2*tiny(dtspecnum))
specnumnum = specnumnum + 1_8
! *** Write species numbers info
write(ispecnum,'(I20,1x,I20,1x,ES30.16,1x,ES30.16,1x,ES30.16,' // trim(int2str(nsurfspecs+ngasspecs)) // '(I20,1x))') &
specnumnum, curstep-1_8, specnumtime,temp+tramp*specnumtime,globalenergy, &
(sum(adsorbspecposi(1:nadsorb,0),mask = adsorbspecposi(1:nadsorb,0) == i)/i,i=1,nsurfspecs), &
(gasspecsnums(i),i=1,ngasspecs)
! Extra output files
write(Propfnum,'(ES30.16,1x)', advance='no') (propvec(i), i=1, nSAparams)
write(Propfnum,*) ' '
write(PropCountfnum,'(ES30.16,1x)', advance='no') (propCountvec(i) + propvec(i) * (specnumtime - prevtime), i=1, nSAparams) ! Include truncation term ! Write process statistics info
write(PropCountfnum,*) ' '
write(SAfnum,'(ES30.16,1x)', advance='no') (elemstep_noccur(i) - ( propCountvec(i) + propvec(i) * (specnumtime - prevtime) ), i=1, nSAparams) ! Record W for sensitivity analysis, Include truncation term
write(SAfnum,*) ' '
write(IntegSpecfnum,'(ES30.16,1x)', advance='no') (spec_cum(i) + ( sum(adsorbspecposi(1:nadsorb,0),mask = adsorbspecposi(1:nadsorb,0) == i)/i ) * (specnumtime - prevtime), i=1, nsurfspecs)
write(IntegSpecfnum,*) ' '
specnumtime = specnumtime * dtspecnum
enddo
else
do while (specnumtime <= min(curtime,maxtime) + 2*tiny(dtspecnum))
specnumnum = specnumnum + 1_8
! *** Write species numbers info
write(ispecnum,'(I20,1x,I20,1x,ES30.16,1x,ES30.16,1x,ES30.16,' // trim(int2str(nsurfspecs+ngasspecs)) // '(I20,1x))') &
specnumnum, curstep-1_8, specnumtime,temp+tramp*specnumtime,globalenergy, &
(sum(adsorbspecposi(1:nadsorb,0),mask = adsorbspecposi(1:nadsorb,0) == i)/i,i=1,nsurfspecs), &
(gasspecsnums(i),i=1,ngasspecs)
! Extra output files
write(Propfnum,'(ES30.16,1x)', advance='no') (propvec(i), i=1, nSAparams)
write(Propfnum,*) ' '
write(PropCountfnum,'(ES30.16,1x)', advance='no') (propCountvec(i) + propvec(i) * (specnumtime - prevtime), i=1, nSAparams) ! Include truncation term ! Write process statistics info
write(PropCountfnum,*) ' '
write(SAfnum,'(ES30.16,1x)', advance='no') (elemstep_noccur(i) - ( propCountvec(i) + propvec(i) * (specnumtime - prevtime) ), i=1, nSAparams) ! Record W for sensitivity analysis, Include truncation term
write(SAfnum,*) ' '
write(IntegSpecfnum,'(ES30.16,1x)', advance='no') (spec_cum(i) + ( sum(adsorbspecposi(1:nadsorb,0),mask = adsorbspecposi(1:nadsorb,0) == i)/i ) * (specnumtime - prevtime), i=1, nsurfspecs)
write(IntegSpecfnum,*) ' '
specnumtime = specnumtime + dtspecnum
enddo
endif
endif
do i = 1,nelemsteps
if (dtPrior > 0.d0) then
propCountvec(i) = propCountvec(i) + propvec(i) * dtPrior
endif
end do
do i = 1,nsurfspecs
if (dtPrior > 0.d0) then
spec_cum(i) = spec_cum(i) + ( sum(adsorbspecposi(1:nadsorb,0),mask = adsorbspecposi(1:nadsorb,0) == i)/i ) * dtPrior
endif
enddo
end subroutine save_specnums
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
subroutine dump_propensity_tables()
use state_setup_module, only: nadsorb
implicit none
integer i, iproc, j, si, sj, k, m, tempint, cntr
integer, allocatable :: indxk(:)
real(8) tempvar !, activenrg, preexpfac, eventpropensity0
real(8), allocatable :: propensarray(:)
logical flagsitedif, takesprecedensejoveri
    ! Caution: this subroutine must be run as the last procedure before terminating
! i.e. *after* writing the restart file
do cntr = 1,2
if (cntr == 1) then
allocate(indxk(nprocesses))
allocate(propensarray(nprocesses))
do iproc = 1,nprocesses
indxk(iproc) = iproc
propensarray(iproc) = procpropenst0(iproc)
enddo
elseif (cntr == 2) then
deallocate(event_times_heap)
deallocate(event_times_labels)
deallocate(event_times_indexes)
deallocate(proctypesites)
deallocate(procpropenst0)
deallocate(procdeltaenrg)
deallocate(nadsorprocparticip)
deallocate(adsorprocparticip)
deallocate(indxk)
deallocate(propensarray)
nprocesses = 0
curtime = 0.d0
debug_report_processes = .false.
call catalogue_all_processes()
allocate(indxk(nprocesses))
allocate(propensarray(nprocesses))
do iproc = 1,nprocesses
indxk(iproc) = iproc
propensarray(iproc) = procpropenst0(iproc)
enddo
endif
! In the following loops we are just sorting the propensities based on the:
! (i) magnitude
! (ii) the sites at which the corresponding process takes place
! The propensities with higher magnitude take precedence. If two propensities correspond to the
! same elementary step, the one happening at the site with the higher number takes precedence (and
! of course for multi-site processes, each site is checked). It may happen that two propensities
    ! are equal but correspond to processes with different site types; in this case the one with
    ! more sites participating takes precedence.
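    ! For example, propensities (0.2, 0.7, 0.7) are listed as 0.7, 0.7, 0.2, with the tie between
    ! the two equal values broken by the site-numbering rules above.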
do i = 1,nprocesses
do j = i+1,nprocesses
takesprecedensejoveri = .false.
if (propensarray(i) < propensarray(j)) then
takesprecedensejoveri = .true.
elseif ( (dabs(propensarray(i)) < 1.d-135 .and. dabs(propensarray(j)) < 1.d-135) .or. &
(dabs(propensarray(i)/propensarray(j)-1.d0) < 5.d-16) ) then
! if (dabs(propensarray(i)) < 1.d-15) then
! continue
! endif
if (elemstepnsites(proctypesites(indxk(i),0)) < elemstepnsites(proctypesites(indxk(j),0))) then
takesprecedensejoveri = .true.
elseif (elemstepnsites(proctypesites(indxk(i),0)) == elemstepnsites(proctypesites(indxk(j),0))) then
flagsitedif = .false.
do k = 1,elemstepnsites(proctypesites(indxk(i),0))
si = proctypesites(indxk(i),k)
sj = proctypesites(indxk(j),k)
if (si < sj) then
takesprecedensejoveri = .true.
flagsitedif = .true.
exit
elseif (si > sj) then
flagsitedif = .true.
exit
endif
enddo
if (.not.flagsitedif .and. proctypesites(indxk(i),0) < proctypesites(indxk(j),0)) then
takesprecedensejoveri = .true.
endif
endif
endif
if (takesprecedensejoveri) then
tempvar = propensarray(i)
propensarray(i) = propensarray(j)
propensarray(j) = tempvar
tempint = indxk(i)
indxk(i) = indxk(j)
indxk(j) = tempint
endif
enddo
enddo
if (cntr == 1) then
open(unit=iprocpropensdbg,file=trim(cprocpropensdbgfname),status='unknown')
do i = 1,nprocesses
k = indxk(i)
write(iprocpropensdbg,'(ES32.16E3,' // int2str(elemstepnsites(proctypesites(k,0))+1) // '(1x,i10))') &
propensarray(i), proctypesites(k,0), &
(proctypesites(k,m), m = 1,elemstepnsites(proctypesites(k,0)))
enddo
close(iprocpropensdbg)
elseif (cntr == 2) then
open(unit=iprocinicatpropensdbg,file=trim(cprocinicatpropensdbgfname),status='unknown')
do i = 1,nprocesses
k = indxk(i)
write(iprocinicatpropensdbg,'(ES32.16E3,' // int2str(elemstepnsites(proctypesites(k,0))+1) // '(1x,i10))') &
propensarray(i), proctypesites(k,0), &
(proctypesites(k,m), m = 1,elemstepnsites(proctypesites(k,0)))
enddo
close(iprocinicatpropensdbg)
endif
enddo
open(unit=iprocpropensstatedbg,file=trim(cprocpropensstatedbgfname),status='unknown')
write(iprocpropensstatedbg,'(a)') 'initial_state'
do i = 1,nadsorb
if (adsorbspecposi(i,0) > 0) then
write(iprocpropensstatedbg,'(a,' // int2str(surfspecsdent(adsorbspecposi(i,0))) // '(1x,i10))') &
' seed_on_sites ' // trim(surfspecsnames(adsorbspecposi(i,0))), &
(adsorbspecposi(i,k), k = 1,surfspecsdent(adsorbspecposi(i,0)))
endif
enddo
write(iprocpropensstatedbg,'(a)') 'end_initial_state'
close(iprocpropensstatedbg)
end subroutine dump_propensity_tables
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
subroutine dump_cluster_contribution_tables()
use state_setup_module, only: nadsorb
use energetics_setup_module, only: clustergraphmultipl, clusterenrg, &
clusternsites
implicit none
integer i, iclust, j, jcluster, si, sj, k, m, tempint, cntr
integer, allocatable :: indxk(:)
real(8) tempvar
real(8), allocatable :: energarray(:)
logical flagsitedif, takesprecedensejoveri
    ! Caution: this subroutine must be run as the last procedure before terminating
! i.e. *after* writing the restart file
do cntr = 1,2
if (cntr == 1) then
allocate(indxk(nglobclust))
allocate(energarray(nglobclust))
do iclust = 1,nglobclust
indxk(iclust) = iclust
jcluster = globclustypesites(iclust,0)
energarray(iclust) = clusterenrg(jcluster)/clustergraphmultipl(jcluster)
enddo
elseif (cntr == 2) then
deallocate(globclustypesites)
deallocate(nadsorglobclusparticip)
deallocate(adsorglobclusparticip)
deallocate(indxk)
deallocate(energarray)
nglobclust = 0
debug_report_globenerg = .false.
call initialize_energetics()
allocate(indxk(nglobclust))
allocate(energarray(nglobclust))
do iclust = 1,nglobclust
indxk(iclust) = iclust
jcluster = globclustypesites(iclust,0)
energarray(iclust) = clusterenrg(jcluster)/clustergraphmultipl(jcluster)
enddo
endif
! In the following loops we are just sorting the energy contributions based on the:
! (i) magnitude
! (ii) the sites of the corresponding cluster
! The clusters with higher energies take precedence. If two global clusters correspond to the
! same pattern, the one involving a site with the higher number takes precedence (and
! of course for multi-site clusters, each site is checked). It may happen that two global cluster energies
    ! are equal but correspond to clusters with different site types; in this case the one with
    ! more sites participating takes precedence.
do i = 1,nglobclust
do j = i+1,nglobclust
takesprecedensejoveri = .false.
if (energarray(i) < energarray(j)) then
takesprecedensejoveri = .true.
elseif ( (dabs(energarray(i)) < 1.d-35 .and. dabs(energarray(j)) < 1.d-35) .or. &
(dabs(energarray(i)/energarray(j)-1.d0) < 5.d-16) ) then
! if (dabs(energarray(i)) < 1.d-15) then
! continue
! endif
if (clusternsites(globclustypesites(indxk(i),0)) < clusternsites(globclustypesites(indxk(j),0))) then
takesprecedensejoveri = .true.
elseif (clusternsites(globclustypesites(indxk(i),0)) == clusternsites(globclustypesites(indxk(j),0))) then
flagsitedif = .false.
do k = 1,clusternsites(globclustypesites(indxk(i),0))
si = globclustypesites(indxk(i),k)
sj = globclustypesites(indxk(j),k)
if (si < sj) then
takesprecedensejoveri = .true.
flagsitedif = .true.
exit
elseif (si > sj) then
flagsitedif = .true.
exit
endif
enddo
if (.not.flagsitedif .and. globclustypesites(indxk(i),0) < globclustypesites(indxk(j),0)) then
takesprecedensejoveri = .true.
endif
endif
endif
if (takesprecedensejoveri) then
tempvar = energarray(i)
energarray(i) = energarray(j)
energarray(j) = tempvar
tempint = indxk(i)
indxk(i) = indxk(j)
indxk(j) = tempint
endif
enddo
enddo
if (cntr == 1) then
open(unit=iclusterdbg,file=trim(cclusterdbgfname),status='unknown')
do i = 1,nglobclust
k = indxk(i)
write(iclusterdbg,'(ES32.16E3,' // int2str(clusternsites(globclustypesites(k,0))+1) // '(1x,i10))') &
energarray(i), globclustypesites(k,0), &
(globclustypesites(k,m), m = 1,clusternsites(globclustypesites(k,0)))
enddo
close(iclusterdbg)
elseif (cntr == 2) then
open(unit=iclusterinidbg,file=trim(cclusterinidbgfname),status='unknown')
do i = 1,nglobclust
k = indxk(i)
write(iclusterinidbg,'(ES32.16E3,' // int2str(clusternsites(globclustypesites(k,0))+1) // '(1x,i10))') &
energarray(i), globclustypesites(k,0), &
(globclustypesites(k,m), m = 1,clusternsites(globclustypesites(k,0)))
enddo
close(iclusterinidbg)
endif
enddo
open(unit=iprocpropensstatedbg,file=trim(cprocpropensstatedbgfname),status='unknown')
write(iprocpropensstatedbg,'(a)') 'initial_state'
do i = 1,nadsorb
if (adsorbspecposi(i,0) > 0) then
write(iprocpropensstatedbg,'(a,' // int2str(surfspecsdent(adsorbspecposi(i,0))) // '(1x,i10))') &
' seed_on_sites ' // trim(surfspecsnames(adsorbspecposi(i,0))), &
(adsorbspecposi(i,k), k = 1,surfspecsdent(adsorbspecposi(i,0)))
endif
enddo
write(iprocpropensstatedbg,'(a)') 'end_initial_state'
close(iprocpropensstatedbg)
end subroutine dump_cluster_contribution_tables
!@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
end module output_handle_module
|
{"hexsha": "1ab22fcaa79e87167c219071124ba3cb1cb2c036", "size": 25541, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "Fortran_src/output_handle_module.f90", "max_stars_repo_name": "WayneYann/Zacros-Wrapper", "max_stars_repo_head_hexsha": "992f239530600ecf84f07f9ab7c4152b9bb64c25", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-03T20:35:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-03T20:35:05.000Z", "max_issues_repo_path": "Fortran_src/output_handle_module.f90", "max_issues_repo_name": "WayneYann/Zacros-Wrapper", "max_issues_repo_head_hexsha": "992f239530600ecf84f07f9ab7c4152b9bb64c25", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Fortran_src/output_handle_module.f90", "max_forks_repo_name": "WayneYann/Zacros-Wrapper", "max_forks_repo_head_hexsha": "992f239530600ecf84f07f9ab7c4152b9bb64c25", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.5916905444, "max_line_length": 217, "alphanum_fraction": 0.5717865393, "num_tokens": 7565}
|
# This file is adapted from astropy/io/fits/connect.py in the developer version
# of Astropy. It can be removed once support for Astropy v0.2 is dropped (since
# Astropy v0.3 and later will include it).
# Copyright (c) 2011-2013, Astropy Developers
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
# * Neither the name of the Astropy Team nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, division, print_function
import os
import re
import warnings
import numpy as np
from astropy.utils import OrderedDict
from astropy.io import registry as io_registry
from astropy.table import Table
from astropy import log
from astropy import units as u
from astropy.io.fits import HDUList, TableHDU, BinTableHDU, GroupsHDU
from astropy.io.fits.hdu.hdulist import fitsopen as fits_open
from . import six
# FITS file signature as per RFC 4047
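# (the 30 signature bytes below decode to "SIMPLE  =" padded with spaces and ending in "T",
#  i.e. the start of the first card of a standard primary FITS header)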
FITS_SIGNATURE = (b"\x53\x49\x4d\x50\x4c\x45\x20\x20\x3d\x20\x20\x20\x20\x20"
b"\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20"
b"\x20\x54")
# Keywords to remove for all tables that are read in
REMOVE_KEYWORDS = ['XTENSION', 'BITPIX', 'NAXIS', 'NAXIS1', 'NAXIS2',
'PCOUNT', 'GCOUNT', 'TFIELDS']
# Column-specific keywords
COLUMN_KEYWORDS = ['TFORM[0-9]+',
'TBCOL[0-9]+',
'TSCAL[0-9]+',
'TZERO[0-9]+',
'TNULL[0-9]+',
'TTYPE[0-9]+',
'TUNIT[0-9]+',
'TDISP[0-9]+',
'TDIM[0-9]+',
'THEAP']
def is_column_keyword(keyword):
for c in COLUMN_KEYWORDS:
if re.match(c, keyword) is not None:
return True
return False
def is_fits(origin, args, kwargs):
"""
Determine whether `origin` is a FITS file.
Parameters
----------
origin : str or readable file-like object
Path or file object containing a potential FITS file.
Returns
-------
is_fits : bool
Returns `True` if the given file is a FITS file.
"""
if isinstance(args[0], six.string_types):
if args[0].lower().endswith(('.fits', '.fits.gz', '.fit', '.fit.gz')):
return True
elif origin == 'read':
with open(args[0], 'rb') as f:
sig = f.read(30)
return sig == FITS_SIGNATURE
elif hasattr(args[0], 'read'):
pos = args[0].tell()
sig = args[0].read(30)
args[0].seek(pos)
return sig == FITS_SIGNATURE
elif isinstance(args[0], (HDUList, TableHDU, BinTableHDU, GroupsHDU)):
return True
else:
return False
def read_table_fits(input, hdu=None):
"""
    Read a Table object from a FITS file
Parameters
----------
input : str or file-like object or compatible `astropy.io.fits` HDU object
If a string, the filename to read the table from. If a file object, or
a compatible HDU object, the object to extract the table from. The
following `astropy.io.fits` HDU objects can be used as input:
- :class:`~astropy.io.fits.hdu.table.TableHDU`
- :class:`~astropy.io.fits.hdu.table.BinTableHDU`
- :class:`~astropy.io.fits.hdu.table.GroupsHDU`
- :class:`~astropy.io.fits.hdu.hdulist.HDUList`
hdu : int or str, optional
The HDU to read the table from.
"""
if isinstance(input, six.string_types):
input = fits_open(input)
to_close = input
else:
to_close = None
if hasattr(input, 'read'):
input = fits_open(input)
try:
# Parse all table objects
tables = OrderedDict()
if isinstance(input, HDUList):
for ihdu, hdu_item in enumerate(input):
if isinstance(hdu_item, (TableHDU, BinTableHDU, GroupsHDU)):
tables[ihdu] = hdu_item
if len(tables) > 1:
if hdu is None:
warnings.warn("hdu= was not specified but multiple tables"
" are present, reading in first available"
" table (hdu={0})".format(list(tables.keys())[0]))
hdu = list(tables.keys())[0]
# hdu might not be an integer, so we first need to convert it
# to the correct HDU index
hdu = input.index_of(hdu)
if hdu in tables:
table = tables[hdu]
else:
raise ValueError("No table found in hdu={0}".format(hdu))
elif len(tables) == 1:
table = tables[list(tables.keys())[0]]
else:
raise ValueError("No table found")
elif isinstance(input, (TableHDU, BinTableHDU, GroupsHDU)):
table = input
else:
raise ValueError("Input should be a string, a file-like object, "
"an HDUList, TableHDU, BinTableHDU, or "
"GroupsHDU instance")
# Check if table is masked
masked = False
for col in table.columns:
if col.null is not None:
masked = True
break
# Convert to an astropy.table.Table object
t = Table(table.data, masked=masked)
# Copy over null values if needed
if masked:
for col in table.columns:
t[col.name].set_fill_value(col.null)
t[col.name].mask[t[col.name] == col.null] = True
# Copy over units
for col in table.columns:
if col.unit is not None:
try:
t[col.name].units = u.Unit(col.unit, format='fits')
except ValueError:
t[col.name].units = u.UnrecognizedUnit(col.unit)
# TODO: deal properly with unsigned integers
for key, value, comment in table.header.cards:
if key in ['COMMENT', 'HISTORY']:
if key in t.meta:
t.meta[key].append(value)
else:
t.meta[key] = [value]
elif key in t.meta: # key is duplicate
if isinstance(t.meta[key], list):
t.meta[key].append(value)
else:
t.meta[key] = [t.meta[key], value]
elif (is_column_keyword(key.upper()) or
key.upper() in REMOVE_KEYWORDS):
pass
else:
t.meta[key] = value
# TODO: implement masking
finally:
if to_close is not None:
to_close.close()
return t
def write_table_fits(input, output, overwrite=False):
"""
Write a Table object to a FITS file
Parameters
----------
input : Table
The table to write out.
output : str
The filename to write the table to.
overwrite : bool
Whether to overwrite any existing file without warning.
"""
# Check if output file already exists
if isinstance(output, six.string_types) and os.path.exists(output):
if overwrite:
os.remove(output)
else:
raise IOError("File exists: {0}".format(output))
# Create a new HDU object
if input.masked:
table_hdu = BinTableHDU(np.array(input.filled()))
for col in table_hdu.columns:
# The astype is necessary because if the string column is less
# than one character, the fill value will be N/A by default which
# is too long, and so no values will get masked.
fill_value = input[col.name].get_fill_value()
col.null = fill_value.astype(input[col.name].dtype)
else:
table_hdu = BinTableHDU(np.array(input))
# Set units for output HDU
for col in table_hdu.columns:
if input[col.name].units is not None:
col.unit = input[col.name].units.to_string(format='fits')
for key, value in input.meta.items():
if is_column_keyword(key.upper()) or key.upper() in REMOVE_KEYWORDS:
log.warn("Meta-data keyword {0} will be ignored since it "
"conflicts with a FITS reserved keyword".format(key))
if isinstance(value, list):
for item in value:
try:
table_hdu.header.append((key, item))
except ValueError:
log.warn("Attribute `{0}` of type {1} cannot be written "
"to FITS files - skipping".format(key,
type(value)))
else:
try:
table_hdu.header[key] = value
except ValueError:
log.warn("Attribute `{0}` of type {1} cannot be written to "
"FITS files - skipping".format(key, type(value)))
# Write out file
table_hdu.writeto(output)
try:
io_registry.register_reader('fits', Table, read_table_fits)
io_registry.register_writer('fits', Table, write_table_fits)
io_registry.register_identifier('fits', Table, is_fits)
except: # FITS readers/writers have already been registered
pass
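# Usage sketch (hypothetical file names): once the readers/writers above are registered, tables
# round-trip through the astropy I/O registry, e.g.
#     t = Table.read('catalog.fits')                   # dispatched to read_table_fits
#     t.write('catalog_copy.fits', overwrite=True)     # dispatched to write_table_fits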
|
{"hexsha": "db5c805702a351fa62fd20e140fd8594da3b85da", "size": 10596, "ext": "py", "lang": "Python", "max_stars_repo_path": "glue/external/fits_io.py", "max_stars_repo_name": "yuvallanger/glue", "max_stars_repo_head_hexsha": "1e27b47328db1e9a44eb6734e894a897c4b693be", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "glue/external/fits_io.py", "max_issues_repo_name": "yuvallanger/glue", "max_issues_repo_head_hexsha": "1e27b47328db1e9a44eb6734e894a897c4b693be", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "glue/external/fits_io.py", "max_forks_repo_name": "yuvallanger/glue", "max_forks_repo_head_hexsha": "1e27b47328db1e9a44eb6734e894a897c4b693be", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2912621359, "max_line_length": 84, "alphanum_fraction": 0.5892789732, "include": true, "reason": "import numpy,from astropy", "num_tokens": 2422}
|
# transact helper method: update an asset's quantity and weighted-average cost basis after a fill of qty units at price fill
function transact(fa::FinancialAsset, qty::Int, fill::Float64)
if fa.quantity + qty == 0
fa.basis = 0
fa.quantity = 0
else
fa.basis = ((fa.basis * fa.quantity) + (fill * qty)) / (fa.quantity + qty)
fa.quantity += qty
end
fa
end
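# Example with hypothetical values: starting from fa.quantity == 100 at fa.basis == 10.0,
# transact(fa, 50, 12.0) leaves quantity == 150 and basis == (10.0*100 + 12.0*50) / 150 ≈ 10.67;
# a later transact(fa, -150, fill) closes the position, resetting basis and quantity to 0.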
|
{"hexsha": "7791e165ca7c2f2a5dde736b4bb062af523e885c", "size": 303, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/operators.jl", "max_stars_repo_name": "JuliaQuant/Grist.jl", "max_stars_repo_head_hexsha": "b1a30a736dbdb280da9a18bbb7363b3a51ad81de", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2015-03-20T13:11:55.000Z", "max_stars_repo_stars_event_max_datetime": "2017-05-01T11:33:56.000Z", "max_issues_repo_path": "src/operators.jl", "max_issues_repo_name": "JuliaQuant/Grist.jl", "max_issues_repo_head_hexsha": "b1a30a736dbdb280da9a18bbb7363b3a51ad81de", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2015-03-20T12:31:42.000Z", "max_issues_repo_issues_event_max_datetime": "2015-03-29T01:32:40.000Z", "max_forks_repo_path": "src/operators.jl", "max_forks_repo_name": "JuliaQuant/Grist.jl", "max_forks_repo_head_hexsha": "b1a30a736dbdb280da9a18bbb7363b3a51ad81de", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2016-04-25T01:28:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-01T18:48:29.000Z", "avg_line_length": 23.3076923077, "max_line_length": 82, "alphanum_fraction": 0.5709570957, "num_tokens": 92}
|
[STATEMENT]
lemma [code]: "exewf_sort sub S \<equiv> (S = {} \<or> exenormalized_sort sub S \<and> exesort_ex sub S)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. exewf_sort sub S \<equiv> S = full_sort \<or> exenormalized_sort sub S \<and> exesort_ex sub S
[PROOF STEP]
by simp (smt ball_empty bot_set_def empty_Collect_eq)
|
{"llama_tokens": 134, "file": "Metalogic_ProofChecker_SortsExe", "length": 1}
|
import rospy
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
import numpy as np
import tf.transformations
from plotter import plotter
class husky_pi():
def __init__(self, set_point, dt = 0.1, Teval = 1., simulation = True):
self.position = np.zeros(3)
self.vel_v = np.zeros(3)
self.vel_w = np.zeros(3)
self.velocity = np.zeros(2)
self.euler = np.zeros(3)
self.Quater = np.zeros(4)
self.dt = dt
self.Teval = Teval
self.error = np.zeros((3,2))
self.u0 = np.zeros(2)
self.u = np.zeros(2)
self.set_point = set_point
self.node = rospy.init_node('DQPID', anonymous=False)
if simulation:
self.Publisher = rospy.Publisher("/husky_velocity_controller/cmd_vel", Twist, queue_size=1)
self.Subscriber = rospy.Subscriber("/husky_velocity_controller/odom", Odometry, self.callback_pose, queue_size=1)
else:
            self.Publisher = rospy.Publisher("/husky_velocity_controller/cmd_vel", Twist, queue_size=1)
self.Subscriber = rospy.Subscriber("/odometry/filtered", Odometry, self.callback_pose, queue_size=1)
self.rate = rospy.Rate(10.) # 10hz
self.msg = Twist()
self.action_vx = np.zeros(2)
self.action_wz = np.zeros(2)
self.reward = -1.
self.execution = np.divide(self.Teval,self.dt).astype(int)
self.temporal_vx = np.zeros(self.execution)
self.temporal_wz = np.zeros(self.execution)
# to plot
self.plotter = plotter('Velocities', 'u', 'positions', 'action_vx' , 'action_wz')
self.time = 0.
def update(self, action, depth):
for _ in range(self.execution):
self.action_vx = action[0:2]
self.action_wz = action[2:4]
            # TODO: store every velocity sample taken during execution so the reward can later be
            # computed differently; currently only the most recent sample is kept below
self.temporal_vx = self.velocity[0]
self.temporal_wz = self.velocity[1]
# update errors
self.error[2] = self.error[1]
self.error[1] = self.error[0]
self.error[0][0] = self.set_point[0] - self.velocity[0]
self.error[0][1] = self.set_point[1] - self.velocity[1]
# get controller commands
self.u[0] = self.controller_pid(self.error[0][0], self.error[1][0], self.error[2][0], self.action_vx, self.u0[0])
self.u[1] = self.controller_pid(self.error[0][1], self.error[1][1], self.error[2][1], self.action_wz, self.u0[1])
self.u = np.clip(self.u, -0.8, 0.8)
self.u0 = self.u
# to publish
self.msg.linear.x = self.u[0]
self.msg.angular.z = self.u[1]
self.Publisher.publish(self.msg)
# to plot
self.time = self.time + self.dt
self.plotter.update(self.velocity, self.u, self.position, self.time, depth, self.action_vx, self.action_wz)
# to keep sampling rate
self.rate.sleep()
#print('temporal_state', np.mean(self.temporal_vx), np.mean(self.temporal_wz), 'vel', self.velocity )
mean_state = np.array([np.mean(self.temporal_vx), np.mean(self.temporal_wz)])
return mean_state#self.velocity
def controller_pid(self, et, et1, et2, action, u0):
Kp = action[0]
Ti = action[1]
Td = 0.
k1 = Kp*(1+Td/self.dt)
k2 =-Kp*(1+2*Td/self.dt-self.dt/Ti)
k3 = Kp*(Td/Ti)
u = u0 + k1*et + k2*et1 + k3*et2
return u
def get_gaussian_reward(self, state, set_point):
a_gauss = np.power(0.035,2.) #0.017
exponent = np.zeros(len(set_point))
for _ in range(len(set_point)):
exponent[_] = np.power((state[_] - set_point[_]), 2.)
exponent_total = np.sum(exponent)
self.reward = -1. + 2*np.exp(-0.5*(exponent_total/a_gauss))
# save reward to plot it
self.plotter.update_reward(self.reward)
return self.reward
def wrapToPi(self, angles):
if angles > np.pi:
angles = angles - 2*np.pi
elif angles < -np.pi:
angles = angles + 2*np.pi
return angles
def callback_pose(self, msg_odometry):
x = msg_odometry.pose.pose.position.x
y = msg_odometry.pose.pose.position.y
z = msg_odometry.pose.pose.position.z
self.position = np.array([x, y, z])
vx = msg_odometry.twist.twist.linear.x
vy = msg_odometry.twist.twist.linear.y
vz = msg_odometry.twist.twist.linear.z
self.vel_v = np.array([vx, vy, vz])
wx = msg_odometry.twist.twist.angular.x
wy = msg_odometry.twist.twist.angular.y
wz = msg_odometry.twist.twist.angular.z
self.vel_w = np.array([wx, wy, wz])
Qx = msg_odometry.pose.pose.orientation.x
Qy = msg_odometry.pose.pose.orientation.y
Qz = msg_odometry.pose.pose.orientation.z
Qw = msg_odometry.pose.pose.orientation.w
#z y x representation
#Quater=[Qz,Qy,Qx,Qw];
        #Quater = np.array([Qw,Qx,Qy,Qz]) # this is the one that was being used
self.Quater = np.array([Qx,Qy,Qz, Qw])
#z y x representation of quaternions
euler_original = tf.transformations.euler_from_quaternion(self.Quater) #[rad]
self.euler = [ self.wrapToPi(_) for _ in euler_original]
#self.velocity = np.array([vx, wz])
a = 0.9*self.velocity[0] + 0.1*vx
b = 0.9*self.velocity[1] + 0.1*wz
self.velocity = np.array([a, b])
def stop(self):
self.msg.linear.x = 0.
self.msg.angular.z = 0.
self.Publisher.publish(self.msg)
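# The controller_pid method above implements a velocity-form (incremental) discrete PID:
# u[k] = u[k-1] + k1*e[k] + k2*e[k-1] + k3*e[k-2], with k1, k2, k3 built from Kp, Ti, Td
# and dt exactly as in that method. Below is a minimal standalone sketch of the recurrence;
# the gains, set point, clipping range and toy first-order plant are made-up illustration
# values, not taken from the robot and not tuning recommendations.
def incremental_pid_demo(Kp=0.4, Ti=1.0, Td=0.0, dt=0.1, set_point=0.5, n_steps=50):
    k1 = Kp * (1 + Td / dt)
    k2 = -Kp * (1 + 2 * Td / dt - dt / Ti)
    k3 = Kp * (Td / Ti)
    y, u = 0.0, 0.0
    e = np.zeros(3)                      # e[0] = e_t, e[1] = e_{t-1}, e[2] = e_{t-2}
    for _ in range(n_steps):
        e[2], e[1] = e[1], e[0]          # shift the error history, as in update()
        e[0] = set_point - y
        u = np.clip(u + k1 * e[0] + k2 * e[1] + k3 * e[2], -0.8, 0.8)
        y += dt * (u - y)                # toy first-order plant, for illustration only
    return y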
|
{"hexsha": "4832ffa253ef045db4e3ce7cf6d770935dd22cab", "size": 5797, "ext": "py", "lang": "Python", "max_stars_repo_path": "husky.py", "max_stars_repo_name": "INTELYMEC/Double_QPID", "max_stars_repo_head_hexsha": "3a84d58d2ce22eed2e695eea1b0497c37a010266", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2018-11-02T13:34:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-07T00:07:42.000Z", "max_issues_repo_path": "husky.py", "max_issues_repo_name": "INTELYMEC/Double_QPID", "max_issues_repo_head_hexsha": "3a84d58d2ce22eed2e695eea1b0497c37a010266", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "husky.py", "max_forks_repo_name": "INTELYMEC/Double_QPID", "max_forks_repo_head_hexsha": "3a84d58d2ce22eed2e695eea1b0497c37a010266", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2018-11-02T13:33:51.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-25T11:09:02.000Z", "avg_line_length": 36.923566879, "max_line_length": 126, "alphanum_fraction": 0.5865102639, "include": true, "reason": "import numpy", "num_tokens": 1576}
|
#!/usr/bin/python
import numpy as np
import cv2
import argparse
import dewarp
import feature_matching
import optimal_seamline
import blending
import cropping
import os
# --------------------------------
# output video resolution
W = 2560
H = 1280
# --------------------------------
# field of view, width of de-warped image
FOV = 194.0
W_remap = 1380
# --------------------------------
# params for template matching
templ_shape = (60, 16)
offsetYL = 160
offsetYR = 160
maxL = 80
maxR = 80
# --------------------------------
# params for optimal seamline and multi-band blending
W_lbl = 120
blend_level = 7
# --------------------------------
dir_path = os.path.dirname(os.path.realpath(__file__))
cwd = os.getcwd()
# --------------------------------
def Hcalc(cap, xmap, ymap):
"""Calculate and return homography for stitching process."""
Mlist = []
frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
for frame_no in np.arange(0, frame_count, int(frame_count / 10)):
cap.set(cv2.CAP_PROP_POS_FRAMES, frame_no)
ret, frame = cap.read()
if ret:
# defish / unwarp
cam1 = cv2.remap(frame[:, :1280], xmap, ymap, cv2.INTER_LINEAR)
cam2 = cv2.remap(frame[:, 1280:], xmap, ymap, cv2.INTER_LINEAR)
cam1_gray = cv2.cvtColor(cam1, cv2.COLOR_BGR2GRAY)
cam2_gray = cv2.cvtColor(cam2, cv2.COLOR_BGR2GRAY)
# shift the remapped images along x-axis
shifted_cams = np.zeros((H * 2, W, 3), np.uint8)
shifted_cams[H:, int((W - W_remap) / 2):int((W + W_remap) / 2)] = cam2
shifted_cams[:H, :int(W_remap / 2)] = cam1[:, int(W_remap / 2):]
shifted_cams[:H, W - int(W_remap / 2):] = cam1[:, :int(W_remap / 2)]
# find matches and extract pairs of correspondent matching points
matchesL = feature_matching.getMatches_goodtemplmatch(
cam1_gray[offsetYL:H - offsetYL, int(W / 2):],
cam2_gray[offsetYL:H - offsetYL, :W_remap - int(W / 2)],
templ_shape, maxL)
matchesR = feature_matching.getMatches_goodtemplmatch(
cam2_gray[offsetYR:H - offsetYR, int(W / 2):],
cam1_gray[offsetYR:H - offsetYR, :W_remap - int(W / 2)],
templ_shape, maxR)
matchesR = matchesR[:, -1::-1]
matchesL = matchesL + (int((W - W_remap) / 2), offsetYL)
matchesR = matchesR + (int((W - W_remap) / 2) + int(W / 2), offsetYR)
zipped_matches = list(zip(matchesL, matchesR))
matches = np.int32([e for i in zipped_matches for e in i])
pts1 = matches[:, 0]
pts2 = matches[:, 1]
# find homography from pairs of correspondent matchings
M, status = cv2.findHomography(pts2, pts1, cv2.RANSAC, 4.0)
Mlist.append(M)
M = np.average(np.array(Mlist), axis=0)
print (M)
cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
return M
def main(input, output):
cap = cv2.VideoCapture(input)
# define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(output, fourcc, 30.0, (W, H))
# obtain xmap and ymap
xmap, ymap = dewarp.buildmap(Ws=W_remap, Hs=H, Wd=1280, Hd=1280, fov=FOV)
# calculate homography
M = Hcalc(cap, xmap, ymap)
# calculate vertical boundary of warped image, for later cropping
top, bottom = cropping.verticalBoundary(M, W_remap, W, H)
# estimate empty (invalid) area of warped2
EAof2 = np.zeros((H, W, 3), np.uint8)
EAof2[:, int((W - W_remap) / 2) + 1:int((W + W_remap) / 2) - 1] = 255
EAof2 = cv2.warpPerspective(EAof2, M, (W, H))
# process the first frame
ret, frame = cap.read()
if ret:
# de-warp
cam1 = cv2.remap(frame[:, :1280], xmap, ymap, cv2.INTER_LINEAR)
cam2 = cv2.remap(frame[:, 1280:], xmap, ymap, cv2.INTER_LINEAR)
# shift the remapped images along x-axis
shifted_cams = np.zeros((H * 2, W, 3), np.uint8)
shifted_cams[H:, int((W - W_remap) / 2):int((W + W_remap) / 2)] = cam2
shifted_cams[:H, :int(W_remap / 2)] = cam1[:, int(W_remap / 2):]
shifted_cams[:H, int(W - W_remap / 2):] = cam1[:, :int(W_remap / 2)]
# warp cam2 using homography M
warped2 = cv2.warpPerspective(shifted_cams[H:], M, (W, H))
warped1 = shifted_cams[:H]
# crop to get a largest rectangle, and resize to maintain resolution
warped1 = cv2.resize(warped1[top:bottom], (W, H))
warped2 = cv2.resize(warped2[top:bottom], (W, H))
# image labeling (find minimum error boundary cut)
mask, minloc_old = optimal_seamline.imgLabeling(
warped1[:, int(W_remap / 2) - W_lbl:int(W_remap / 2)],
warped2[:, int(W_remap / 2) - W_lbl:int(W_remap / 2)],
warped1[:, W - int(W_remap / 2):W - int(W_remap / 2) + W_lbl],
warped2[:, W - int(W_remap / 2):W - int(W_remap / 2) + W_lbl],
(W, H), int(W_remap / 2) - W_lbl, W - int(W_remap / 2))
labeled = warped1 * mask + warped2 * (1 - mask)
# fill empty area of warped1 and warped2, to avoid darkening
warped1[:, int(W_remap / 2):W - int(W_remap /
2)] = warped2[:, int(W_remap / 2):W - int(W_remap / 2)]
warped2[EAof2 == 0] = warped1[EAof2 == 0]
# multi band blending
blended = blending.multi_band_blending(
warped1, warped2, mask, blend_level)
cv2.imshow('p', blended.astype(np.uint8))
cv2.waitKey(0)
# write results from phases
out.write(blended.astype(np.uint8))
cv2.imwrite('C:/Users/Admin/Desktop/fisheye/inPy3/output/0.png', cam1)
cv2.imwrite('C:/Users/Admin/Desktop/fisheye/inPy3/output/1.png', cam2)
cv2.imwrite('C:/Users/Admin/Desktop/fisheye/inPy3/output/2.png', shifted_cams)
cv2.imwrite('C:/Users/Admin/Desktop/fisheye/inPy3/output/3.png', warped2)
cv2.imwrite('C:/Users/Admin/Desktop/fisheye/inPy3/output/4.png', warped1)
cv2.imwrite('C:/Users/Admin/Desktop/fisheye/inPy3/output/5.png', frame)
cv2.imwrite('C:/Users/Admin/Desktop/fisheye/inPy3/output/labeled.png', labeled.astype(np.uint8))
cv2.imwrite('C:/Users/Admin/Desktop/fisheye/inPy3/output/blended.png', blended.astype(np.uint8))
# process each frame
while(cap.isOpened()):
ret, frame = cap.read()
if ret:
# de-warp
cam1 = cv2.remap(frame[:, :1280], xmap, ymap, cv2.INTER_LINEAR)
cam2 = cv2.remap(frame[:, 1280:], xmap, ymap, cv2.INTER_LINEAR)
# shift the remapped images along x-axis
shifted_cams = np.zeros((H * 2, W, 3), np.uint8)
shifted_cams[H:, int((W - W_remap) / 2):int((W + W_remap) / 2)] = cam2
shifted_cams[:H, :int(W_remap / 2)] = cam1[:, int(W_remap / 2):]
shifted_cams[:H, W - int(W_remap / 2):] = cam1[:, :int(W_remap / 2)]
# warp cam2 using homography M
warped2 = cv2.warpPerspective(shifted_cams[H:], M, (W, H))
warped1 = shifted_cams[:H]
# crop to get a largest rectangle
# and resize to maintain resolution
warped1 = cv2.resize(warped1[top:bottom], (W, H))
warped2 = cv2.resize(warped2[top:bottom], (W, H))
# image labeling (find minimum error boundary cut)
mask, minloc_old = optimal_seamline.imgLabeling(
warped1[:, int(W_remap / 2) - W_lbl:int(W_remap / 2)],
warped2[:, int(W_remap / 2) - W_lbl:int(W_remap / 2)],
warped1[:, W - int(W_remap / 2):W - int(W_remap / 2) + W_lbl],
warped2[:, W - int(W_remap / 2):W - int(W_remap / 2) + W_lbl],
(W, H), int(W_remap / 2) - W_lbl, W - int(W_remap / 2), minloc_old)
labeled = warped1 * mask + warped2 * (1 - mask)
# fill empty area of warped1 and warped2, to avoid darkening
warped1[:, int(W_remap / 2):W - int(W_remap /
2)] = warped2[:, int(W_remap / 2):W - int(W_remap / 2)]
warped2[EAof2 == 0] = warped1[EAof2 == 0]
# multi band blending
blended = blending.multi_band_blending(
warped1, warped2, mask, blend_level)
# write the remapped frame
out.write(blended.astype(np.uint8))
cv2.imshow('warped', blended.astype(np.uint8))
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
# release everything if job is finished
cap.release()
out.release()
cv2.destroyAllWindows()
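# A minimal, self-contained illustration of the homography estimation step inside Hcalc
# (synthetic point correspondences under a known horizontal offset; only cv2 and numpy are
# used inside the function, and all the numbers are made up for illustration):
def homography_demo():
    pts1 = np.float32([[100, 100], [400, 120], [380, 400], [120, 380],
                       [250, 250], [300, 150], [150, 300], [350, 350]])
    pts2 = pts1 + np.float32([25, 0])       # simulate a 25 px shift between the two views
    # same call as in Hcalc: map pts2 onto pts1 with RANSAC
    M, status = cv2.findHomography(pts2, pts1, cv2.RANSAC, 4.0)
    print(M)                                # close to identity with a -25 px x-translation
    return M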
if __name__ == '__main__':
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser(
description="A summer research project to seamlessly stitch \
dual-fisheye video into 360-degree videos")
ap.add_argument('input', metavar='INPUT.XYZ',
help="path to the input dual fisheye video")
ap.add_argument('-o', '--output', metavar='OUTPUT.XYZ', required=False,
default=dir_path + '/output/output.MP4',
help="path to the output equirectangular video")
args = vars(ap.parse_args())
main(args['input'], args['output'])
|
{"hexsha": "6a9c2fcff7b36903f1afbb8bfda43402ba51f354", "size": 9347, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "suzhengpeng/dual-fisheye-video-stitching", "max_stars_repo_head_hexsha": "b578c2f974fcd38f17a4bd3d811b04675c099776", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-03-14T23:15:22.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-17T08:26:43.000Z", "max_issues_repo_path": "main.py", "max_issues_repo_name": "suzhengpeng/dual-fisheye-video-stitching", "max_issues_repo_head_hexsha": "b578c2f974fcd38f17a4bd3d811b04675c099776", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-07-01T01:07:38.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-15T21:38:42.000Z", "max_forks_repo_path": "main.py", "max_forks_repo_name": "suzhengpeng/dual-fisheye-video-stitching", "max_forks_repo_head_hexsha": "b578c2f974fcd38f17a4bd3d811b04675c099776", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-04-17T03:31:43.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-12T10:21:40.000Z", "avg_line_length": 40.9956140351, "max_line_length": 104, "alphanum_fraction": 0.576441639, "include": true, "reason": "import numpy", "num_tokens": 2713}
|
#!/usr/bin/env python
# Title :loader.py
# Author :Venkatraman Narayanan, Bala Murali Manoghar, Vishnu Shashank Dorbala, Aniket Bera, Dinesh Manocha
# Copyright :"Copyright 2020, Proxemo project"
# Version :1.0
# License :"MIT"
# Maintainer :Venkatraman Narayanan, Bala Murali Manoghar
# Email :vnarayan@terpmail.umd.edu, bsaisudh@terpmail.umd.edu
# ==============================================================================
import cv2
import numpy as np
from pose_tracking.real_sense_wrapper import Real_Sense_Camera
from pose_tracking.cubemos_wrapper import Cubemos_Tacker
class Skel_Temporal():
"""Skeleton gait generator class."""
def __init__(self, skel_id, do_not_ignore_false_limbs=True):
"""Constructor
Args:
skel_id (int): Skeleton ID
do_not_ignore_false_limbs (bool, optional): Ignore false limbs?. Defaults to True.
"""
self.id = skel_id
self.skel_temporal = []
self.do_not_ignore_false_limbs = do_not_ignore_false_limbs
def add(self, skel_ti):
"""Add skeleton to tracking.
Args:
skel_ti (np.array): skeleton co-ordinates
"""
        if not np.any(skel_ti == -1) or self.do_not_ignore_false_limbs:
if len(self.skel_temporal) > 75:
self.skel_temporal.pop(0)
self.skel_temporal.append(skel_ti)
def __eq__(self, other):
"""Skeleton compare
Args:
other (obj): Skeleton Object
Returns:
[bool]: Same/different skeleton
"""
try:
if self.id == other.id:
return True
else:
return False
except:
if self.id == other:
return True
else:
return False
def get_embedding(self):
"""Convert Temporal gait cycle to Image sequence.
Returns:
[np.array]: Gait cycle embedded as image
"""
skel_temporal_np = np.array(self.skel_temporal)
# make root as (0, 0, 0)
# even if number of frames is less than 75 it will be resized to 244*244
skel_temporal_np = skel_temporal_np - \
np.expand_dims(skel_temporal_np[:, 0, :], axis=1)
skel_temporal_img = cv2.resize(skel_temporal_np, (244, 244))
return skel_temporal_img
class Skel_Tracker():
"""Skeleton Tracking Class."""
def __init__(self, do_not_ignore_false_limbs=True):
"""Constructor.
Args:
do_not_ignore_false_limbs (bool, optional): Ignore false limbs?. Defaults to True.
"""
self.skel_tracks = []
self.img_embeddings = []
self.do_not_ignore_false_limbs = do_not_ignore_false_limbs
def update(self, skel_nps, skel_ids):
"""Add skeleton pose to sequence.
Args:
skel_nps (np.array): Skeleton co-ordinates
skel_ids (list): Skeleton IDs
"""
# add skeleton corresponding to id
for skel_np, skel_id in zip(skel_nps, skel_ids):
try:
# ID already present - update
ndx = self.skel_tracks.index(skel_id)
skel_temporal = self.skel_tracks[ndx]
skel_temporal.add(skel_np)
except ValueError:
# new human - add
skel_temporal = Skel_Temporal(
skel_id, self.do_not_ignore_false_limbs)
skel_temporal.add(skel_np)
self.skel_tracks.append(skel_temporal)
# delete obselete human ids
skel_ids = np.asarray(skel_ids)
ndx_to_delete = []
for ndx, skel_temporal in enumerate(self.skel_tracks):
if not any(skel_ids == skel_temporal.id):
# tracked id is not present in current frame
ndx_to_delete.append(ndx)
for ndx, value in enumerate(ndx_to_delete):
# considering ndx_to_delete will be sorted in ascending order
# while poping elements one by one, the index has to be decreased
# by number of elements already deleted
self.skel_tracks.pop(value - ndx)
def get_embedding(self):
"""Generate image embedding for entire gait sequence.
Returns:
[list]: image embeddings, skeleton IDs
"""
self.img_embeddings = []
ids = []
for skel_track in self.skel_tracks:
self.img_embeddings.append(skel_track.get_embedding())
ids.append(skel_track.id)
self.img_embeddings = np.asarray(self.img_embeddings)
return self.img_embeddings, ids
def display_embedding(self):
"""View image embeddings."""
imgs = self.img_embeddings[0] # np.empty((244,244,3))
print(self.img_embeddings.shape)
for img in self.img_embeddings[1:]:
print("--")
imgs = np.hstack((imgs, img))
print(imgs.shape)
cv2.imshow("embeddings", imgs.astype(np.uint8))
class Track_Human_Pose():
"""Main gait tracking loop."""
def __init__(self, display=True, verbose=True):
"""Constructor.
Args:
display (bool, optional): Show skeleton detections?. Defaults to True.
verbose (bool, optional): Generate verbose log?. Defaults to True.
"""
self.verbose = verbose
self.display = display
self.camera = Real_Sense_Camera(5, 3)
self.cubemos = Cubemos_Tacker(self.camera.intrinsics)
self.skel_tracker = Skel_Tracker()
def get_pose(self):
"""Get human skeletons."""
# capture
self.camera.capture()
# get skeletons
self.cubemos.track_skeletons(self.camera.color_image,
self.camera.depth_image_align)
self.cubemos.render_skeletons(self.camera.color_image)
if self.display:
# Stack both images horizontally
images = np.hstack((self.camera.color_image,
self.camera.depth_colormap))
images = np.hstack((images, self.camera.color_image))
# Show images
cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
cv2.imshow('RealSense', images)
def track_pose(self):
"""Track skeleton gaits."""
self.skel_tracker.update(self.cubemos.skel3d_np,
self.cubemos.skel_ids)
def cleanup(self):
"""Cleanup workspace setup."""
self.camera.cleanup()
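# A minimal sketch of the gait-to-image embedding produced by Skel_Temporal.get_embedding
# (the 18 joints and 40 frames below are made-up sizes; no RealSense camera or Cubemos SDK
# is touched by this function):
def embedding_demo(n_frames=40, n_joints=18):
    skel = Skel_Temporal(skel_id=0)
    for _ in range(n_frames):
        skel.add(np.random.rand(n_joints, 3).astype(np.float32))
    embedding = skel.get_embedding()     # root-centred gait cycle resized to 244x244
    print(embedding.shape)               # (244, 244, 3)
    return embedding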
if __name__ == "__main__":
track_pose = Track_Human_Pose(display=True)
while True:
track_pose.get_pose()
if track_pose.display:
key = cv2.waitKey(1) & 0xFF
# press the 'q' key to stop the video stream
if key == ord("q"):
break
|
{"hexsha": "1cf12d17bde2167a7d6fcf6dedbb457793e036ca", "size": 6895, "ext": "py", "lang": "Python", "max_stars_repo_path": "pose_tracking/human_tracking_3D.py", "max_stars_repo_name": "vijay4313/proxemo", "max_stars_repo_head_hexsha": "98c4e2133047aa8519cc2f482b59565d9160e81a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2020-08-18T17:31:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T10:37:31.000Z", "max_issues_repo_path": "pose_tracking/human_tracking_3D.py", "max_issues_repo_name": "bsaisudh/proxemo", "max_issues_repo_head_hexsha": "7b09828c3b63b01617824c3b27a059584eb11ca4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-11-09T10:18:19.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-31T21:34:37.000Z", "max_forks_repo_path": "pose_tracking/human_tracking_3D.py", "max_forks_repo_name": "bsaisudh/proxemo", "max_forks_repo_head_hexsha": "7b09828c3b63b01617824c3b27a059584eb11ca4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2020-08-15T16:46:22.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-08T06:54:03.000Z", "avg_line_length": 33.9655172414, "max_line_length": 116, "alphanum_fraction": 0.5840464104, "include": true, "reason": "import numpy", "num_tokens": 1561}
|
%% Demo of *plot_littlewood_paley_1d*
%% Usage
% littlewood = *plot_littlewood_paley_1d*(filters) (see
% <matlab:doc('plot_littlewood_paley_1d') plot_littlewood_paley_1d>).
%
%% Description
% *plot_littlewood_paley* computes, at every frequency, the
% Littlewood-Paley sum of a filter bank, i.e. the total power spectral
% density
% $$ \sum_{j, \theta} |\hat{\psi_j} (\omega)|^2 + |\hat{\phi_J}(\omega)|^2 $$
% If this sum is between $(1-\epsilon)$ and $1$ for a small $\epsilon$,
% the associated wavelet transform is proved to be contractive and
% almost unitary.
% In this demo, we display the Littlewood-Paley sum of a dyadic Morlet
% wavelet filter bank, with a very low averaging size of 8 samples. The
% Littlewood-Paley sum is shown in red, while the lowpass filter phi and
% the bandpass filters psi are respectively shown in green and blue.
figure;
T = 2^3;
interpolation = 2^10;
filt_opt.Q = 1;
filt_opt.J = T_to_J(T,filt_opt);
dyadic_filters = morlet_filter_bank_1d(T*interpolation,filt_opt);
plot_littlewood_paley_1d(dyadic_filters);
title('Q = 1 ; T = 8 samples (interpolated)');
% A more realistic example is constructed with an averaging size of 4096
% samples and a quality factor of 8. These values are typical in audio
% signal processing. The lowpass filter has such a narrow bandwidth that it
% is almost not visible in this second plot.
figure;
T = 2^12;
filt_opt.Q = 8;
filt_opt.J = T_to_J(T,filt_opt);
audio_filters = morlet_filter_bank_1d(T,filt_opt);
plot_littlewood_paley_1d(audio_filters);
title('Q = 8 ; T = 4096 samples');
|
{"author": "scatnet", "repo": "scatnet", "sha": "59d935afa20359845282a3518134e24244862c1f", "save_path": "github-repos/MATLAB/scatnet-scatnet", "path": "github-repos/MATLAB/scatnet-scatnet/scatnet-59d935afa20359845282a3518134e24244862c1f/demo/display/demo_plot_littlewood_paley_1d.m"}
|
# ARIMA Model
from statsmodels.tsa.arima.model import ARIMA
import numpy as np
import pandas as pd
import datetime
class ArimaModel:
def __init__(self, order=(5,1,0)):
self.order = order
def predict_with_arima(self, dataset, code='PT', y='Total_Cases', days=5):
tmp = dataset[[y+code]]
history = [x for x in tmp.values]
news = []
data_mod = tmp.reset_index()
data_mod.columns = ["Date",y+code]
for i in range(days):
model = ARIMA(history[i:], order=self.order,enforce_stationarity=False)
model_fit = model.fit()
output = model_fit.forecast()
yhat = output[0]
history.append(np.array([round(yhat)]))
xn = datetime.datetime.strptime(data_mod.iloc[-1]['Date'], '%m/%d/%y') \
+ datetime.timedelta(days=i+1)
news.append(
pd.Series([xn.strftime("%m/%d/%y"), round(yhat)], index=data_mod.columns)
)
data_mod = pd.DataFrame(news)
data_mod.set_index('Date', inplace=True, drop=True)
data_mod.columns = ["ARIMA"+code]
return data_mod
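# A minimal usage sketch (assumptions: a toy frame with a 'Total_CasesPT' column and
# string dates in %m/%d/%y format as the index, which is what predict_with_arima expects;
# the numbers are made up for illustration):
if __name__ == "__main__":
    dates = pd.date_range("2020-03-01", periods=30, freq="D").strftime("%m/%d/%y")
    toy = pd.DataFrame({"Total_CasesPT": np.arange(100, 160, 2)}, index=dates)
    toy.index.name = "Date"
    model = ArimaModel(order=(5, 1, 0))
    forecast = model.predict_with_arima(toy, code="PT", y="Total_Cases", days=3)
    print(forecast)   # DataFrame with an 'ARIMAPT' column indexed by the three forecast dates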
|
{"hexsha": "33ff36794c444e4f7f28320c51ebbb93e6d84161", "size": 1173, "ext": "py", "lang": "Python", "max_stars_repo_path": "predictions/Models/ArimaModel.py", "max_stars_repo_name": "BrunoMartins11/covid-19-API", "max_stars_repo_head_hexsha": "d6f6c725688ad54007efafb6f01b7326126885d7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-04-29T11:02:47.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-20T18:15:50.000Z", "max_issues_repo_path": "predictions/Models/ArimaModel.py", "max_issues_repo_name": "BrunoMartins11/covid-19-API", "max_issues_repo_head_hexsha": "d6f6c725688ad54007efafb6f01b7326126885d7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "predictions/Models/ArimaModel.py", "max_forks_repo_name": "BrunoMartins11/covid-19-API", "max_forks_repo_head_hexsha": "d6f6c725688ad54007efafb6f01b7326126885d7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.5142857143, "max_line_length": 89, "alphanum_fraction": 0.5720375107, "include": true, "reason": "import numpy,from statsmodels", "num_tokens": 279}
|
"""Communication logic for controlling a Hanover sign"""
import numpy as np
from pyflipdot.data import ImagePacket
class HanoverSign:
"""A Hanover sign
Attributes:
address (int): Address of the sign
flip (bool): True if the sign is upside-down
height (int): Pixel height of the sign
        width (int): Pixel width of the sign
"""
def __init__( # pylint: disable=too-many-arguments
self,
address: int,
width: int,
height: int,
flip: bool = False):
"""Constructor for a hanover sign
Args:
name (str): Friendly name
address (int): Address of the sign
width (int): Pixel width of the sign
height (int): Pixel height of the sign
flip (bool, optional): True if the sign is upside-down
"""
self.address = address
self.width = width
self.height = height
self.flip = flip
def to_image_packet(self, image_data: np.array) -> ImagePacket:
"""Produces a serial packet from an image
Args:
image_data (np.array): Image data
Returns:
ImagePacket: packet
Raises:
ValueError: Image incompatible with the sign
"""
# Check image is correct format for sign
(rows, columns) = image_data.shape
if (self.height != rows) or (self.width != columns):
raise ValueError(
"{}x{} image incompatible with sign ({}x{})".format(
columns, rows, self.width, self.height))
# Rotate image 180, if necessary
if self.flip:
image_data = np.rot90(image_data, 2)
return ImagePacket(self.address, image_data)
def create_image(self) -> np.ndarray:
"""Creates a blank image
Returns:
np.ndarray: The blank image
"""
return np.full((self.height, self.width), False)
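# A minimal usage sketch (the address and the 7x86 pixel dimensions are illustrative
# values; substitute the geometry of the actual sign):
if __name__ == '__main__':
    sign = HanoverSign(address=1, width=86, height=7)
    image = sign.create_image()             # blank boolean image of shape (7, 86)
    image[0, :] = True                      # switch on the whole top row
    packet = sign.to_image_packet(image)    # ImagePacket built from the sign address and image
    print(packet)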
|
{"hexsha": "e452aa5f8dea7be85c6c4715bb4e12302406eed6", "size": 1987, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyflipdot/sign.py", "max_stars_repo_name": "briggySmalls/hanover_flipdot", "max_stars_repo_head_hexsha": "2b14f57541eb039090527197f01cc3da004ab339", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-01-06T11:22:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-29T15:15:38.000Z", "max_issues_repo_path": "pyflipdot/sign.py", "max_issues_repo_name": "briggySmalls/hanover_flipdot", "max_issues_repo_head_hexsha": "2b14f57541eb039090527197f01cc3da004ab339", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2019-06-24T20:04:53.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-26T11:07:40.000Z", "max_forks_repo_path": "pyflipdot/sign.py", "max_forks_repo_name": "briggySmalls/hanover_flipdot", "max_forks_repo_head_hexsha": "2b14f57541eb039090527197f01cc3da004ab339", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-08T14:38:14.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-08T14:38:14.000Z", "avg_line_length": 28.7971014493, "max_line_length": 68, "alphanum_fraction": 0.5656768998, "include": true, "reason": "import numpy", "num_tokens": 431}
|
subroutine php_endite(a,itask)
!-----------------------------------------------------------------------
! DESCRIPTION
! This routine checks convergence and performs updates at:
! - itask=1 The end of an internal iteration
! - itask=2 The end of the internal loop iteration
!-----------------------------------------------------------------------
use typre
use def_parame
use Mod_PhysicalProblem
implicit none
class(PhysicalProblem) :: a
integer(ip) :: itask
call a%Timer%Endite%Tic
select case(itask)
case(0)
!Things to be done before the convergence check
call a%SpecificEndite(10)
!Compute convergence residual of the internal iteration
call a%Cvgunk(one)
call a%SpecificEndite(zero)
case(1)
call a%SpecificEndite(one)
case(2)
!Compute convergence residual of the external iteration
call a%Cvgunk(two)
call a%SpecificEndite(two)
case(4)
!Compute convergence residual of case coupling iteration
call a%Cvgunk(4)
call a%SpecificEndite(4)
end select
call a%Timer%Endite%Toc
end subroutine php_endite
|
{"hexsha": "40b0fcb7528cac2d91266f77d612ac9f6c6fcc1c", "size": 1186, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "Sources/modules/PhysicalProblem/php_endite.f90", "max_stars_repo_name": "ciaid-colombia/InsFEM", "max_stars_repo_head_hexsha": "be7eb35baa75c31e3b175e95286549ccd84f8d40", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-24T08:19:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-24T08:19:54.000Z", "max_issues_repo_path": "Sources/modules/PhysicalProblem/php_endite.f90", "max_issues_repo_name": "ciaid-colombia/InsFEM", "max_issues_repo_head_hexsha": "be7eb35baa75c31e3b175e95286549ccd84f8d40", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Sources/modules/PhysicalProblem/php_endite.f90", "max_forks_repo_name": "ciaid-colombia/InsFEM", "max_forks_repo_head_hexsha": "be7eb35baa75c31e3b175e95286549ccd84f8d40", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.3773584906, "max_line_length": 75, "alphanum_fraction": 0.5758853288, "num_tokens": 284}
|
[STATEMENT]
lemma sup_state_conv2:
"P \<turnstile> s1 \<le>\<^sub>i s2 = (P \<turnstile> fst s1 [\<le>] fst s2 \<and> P \<turnstile> snd s1 [\<le>\<^sub>\<top>] snd s2)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P \<turnstile> s1 \<le>\<^sub>i s2 = (P \<turnstile> fst s1 [\<le>] fst s2 \<and> P \<turnstile> snd s1 [\<le>\<^sub>\<top>] snd s2)
[PROOF STEP]
by (cases s1, cases s2) simp
|
{"llama_tokens": 188, "file": "JinjaThreads_BV_JVM_SemiType", "length": 1}
|
import numpy as np
import pandas as pd
from os.path import join as joinPaths
from os.path import isdir
from os.path import isfile
from os import listdir as ls
from IPython.display import display, Markdown, Latex
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
from matplotlib.pyplot import cm
from multiprocessing import Pool
from glob import glob
from os import path
import scipy
from scipy import integrate
from scipy.signal import butter, lfilter
# Definition of constants
# matplotlib
PLOTWIDTH = 16
PLOTHEIGHT = 9
DEBUG = False
# deprecated file format format for Data coming from Boxes with old firmware -> depends on number of columns
columns = [
"time",
"latitude",
"longitude",
"elevation",
"rot_x",
"rot_y",
"rot_z",
"acc_x",
"acc_y",
"acc_z",
"mag_x",
"mag_y",
"mag_z",
"roll",
"pitch",
"yaw",
]
columns2 = [
"time",
"runtime",
"gpstime",
"latitude",
"longitude",
"elevation",
"rot_x",
"rot_y",
"rot_z",
"acc_x",
"acc_y",
"acc_z",
"mag_x",
"mag_y",
"mag_z",
"roll",
"pitch",
"yaw",
]
### Data aggregation and cleaning
def readLogFile(
logFilePath,
columns=columns,
skipheader=3,
verbose=False,
lowMemory=True,
errorOnBadLine=False,
engine="python",
):
"""
readLogFile(logFilePath, columns=columns, skipheader=2, skipfooter=1):
opens the given path, tries to read in the data, convert it to a dataframe
and append it.
returns a dataframe containing the data from a given csv file
"""
if verbose: print("processing file: {}".format(logFilePath))
if not isfile(logFilePath):
print("no such file: {} -> skipping".format(logFile))
return None
try:
tempDataFrame = pd.read_csv(
logFilePath,
skiprows=skipheader,
names=columns,
low_memory=lowMemory,
error_bad_lines=errorOnBadLine,
skipfooter=1,
engine=engine,
)
if verbose: print(tempDataFrame.info())
except:
print("could not process file: {}, skipping".format(logFilePath))
return None
return tempDataFrame
def cleanDataFrame(
df,
roundTimeStamp=False,
toDateTime=True,
dateTimeIndex = True,
replaceNan=True,
verbose=False,
correctTimeByGPS=True,
timeZone="Europe/Berlin",
dropDuplicateIndices=True,
):
if df.empty:
print("empty dataframe, skipping!")
return pd.DataFrame()
# convert relevant columns to strings
if replaceNan:
if verbose: print("cleaning NaNs")
df.fillna(method="ffill", inplace=True)
if roundTimeStamp:
if verbose: print("rounding time")
df["time"].round(roundTimeStamp)
if toDateTime:
if verbose: print("converting timestamps")
df["time"] = pd.to_datetime(df["time"], unit="s", utc=True)
if dateTimeIndex:
if verbose: print("converting timestamps to index")
df.set_index("time", inplace=True)
if correctTimeByGPS:
if verbose: print("correcting time stamp via GPS")
        if len(df.columns) == 17: # only log file version two is eligible for GPS time correction
if not GPSDateTimeCorrection(df, verbose=False):
return pd.DataFrame()
if timeZone:
try:
if verbose: print("converting time zone to: {}".format(timeZone))
df.index = df.index.tz_convert(timeZone)
except:
print("could not convert time zone to {}".format(timeZone))
if dropDuplicateIndices:
if verbose: print("dropping duplicate indices")
df = df.loc[~df.index.duplicated(keep='first')]
return df
def concatDataFiles(dataDir, cols, pattern="log_0???.txt"):
tempData = pd.DataFrame()
frames = list() # list holding all the dataframe
for dataFile in sorted(glob(path.join(dataDir, pattern))):
print(dataFile)
tempData = readLogFile(dataFile, verbose=False, columns=cols) # read in the dataFile
if tempData.empty:
print("skipping corrupt file: {}".format(dataFile))
continue
tempData = cleanDataFrame(tempData, verbose=False) # clean it -> generate index, etc.
if not tempData.empty: # append the dataframes to the global dataframe
frames.append(tempData)
if not len(frames) > 0:
print("no files found")
return None
return pd.concat(frames)
### new parallel processing of logfiles
def processDataFile(dataFile, cols=columns2, verbose=False):
tempData = pd.DataFrame()
if not isfile(dataFile):
print("not a file: {}, skipping".format(dataFile))
return pd.DataFrame()
tempData = readLogFile(dataFile, verbose=verbose, columns=cols)
if tempData.empty:
print("skipping corrupt file: {}".format(dataFile))
return pd.DataFrame()
tempData = cleanDataFrame(tempData, verbose=verbose) # clean it -> generate index, etc.
if not tempData.empty: # append the dataframes to the global dataframe
return tempData
def processDataSet_parallel(dataSet, pickleName=None, pattern = "log_0???.txt", nProcs = 32, verbose=False, substractMean=True):
if not isdir(dataSet):
print("*! not a directory, skipping")
return pd.DataFrame()
if verbose: print("* processing: {}".format(dataSet))
cols = checkLogFileVersion(dataSet, [columns, columns2])
if verbose: print("* file version checked: {}".format(cols))
pool = Pool(nProcs)
frames = list()
if verbose: print("* iterating over files")
for dfile in sorted(glob(path.join(dataSet, pattern))):
frameData = pool.apply_async(processDataFile,(dfile, cols, verbose))
frames.append(frameData)
pool.close()
pool.join()
if not len(frames) > 0:
print("*! no files found")
return pd.DataFrame()
data = pd.concat([d.get() for d in frames])
if substractMean:
if verbose: print("* substracting mean")
for comp in ("acc_x", "acc_y", "acc_z"):
try:
data[comp] -= np.mean(data[comp])
except:
print("*! could not calculate mean, data cleaning needed!")
continue
if pickleName:
if verbose: print("* exporting pickle {}".format(pickleName))
try:
data.to_pickle(path.join(dataSet, "{}".format(pickleName)))
except:
print("*! failed to export pickle!")
return data
### Functions for analysis
def fftTimeSeries(data, newFigure=True, label=None):
"""
performs a fft on the given data and plots it.
returns peak frequency
"""
if newFigure:
plt.figure()
deltaT = data.index.to_series().diff()
deltaTMean = np.mean(deltaT) / np.timedelta64(1, 's')
print(np.mean(deltaT) / np.timedelta64(1, 's'))
FFT = scipy.fftpack.fft(data)
PSD = np.abs(FFT) ** 2
Frequency = scipy.fftpack.fftfreq(len(data), deltaTMean)
Frequency_i = Frequency > 0
if label:
plt.plot(Frequency[Frequency_i], PSD[Frequency_i], label=label)
else:
plt.plot(Frequency[Frequency_i], PSD[Frequency_i])
plt.xlabel("Frequency"); plt.ylabel("Power Spectrum Density")
return Frequency[np.argmax(PSD)]
def butter_bandpass(lowcut, highcut, fs, order=5):
"""
generates a butter bandpass filter object
"""
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = butter(order, [low, high], btype='band')
return b, a
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
"""
appliess a butter bandpass filter to the given dataDir
"""
b, a = butter_bandpass(lowcut, highcut, fs, order=order)
y = lfilter(b, a, data)
return y
def integrateVelocityAcceleration(df,
verbose=False,
resampleInterval="30ms",
filterLowCut=0.1,
filterHighCut=1,
filterFrequency=33.333,
filterOrder=3,
calculateDeflection=True,
components = ("x", "y", "z"),
applyG=True,
):
g = 9.80665
data = pd.DataFrame()
"""
1. resample
2. filter
3. integrate
if applyG:
if verbose: print("> applying g")
for comp in components:
df["acc_{}".format(comp)] = df["acc_{}".format(comp)]*g
# add raw components to data frame
for comp in components:
data.insert(column="acc_{}".format(comp),
value=df["acc_{}".format(comp)],
loc=len(data.columns)
)
"""
# resample data
if verbose: print("* resampling data to {}. Start time: {}".format(resampleInterval, df.index[0]))
# resample data and multiply with g!
for comp in components:
data.insert(column="acc_{}r".format(comp),
value=df["acc_{}".format(comp)].resample(resampleInterval).bfill()*g,
loc=len(data.columns)
)
# time ins seconds, resampled -> used for integration
t = data.index.astype(np.int64)/10**9
if verbose: print("* applying filter with order = {} frequency = {} lowcut = {} highcut = {}".format(filterOrder,
filterFrequency,
filterLowCut,
filterHighCut,
))
for comp in components:
data.insert(column="acc_{}rf".format(comp),
value=butter_bandpass_filter(data["acc_{}r".format(comp)],
filterLowCut,
filterHighCut,
filterFrequency,
order=filterOrder),
loc=len(data.columns)
)
if verbose: print("* integrating acceleration")
for comp in components:
if verbose: print("* acceleration {}".format(comp.upper()))
# integrate filtered acceleration
data.insert(column="vel_{}".format(comp),
value=integrate.cumtrapz(data["acc_{}rf".format(comp)], t, initial=0),
loc=len(data.columns)
)
if verbose: print("* integrating velocity")
for comp in components:
if verbose: print("* velocity {}".format(comp.upper()))
# integrate velocity to yield position
data.insert(column="pos_{}".format(comp),
value=integrate.cumtrapz(data["vel_{}".format(comp)], t, initial=0),
loc=len(data.columns)
)
if calculateDeflection:
if verbose: print("* calculating deflection")
data.insert(column = "deflection",
value = np.sqrt(np.power(data.pos_z, 2) + np.power(data.pos_x, 2)),
loc = len(data.columns),
)
return data
def applyIntegration_parallel(dataset,
verbose=False,
nProcs=32,
integrationInterval="10min",
resampleInterval="30ms",
filterLowCut=0.1,
filterHighCut=1,
filterFrequency=30,
filterOrder=3,
calculateDeflection=True,
components = ("x", "y", "z"),
applyG=True,
):
# create a pool of workers
pool = Pool(nProcs)
frames = list()
if verbose: print("* integration interval set to {}. Starting integration with {} threads".format(integrationInterval, nProcs))
## iterate over the sample intervalls and enable parallel integration
for t, dataSample in dataset.resample(integrationInterval):
if verbose: print("* integration start: {}".format(t))
frames.append(
pool.apply_async(
integrateVelocityAcceleration, (dataSample,
verbose,
resampleInterval,
filterLowCut,
filterHighCut,
filterFrequency,
filterOrder,
calculateDeflection,
components
)))
pool.close()
pool.join()
frames = pd.concat([d.get() for d in frames])
return frames
def applyIntegration(dataset,
verbose=False,
integrationInterval="10min",
resampleInterval="30ms",
filterLowCut=0.1,
filterHighCut=1,
filterFrequency=30,
filterOrder=3,
calculateDeflection=True,
components = ("x", "y", "z"),
applyG=True,
):
frames = list()
if verbose: print("* integration interval set to {}".format(integrationInterval))
## iterate over the sample intervalls and enable parallel integration
for t, dataSample in dataset.resample(integrationInterval):
if verbose: print("* integration start: {}".format(t))
frames.append(integrateVelocityAcceleration(dataSample,
verbose,
resampleInterval,
filterLowCut,
filterHighCut,
filterFrequency,
filterOrder,
calculateDeflection,
components
))
frames = pd.concat(frames)
return frames
def correctTime(df, runTime, gpsTimeStamp, verbose=False):
powerOnTimeUnix = gpsTimeStamp - runTime
powerOnTime = pd.to_datetime(powerOnTimeUnix, unit="s", utc=True)
if verbose: print("power on time: {}".format(powerOnTime))
correctedTime = (df.index - df.index[0]) + powerOnTime
if verbose: print("corrected power on time series: {}".format(correctedTime))
if verbose: print("inserting as new index.. ")
df.reset_index()
df.insert(loc=0, column="truetime", value=correctedTime)
df.set_index("truetime", inplace=True)
if verbose: print(df.head())
if verbose: print("done")
def GPSDateTimeCorrection(df, verbose=False):
"""
this function extracts the last valid time stamp and the corresponding run time of the box
and corrects the time index of the given data frame
"""
try:
"""
        this method has a known edge case: if the last available time stamp has a time lock,
        but no date lock, the time stamp might look something like this:
        2000-00-00-12-13-14
        which fails later in the program when trying to generate a valid datetime object from
the time stamp (line 482). This is currently caught via an exception, however, this is far from ideal.
As there is currently no easy fix, the whole concept should be re-evaluated
"""
lastUniqueGPSTimeStamp = pd.unique(
df.loc[(df.gpstime != "0000-00-00-00-00-00") &
(df.gpstime != "2000-00-00-00-00-00")
].gpstime)[-1]
except:
print("no GPS time stamp available, skipping")
return False
runTime = df.loc[df.gpstime == lastUniqueGPSTimeStamp].runtime[0] / 1000.0 # convert to seconds!
runTimeZero = df.runtime[0]/1000.0
deltaRunTime = runTime - runTimeZero
gpsTime = df.loc[df.gpstime == lastUniqueGPSTimeStamp].gpstime[0]
if verbose: print("found time stamp: {} runtime: {}, run time since beginning: {}".format(gpsTime, runTime, (runTime - runTimeZero)))
date = gpsTime.split("-")[:3]
time = gpsTime.split("-")[3:]
try:
gpsDateTime = pd.to_datetime("{} {}".format("-".join(date), ":".join(time)), utc=True).value / 10**9
except Exception as e:
print("failed to generate gpsDateTime for {} : {}".format(date, time))
print("skipping dataframe")
return False
if verbose: print("correcting time")
correctTime(df, runTime=deltaRunTime, gpsTimeStamp=gpsDateTime)
return True
def checkLogFileVersion(logFileDir, cols, verbose=False):
"""
checks the row length of log_0000.txt in a given directory to parse the log file version
Two log file versions are available:
- Version 1: normal log file format
- Version 2: log including GPS timestamp
return: the correct columns to use
"""
# find a suitable log file
logFilePath = glob(path.join(logFileDir, "log_????.txt"))[0]
if path.isfile(logFilePath):
with open(logFilePath) as logFile:
for i, line in enumerate(logFile):
if i == 3: # first line = header, second line = overflow from last file -> hence third line used to check for file version
if len(line.split(",")) == 18:
if verbose: print("file version 2")
return cols[1]
elif len(line.split(",")) == 16:
if verbose: print("file version 1")
return cols[0]
else:
print("wrong number of columns in file {}".format(logFilePath))
break
else:
raise Exception("no such file or directory: {}".format(logFilePath))
|
{"hexsha": "f8f5648ef00f1434032b135d8a990b85579e5f63", "size": 18859, "ext": "py", "lang": "Python", "max_stars_repo_path": "yasb/bikbox.py", "max_stars_repo_name": "k323r/YASB-tools", "max_stars_repo_head_hexsha": "581dfd8979e043c8c08b138d1fe1028a10a688c3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "yasb/bikbox.py", "max_issues_repo_name": "k323r/YASB-tools", "max_issues_repo_head_hexsha": "581dfd8979e043c8c08b138d1fe1028a10a688c3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "yasb/bikbox.py", "max_forks_repo_name": "k323r/YASB-tools", "max_forks_repo_head_hexsha": "581dfd8979e043c8c08b138d1fe1028a10a688c3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4379432624, "max_line_length": 139, "alphanum_fraction": 0.5401134737, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 3936}
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
This script contains basic functions for Conv Neural Nets.
foward conv and pooling
backward conv and pooling
@author: xuping
"""
import numpy as np
import h5py
import matplotlib.pyplot as plt
def Conv_forward(A_prev, W, b, para):
'''
This is the forward propagation for a convolution layer
Input: output from previous layer A_prev (m, H_prev, W_prev, C_prev)
W --- weights, (f,f, C_prev, C)
b --- bias,
para --- contains "stride" and "pad"
return the conv output Z(m, H, W, C), cache for backpropagation
'''
(m, H_prev, W_prev, C_prev) = A_prev.shape
(f, f, C_prev, C) = W.shape
stride = para["stride"]
pad = ["pad"]
H = int((H_prev - f + 2 * pad) / stride + 1)
W = int((W_prev - f + 2 * pad) / stride + 1)
Z = np.zeros((m, H, W, C))
# padding the input
A_prev_pad = np.pad(A_prev, ((0,0), (pad,pad), (pad,pad), (0,0)), 'constant', constant_value=(0,0))
# loop all dimension
for i in range(m):
a_prev_pad = A_prev_pad[i,:,:,:] # extract the i-th training example
for h in range(H):
for w in range(W):
for c in range(C):
hstart = stride * h
hend = hstart + f
wstart = stride * w
wend = wstart + f
# extract the slice for Conv
a_slice = a_prev_pad[hstart:hend, wstart:wend, :]
# Conv step
Z[i,h,w,c] = np.sum(a_slice * W[:,:,:,c]) + b[:,:,:,c]
#end for loop
assert(Z.shape == (m, H, W, C))
# save in cache for backprop
cache = (A_prev, W, b, para)
return Z, cache
def Pool_forward(A_prev, para, mode="max"):
'''
forward progation of pooling layer
Input: A_prev(m, H_prev, W_prev, C_prev)
para -- parameters
mode -- max pooling or average
output: pooling output layer A(m, H, W, C)
'''
(m, H_prev, W_prev, C_prev) = A_prev.shape
f = para["f"]
stride = para["stride"]
H = int((H_prev - f) / stride + 1)
W = int((W_prev - f) / stride + 1)
C = C_prev
# initialize output A
A = np.zeros((m, H, W, C))
# loop each dimension
for i in range(m):
for h in range(H):
for w in range(W):
for c in range(C):
hstart = stride * h
hend = hstart + f
wstart = stride * w
wend = wstart + f
# extract the slice from A_prev
a_slice = A_prev[i, hstart:hend, wstart:wend, c]
if mode == "max":
A[i,h,w,c] = np.max(a_slice)
elif mode == "average":
A[i,h,w,c] = np.mean(a_slice)
# end for loop
assert(A.shape == (m, H, W, C))
cache = (A_prev, para)
return A, cache
def Conv_backward(dZ, cache):
'''
the backward propgation of Conv Layer
Input: dZ -- gradient of the cost wrt the OUTPUT of Conv Layer Z (m, H, W, C)
cache -- stored data from forward prop
Output: dA -- gradient of the cost wrt INPUT of Conv layer A_prev (m, H_prev, W_prev, C_prev)
dW -- gradient wrt weights of the Conv layer W(f, f, C_prev, C)
db -- gradient wrt biases b(1,1,1,C)
'''
# get all the dimensions from previous data
(A_prev, W, b, para) = cache
(m, H_prev, W_prev, C_prev) = A_prev.shape
(f, f, C_prev, C) = W.shape
stride = para["stride"]
pad = para["pad"]
    (m, H_out, W_out, C) = dZ.shape
    # initialize all the gradients
    dA_prev = np.zeros((m, H_prev, W_prev, C_prev))
    dW = np.zeros((f, f, C_prev, C))
    db = np.zeros(b.shape)
    # padding the data
    A_prev_pad = np.pad(A_prev, ((0,0), (pad,pad), (pad,pad), (0,0)), 'constant', constant_values=(0,0))
    dA_prev_pad = np.pad(dA_prev, ((0,0), (pad,pad), (pad,pad), (0,0)), 'constant', constant_values=(0,0))
    # loop over all dimensions (output height/width named H_out/W_out to avoid shadowing the weights W)
    for i in range(m):
        a_prev = A_prev_pad[i,:,:,:]
        da_prev = dA_prev_pad[i,:,:,:]
        for h in range(H_out):
            for w in range(W_out):
                for c in range(C):
                    # define the corner of the slice
                    hstart = stride * h
                    hend = hstart + f
                    wstart = stride * w
                    wend = wstart + f
                    # extract slice
                    a_slice = a_prev[hstart:hend, wstart:wend, :]
                    # compute the derivatives
                    da_prev[hstart:hend, wstart:wend,:] += W[:,:,:,c]*dZ[i,h,w,c]
                    dW[:,:,:,c] += a_slice*dZ[i,h,w,c]
                    db[:,:,:,c] += dZ[i,h,w,c]
        # remove pad from the local derivative slice
        dA_prev[i,:,:,:] = da_prev[pad:-pad, pad:-pad, :]
    # end for loop
    assert(dA_prev.shape == (m, H_prev, W_prev, C_prev))
return dA_prev, dW, db
def Pooling_backward(dA, cache, mode="max"):
"""
Find gradients through backward prop of the pooling layer
Input: dA -- gradients wrt OUTPUT of the pooling layer
cache -- stored output data from forward prop
mode -- max pooling or average
Output: dA_prev -- the gradient wrt the INPUT of the pooling layer
"""
(A_prev, para) = cache
stride = para["stride"]
f = para["f"]
m, H_prev, W_prev, C_prev = A_prev.shape
m, H, W, C = dA.shape
#Initialize dA_prev with zeros
dA_prev = np.zeros((m, H_prev, W_prev, C_prev))
#loop all the dimensions
for i in range(m):
# extract the training exmaple from A_prev
a_prev = A_prev[i,:,:,:]
for h in range(H):
for w in range(W):
for c in range(C):
# define the corner of the slice
hstart = stride * h
hend = hstart + f
wstart = stride * w
wend = wstart + f
# compute the backprop
if mode == "max":
# extract the slice
a_slice = a_prev[hstart:hend, wstart:wend, c]
# create mask for the slice matrix
mask = (a_slice == np.max(a_slice))
# compute derivative
dA_prev[i, hstart:hend, wstart:wend, c] += mask*dA[i,h,w,c]
elif mode == "average":
# get the value
da = dA[i,h,w,c]
# compute the derivative
                        dA_prev[i, hstart:hend, wstart:wend, c] += da/(f*f)*np.ones((f,f))
# end loop
assert(dA_prev.shape == A_prev.shape)
return dA_prev
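# A minimal shape check for the forward passes above (all sizes are arbitrary toy values;
# the explicit loops are slow, so keep the arrays small):
if __name__ == "__main__":
    np.random.seed(0)
    A_prev = np.random.randn(2, 7, 7, 3)
    W = np.random.randn(3, 3, 3, 8)
    b = np.random.randn(1, 1, 1, 8)
    Z, conv_cache = Conv_forward(A_prev, W, b, {"stride": 1, "pad": 1})
    print(Z.shape)        # (2, 7, 7, 8): H_out = (7 - 3 + 2*1)/1 + 1 = 7
    A_pool, pool_cache = Pool_forward(Z, {"stride": 2, "f": 2}, mode="max")
    print(A_pool.shape)   # (2, 3, 3, 8): (7 - 2)/2 + 1 = 3 (truncated by int())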
|
{"hexsha": "20b85e774f4f333362b32f59f4d25bf560a3cebc", "size": 6997, "ext": "py", "lang": "Python", "max_stars_repo_path": "NN_buildingblock/ConvNN.py", "max_stars_repo_name": "xupingxie/deep-learning-models", "max_stars_repo_head_hexsha": "cc76aedf9631317452f9cd7df38998e2de727816", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "NN_buildingblock/ConvNN.py", "max_issues_repo_name": "xupingxie/deep-learning-models", "max_issues_repo_head_hexsha": "cc76aedf9631317452f9cd7df38998e2de727816", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "NN_buildingblock/ConvNN.py", "max_forks_repo_name": "xupingxie/deep-learning-models", "max_forks_repo_head_hexsha": "cc76aedf9631317452f9cd7df38998e2de727816", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.319047619, "max_line_length": 103, "alphanum_fraction": 0.4899242533, "include": true, "reason": "import numpy", "num_tokens": 1879}
|
// (C) Copyright Edward Diener 2011
// Use, modification and distribution are subject to the Boost Software License,
// Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt).
#if !defined(TTI_HAS_TYPE_HPP)
#define TTI_HAS_TYPE_HPP
#include <boost/preprocessor/cat.hpp>
#include <boost/tti/gen/has_type_gen.hpp>
#include <boost/tti/gen/namespace_gen.hpp>
#include <boost/tti/detail/dtype.hpp>
#include <boost/tti/detail/dnotype.hpp>
/*
The succeeding comments in this file are in doxygen format.
*/
/** \file
*/
/// Expands to a metafunction which tests whether an inner type with a particular name exists and optionally is the same as a particular type.
/**
trait = the name of the metafunction within the tti namespace.
name = the name of the inner type.
generates a metafunction called "trait" where 'trait' is the macro parameter.
template<class TTI_T,class TTI_U>
struct trait
{
static const value = unspecified;
typedef mpl::bool_<true-or-false> type;
};
The metafunction types and return:
TTI_T = the enclosing type in which to look for our 'name'.
TTI_U = the type of the inner type named 'name' as an optional parameter.
returns = 'value' is true if the 'name' type exists within the enclosing type TTI_T
and, if type TTI_U is specified, the 'name' type is the same as the type TTI_U,
otherwise 'value' is false.
*/
#define BOOST_TTI_TRAIT_HAS_TYPE(trait,name) \
BOOST_TTI_DETAIL_TRAIT_HAS_TYPE(trait,name) \
template<class TTI_T,class TTI_U = BOOST_TTI_NAMESPACE::detail::notype> \
struct trait : \
BOOST_PP_CAT(trait,_detail) \
< \
TTI_T, \
TTI_U, \
typename BOOST_PP_CAT(trait,_detail_mpl)<TTI_T>::type \
> \
{ \
}; \
/**/
/// Expands to a metafunction which tests whether an inner type with a particular name exists and optionally is a particular type.
/**
name = the name of the inner type.
generates a metafunction called "has_type_name" where 'name' is the macro parameter.
template<class TTI_T,class TTI_U>
struct has_type_name
{
static const value = unspecified;
typedef mpl::bool_<true-or-false> type;
};
The metafunction types and return:
TTI_T = the enclosing type in which to look for our 'name'.
TTI_U = the type of the inner type named 'name' as an optional parameter.
returns = 'value' is true if the 'name' type exists within the enclosing type TTI_T
and, if type TTI_U is specified, the 'name' type is the same as the type TTI_U,
otherwise 'value' is false.
*/
#define BOOST_TTI_HAS_TYPE(name) \
BOOST_TTI_TRAIT_HAS_TYPE \
( \
BOOST_TTI_HAS_TYPE_GEN(name), \
name \
) \
/**/
#endif // TTI_HAS_TYPE_HPP
|
{"hexsha": "40ac5f15e910e5aa9db67ab3e915ce5044ea68b8", "size": 3195, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "boost/tti/has_type.hpp", "max_stars_repo_name": "juslee/boost-svn", "max_stars_repo_head_hexsha": "6d5a03c1f5ed3e2b23bd0f3ad98d13ff33d4dcbb", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "boost/tti/has_type.hpp", "max_issues_repo_name": "juslee/boost-svn", "max_issues_repo_head_hexsha": "6d5a03c1f5ed3e2b23bd0f3ad98d13ff33d4dcbb", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "boost/tti/has_type.hpp", "max_forks_repo_name": "juslee/boost-svn", "max_forks_repo_head_hexsha": "6d5a03c1f5ed3e2b23bd0f3ad98d13ff33d4dcbb", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.95, "max_line_length": 142, "alphanum_fraction": 0.6115805947, "num_tokens": 735}
|
"""Truncated singular value decomposition."""
import logging
import numpy as np
import optht
import sklearn.base
from scipy import linalg
log = logging.getLogger(__name__)
class Tsvd(sklearn.base.BaseEstimator):
"""Truncated singular value decomposition.
Attributes
----------
left_singular_vectors_ : np.ndarray
Left singular vectors.
singular_values_ : np.ndarray
Singular values.
right_singular_vectors_ : np.ndarray
Right singular vectors.
n_features_in_ : int
Number of features input.
"""
def __init__(self,
truncation: str = 'economy',
truncation_param: float = None) -> None:
"""Instantiate :class:`Tsvd`.
Parameters
----------
truncation : str
Truncation method. Possible values are
- ``'economy'`` -- do not truncate (use economy SVD),
- ``'unknown_noise'``-- truncate using optimal hard truncation
[optht]_ with unknown noise,
- ``'known_noise'`` -- truncate using optimal hard truncation
[optht]_ with known noise,
- ``'cutoff'`` -- truncate singular values smaller than a cutoff,
or
- ``'rank'`` -- truncate singular values to a fixed rank.
truncation_param : float
Parameter whose interpretation is based on the truncation method.
For each truncation method, ``truncation_param`` is interpreted as
- ``'economy'`` -- ignored,
- ``'unknown_noise'``-- ignored,
- ``'known_noise'`` -- known noise magnitude,
- ``'cutoff'`` -- singular value cutoff, or
- ``'rank'`` -- desired rank.
Notes
-----
Optimal hard truncation [optht]_ assumes the noisy measured matrix
``X_measured`` is composed of::
X_measured = X_true + noise_magnitude * X_noise
where ``X_noise`` is composed of i.i.d. Gaussian variables with zero
mean and unit variance.
Warnings
--------
Does not consider episode features!
Examples
--------
SVD with no truncation
>>> tsvd = pykoop.Tsvd()
>>> tsvd.fit(X_msd)
Tsvd()
>>> tsvd.singular_values_
array([...])
SVD with cutoff truncation
>>> tsvd = pykoop.Tsvd(truncation='cutoff', truncation_param=1e-3)
>>> tsvd.fit(X_msd)
Tsvd(truncation='cutoff', truncation_param=0.001)
>>> tsvd.singular_values_
array([...])
SVD with manual rank truncation
>>> tsvd = pykoop.Tsvd(truncation='rank', truncation_param=2)
>>> tsvd.fit(X_msd)
Tsvd(truncation='rank', truncation_param=2)
>>> tsvd.singular_values_
array([...])
"""
self.truncation = truncation
self.truncation_param = truncation_param
def fit(self, X: np.ndarray, y: np.ndarray = None) -> 'Tsvd':
"""Compute the truncated singular value decomposition.
Parameters
----------
X : np.ndarray
Data matrix.
y : np.ndarray
Ignored.
Returns
-------
Tsvd
Instance of itself.
Raises
------
ValueError
If any of the constructor parameters are incorrect.
"""
X = sklearn.utils.validation.check_array(X)
self.n_features_in_ = X.shape[1]
# Check param value
valid_methods_noth = ['economy', 'unknown_noise']
valid_methods_th = ['known_noise', 'cutoff', 'rank']
valid_methods = valid_methods_noth + valid_methods_th
if ((self.truncation_param is None)
and (self.truncation in valid_methods_th)):
raise ValueError('`truncation_param` must be specified for method '
f'{self.truncation}.')
if (self.truncation_param is not None) and (self.truncation_param < 0):
raise ValueError('`truncation_param` must be greater than zero.')
if self.truncation not in valid_methods:
raise ValueError(f'`method` must be one of {valid_methods}.')
# Compute SVDs
Q, sig, Zh = linalg.svd(X, full_matrices=False)
# Transpose notation to make checking math easier
Z = Zh.T
# Truncate SVD
if self.truncation == 'economy':
rank = sig.shape[0]
elif self.truncation == 'unknown_noise':
rank = optht.optht(X.T, sig)
elif self.truncation == 'known_noise':
rank = optht.optht(X.T, sig, self.truncation_param)
elif self.truncation == 'cutoff':
greater_than_cutoff = np.where(sig > self.truncation_param)
if greater_than_cutoff[0].size > 0:
rank = np.max(greater_than_cutoff) + 1
else:
rank = 0
elif self.truncation == 'rank':
            rank = int(self.truncation_param)
else:
# Already checked
assert False
Q_r = Q[:, :rank]
sig_r = sig[:rank]
Z_r = Z[:, :rank]
stats = {
'method': self.truncation,
'shape': X.shape,
'rank': sig.shape[0],
'reduced_rank': rank,
'max_sv': f'{np.max(sig):.2e}',
'min_sv': f'{np.min(sig):.2e}',
'reduced_min_sv': f'{np.min(sig_r):.2e}',
}
log.info(f'``Tsvd.fit()`` stats: {stats}')
self.left_singular_vectors_ = Q_r
self.singular_values_ = sig_r
self.right_singular_vectors_ = Z_r
return self
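# --- Illustrative usage sketch (a minimal sketch, not part of the pykoop API)
# Sanity check of the 'rank' truncation path, assuming numpy's ``default_rng``
# is available; the underscore-prefixed names below are illustrative only.
if __name__ == '__main__':
    _rng = np.random.default_rng(0)
    _X_demo = _rng.standard_normal((20, 5))
    _tsvd_demo = Tsvd(truncation='rank', truncation_param=2)
    _tsvd_demo.fit(_X_demo)
    # Exactly ``truncation_param`` singular values (and vectors) are retained.
    assert _tsvd_demo.singular_values_.shape == (2,)
    assert _tsvd_demo.left_singular_vectors_.shape == (20, 2)
    assert _tsvd_demo.right_singular_vectors_.shape == (5, 2)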
|
{"hexsha": "08083d9a92c683b14e2c335a51b05ccd8c8ec2f5", "size": 5659, "ext": "py", "lang": "Python", "max_stars_repo_path": "pykoop/tsvd.py", "max_stars_repo_name": "decarsg/pykoop", "max_stars_repo_head_hexsha": "6a8b7c83bdc7de3419e2fac48c1035fa06966e24", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2021-10-18T21:49:46.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-31T16:06:01.000Z", "max_issues_repo_path": "pykoop/tsvd.py", "max_issues_repo_name": "decarsg/pykoop", "max_issues_repo_head_hexsha": "6a8b7c83bdc7de3419e2fac48c1035fa06966e24", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2021-10-19T18:02:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T18:45:37.000Z", "max_forks_repo_path": "pykoop/tsvd.py", "max_forks_repo_name": "decarsg/pykoop", "max_forks_repo_head_hexsha": "6a8b7c83bdc7de3419e2fac48c1035fa06966e24", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-08T14:59:33.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T14:59:33.000Z", "avg_line_length": 32.710982659, "max_line_length": 79, "alphanum_fraction": 0.5592860929, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1352}
|
import numpy as np
from mujoco_worldgen.util.rotation import quat_mul, quat_conjugate
def dist_pt_to_cuboid(pt1, cuboid_center, cuboid_dims, cuboid_quat):
'''
This function calculates the shortest distance between test points
and cuboids at arbitrary locations, widths and rotations
Args:
pt1 (num points x 3): test point positions
cuboid_center (num cuboids x 3): cuboid centers
cuboid_dims (num cuboids x 3): cuboid half-width
cuboid_quat (num cuboids x 4): cuboid quaternion
Returns:
Distance array of size num points x num cuboids
'''
assert cuboid_center.shape[0] == cuboid_dims.shape[0] == cuboid_quat.shape[0], \
"First dimension of cuboid_center, cuboid_dims and cuboid_quat need to match, " + \
f"but were {cuboid_center.shape[0]}, {cuboid_dims.shape[0]} and {cuboid_quat.shape[0]}."
assert pt1.shape[1] == cuboid_center.shape[1] == cuboid_dims.shape[1] == 3, \
"Second dimension of pt1, cuboid_center and cuboid_dims needs to be 3, " + \
f"but were {pt1.shape[1]}, {cuboid_center.shape[1]} and {cuboid_dims.shape[1]}."
assert cuboid_quat.shape[1] == 4, \
f"Second dimension of cuboid_quat needs to be 4, but was {cuboid_quat.shape[1]}."
# calculate relative position of test points
rel_pos = pt1[:, None, :] - cuboid_center[None, :, :]
# convert into quaternion (leading dimension is zero)
q_rel_pos = np.concatenate([np.zeros_like(rel_pos[:, :, [0]]), rel_pos], axis=-1)
# broadcast cuboid_quat by hand
cuboid_quat = np.repeat(cuboid_quat[None, :], pt1.shape[0], axis=0)
# rotate relative position in cuboid frame
# since cuboid_quat specifies how the cuboid is rotated wrt to the standard coordinate system,
# we need to rotate the test points using the inverse rotation (i.e. conjugate quaternion)
#
# For rotation of vectors using quaternions see
# https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation
q_rel_pos = quat_mul(quat_conjugate(cuboid_quat), quat_mul(q_rel_pos, cuboid_quat))
# now we can pretend that the cuboid is aligned to x-axis
# calculate vector to closest point on the cuboid
# this can be done as described here:
# https://gamedev.stackexchange.com/questions/44483/how-do-i-calculate-distance-between-a-point-and-an-axis-aligned-rectangle
dist_vec = np.maximum(0, np.abs(q_rel_pos[:, :, 1:]) - cuboid_dims[None, :, :])
# distance is length of distance vector
dist = np.linalg.norm(dist_vec, axis=-1)
return dist
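# --- Illustrative sanity check (a minimal sketch, not part of the module) ---
# With an axis-aligned unit cuboid at the origin (identity quaternion), a test
# point at x = 3 lies 2 units from the face at x = 1, and a point inside the
# cuboid has distance 0. Underscore-prefixed names are illustrative only.
if __name__ == '__main__':
    _pts = np.array([[3.0, 0.0, 0.0],
                     [0.5, 0.0, 0.0]])
    _center = np.array([[0.0, 0.0, 0.0]])
    _half_widths = np.array([[1.0, 1.0, 1.0]])
    _identity_quat = np.array([[1.0, 0.0, 0.0, 0.0]])
    _d = dist_pt_to_cuboid(_pts, _center, _half_widths, _identity_quat)
    assert np.allclose(_d[:, 0], [2.0, 0.0])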
|
{"hexsha": "fb252d6d305e96f6785b09b475b0c962b5f6a9e0", "size": 2602, "ext": "py", "lang": "Python", "max_stars_repo_path": "mae_envs/util/geometry.py", "max_stars_repo_name": "bglick13/multi-agent-emergence-environments", "max_stars_repo_head_hexsha": "e02d66f0734d95470d15a4508ff369a75fa093a4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1317, "max_stars_repo_stars_event_min_datetime": "2019-09-17T15:50:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T18:24:24.000Z", "max_issues_repo_path": "mae_envs/util/geometry.py", "max_issues_repo_name": "jihan1218/multi-agent-predator-prey", "max_issues_repo_head_hexsha": "ebf11e601de07e80c27c87dc41837d91f53e9465", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 35, "max_issues_repo_issues_event_min_datetime": "2019-09-20T11:36:20.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T00:24:27.000Z", "max_forks_repo_path": "mae_envs/util/geometry.py", "max_forks_repo_name": "jihan1218/multi-agent-predator-prey", "max_forks_repo_head_hexsha": "ebf11e601de07e80c27c87dc41837d91f53e9465", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 279, "max_forks_repo_forks_event_min_datetime": "2019-09-18T00:14:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T09:39:12.000Z", "avg_line_length": 47.3090909091, "max_line_length": 129, "alphanum_fraction": 0.6898539585, "include": true, "reason": "import numpy", "num_tokens": 709}
|
import numpy as np
import torch
from . import nms_cuda, nms_cpu
from .soft_nms_cpu import soft_nms_cpu
def nms(dets, iou_thr, device_id=None):
"""Dispatch to either CPU or GPU NMS implementations.
The input can be either a torch tensor or numpy array. GPU NMS will be used
if the input is a gpu tensor or device_id is specified, otherwise CPU NMS
will be used. The returned type will always be the same as inputs.
Arguments:
dets (torch.Tensor or np.ndarray): bboxes with scores.
iou_thr (float): IoU threshold for NMS.
device_id (int, optional): when `dets` is a numpy array, if `device_id`
is None, then cpu nms is used, otherwise gpu_nms will be used.
Returns:
        tuple: kept bboxes and indices, which are always the same data type as
the input.
"""
# convert dets (tensor or numpy array) to tensor
if isinstance(dets, torch.Tensor):
is_numpy = False
dets_th = dets
elif isinstance(dets, np.ndarray):
is_numpy = True
device = 'cpu' if device_id is None else 'cuda:{}'.format(device_id)
dets_th = torch.from_numpy(dets).to(device)
else:
raise TypeError(
'dets must be either a Tensor or numpy array, but got {}'.format(
type(dets)))
# execute cpu or cuda nms
if dets_th.shape[0] == 0:
inds = dets_th.new_zeros(0, dtype=torch.long)
else:
if dets_th.is_cuda:
if dets_th.shape[1] == 7:
inds = nms_cuda.nms_3d(dets_th, iou_thr)
elif dets_th.shape[1] == 5:
inds = nms_cuda.nms(dets_th, iou_thr)
else:
inds = nms_cpu.nms(dets_th, iou_thr)
if is_numpy:
inds = inds.cpu().numpy()
return dets[inds, :], inds
def soft_nms(dets, iou_thr, method='linear', sigma=0.5, min_score=1e-3):
if isinstance(dets, torch.Tensor):
is_tensor = True
dets_np = dets.detach().cpu().numpy()
elif isinstance(dets, np.ndarray):
is_tensor = False
dets_np = dets
else:
raise TypeError(
'dets must be either a Tensor or numpy array, but got {}'.format(
type(dets)))
method_codes = {'linear': 1, 'gaussian': 2}
if method not in method_codes:
raise ValueError('Invalid method for SoftNMS: {}'.format(method))
new_dets, inds = soft_nms_cpu(
dets_np,
iou_thr,
method=method_codes[method],
sigma=sigma,
min_score=min_score)
if is_tensor:
return dets.new_tensor(new_dets), dets.new_tensor(
inds, dtype=torch.long)
else:
return new_dets.astype(np.float32), inds.astype(np.int64)
def nms_3d_python(dets_th, iou_thr):
# if there are no boxes, return an empty list
if dets_th.shape[0] == 0:
return []
boxes = dets_th.cpu().numpy()
# initialize the list of picked indexes
pick = []
# grab the coordinates of the bounding boxes
x1 = boxes[:,0]
y1 = boxes[:,1]
x2 = boxes[:,2]
y2 = boxes[:,3]
z1 = boxes[:,4]
z2 = boxes[:,5]
    cls_probs = boxes[:, 6]
# sort by class probabilities
idxs = np.argsort(cls_probs)
# compute area
areas = (x2 - x1 + 1) * (y2 - y1 + 1) * (z2 - z1 + 1)
# keep looping while some indexes still remain in the indices list
while len(idxs) > 0:
# grab the last index in the indexes list and add the
# index value to the list of picked indexes
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
# find the largest (x, y) coordinates for the start of
# the bounding box and the smallest (x, y) coordinates
# for the end of the bounding box
xx1 = np.maximum(x1[i], x1[idxs[:last]])
yy1 = np.maximum(y1[i], y1[idxs[:last]])
zz1 = np.maximum(z1[i], z1[idxs[:last]])
xx2 = np.minimum(x2[i], x2[idxs[:last]])
yy2 = np.minimum(y2[i], y2[idxs[:last]])
zz2 = np.minimum(z2[i], z2[idxs[:last]])
# compute the width and height of the bounding box
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
d = np.maximum(0, zz2 - zz1 + 1)
# compute the ratio of overlap
overlap = (w * h * d) / areas[idxs[:last]]
        # delete all indexes from the index list that have an overlap greater than the threshold
idxs = np.delete(idxs, np.concatenate(([last],
np.where(overlap > iou_thr)[0])))
return pick
def nms_2d_python(dets_th, iou_thr):
# if there are no boxes, return an empty list
if dets_th.shape[0] == 0:
return []
boxes = dets_th.cpu().numpy()
# initialize the list of picked indexes
pick = []
# grab the coordinates of the bounding boxes
x1 = boxes[:,0]
y1 = boxes[:,1]
x2 = boxes[:,2]
y2 = boxes[:,3]
    cls_probs = boxes[:, 4]
# sort by class probabilities
idxs = np.argsort(cls_probs)
# compute area
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
# keep looping while some indexes still remain in the indices list
while len(idxs) > 0:
# grab the last index in the indexes list and add the
# index value to the list of picked indexes
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
# find the largest (x, y) coordinates for the start of
# the bounding box and the smallest (x, y) coordinates
# for the end of the bounding box
xx1 = np.maximum(x1[i], x1[idxs[:last]])
yy1 = np.maximum(y1[i], y1[idxs[:last]])
xx2 = np.minimum(x2[i], x2[idxs[:last]])
yy2 = np.minimum(y2[i], y2[idxs[:last]])
# compute the width and height of the bounding box
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
# compute the ratio of overlap
overlap = (w * h) / areas[idxs[:last]]
        # delete all indexes from the index list that have an overlap greater than the threshold
idxs = np.delete(idxs, np.concatenate(([last],
np.where(overlap > iou_thr)[0])))
return pick
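# --- Illustrative sanity check (a minimal sketch, not part of the module) ---
# Pure-python 2D NMS on three boxes: two heavily overlapping boxes and one
# disjoint box. With iou_thr=0.5 the lower-scoring overlapping box is
# suppressed. Underscore-prefixed names are illustrative only; running this
# still requires the module's compiled imports above to be importable.
if __name__ == '__main__':
    _dets = torch.tensor([[0., 0., 10., 10., 0.9],
                          [1., 1., 10., 10., 0.8],
                          [50., 50., 60., 60., 0.7]])
    _keep = nms_2d_python(_dets, iou_thr=0.5)
    assert sorted(_keep) == [0, 2]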
|
{"hexsha": "80d691c5b3c451dbd1b0740dbd2ac6561d8ff68a", "size": 6086, "ext": "py", "lang": "Python", "max_stars_repo_path": "mmdet/ops/nms/nms_wrapper.py", "max_stars_repo_name": "arthur801031/3d-multi-resolution-rcnn", "max_stars_repo_head_hexsha": "8e5454a72f8daa174bf3eabfa5964152f04ab287", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2021-03-02T07:41:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-14T08:55:45.000Z", "max_issues_repo_path": "mmdet/ops/nms/nms_wrapper.py", "max_issues_repo_name": "arthur801031/3d-multi-resolution-rcnn", "max_issues_repo_head_hexsha": "8e5454a72f8daa174bf3eabfa5964152f04ab287", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2022-01-06T20:54:13.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-24T03:50:51.000Z", "max_forks_repo_path": "mmdet/ops/nms/nms_wrapper.py", "max_forks_repo_name": "arthur801031/3d-multi-resolution-rcnn", "max_forks_repo_head_hexsha": "8e5454a72f8daa174bf3eabfa5964152f04ab287", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-05-26T19:23:35.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-06T20:30:24.000Z", "avg_line_length": 31.8638743455, "max_line_length": 79, "alphanum_fraction": 0.5892211633, "include": true, "reason": "import numpy", "num_tokens": 1710}
|
/*
* Copyright (c) 2018 Ryan Berryhill, University of Toronto
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "pme/engine/consecution_checker.h"
#include "pme/engine/transition_relation.h"
#include "pme/util/find_minimal_support.h"
#define BOOST_TEST_MODULE FindMinimalSupportTest
#define BOOST_TEST_DYN_LINK
#include <boost/test/unit_test.hpp>
using namespace PME;
struct MinimalSupportFixture
{
aiger * aig;
ExternalID l0, l1, l2, l3, o0;
VariableManager vars;
std::unique_ptr<TransitionRelation> tr;
std::unique_ptr<ConsecutionChecker> checker;
MinimalSupportFixture()
{
aig = aiger_init();
l0 = 2;
l1 = 4;
l2 = 6;
l3 = 8;
// l0' = l3
aiger_add_latch(aig, l0, l3, "l0");
// l1' = l0
aiger_add_latch(aig, l1, l0, "l1");
// l2' = l1
aiger_add_latch(aig, l2, l1, "l2");
// l3' = l2
aiger_add_latch(aig, l3, l2, "l3");
// o0 = l3
aiger_add_output(aig, l3, "o0");
o0 = l3;
tr.reset(new TransitionRelation(vars, aig));
checker.reset(new ConsecutionChecker(vars, *tr));
}
~MinimalSupportFixture()
{
aiger_reset(aig);
aig = nullptr;
}
};
BOOST_AUTO_TEST_CASE(test_minimal_support_sets)
{
MinimalSupportFixture f;
ID l0 = f.tr->toInternal(f.l0);
ID l1 = f.tr->toInternal(f.l1);
ID l2 = f.tr->toInternal(f.l2);
ID l3 = f.tr->toInternal(f.l3);
Clause c0 = {negate(l0)};
Clause c1 = {negate(l1)};
Clause c2 = {negate(l2)};
Clause c3 = {negate(l3)};
f.checker->addClause(0, c0);
f.checker->addClause(1, c1);
f.checker->addClause(2, c2);
f.checker->addClause(3, c3);
ClauseIDVec frame = {0, 1, 2, 3};
ClauseIDVec support, expected;
support = findMinimalSupport(*f.checker, frame, 0);
expected = {3};
BOOST_CHECK(support == expected);
support = findMinimalSupport(*f.checker, frame, 1);
expected = {0};
BOOST_CHECK(support == expected);
support = findMinimalSupport(*f.checker, frame, 2);
expected = {1};
BOOST_CHECK(support == expected);
support = findMinimalSupport(*f.checker, frame, 3);
expected = {2};
BOOST_CHECK(support == expected);
}
|
{"hexsha": "9253d1e85c22f231144b9364b4613bc1e6b42247", "size": 3304, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tests/test_find_minimal_support.cpp", "max_stars_repo_name": "ryanberryhill/pme", "max_stars_repo_head_hexsha": "416be2d52c920d285cc686a56d2f30bfab66bc51", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2019-01-25T16:07:56.000Z", "max_stars_repo_stars_event_max_datetime": "2019-08-14T17:34:22.000Z", "max_issues_repo_path": "tests/test_find_minimal_support.cpp", "max_issues_repo_name": "ryanberryhill/pme", "max_issues_repo_head_hexsha": "416be2d52c920d285cc686a56d2f30bfab66bc51", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2018-08-21T22:46:41.000Z", "max_issues_repo_issues_event_max_datetime": "2019-08-14T17:36:31.000Z", "max_forks_repo_path": "tests/test_find_minimal_support.cpp", "max_forks_repo_name": "ryanberryhill/pme", "max_forks_repo_head_hexsha": "416be2d52c920d285cc686a56d2f30bfab66bc51", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.7304347826, "max_line_length": 79, "alphanum_fraction": 0.6598062954, "num_tokens": 911}
|
import enum
import random
import keras.backend as K
import numpy as np
from keras.layers import Dense
from keras.models import Sequential
class Move(enum.Enum):
PASS = 0
FORFEIT = 1
class ReferenceAgent(object):
"""Agent that never learns."""
def select_move(self):
return random.choice([Move.PASS, Move.FORFEIT])
class LearningAgent(object):
def __init__(self):
self._learning_rate = 0.05
self._model = Sequential()
self._model.add(Dense(2, input_dim=1, activation='softmax'))
self._states = []
self._actions = []
def begin_episode(self):
self._states = []
self._actions = []
def select_move(self):
self._states.append(K.ones((1, 1)))
        p = self._model.predict(np.ones((1, 1)))[0]
action = np.random.choice(2, p=p)
assert action in (0, 1)
self._actions.append(action)
if action == 0:
return Move.PASS
return Move.FORFEIT
def learn(self, reward):
for s, a in zip(self._states, self._actions):
policy = self._model(s)
log_policy = K.log(policy)
chosen_p = K.gather(
K.gather(log_policy, K.constant(0, dtype='int32')),
K.constant(a, dtype='int32'))
gradients = K.gradients(chosen_p, self._model.trainable_weights)
lr = K.constant(self._learning_rate)
r = K.constant(reward)
deltas = K.batch_get_value(
[lr * gradient * r for gradient in gradients])
weights = self._model.get_weights()
updated_weights = [weight + delta
for weight, delta in zip(weights, deltas)]
self._model.set_weights(updated_weights)
def simulate_game(agent1, agent2):
"""Returns 1 if agent1 wins, -1 if agent2 wins, 0 for draw."""
agent1_moves = []
agent2_moves = []
for _ in range(3):
agent1_moves.append(agent1.select_move())
agent2_moves.append(agent2.select_move())
agent1_score = sum(1 for move in agent1_moves if move == Move.FORFEIT)
agent2_score = sum(1 for move in agent2_moves if move == Move.FORFEIT)
if agent1_score < agent2_score:
return 1
elif agent1_score > agent2_score:
return -1
return 0
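# --- Illustrative check of the scoring rule (a minimal sketch, not part of
# the original script): an agent that always passes beats an agent that
# always forfeits, because the player with fewer FORFEIT moves wins. The
# underscore-prefixed stub agents below are illustrative only.
class _AlwaysPass(object):
    def select_move(self):
        return Move.PASS
class _AlwaysForfeit(object):
    def select_move(self):
        return Move.FORFEIT
if __name__ == '__main__':
    assert simulate_game(_AlwaysPass(), _AlwaysForfeit()) == 1
    assert simulate_game(_AlwaysForfeit(), _AlwaysPass()) == -1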
def train():
rl_agent = LearningAgent()
ref_agent = ReferenceAgent()
results = []
for i in range(10000):
rl_agent.begin_episode()
result = simulate_game(rl_agent, ref_agent)
rl_agent.learn(result)
results.append(result)
print("Score %d/10" % sum(results[-10:]))
def main():
train()
if __name__ == '__main__':
main()
|
{"hexsha": "0ec506a2f44c93aad3692d727a818220f29619f4", "size": 2704, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/scratch/forfeiture.py", "max_stars_repo_name": "BachFive/GammaGo_3", "max_stars_repo_head_hexsha": "3eb8e82eef01718684ba8594be49fdac04503e5e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/scratch/forfeiture.py", "max_issues_repo_name": "BachFive/GammaGo_3", "max_issues_repo_head_hexsha": "3eb8e82eef01718684ba8594be49fdac04503e5e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/scratch/forfeiture.py", "max_forks_repo_name": "BachFive/GammaGo_3", "max_forks_repo_head_hexsha": "3eb8e82eef01718684ba8594be49fdac04503e5e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-06-11T21:55:31.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-11T21:55:31.000Z", "avg_line_length": 28.4631578947, "max_line_length": 76, "alphanum_fraction": 0.6031804734, "include": true, "reason": "import numpy", "num_tokens": 653}
|
MODULE readin_data
USE stel_kinds
USE general_dimensions
INTEGER,DIMENSION(:),ALLOCATABLE :: list
REAL(rprec), DIMENSION(:), ALLOCATABLE :: hiota, hphip,
1 hpres
REAL(rprec), DIMENSION(:), ALLOCATABLE :: xn_v, xm_v
REAL(rprec), DIMENSION(:), ALLOCATABLE :: lmnsh, bmnch,
1 bsupvmnch, bsupumnch
REAL(rprec), DIMENSION(:), ALLOCATABLE :: lmnch, bmnsh, ! RS110909 - For ASYMMETRIC input
1 bsupvmnsh, bsupumnsh
LOGICAL :: lscreen
END MODULE readin_data
|
{"hexsha": "5391c3e8b6df56adbddd6935f20522e4926ac3ed", "size": 544, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "COBRAVMEC/Sources/readin_data.f", "max_stars_repo_name": "joseluisvelasco/STELLOPT", "max_stars_repo_head_hexsha": "e064ebb96414d5afc4e205f43b44766558dca2af", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 29, "max_stars_repo_stars_event_min_datetime": "2020-05-08T01:47:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-06T10:35:28.000Z", "max_issues_repo_path": "COBRAVMEC/Sources/readin_data.f", "max_issues_repo_name": "joseluisvelasco/STELLOPT", "max_issues_repo_head_hexsha": "e064ebb96414d5afc4e205f43b44766558dca2af", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 77, "max_issues_repo_issues_event_min_datetime": "2020-05-08T07:18:09.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T11:20:33.000Z", "max_forks_repo_path": "COBRAVMEC/Sources/readin_data.f", "max_forks_repo_name": "joseluisvelasco/STELLOPT", "max_forks_repo_head_hexsha": "e064ebb96414d5afc4e205f43b44766558dca2af", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-02-10T13:47:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T12:53:43.000Z", "avg_line_length": 38.8571428571, "max_line_length": 109, "alphanum_fraction": 0.625, "num_tokens": 181}
|
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains shared fixtures for the device tests."""
import argparse
import os
import numpy as np
import pytest
from _pytest.runner import pytest_runtest_makereport as orig_pytest_runtest_makereport
import pennylane as qml
# ==========================================================
# pytest fixtures
# seed for random functions
np.random.seed(42)
# Tolerance for analytic tests
TOL = 1e-6
# Tolerance for non-analytic tests
TOL_STOCHASTIC = 0.05
# Number of shots to call the devices with
N_SHOTS = 1e6
# List of all devices that are included in PennyLane
LIST_CORE_DEVICES = {
"default.qubit",
"default.qubit.torch",
"default.qubit.tf",
"default.qubit.autograd",
}
@pytest.fixture(scope="function")
def tol():
"""Numerical tolerance for equality tests. Returns a different tolerance for tests
probing analytic or non-analytic devices, which allows us to define the
standard for deterministic or stochastic test results dynamically."""
def _tol(shots):
if shots is None:
return float(os.environ.get("TOL", TOL))
return TOL_STOCHASTIC
return _tol
@pytest.fixture(scope="session")
def init_state():
"""Fixture to create an n-qubit random initial state vector."""
def _init_state(n):
state = np.random.random([2**n]) + np.random.random([2**n]) * 1j
state /= np.linalg.norm(state)
return state
return _init_state
@pytest.fixture(scope="session")
def skip_if():
"""Fixture to skip tests."""
def _skip_if(dev, capabilities):
"""Skip test if device has any of the given capabilities."""
dev_capabilities = dev.capabilities()
for capability, value in capabilities.items():
# skip if capability not found, or if capability has specific value
if capability not in dev_capabilities or dev_capabilities[capability] == value:
pytest.skip(
f"Test skipped for {dev.name} device with capability {capability}:{value}."
)
return _skip_if
@pytest.fixture(scope="function")
def device(device_kwargs):
"""Fixture to create a device."""
# internally used by pytest
__tracebackhide__ = True # pylint:disable=unused-variable
def _device(wires):
device_kwargs["wires"] = wires
try:
dev = qml.device(**device_kwargs)
except qml.DeviceError:
dev_name = device_kwargs["name"]
# exit the tests if the device cannot be created
pytest.exit(
f"Device {dev_name} cannot be created. To run the device tests on an external device, the "
f"plugin and all of its dependencies must be installed."
)
capabilities = dev.capabilities()
if capabilities.get("model", None) != "qubit":
# exit the tests if device based on cv model (currently not supported)
pytest.exit("The device test suite currently only runs on qubit-based devices.")
return dev
return _device
def pytest_runtest_setup(item):
"""Skip tests marked as broken."""
# skip tests marked as broken
for mark in item.iter_markers(name="broken"):
if mark.args:
pytest.skip(f"Broken test skipped: {mark.args}")
else:
pytest.skip("Test skipped as corresponding code base is currently broken!")
# ============================
# These functions are required to define the device name to run the tests for
class StoreDictKeyPair(argparse.Action):
"""Argparse action for storing key-value pairs as a dictionary.
For example, calling a CLI program with ``--mydict v1=k1 v2=5``:
>>> parser.add_argument("--mydict", dest="my_dict", action=StoreDictKeyPair, nargs="+")
    >>> args = parser.parse_args()
>>> args.my_dict
{"v1": "k1", "v2": "5"}
    Note that all keys and values will be stored as strings.
"""
# pylint: disable=too-few-public-methods
def __init__(self, option_strings, dest, nargs=None, **kwargs):
self._nargs = nargs
super().__init__(option_strings, dest, nargs=nargs, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
my_dict = {}
for kv in values:
k, v = kv.split("=")
my_dict[k] = v
setattr(namespace, self.dest, my_dict)
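# --- Illustrative usage sketch (a minimal sketch, not part of the test suite)
# Demonstrates StoreDictKeyPair with a plain argparse parser; the
# underscore-prefixed names and the KEY=VAL tokens are illustrative only.
if __name__ == "__main__":
    _demo_parser = argparse.ArgumentParser()
    _demo_parser.add_argument(
        "--device-kwargs",
        dest="device_kwargs",
        action=StoreDictKeyPair,
        nargs="+",
        metavar="KEY=VAL",
        default={},
    )
    _demo_args = _demo_parser.parse_args(["--device-kwargs", "shots=100", "analytic=False"])
    assert _demo_args.device_kwargs == {"shots": "100", "analytic": "False"}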
def pytest_addoption(parser):
"""Add command line option to pytest."""
if hasattr(parser, "add_argument"):
# parser is a argparse.Parser object
addoption = parser.add_argument
else:
# parser is a pytest.config.Parser object
addoption = parser.addoption
# The options are the three arguments every device takes
addoption("--device", action="store", default=None, help="The device to test.")
addoption(
"--shots",
action="store",
default=None,
help="Number of shots to use in stochastic mode.",
)
addoption(
"--analytic",
action="store",
default=None,
help="Whether to run the tests in stochastic or exact mode.",
)
addoption(
"--skip-ops",
action="store_true",
default=False,
help="Skip tests that use unsupported device operations.",
)
addoption(
"--device-kwargs",
dest="device_kwargs",
action=StoreDictKeyPair,
default={},
nargs="+",
metavar="KEY=VAL",
help="Additional device kwargs.",
)
def pytest_generate_tests(metafunc):
"""Set up device_kwargs fixture from command line options.
The fixture defines a dictionary of keyword argument that can be used to instantiate
a device via `qml.device(**device_kwargs)` in the test. This allows us to potentially
change kwargs in the test before creating the device.
"""
opt = metafunc.config.option
list_of_device_kwargs = []
if opt.device is None:
devices_to_test = LIST_CORE_DEVICES
else:
devices_to_test = [opt.device]
for dev in devices_to_test:
device_kwargs = {"name": dev}
# if shots specified in command line,
# add to the device kwargs
if opt.shots is not None:
# translate command line string to None if necessary
device_kwargs["shots"] = None if (opt.shots == "None") else int(opt.shots)
# store user defined device kwargs
device_kwargs.update(opt.device_kwargs)
list_of_device_kwargs.append(device_kwargs)
# define the device_kwargs parametrization:
# all tests that take device_kwargs as an argument will be
# run on the different fixtures
if "device_kwargs" in metafunc.fixturenames:
metafunc.parametrize("device_kwargs", list_of_device_kwargs)
def pytest_runtest_makereport(item, call):
"""Post-processing test reports to exclude those known to be failing."""
tr = orig_pytest_runtest_makereport(item, call)
if "skip_unsupported" in item.keywords and item.config.option.skip_ops:
if call.excinfo is not None:
# Exclude failing test cases for unsupported operations/observables
# and those using not implemented features
if (
call.excinfo.type == qml.DeviceError
and "supported" in str(call.excinfo.value)
or call.excinfo.type == NotImplementedError
):
tr.wasxfail = "reason:" + str(call.excinfo.value)
tr.outcome = "skipped"
return tr
|
{"hexsha": "5edf9cb615f6c0ce94f81b5fc4d073a42d03fbe7", "size": 8361, "ext": "py", "lang": "Python", "max_stars_repo_path": "pennylane/devices/tests/conftest.py", "max_stars_repo_name": "MoritzWillmann/pennylane", "max_stars_repo_head_hexsha": "2b07d22cfcc6406ba28e5c647062340b240a4ee5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 539, "max_stars_repo_stars_event_min_datetime": "2018-11-13T08:45:42.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-27T18:17:16.000Z", "max_issues_repo_path": "pennylane/devices/tests/conftest.py", "max_issues_repo_name": "MoritzWillmann/pennylane", "max_issues_repo_head_hexsha": "2b07d22cfcc6406ba28e5c647062340b240a4ee5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 588, "max_issues_repo_issues_event_min_datetime": "2018-11-14T10:21:47.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-28T06:27:14.000Z", "max_forks_repo_path": "pennylane/devices/tests/conftest.py", "max_forks_repo_name": "MoritzWillmann/pennylane", "max_forks_repo_head_hexsha": "2b07d22cfcc6406ba28e5c647062340b240a4ee5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 165, "max_forks_repo_forks_event_min_datetime": "2018-11-13T18:58:56.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-27T17:18:17.000Z", "avg_line_length": 32.2818532819, "max_line_length": 108, "alphanum_fraction": 0.6274369095, "include": true, "reason": "import numpy", "num_tokens": 1795}
|
#pragma once
#include <boost/iterator/filter_iterator.hpp>
#include <boost/mpl/identity.hpp>
#include <functional>
#include <tuple>
namespace boltzmann {
template <typename ITERATOR, typename ELEM>
std::tuple<boost::filter_iterator<std::function<bool(const ELEM &)>, ITERATOR>,
boost::filter_iterator<std::function<bool(const ELEM &)>, ITERATOR>>
filtered_range(const ITERATOR &begin,
const typename boost::mpl::identity<ITERATOR>::type &end,
const std::function<bool(const ELEM &)> &f)
{
return std::make_tuple(boost::make_filter_iterator(f, begin, end),
boost::make_filter_iterator(f, end, end));
}
} // end namespace boltzmann
|
{"hexsha": "874061783fdf4b09ed4836c59853df700504bc9d", "size": 706, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "spectral/filtered_range.hpp", "max_stars_repo_name": "simonpp/2dRidgeletBTE", "max_stars_repo_head_hexsha": "5d08cbb5c57fc276c7a528f128615d23c37ef6a0", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2019-11-08T03:15:56.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-08T03:15:56.000Z", "max_issues_repo_path": "spectral/filtered_range.hpp", "max_issues_repo_name": "simonpp/2dRidgeletBTE", "max_issues_repo_head_hexsha": "5d08cbb5c57fc276c7a528f128615d23c37ef6a0", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "spectral/filtered_range.hpp", "max_forks_repo_name": "simonpp/2dRidgeletBTE", "max_forks_repo_head_hexsha": "5d08cbb5c57fc276c7a528f128615d23c37ef6a0", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2019-11-08T03:15:56.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-08T03:15:56.000Z", "avg_line_length": 28.24, "max_line_length": 79, "alphanum_fraction": 0.6813031161, "num_tokens": 158}
|
#!/usr/bin/env python
"""
genalg.py:
Core of the genetic algorithm.
"""
from copy import deepcopy
from operator import attrgetter
import numpy as np
from instances.vrp import VRP
from algorithms.timer import Timer
import algorithms.plotting.plot_manager as plot_manager
from algorithms.plotting.plot_data import PlotData
import algorithms.modules.population_initializers as population_initializers
import algorithms.modules.validators as validators
import algorithms.modules.evaluators as evaluators
import algorithms.modules.parent_selectors as parent_selectors
import algorithms.modules.crossover_operators as crossover_operators
import algorithms.modules.mutation_operators as mutation_operators
import algorithms.modules.invalidity_correction_functions as invalidity_correction
def run_gen_alg(vrp_params, alg_params):
"""
Runs the Genetic Algorithm on selected VRP types.
:param vrp_params: Parameters for the specified VRP.
:param alg_params: Parameters for the genetic algorithm.
:return: Computed solution for the specified VRP.
"""
# GA Initialization, Step 0: Reset ID Counter.
VRP.id_counter = 0
# GA Initialization, Step 1: Detecting which extensions are being solved.
# CVRP: List of customer capacities.
using_cvrp = vrp_params.cvrp_node_demand is not None
# OVRP: Toggled (True/False -flag)
using_ovrp = vrp_params.ovrp_enabled
# VRPP: List of optional nodes.
using_vrpp = vrp_params.vrpp_optional_node is not None
# MDVRP: More than one depot node detected.
using_mdvrp = len(vrp_params.mdvrp_depot_node) > 1
# VRPTW: List of customer time windows.
using_vrptw = vrp_params.vrptw_node_time_window is not None
# Maximization: Determined via VRPP.
maximize = using_vrpp
# Exclude Travel Costs: Toggled (True/False -flag) (Switch between TOP and PTP)
exclude_travel_costs = vrp_params.vrpp_exclude_travel_costs
# Optimize Depot Nodes: Toggled (True/False -flag)
optimize_depot_nodes = vrp_params.mdvrp_optimize_depot_nodes
# Hard Time Windows: Toggled (True/False -flag)
using_hard_time_windows = vrp_params.vrptw_hard_windows
print("Using CVRP: {}".format(using_cvrp))
print("Using OVRP: {}".format(using_ovrp))
print("Using VRPP: {}".format(using_vrpp))
print("Using MDVRP: {}".format(using_mdvrp))
print("Using VRPTW: {}".format(using_vrptw))
print("Maximize Objective: {}".format(maximize))
print("Exclude Travel Costs: {}".format(exclude_travel_costs))
print("Optimize Depot Nodes: {}".format(exclude_travel_costs))
print("Hard Windows: {}".format(using_hard_time_windows))
# GA Initialization, Step 2: Selecting a population initialization function.
population_initialization_collection = {
0: population_initializers.random,
1: population_initializers.allele_permutation,
2: population_initializers.gene_permutation,
3: population_initializers.simulated_annealing,
4: population_initializers.nearest_neighbor_population
}
VRP.population_initializer = population_initialization_collection[alg_params.population_initializer]
# GA Initialization, Step 3: Selecting suitable validation functions.
validation_collection = {
0: validators.validate_maximum_time,
1: validators.validate_maximum_distance,
2: validators.validate_capacity,
3: validators.validate_time_windows
}
validation_functions = []
if vrp_params.vrp_maximum_route_time is not None:
validation_functions.append(validation_collection[0])
if vrp_params.vrp_maximum_route_distance is not None:
validation_functions.append(validation_collection[1])
if using_cvrp:
validation_functions.append(validation_collection[2])
if using_vrptw and using_hard_time_windows:
validation_functions.append(validation_collection[3])
# Add a dummy validation function if none were selected.
if len(validation_functions) == 0:
validation_functions.append(lambda target_individual, **kwargs: (True, "No validation functions were used."))
VRP.validator = validation_functions
# GA Initialization, Step 4: Selecting an individual evaluation function.
evaluation_collection = {
0: evaluators.evaluate_travel_distance,
1: evaluators.evaluate_travel_time,
2: evaluators.evaluate_travel_cost,
3: evaluators.evaluate_profits,
4: evaluators.evaluate_profit_cost_difference,
5: evaluators.optimize_depot_nodes
}
if using_vrpp and exclude_travel_costs:
evaluation_function = evaluation_collection[3]
elif using_vrpp:
evaluation_function = evaluation_collection[4]
else:
evaluation_function = evaluation_collection[2]
VRP.evaluator = evaluation_function
# For maximization, a proper comparison function is needed.
if maximize:
def compare(vrp1, vrp2): return vrp1.fitness > vrp2.fitness
else:
def compare(vrp1, vrp2): return vrp1.fitness < vrp2.fitness
# GA Initialization, Step 5: Selecting a parent selector function.
selector_collection = {
0: parent_selectors.best_fitness,
1: parent_selectors.roulette_selection,
2: parent_selectors.tournament_selection
}
VRP.parent_selector = selector_collection[alg_params.parent_selection_function]
# GA Initialization, Step 6: Selecting a crossover operator.
crossover_collection = {
0: crossover_operators.one_point,
1: crossover_operators.two_point,
2: crossover_operators.order_crossover,
3: crossover_operators.vehicle_crossover
}
VRP.crossover_operator = crossover_collection[alg_params.crossover_operator]
# GA Initialization, Step 7: Selecting suitable mutation operators.
mutation_collection = {
0: mutation_operators.allele_swap,
1: mutation_operators.sequence_inversion,
2: mutation_operators.sequence_shuffle,
3: mutation_operators.sequence_relocation,
4: mutation_operators.add_optional_node,
5: mutation_operators.remove_optional_node,
6: mutation_operators.change_depot
}
mutation_functions = [
mutation_collection[0],
mutation_collection[1],
mutation_collection[2],
mutation_collection[3]
]
# Optional node-wise mutation operators.
if using_vrpp:
mutation_functions.append(mutation_collection[4])
mutation_functions.append(mutation_collection[5])
if using_mdvrp and not optimize_depot_nodes:
# Depot node mutation operator is used only if depot node optimization
# is disabled.
mutation_functions.append(mutation_collection[6])
VRP.mutation_operator = mutation_functions
mutation_function_count = len(mutation_functions)
# GA Initialization, Step 8: Selecting an invalidity correction function.
invalidity_correction_collection = {
0: invalidity_correction.random_valid_individual,
1: invalidity_correction.best_individual,
2: invalidity_correction.neighbor_of_best_individual,
3: invalidity_correction.indefinite_mutation,
4: invalidity_correction.best_individual_and_mutation,
5: invalidity_correction.retry
}
VRP.invalidity_corrector = invalidity_correction_collection[alg_params.invalidity_correction]
# GA Initialization, Step 9: Create conversion functions between distance, time and cost.
# Also create functions that conduct the filtration and replacement strategies.
distance_time_var = max(0, vrp_params.vrp_distance_time_ratio)
time_cost_var = max(0, vrp_params.vrp_time_cost_ratio)
distance_cost_var = max(0, vrp_params.vrp_distance_cost_ratio)
def distance_to_time(distance): return distance * distance_time_var
def time_to_cost(time): return time * time_cost_var
def distance_to_cost(distance): return distance * distance_cost_var
# Filtration strategy.
if alg_params.filtration_frequency <= 0:
filtration_counter = float("inf")
else:
filtration_counter = alg_params.filtration_frequency
def filtration(population_old, population_new, **kwargs):
"""
Combines two most recent populations into one, takes the best half of
individuals and replaces fitness-wise duplicates with completely random
individuals.
:param population_old: Population of generation n
:param population_new: Population of generation n + 1
:param kwargs: Dictionary of expected parameters:
- (int) 'node_count': Number of nodes used in the problem. Includes depot nodes and optional nodes.
- (list<int>) 'depot_nodes': List of depot nodes used in the problem.
- (list<int>) 'optional_nodes': List of optional nodes used in the problem.
- (int) 'vehicle_count': Number of vehicles used in the problem.
        - (bool) 'maximize': Flag that determines whether the objective is to maximize or minimize.
- (int) 'minimum_cpu_time': CPU time that is allotted for the initialization of an individual solution.
The purpose of this is to stop the algorithm if that is unable to create a valid individual
(or it takes too long).
:return: Population containing the best of 2 recent populations and
random individuals if there were duplicates.
"""
fl_node_count = kwargs["node_count"]
fl_depot_nodes = kwargs["depot_nodes"]
fl_optional_nodes = kwargs["optional_nodes"]
fl_vehicle_count = kwargs["vehicle_count"]
fl_maximize = kwargs["maximize"]
fl_minimum_cpu_time = kwargs["minimum_cpu_time"]
def fl_check_goal(timer): return timer.past_goal()
filtration_timer = Timer()
filtration_timer.start()
population_size = len(population_new)
combined_population = population_old + population_new
combined_population.sort(key=attrgetter("fitness"), reverse=fl_maximize)
cut_population = combined_population[:population_size]
# Multiple weak solutions can share the same fitness value.
# However, with potentially optimal solutions, it is very
# likely that solutions with the same fitness value
# are the same. For that reason, it is assumed that solutions
# that have the same fitness value are the same.
replacement_indices = []
previous_fitness = cut_population[0].fitness
for fl_i in range(1, len(cut_population)):
if cut_population[fl_i].fitness == previous_fitness:
replacement_indices.append(fl_i)
else:
previous_fitness = cut_population[fl_i].fitness
# Create random individuals to replace duplicates.
fl_individual_timer = Timer(goal=fl_minimum_cpu_time)
fl_individual_args = {
"node_count": fl_node_count,
"depot_nodes": fl_depot_nodes,
"optional_nodes": fl_optional_nodes,
"vehicle_count": fl_vehicle_count,
"failure_msg": "(Filtration) Individual initialization is taking too long.",
"individual_timer": fl_individual_timer,
"check_goal": fl_check_goal,
"validation_args": validation_args,
"evaluation_args": evaluation_args
}
fl_individual_timer.start()
for fl_i in range(len(replacement_indices)):
fl_candidate_individual, error_msg = population_initializers.random_valid_individual(
**fl_individual_args
)
if fl_candidate_individual is None:
# Filtration strategy has failed due to taking too long in making a valid individual.
# In such a case, the algorithm will fall back to original new population.
return population_new, error_msg
cut_population[replacement_indices[fl_i]] = fl_candidate_individual
fl_individual_timer.reset()
# Population has to be sorted again, since there could be random individuals
# between fitness-wise good individuals.
cut_population.sort(key=attrgetter("fitness"), reverse=fl_maximize)
filtration_timer.stop()
fl_msg = "Filtration operation OK (Time taken: {} ms)".format(filtration_timer.elapsed())
return cut_population, fl_msg
# Similar individual replacement strategy.
if alg_params.replace_similar_individuals <= 0:
replacement_counter = float("inf")
else:
replacement_counter = alg_params.replace_similar_individuals
def similar_individual_replacement(target_population, **kwargs):
"""
Looks for fitness-wise duplicates in specified population and replaces
them with random individuals.
:param target_population: Population subject to duplicate replacements.
:param kwargs: Dictionary of expected parameters:
- (int) 'node_count': Number of nodes used in the problem. Includes depot nodes and optional nodes.
- (list<int>) 'depot_nodes': List of depot nodes used in the problem.
- (list<int>) 'optional_nodes': List of optional nodes used in the problem.
- (int) 'vehicle_count': Number of vehicles used in the problem.
        - (bool) 'maximize': Flag that determines whether the objective is to maximize or minimize.
- (int) 'minimum_cpu_time': CPU time that is allotted for the initialization of an individual solution.
The purpose of this is to stop the algorithm if that is unable to create a valid individual
(or it takes too long).
:return: Population where duplicate individuals have been replaced
with random individuals.
"""
rp_node_count = kwargs["node_count"]
rp_depot_nodes = kwargs["depot_nodes"]
rp_optional_nodes = kwargs["optional_nodes"]
rp_vehicle_count = kwargs["vehicle_count"]
rp_maximize = kwargs["maximize"]
rp_minimum_cpu_time = kwargs["minimum_cpu_time"]
def rp_check_goal(timer): return timer.past_goal()
replacement_timer = Timer()
replacement_timer.start()
replaced_population = deepcopy(target_population)
# Multiple weak solutions can share the same fitness value.
# However, with potentially optimal solutions, it is very
# likely that solutions with the same fitness value
# are the same. For that reason, it is assumed that solutions
# that have the same fitness value are the same.
replacement_indices = []
previous_fitness = replaced_population[0].fitness
for rp_i in range(1, len(replaced_population)):
if replaced_population[rp_i].fitness == previous_fitness:
replacement_indices.append(rp_i)
else:
previous_fitness = replaced_population[rp_i].fitness
# Create random individuals to replace duplicates.
rp_individual_timer = Timer(goal=rp_minimum_cpu_time)
rp_individual_args = {
"node_count": rp_node_count,
"depot_nodes": rp_depot_nodes,
"optional_nodes": rp_optional_nodes,
"vehicle_count": rp_vehicle_count,
"failure_msg": "(Similar Individual Replacement) Individual initialization is taking too long.",
"individual_timer": rp_individual_timer,
"check_goal": rp_check_goal,
"validation_args": validation_args,
"evaluation_args": evaluation_args
}
rp_individual_timer.start()
for rp_i in range(len(replacement_indices)):
rp_candidate_individual, error_msg = population_initializers.random_valid_individual(
**rp_individual_args
)
if rp_candidate_individual is None:
# Replacement operation has failed. Fall back to original population.
return target_population, error_msg
replaced_population[replacement_indices[rp_i]] = rp_candidate_individual
rp_individual_timer.reset()
# Since similar individuals have been replaced with completely random individuals,
# the population has to be sorted again.
replaced_population.sort(key=attrgetter("fitness"), reverse=rp_maximize)
rp_msg = "Similar Individual Replacement operation OK (Time taken: {} ms)" \
.format(replacement_timer.elapsed())
return replaced_population, rp_msg
# GA Initialization, Step 10: Create (and modify) variables that GA actively uses.
path_table = deepcopy(vrp_params.vrp_path_table)
path_table_mapping = deepcopy(vrp_params.vrp_path_table_mapping)
coordinates = deepcopy(vrp_params.vrp_coordinates)
node_count = len(path_table)
vehicle_count = vrp_params.vrp_vehicle_count
vehicle_capacity = vrp_params.cvrp_vehicle_capacity
depot_node_list = list(set(vrp_params.mdvrp_depot_node))
optional_node_list = list(set(vrp_params.vrpp_optional_node)) if vrp_params.vrpp_optional_node is not None else []
# At least 1 vehicle is required.
if vehicle_count < 1:
print("Vehicle count must be at least 1.")
return
# In OVRP, vehicles do not return to the depots.
    # This is simulated by reducing to zero all travel distances whose destination is a depot.
if using_ovrp:
path_table[:, depot_node_list] = 0
# If path table mapping is provided, the path table will be limited to mapped nodes only
# and the node count will be modified to account for the mapping.
if path_table_mapping is not None:
# Path table mapping values cannot exceed the size of the original path table.
if max(path_table_mapping) >= len(path_table):
print("Path table mapping cannot contain integers greater than path table size ({} vs. {})."
.format(len(path_table), max(path_table_mapping)))
return
# Path table mapping values also cannot be negative.
if min(path_table_mapping) < 0:
print("Path table mapping cannot contain negative integers.")
return
# Adjust path table according to provided mapping.
node_count = len(path_table_mapping)
new_path_table = []
for i in path_table_mapping:
new_path_table_row = []
for j in path_table_mapping:
new_path_table_row.append(path_table[i, j])
new_path_table.append(new_path_table_row)
path_table = np.array(new_path_table)
# Depot node list cannot contain nodes that go above node count.
if max(depot_node_list) >= node_count:
print("Depot node list cannot contain nodes that do not exist ({} vs. {})."
.format(node_count, max(depot_node_list)))
return
# Depot node list also cannot contain negative integers.
if min(depot_node_list) < 0:
print("Depot node list cannot contain negative integers.")
return
# Optional node list cannot contain nodes that go above node count.
if len(optional_node_list) > 0:
if max(optional_node_list) >= node_count:
print("Optional node list cannot contain nodes that do not exist ({} vs. {})."
.format(node_count, max(optional_node_list)))
return
# Optional node list also cannot contain negative integers.
if min(optional_node_list) < 0:
print("Optional node list cannot contain negative integers.")
return
# Set maximum time/distance constraints.
maximum_time = vrp_params.vrp_maximum_route_time \
if vrp_params.vrp_maximum_route_time is not None else float("inf")
maximum_distance = vrp_params.vrp_maximum_route_distance \
if vrp_params.vrp_maximum_route_distance is not None else float("inf")
# Set node service times to 0 if they're not provided.
node_service_time = deepcopy(vrp_params.vrp_node_service_time)
if node_service_time is None:
node_service_time = [0] * node_count
else:
# Depot nodes do not need servicing.
for depot_node in depot_node_list:
node_service_time[depot_node] = 0
# Set node demands to 0 if they're not provided.
node_demand_list = deepcopy(vrp_params.cvrp_node_demand)
if node_demand_list is None:
node_demand_list = np.array([[0] * node_count]).T
if len(node_demand_list.shape) == 1:
node_demand_list = np.array([node_demand_list]).T
# If vehicle capacity was not specified, default capacities (0) are given.
if vehicle_capacity is None:
vehicle_capacity = [0] * node_demand_list.shape[1]
# Depot nodes do not have supply demands associated with them.
for depot_node in depot_node_list:
node_demand_list[depot_node] = 0
# Set time windows infinitely large if they're not provided.
time_windows = deepcopy(vrp_params.vrptw_node_time_window)
if time_windows is None:
time_windows = [(0, float("inf"))] * node_count
# Time windows of the depot nodes are the same as maximum time
# unless it is specified. (Although it is pointless to set a time window
# that is greater than maximum time: maximum time takes precedence over time windows)
# Set penalty coefficients to 0 if they're not provided.
node_penalty_list = deepcopy(vrp_params.vrptw_node_penalty)
if node_penalty_list is None:
node_penalty_list = [0] * node_count
# Set profits to 0 if they're not provided.
node_profit_list = deepcopy(vrp_params.vrpp_node_profit)
if node_profit_list is None:
node_profit_list = [0] * node_count
# Depot nodes do not have profits associated with them.
for depot_node in depot_node_list:
node_profit_list[depot_node] = 0
population_count = alg_params.population_count
parent_candidate_count = alg_params.parent_candidate_count
tournament_probability = alg_params.tournament_probability
crossover_probability = alg_params.crossover_probability
mutation_probability = alg_params.mutation_probability
sa_iteration_count = alg_params.sa_iteration_count
sa_initial_temperature = alg_params.sa_initial_temperature
sa_p_coeff = alg_params.sa_p_coeff
# GA Initialization, Step 11: Create variables relating to termination criteria.
global_cpu_limit = alg_params.cpu_total_limit
individual_cpu_limit = alg_params.cpu_individual_limit
upper_bound = alg_params.fitness_upper_bound
if upper_bound is None or not using_vrpp:
upper_bound = float("inf")
lower_bound = alg_params.fitness_lower_bound
if lower_bound is None or using_vrpp:
lower_bound = -float("inf")
threshold = alg_params.fitness_threshold
generation_count_min = alg_params.generation_count_min
generation_count_max = alg_params.generation_count_max
global_timer = Timer(global_cpu_limit)
individual_timer = Timer(individual_cpu_limit)
def check_goal(timer): return timer.past_goal()
# GA Initialization, Step 12: Prepare keyword arguments for module functions.
evaluation_args = {
"path_table": path_table,
"distance_time_converter": distance_to_time,
"distance_cost_converter": distance_to_cost,
"time_cost_converter": time_to_cost,
"time_window": time_windows,
"service_time": node_service_time,
"penalty": node_penalty_list,
"node_profit": node_profit_list,
"ovrp": using_ovrp
}
validation_args = {
"path_table": path_table,
"capacity": vehicle_capacity,
"demand": node_demand_list,
"maximum_time": maximum_time,
"maximum_distance": maximum_distance,
"time_window": time_windows,
"service_time": node_service_time,
"distance_time_converter": distance_to_time,
"ovrp": using_ovrp
}
population_args = {
"node_count": node_count,
"depot_nodes": depot_node_list,
"optional_nodes": optional_node_list,
"vehicle_count": vehicle_count,
"population_count": population_count,
"minimum_cpu_time": individual_cpu_limit,
"sa_iteration_count": sa_iteration_count,
"sa_initial_temperature": sa_initial_temperature,
"sa_p_coeff": sa_p_coeff,
"maximize": maximize,
"validation_args": validation_args,
"evaluation_args": evaluation_args
}
individual_args = {
"node_count": node_count,
"depot_nodes": depot_node_list,
"optional_nodes": optional_node_list,
"vehicle_count": vehicle_count,
"failure_msg": "(Invalid Individual Replacement) Individual initialization is taking too long.",
"individual_timer": individual_timer,
"check_goal": check_goal,
"validation_args": validation_args,
"evaluation_args": evaluation_args
}
parent_selection_args = {
"parent_candidate_count": parent_candidate_count,
"maximize": maximize,
"tournament_probability": tournament_probability
}
filtration_replacement_args = {
"node_count": node_count,
"depot_nodes": depot_node_list,
"optional_nodes": optional_node_list,
"vehicle_count": vehicle_count,
"maximize": maximize,
"minimum_cpu_time": individual_cpu_limit
}
# GA Initialization, Step 13: Miscellaneous collection of tests.
    # Capacity test: total capacity potential (vehicle_capacity * vehicle_count) is compared
# to total required capacity (sum of every required node capacity).
if len(vehicle_capacity) != node_demand_list.shape[1]:
print("Number of Vehicle Capacity Types does not match with that of Node Demands.")
return
required_nodes = [i for i in range(node_count) if i not in optional_node_list and i not in depot_node_list]
for demand_type in range(node_demand_list.shape[1]):
# If there are no required nodes, this can be skipped.
if len(required_nodes) == 0:
break
# Start by checking if individual demands are too high by themselves.
highest_capacity_demand = max(node_demand_list[:, demand_type][required_nodes])
if highest_capacity_demand > vehicle_capacity[demand_type]:
print("Capacity requirements are too strict (Individual demand {} exceeds vehicle capacity {})"
.format(highest_capacity_demand, vehicle_capacity[demand_type]))
return
# Check if total demand is too high.
capacity_potential = vehicle_capacity[demand_type] * vehicle_count
required_capacity = 0
for i in range(len(node_demand_list[:, demand_type])):
if i not in optional_node_list:
# Depot nodes do not have supply demands.
node_capacity = node_demand_list[:, demand_type][i]
required_capacity += node_capacity
if required_capacity > capacity_potential:
print("Capacity requirements are too strict ({} required, {} available)".format(required_capacity,
capacity_potential))
# This test assumes that every node is a delivery node, while the depot nodes
# are the pickup nodes, or every node is a pickup node, while the depot nodes
# are delivery nodes.
return
# Valid depot test: optional nodes cannot be depot nodes. This will be checked here.
offending_list = []
for depot_node in depot_node_list:
if depot_node in optional_node_list:
offending_list.append(depot_node)
if len(offending_list) > 0:
print("Optional nodes cannot be depot nodes (Offending nodes: {})".format(offending_list))
return
# -----------------------------------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------------------------------------
# ----- Genetic Algorithm starts here -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------------------------------------
population_history = {} # Used in drawing graph 3 / 7.
population_history_tracker = [0, 1, 2, 3, 4, 5, 10, 15, 20, 25]
best_generation_individual_history = [] # Used in drawing graph 4 / 7.
best_time_individual_history = [] # Used in drawing graph 5 / 7.
best_individual_time_tracker = [] # Used in drawing graph 5 / 7.
best_overall_individual_history = [] # Used in drawing graph 6 / 7 and graph 7 / 7.
best_overall_generation_tracker = [] # Used in drawing graph 6 / 7 and graph 7 / 7.
current_generation = 0
current_generation_min = 0
print("Initializing generation 0 population...")
population, msg = VRP.population_initializer(**population_args)
# Population initialization can fail due to taking too long in creating a valid individual.
# If this happens, GA execution is terminated, without results.
if population is None:
print(msg)
print("Returning to menu...")
return
population.sort(key=attrgetter("fitness"), reverse=maximize)
population_history[0] = deepcopy(population)
initial_population = deepcopy(population) # Used in drawing graph 1 / 7.
best_individual = deepcopy(population[0])
best_initialized_individual = deepcopy(best_individual) # Used in drawing graph 2 / 7.
best_generation_individual_history.append(deepcopy(best_individual))
best_overall_individual_history.append(deepcopy(best_individual))
best_overall_generation_tracker.append(current_generation)
print(msg)
invalidity_correction_args = {
"best_individual": best_individual,
"individual_args": individual_args
}
timeout = False
global_timer.start()
# ------------------------------------------------------------------------------------------------------------------
# - The Beginning of the Main Loop of the Genetic Algorithm. -------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------
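# The loop below runs until one of the termination criteria fires: the global
# CPU time limit is exceeded (timeout), the best fitness gets within
# 'threshold' of the configured lower/upper bound, the maximum generation
# count is reached, or no new best individual has been found for
# 'generation_count_min' consecutive generations.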
while not timeout \
and lower_bound + threshold <= best_individual.fitness <= upper_bound - threshold \
and current_generation < generation_count_max \
and current_generation_min < generation_count_min:
new_population = []
while len(new_population) < population_count and not timeout:
# Two new individuals are created for the new population in each loop.
# Select two parents from current generation.
parent1, parent2 = VRP.parent_selector(population, **parent_selection_args)
# Perform crossover operation.
crossover_check = np.random.random()
if crossover_probability >= crossover_check:
offspring1, offspring2 = VRP.crossover_operator(parent1, parent2)
else:
offspring1, offspring2 = deepcopy(parent1), deepcopy(parent2)
offspring1.assign_id()
offspring2.assign_id()
# Perform mutation operation.
mutation_check1, mutation_check2 = np.random.random(), np.random.random()
if mutation_probability >= mutation_check1:
mutation_selector = np.random.randint(0, mutation_function_count)
VRP.mutation_operator[mutation_selector](offspring1)
if mutation_probability >= mutation_check2:
mutation_selector = np.random.randint(0, mutation_function_count)
VRP.mutation_operator[mutation_selector](offspring2)
# Optimize Depot Nodes if it has been requested.
if using_mdvrp and optimize_depot_nodes:
evaluation_collection[5](offspring1, path_table=path_table)
evaluation_collection[5](offspring2, path_table=path_table)
# Now that the offspring have been created, they must be validated
# and evaluated before they are added to the population.
add_offspring1 = True
for validator in VRP.validator:
offspring1.valid, validation_msg = validator(offspring1, **validation_args)
if offspring1.valid is False:
add_offspring1 = False
# New individual is invalid. It is now subject to a correction operation.
individual_timer.start()
replacement, msg = VRP.invalidity_corrector(offspring1, **invalidity_correction_args)
individual_timer.stop()
if replacement is None:
if msg == "RETRY":
# Ignore invalidity correction process.
break
else:
# Minimum CPU Time Termination Criterion has been violated.
# GA will be concluded here, without results.
print(msg)
timeout = True
else:
# Replacement individual is valid. Evaluate and add to population.
replacement.fitness = VRP.evaluator(replacement, **evaluation_args)
new_population.append(replacement)
# If offspring was found valid, it is added to the population here.
if add_offspring1 and offspring1.valid:
offspring1.fitness = VRP.evaluator(offspring1, **evaluation_args)
new_population.append(offspring1)
add_offspring2 = True
for validator in VRP.validator:
offspring2.valid, validation_msg = validator(offspring2, **validation_args)
if offspring2.valid is False:
add_offspring2 = False
# New individual is deemed invalid. It is now subject to a correction operation.
individual_timer.start()
replacement, msg = VRP.invalidity_corrector(offspring2, **invalidity_correction_args)
individual_timer.stop()
if replacement is None:
if msg == "RETRY":
# Ignore invalidity correction process.
break
else:
# Minimum CPU Time Termination Criterion has been violated.
# GA will be concluded here, without results.
print(msg)
timeout = True
else:
# Replacement individual is valid. Evaluate and add to population.
replacement.fitness = VRP.evaluator(replacement, **evaluation_args)
new_population.append(replacement)
# If offspring was found valid, it is added to the population here.
if add_offspring2 and offspring2.valid:
offspring2.fitness = VRP.evaluator(offspring2, **evaluation_args)
new_population.append(offspring2)
timeout = global_timer.past_goal()
# - End of population loop -
# If the population count is set to an odd number, it is likely that
# only one individual has to be removed.
if len(new_population) > population_count:
del new_population[np.random.randint(0, len(new_population))]
# Check if GA termination has been requested.
if timeout:
break
new_population.sort(key=attrgetter("fitness"), reverse=maximize)
candidate_individual = new_population[0]
# Filtration/Replacement Check.
if current_generation % filtration_counter == 0:
# Filtration Strategy: combine the two recent populations into one,
# and throw away the worst individuals and replace duplicates with
# random individuals, until population count matches.
population, filtration_msg = filtration(population, new_population, **filtration_replacement_args)
elif current_generation % replacement_counter == 0:
# Similar Individual Replacement Strategy: check most recent population for
# duplicates and replace them with completely random individuals.
# Filtration Strategy does this as well, which is why the conjunction
# of the two conditions is not separately checked.
population, replacement_msg = similar_individual_replacement(new_population, **filtration_replacement_args)
else:
# No Filtration/Replacement performed: new population becomes current population.
population = new_population
# Check if next generation's best individual is the best overall.
if compare(candidate_individual, best_individual):
# New best individual takes over as the potential optimal solution.
best_individual = deepcopy(candidate_individual)
invalidity_correction_args["best_individual"] = best_individual
# Add said individual into a separate list so that it could be plotted
# later.
best_overall_individual_history.append(deepcopy(candidate_individual))
best_overall_generation_tracker.append(current_generation)
# Since new best individual was discovered, minimum generation count
# is now reset.
current_generation_min = -1
# Data collection for plotting purposes.
if current_generation + 1 in population_history_tracker:
population_history[current_generation + 1] = deepcopy(population)
best_generation_individual_history.append(deepcopy(candidate_individual))
best_time_individual_history.append(deepcopy(best_individual))
best_individual_time_tracker.append(global_timer.elapsed())
current_generation += 1
current_generation_min += 1
print("Generation {:> 5} / {:> 5} (Min: {:> 5} / {:> 5}) | "
"Best Fitness (Generation / Overall): {:0.0f} / {:0.0f}"
.format(
current_generation,
generation_count_max,
current_generation_min,
generation_count_min,
candidate_individual.fitness,
best_individual.fitness))
# ------------------------------------------------------------------------------------------------------------------
# - The End of the Main Loop of the Genetic Algorithm. -------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------
global_timer.stop()
population_history[current_generation] = deepcopy(population)
print("Algorithm has finished. (Time taken: {} ms)".format(global_timer.elapsed()))
print("Discovered an individual with the following details:")
best_individual.print()
print("Preparing data for drawing plots...")
# ------------------------------------------------------------------------------------------------------------------
# - Plot Drawing starts here. --------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------
PlotData.select_unused_folder_name()
plot_function_list = []
plot_data_list = []
# Graph 1 / 7
# Line Graph that illustrates diversity of population created using a population initializer.
details1 = {
"population_initializer": alg_params.str_population_initializer[alg_params.population_initializer],
"sa_iteration_count": sa_iteration_count,
"sa_initial_temperature": sa_initial_temperature,
"sa_p_coeff": sa_p_coeff
}
plot_function1, plot_data1 = plot_manager.plot_population_initializer(
initial_population,
details1
)
plot_function_list, plot_data_list = plot_function_list + plot_function1, plot_data_list + plot_data1
# Graph 2 / 7
# Scatter Graph (Map) that illustrates the solution of the best individual created by the population initializer.
# This is drawn only if node coordinates are available, and if path table mapping is not used.
if coordinates is not None and path_table_mapping is None:
details2 = {
"population_initializer": alg_params.str_population_initializer[alg_params.population_initializer],
"population_count": population_count,
"coordinates": coordinates,
"open_routes": using_ovrp,
"sa_iteration_count": sa_iteration_count,
"sa_initial_temperature": sa_initial_temperature,
"sa_p_coeff": sa_p_coeff
}
plot_function2, plot_data2 = plot_manager.plot_best_individual_initial_solution(
best_initialized_individual,
details2
)
plot_function_list, plot_data_list = plot_function_list + plot_function2, plot_data_list + plot_data2
# Graph 3 / 7
# Line Graph that illustrates the development of the population. Fitness values of every individual over
# multiple generations are presented.
details3 = {
"population_count": population_count,
"parent_selector": alg_params.str_parent_selection_function[alg_params.parent_selection_function],
"crossover_operator": alg_params.str_crossover_operator[alg_params.crossover_operator],
"tournament_probability": tournament_probability,
"crossover_probability": crossover_probability,
"mutation_probability": mutation_probability
}
plot_function3, plot_data3 = plot_manager.plot_population_development(
population_history,
details3
)
plot_function_list, plot_data_list = plot_function_list + plot_function3, plot_data_list + plot_data3
# Graph 4 / 7
# Line Graph that illustrates fitness development of the competing individuals of their generations.
details4 = {
"population_count": population_count,
"parent_selector": alg_params.str_parent_selection_function[alg_params.parent_selection_function],
"crossover_operator": alg_params.str_crossover_operator[alg_params.crossover_operator],
"tournament_probability": tournament_probability,
"crossover_probability": crossover_probability,
"mutation_probability": mutation_probability
}
plot_function4, plot_data4 = plot_manager.plot_best_individual_fitness(
best_generation_individual_history,
details4
)
plot_function_list, plot_data_list = plot_function_list + plot_function4, plot_data_list + plot_data4
# Graph 5 / 7
# Line Graph that illustrates fitness development of the competing individuals with respect to time.
details5 = {
"population_count": population_count,
"parent_selector": alg_params.str_parent_selection_function[alg_params.parent_selection_function],
"crossover_operator": alg_params.str_crossover_operator[alg_params.crossover_operator],
"tournament_probability": tournament_probability,
"crossover_probability": crossover_probability,
"mutation_probability": mutation_probability
}
plot_function5, plot_data5 = plot_manager.plot_best_individual_fitness_time(
best_time_individual_history,
best_individual_time_tracker,
details5
)
plot_function_list, plot_data_list = plot_function_list + plot_function5, plot_data_list + plot_data5
# Graph 6 / 7
# Bar Graph that illustrates the development of the best individual in terms of its fitness.
details6 = {
"bar_count": 50 if len(best_overall_individual_history) > 50 else len(best_overall_individual_history),
"population_count": population_count,
"parent_selector": alg_params.str_parent_selection_function[alg_params.parent_selection_function],
"crossover_operator": alg_params.str_crossover_operator[alg_params.crossover_operator],
"tournament_probability": tournament_probability,
"crossover_probability": crossover_probability,
"mutation_probability": mutation_probability
}
plot_function6, plot_data6 = plot_manager.plot_best_individual_collection(
best_overall_individual_history,
best_overall_generation_tracker,
details6
)
plot_function_list, plot_data_list = plot_function_list + plot_function6, plot_data_list + plot_data6
# Graph 7 / 7
# Collection of Scatter Graphs that illustrate the development of the solution of the best individual.
# This is drawn only if node coordinates are available, and if path table mapping is not used.
if coordinates is not None and path_table_mapping is None:
details7 = {
"max_plot_count": 10,
"coordinates": coordinates,
"open_routes": using_ovrp,
"population_count": population_count,
"parent_selector": alg_params.str_parent_selection_function[alg_params.parent_selection_function],
"crossover_operator": alg_params.str_crossover_operator[alg_params.crossover_operator],
"tournament_probability": tournament_probability,
"crossover_probability": crossover_probability,
"mutation_probability": mutation_probability
}
plot_function7, plot_data7 = plot_manager.plot_best_individual_solution(
best_overall_individual_history,
best_overall_generation_tracker,
details7
)
plot_function_list, plot_data_list = plot_function_list + plot_function7, plot_data_list + plot_data7
plot_manager.set_total_plot_count(plot_data_list)
print("Drawing plots...")
print("(Once a window appears, closing it resumes execution.)")
plot_manager.summon_window(plot_function_list, plot_data_list)
print("Returning to menu...")
|
{"hexsha": "35665d91b5baa87b3fbcd421f3f8b321d8fa3465", "size": 46716, "ext": "py", "lang": "Python", "max_stars_repo_path": "algorithms/genalg.py", "max_stars_repo_name": "terratenff/vrp-gen-alg", "max_stars_repo_head_hexsha": "3910ff7977a84b03e14c4f500909bcb86e6dd608", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "algorithms/genalg.py", "max_issues_repo_name": "terratenff/vrp-gen-alg", "max_issues_repo_head_hexsha": "3910ff7977a84b03e14c4f500909bcb86e6dd608", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "algorithms/genalg.py", "max_forks_repo_name": "terratenff/vrp-gen-alg", "max_forks_repo_head_hexsha": "3910ff7977a84b03e14c4f500909bcb86e6dd608", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.0927419355, "max_line_length": 120, "alphanum_fraction": 0.6653394982, "include": true, "reason": "import numpy", "num_tokens": 9121}
|
"""
meerkat_api util functions
"""
from datetime import datetime
from dateutil import parser
import meerkat_abacus.util as abacus_util
import numpy as np
import meerkat_abacus.util.epi_week
def series_to_json_dict(series):
"""
Takes pandas series and turns into a dict with string keys
Args:
series: pandas series
Returns:
dict: dict
"""
# .item() casts numpy scalar types to native Python values (np.asscalar was removed from numpy)
if series is not None:
ret = {}
for key, value in series.to_dict().items():
if isinstance(value, float) or isinstance(value, int):
ret[str(key)] = value
else:
ret[str(key)] = float(value.item())
return ret
else:
return {}
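# Illustrative example (editor's note, not part of the original module; assumes
# pandas is importable here, which this module itself does not import):
#   series_to_json_dict(pd.Series({"count": np.int64(3)})) -> {"count": 3.0}
#   series_to_json_dict(None) -> {}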
def fix_dates(start_date, end_date):
"""
We parse the start and end date and remove any timezone information
Args:
start_date: start date
end_date: end_date
Returns:
dates(tuple): (start_date, end_date)
"""
if end_date:
end_date = parser.parse(end_date).replace(hour=23,
minute=59,
second=59,
tzinfo=None)
else:
end_date = datetime.now()
if start_date:
start_date = parser.parse(start_date).replace(hour=0,
minute=0,
second=0,
tzinfo=None)
else:
start_date = end_date.replace(month=1, day=1,
hour=0, second=0,
minute=0,
microsecond=0)
if start_date < meerkat_abacus.util.epi_week.epi_year_start_date(date=start_date):
start_date = meerkat_abacus.util.epi_week.epi_year_start_date(date=start_date)
return start_date, end_date
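# Illustrative example (editor's note, not part of the original module):
#   fix_dates("2018-03-01", "2018-06-30")
#   -> roughly (2018-03-01 00:00:00, 2018-06-30 23:59:59), with the start date
#      clamped forward to the epi-year start if it falls before it.
#   fix_dates(None, None) -> (start of the current year/epi-year, now)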
def row_to_dict(row):
"""
Translate sql alchemy row to dict
Args:
row: SQL alchemy class
Returns:
data_dict(dict): data as dictionary
"""
if not row:
return {}
if hasattr(row, "__table__"):
return dict((col, getattr(row, col))
for col in row.__table__.columns.keys())
else:
ret = {}
for table in row:
if table:
ret[table.__tablename__] = dict(
(col, getattr(table, col)) for col
in table.__table__.columns.keys())
return ret
def rows_to_dicts(rows, dict_id=None):
"""
Translate sql alchemy rows to dicts
Args:
rows: List of SQL alchemy rows
dict_id: If set, we return a dict keyed by the dict_id column
Returns:
data_dicts(dict): data as dictionary
"""
if dict_id:
if len(rows) > 0 and isinstance(rows[0], tuple):
raise TypeError("Can not use dict_id=True with tuple rows")
data_dicts = {}
for row in rows:
data_dicts[getattr(row, dict_id)] = row_to_dict(row)
else:
data_dicts = []
for row in rows:
data_dicts.append(row_to_dict(row))
return data_dicts
def find_level(location, sublevel, locations):
"""
Returns the instance of level that location is a child of
Args:
location: location
sublevel: the sublevel we are interested in
locations: all locations in dict
Returns:
location_id(int): id of the matching location
"""
location = int(location)
for loc in locations:
if locations[loc].level == sublevel and abacus_util.is_child(loc, location, locations):
return loc
return None
def get_children(parent, locations, clinic_type=None, require_case_report=True, case_type=None):
"""
Return all clinics that are children of parent
Args:
parent: parent_id
locations: all locations in dict
Returns:
list of location ids
"""
ret = []
for location_id in locations.keys():
if ( (not require_case_report or locations[location_id].case_report) and
(not clinic_type or locations[location_id].clinic_type == clinic_type)):
if case_type is None or locations[location_id].case_type == case_type:
if abacus_util.is_child(parent, location_id, locations):
ret.append(location_id)
return ret
|
{"hexsha": "3a1ea458d526f1cf463aad6d844f8d30509d9ae4", "size": 4531, "ext": "py", "lang": "Python", "max_stars_repo_path": "meerkat_api/util/__init__.py", "max_stars_repo_name": "meerkat-code/meerkat_api", "max_stars_repo_head_hexsha": "9ab617498e52df5a49b993ee1c931071eab6ab92", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "meerkat_api/util/__init__.py", "max_issues_repo_name": "meerkat-code/meerkat_api", "max_issues_repo_head_hexsha": "9ab617498e52df5a49b993ee1c931071eab6ab92", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2016-06-22T17:05:49.000Z", "max_issues_repo_issues_event_max_datetime": "2018-04-12T12:56:50.000Z", "max_forks_repo_path": "meerkat_api/util/__init__.py", "max_forks_repo_name": "who-emro/meerkat_api", "max_forks_repo_head_hexsha": "9ab617498e52df5a49b993ee1c931071eab6ab92", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-08-06T22:46:58.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-06T22:46:58.000Z", "avg_line_length": 29.4220779221, "max_line_length": 96, "alphanum_fraction": 0.5676451115, "include": true, "reason": "import numpy", "num_tokens": 953}
|
import matplotlib
# don't use xwindow
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import re
import sys
import os
basedir = sys.argv[1] + "/"
files = map(lambda x: basedir + x, sorted(os.listdir(basedir), key=int))
def process_uartlog(uartlogpath):
""" process the log and then report the mean RTT for this link latency """
def mean(numbers):
return float(sum(numbers)) / max(len(numbers), 1)
with open(uartlogpath, 'r') as f:
readlines = f.readlines()
rtts = []
for line in readlines:
if "64 bytes from 172.16.0.3:" in line:
thisrtt = line.split()[-2].split("=")[-1]
rtts.append(float(thisrtt))
return mean(rtts)
def get_average_rtt_from_file(basedirname):
uartlogpath = basedirname + "/pinger/uartlog"
latency = float(basedirname.split("/")[-1])
latency_in_ms = (latency / 3.2) / 1000000.0
ideal_rtt = (latency * 4) + 10*2
ideal_rtt_in_ms = (ideal_rtt / 3.2) / 1000000.0
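# Editor's note: the arithmetic above appears to treat 'latency' as a cycle
# count at a 3.2 GHz clock; dividing by 3.2 and then 1e6 converts cycles to
# milliseconds, and the ideal RTT assumes four link traversals plus a fixed
# 2*10-cycle overhead. This reading is inferred from the code, not documented.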
measured_rtt_in_ms = process_uartlog(uartlogpath)
link_latency_us = latency_in_ms * 1000.0
measured_rtt_in_us = measured_rtt_in_ms * 1000.0
ideal_rtt_in_us = ideal_rtt_in_ms * 1000.0
print("DIFF: " + str(measured_rtt_in_us - ideal_rtt_in_us))
return [link_latency_us, measured_rtt_in_us, ideal_rtt_in_us]
# Materialize the results so they can be iterated several times under Python 3,
# where map() returns a single-use iterator.
resultarray = list(map(get_average_rtt_from_file, files))
link_latency = [x[0] for x in resultarray]
measured_rtt = [x[1] for x in resultarray]
ideal_rtt = [x[2] for x in resultarray]
print(resultarray)
series = []
fig, ax = plt.subplots()
ser, = plt.plot(link_latency, measured_rtt, linestyle='--', marker='^', c='0.5')
series.append(ser)
ser, = plt.plot(link_latency, ideal_rtt, linestyle='-', marker='o', c='0.1')
series.append(ser)
#matplotlib.rcParams.update({'font.size': 16})
matplotlib.rcParams.update(matplotlib.rcParamsDefault)
ax.legend(series, ['Measured Ping RTT', 'Ideal RTT'],prop={'size': 10})
ax.set_xlabel(r'Link Latency ($\mu$s)', size='10')
ax.set_ylabel(r'Round Trip Time ($\mu$s)', size='10')
ax.grid(linestyle='-', linewidth=0.3)
fig = plt.gcf()
fig.set_size_inches(6, 3.75)
plt.show()
fig.savefig(basedir + 'ping-rtt.pdf', format='pdf')
|
{"hexsha": "e17b35d4a2901cfc9497625209a8fff864756d18", "size": 2235, "ext": "py", "lang": "Python", "max_stars_repo_path": "deploy/workloads/ping-latency/ping-latency-graph.py", "max_stars_repo_name": "GaloisInc/BESSPIN-firesim", "max_stars_repo_head_hexsha": "0da74414291708563f9b512634d1315d53077e91", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-07-18T06:04:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-19T21:23:55.000Z", "max_issues_repo_path": "deploy/workloads/ping-latency/ping-latency-graph.py", "max_issues_repo_name": "GaloisInc/BESSPIN-firesim", "max_issues_repo_head_hexsha": "0da74414291708563f9b512634d1315d53077e91", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "deploy/workloads/ping-latency/ping-latency-graph.py", "max_forks_repo_name": "GaloisInc/BESSPIN-firesim", "max_forks_repo_head_hexsha": "0da74414291708563f9b512634d1315d53077e91", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.9277108434, "max_line_length": 80, "alphanum_fraction": 0.6841163311, "include": true, "reason": "import numpy", "num_tokens": 673}
|
function oneDArray(t::DataType, len::Int)
return zeros(t, len)
end
function twoDArray(t::DataType, len_x::Int, len_y::Int)
return zeros(t, len_x, len_y)
end
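# Editor's sketch (not in the original file): example usage of the helpers above.
#   v = oneDArray(Float64, 10)     # 10-element zero vector
#   m = twoDArray(Int, 3, 4)       # 3x4 zero matrix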
|
{"hexsha": "bf9f6a51dd01084fcd5999e8e6dcc81690fc4e06", "size": 165, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/types.jl", "max_stars_repo_name": "gaelforget/Diffusion.jl", "max_stars_repo_head_hexsha": "2fe126f0f777f947b3a5deb6fa6d8e66cc8ba17b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 27, "max_stars_repo_stars_event_min_datetime": "2021-05-21T13:16:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-28T06:39:05.000Z", "max_issues_repo_path": "src/types.jl", "max_issues_repo_name": "gaelforget/Diffusion.jl", "max_issues_repo_head_hexsha": "2fe126f0f777f947b3a5deb6fa6d8e66cc8ba17b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2019-05-14T20:48:40.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-02T22:36:43.000Z", "max_forks_repo_path": "src/types.jl", "max_forks_repo_name": "gaelforget/Diffusion.jl", "max_forks_repo_head_hexsha": "2fe126f0f777f947b3a5deb6fa6d8e66cc8ba17b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2019-05-08T03:39:55.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-14T10:39:58.000Z", "avg_line_length": 23.5714285714, "max_line_length": 55, "alphanum_fraction": 0.7090909091, "num_tokens": 51}
|
function dbName = response_demo_database
%RESPONSE_DEMO_DATABASE returns the absolute path to the demo database
% DBNAME = RESPONSE_DEMO_DATABASE
file = which('response_cookbook');
path = fileparts(file);
if ~exist([path '/demo'])
error('response_demo_database: demo database not found');
else
dbName = [path '/demo/plutons'];
end
|
{"author": "geoscience-community-codes", "repo": "GISMO", "sha": "a4eafca9d2ac85079253510005ef00aa9998d030", "save_path": "github-repos/MATLAB/geoscience-community-codes-GISMO", "path": "github-repos/MATLAB/geoscience-community-codes-GISMO/GISMO-a4eafca9d2ac85079253510005ef00aa9998d030/contributed/instrument_response/response_demo_database.m"}
|
#
# Copyright © 2021 United States Government as represented by the Administrator
# of the National Aeronautics and Space Administration. No copyright is claimed
# in the United States under Title 17, U.S. Code. All Other Rights Reserved.
#
# SPDX-License-Identifier: NASA-1.3
#
"""
Dust map infrastructure. Software modified from VRO's photUtils.
https://github.com/lsst/sims_photUtils/tree/master/python/lsst/sims/photUtil
"""
import numpy as np
import astropy.units as u
from dust_extinction.parameter_averages import CCM89
from synphot import ReddeningLaw
from synphot import SourceSpectrum, SpectralElement
from synphot.models import ConstFlux1D, Box1D
class Dust:
"""Calculate extinction values
Parameters
----------
filters, bandpasses, zeropoints : lists
Filter names, boxcar bandpass limits in Angstrom, and zeropoints, one entry per filter.
R_v : float (3.1)
Extinction law parameter (3.1).
ref_ebv : float (1.)
The reference E(B-V) value to use. Things in MAF assume 1.
"""
def __init__(self, filters=['FUV', 'NUV'],
bandpasses=[[1350, 1750], [1750, 2800]],
zeropoints=[22.0, 23.5],
R_v=3.1, ref_ebv=1.):
# Calculate dust extinction values
self.Ax1 = {}
self.bandpassDict = {}
self.zeropointDict = {}
for ii, filt in enumerate(filters):
self.bandpassDict[filt] = bandpasses[ii]
self.zeropointDict[filt] = zeropoints[ii]
redlaw = ReddeningLaw(CCM89(Rv=R_v))
for filtername in self.bandpassDict:
wavelen_min = self.bandpassDict[filtername][0]
wavelen_max = self.bandpassDict[filtername][1]
wav = np.arange(wavelen_min, wavelen_max, 1.0) * u.AA
flat_abmag = SourceSpectrum(ConstFlux1D, amplitude=0*u.STmag)
bp = SpectralElement(Box1D,
amplitude=1,
x_0=(wavelen_max+wavelen_min)/2.0,
width=wavelen_max-wavelen_min)
extcurve = redlaw.extinction_curve(ref_ebv, wavelengths=wav)
sp_ext = flat_abmag * bp * extcurve
sp = flat_abmag * bp
sp_ext_mag = -2.5*np.log10(sp_ext.integrate().to_value())
sp_mag = -2.5*np.log10(sp.integrate().to_value())
# Calculate difference due to dust when EBV=1.0
# (m_dust = m_nodust - Ax, Ax > 0)
self.Ax1[filtername] = sp_ext_mag - sp_mag
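# --- Editor's sketch (illustrative, not part of the original module) ---------
# Ax1 holds the extinction A_x produced by E(B-V) = ref_ebv in each band, so a
# sightline's extinction can be obtained by scaling, e.g. (placeholder E(B-V)):
#   dust = Dust()  # default FUV/NUV boxcar bandpasses
#   a_x = {band: ax1 * 0.05 for band, ax1 in dust.Ax1.items()}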
|
{"hexsha": "c6696b1e43927e15fa2918816fde083f91979220", "size": 2417, "ext": "py", "lang": "Python", "max_stars_repo_path": "dorado/scheduling/dust.py", "max_stars_repo_name": "bwgref/dorado-scheduling", "max_stars_repo_head_hexsha": "f3f8784bcc0646d10b7bc2c11040ef9c933b92b1", "max_stars_repo_licenses": ["NASA-1.3"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-03-02T04:44:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-16T21:30:32.000Z", "max_issues_repo_path": "dorado/scheduling/dust.py", "max_issues_repo_name": "bwgref/dorado-scheduling", "max_issues_repo_head_hexsha": "f3f8784bcc0646d10b7bc2c11040ef9c933b92b1", "max_issues_repo_licenses": ["NASA-1.3"], "max_issues_count": 75, "max_issues_repo_issues_event_min_datetime": "2021-03-11T21:00:02.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-19T18:48:13.000Z", "max_forks_repo_path": "dorado/scheduling/dust.py", "max_forks_repo_name": "bwgref/dorado-scheduling", "max_forks_repo_head_hexsha": "f3f8784bcc0646d10b7bc2c11040ef9c933b92b1", "max_forks_repo_licenses": ["NASA-1.3"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2021-03-11T17:34:35.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-02T15:45:36.000Z", "avg_line_length": 37.1846153846, "max_line_length": 79, "alphanum_fraction": 0.6206040546, "include": true, "reason": "import numpy,import astropy", "num_tokens": 646}
|
"""
Presumably I copied this in from holoviews and hacked til it worked,
as a proof of concept? But since holoviews is deprecating magics, no
attempt was ever made (or will ever be made...) to do it properly :)
"""
from itertools import groupby
IGNORED_LINE_MAGICS = ['output']
IGNORED_CELL_MAGICS = ['output']
try:
import numpy # noqa: Some bad numpy/pytest interaction. Without importing numpy here (so it happens when nbsmoke is imported, i.e. at plugin load time,
# we get the traceback in https://github.com/numpy/numpy/issues/14012 (no idea if same cause)
except ImportError:
pass
def _make_optsspec():
from holoviews.util.parser import OptsSpec
class NbSmokeOptsSpec(OptsSpec):
@classmethod
def _hvparse(cls, line, ns={}):
"""
Parse an options specification, returning a dictionary with
path keys and {'plot':<options>, 'style':<options>} values.
"""
parses = [p for p in cls.opts_spec.scanString(line)]
if len(parses) != 1:
raise SyntaxError("Invalid specification syntax.")
else:
e = parses[0][2]
processed = line[:e]
if (processed.strip() != line.strip()):
raise SyntaxError("Failed to parse remainder of string: %r" % line[e:])
grouped_paths = cls._group_paths_without_options(cls.opts_spec.parseString(line))
things = []
for pathspecs, group in grouped_paths:
# normalization = cls.process_normalization(group)
# if normalization is not None:
# options['norm'] = normalization
if 'plot_options' in group:
plotopts = group['plot_options'][0]
opts = cls.todict(plotopts, 'brackets', ns=ns)
things+=opts
if 'style_options' in group:
styleopts = group['style_options'][0]
opts = cls.todict(styleopts, 'parens', ns=ns)
things+=opts
return things
@classmethod
def _hvtodict(cls, parseresult, mode='parens', ns={}):
"""
Helper function to return dictionary given the parse results
from a pyparsing.nestedExpr object (containing keywords).
The ns is a dynamic namespace (typically the IPython Notebook
namespace) used to update the class-level namespace.
"""
grouped = []
things = []
tokens = cls.collect_tokens(parseresult, mode)
# Group tokens without '=' and append to last token containing '='
for group in groupby(tokens, lambda el: '=' in el):
(val, items) = group
if val is True:
grouped += list(items)
if val is False:
elements =list(items)
# Assume anything before ) or } can be joined with commas
# (e.g tuples with spaces in them)
joiner=',' if any(((')' in el) or ('}' in el))
for el in elements) else ''
grouped[-1] += joiner + joiner.join(elements)
for keyword in grouped:
# Tuple ('a', 3) becomes (,'a',3) and '(,' is never valid
# Same for some of the other joining errors corrected here
for (fst,snd) in [('(,', '('), ('{,', '{'), ('=,','='),
(',:',':'), (':,', ':'), (',,', ','),
(',.', '.')]:
keyword = keyword.replace(fst, snd)
things.append('dict(%s)' % keyword)
return things
return NbSmokeOptsSpec
def opts_handler(magic):
"""Given an opts magic, return line of python suitable for pyflakes."""
NbSmokeOptsSpec = _make_optsspec()
string_of_magic_args = magic.python
return " ; ".join(NbSmokeOptsSpec.parse(string_of_magic_args)) + " # noqa: here to use names for original %s magic: %s"%(magic.__class__.__name__, magic.python)
cell_magic_handlers = {'opts': opts_handler}
line_magic_handlers = {'opts': opts_handler}
|
{"hexsha": "d35cc2bcf2d072bd273bd3c66926f06751e2d017", "size": 4380, "ext": "py", "lang": "Python", "max_stars_repo_path": "nbsmoke/lint/magics/holoviews_support.py", "max_stars_repo_name": "ianthomas23/nbsmoke", "max_stars_repo_head_hexsha": "a9c58a43af3b57c4c76ea0efc8315e0ea3344d87", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2019-08-12T17:14:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-18T23:00:30.000Z", "max_issues_repo_path": "nbsmoke/lint/magics/holoviews_support.py", "max_issues_repo_name": "ianthomas23/nbsmoke", "max_issues_repo_head_hexsha": "a9c58a43af3b57c4c76ea0efc8315e0ea3344d87", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 36, "max_issues_repo_issues_event_min_datetime": "2019-09-01T13:04:09.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-31T16:45:24.000Z", "max_forks_repo_path": "nbsmoke/lint/magics/holoviews_support.py", "max_forks_repo_name": "ianthomas23/nbsmoke", "max_forks_repo_head_hexsha": "a9c58a43af3b57c4c76ea0efc8315e0ea3344d87", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-06-10T01:50:02.000Z", "max_forks_repo_forks_event_max_datetime": "2019-02-21T07:55:48.000Z", "avg_line_length": 40.5555555556, "max_line_length": 164, "alphanum_fraction": 0.5347031963, "include": true, "reason": "import numpy", "num_tokens": 931}
|
#!/usr/bin/env python
import os
import os.path as pt
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import argparse
#TODO: take decimal places as parameter for printing.
def sizeof_pp(num):
for unit in ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB']:
if abs(num) < 1024.0:
return "%3.2f %s" % (num, unit)
num /= 1024.0
return "%.2f %s" % (num, 'Yi')
def xtic_formatter(num, tick_index):
return sizeof_pp(num)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='.')
parser.add_argument('dir_path', metavar='Path', type=str, help='')
parser.add_argument('-p', '--plot', action='store_true')
args = parser.parse_args()
sizes = []
symlink_count = 0
for root, dirs, files in os.walk(args.dir_path, followlinks=False):
for name in files:
fullpath = pt.join(root, name)
if not os.path.islink(fullpath):
sizes.append(pt.getsize(fullpath))
else:
symlink_count += 1
sizes.sort()
print("Searching in directory: {0}".format(args.dir_path))
print("Files Inspected: {0}".format(len(sizes)))
print("Maxfilesize: " + sizeof_pp(sizes[-1]))
print("Symlinks found: {0}".format(symlink_count))
percentile = 95
index = len(sizes) * (percentile / 100.)
print("{0}% of files smaller than: ~".format(percentile) + sizeof_pp(
sizes[int(index)]))
sizesArray = np.asarray(sizes)
if (args.plot):
bins = min(len(sizes) // 10, 200)  # hist() expects an integer bin count
plt.figure(figsize=(8, 8))
ax = plt.subplot(111)
# Adjust y-axis to show bins of height 1 and max bin height.
n, _, _ = plt.hist(sizesArray, bins, log=True)
plt.ylim(0.5, max(n) * 1.1)
plt.xlabel("File Size (bytes)")
plt.ylabel("Log(Number of Files)")
plt.title("File size histogram for: {0}".format(args.dir_path))
x_formatter = mpl.ticker.ScalarFormatter(useOffset=False)
x_formatter.set_scientific(False)
x_format = mpl.ticker.FuncFormatter(xtic_formatter)
ax.xaxis.set_major_formatter(x_format)
plt.show()
|
{"hexsha": "7bbce2fc454d2582a73d1544a7f10ce703ecbc9f", "size": 2182, "ext": "py", "lang": "Python", "max_stars_repo_path": "spy_dir.py", "max_stars_repo_name": "TheGhostHuCodes/spy_dir", "max_stars_repo_head_hexsha": "23d78a0fecbf6fcc78decb83dc3d02917a46844d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "spy_dir.py", "max_issues_repo_name": "TheGhostHuCodes/spy_dir", "max_issues_repo_head_hexsha": "23d78a0fecbf6fcc78decb83dc3d02917a46844d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "spy_dir.py", "max_forks_repo_name": "TheGhostHuCodes/spy_dir", "max_forks_repo_head_hexsha": "23d78a0fecbf6fcc78decb83dc3d02917a46844d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.0606060606, "max_line_length": 73, "alphanum_fraction": 0.6136571952, "include": true, "reason": "import numpy", "num_tokens": 580}
|
import os, sys
import glob
import json
import cv2
import subprocess
import re
import requests
import numpy as np
def save_frames(filepath, directory):
subprocess.check_output(['ffmpeg','-loglevel','panic','-i', filepath, '-vf', 'scale=320:-1', '-r', '10', '-y', os.path.join(directory, "frame_%3d.png")])
def recognize_image(filename, top_best=10):
results = str(subprocess.check_output(['alpr', '-c', 'eu', filename]))
results = results[1:].strip("'").split("\\n")[1:]
best_results = []
if len(results) > 1:
m = re.match(r"- (\w*)\\t confidence: (\d*.\d*)", results[0].strip())
plate_nr, confidence = m.group(1), float(m.group(2))
for i in range(min(top_best, len(results))):
m = re.match(r"- (\w*)\\t confidence: (\d*.\d*)", results[i].strip())
if m:
best_results.append({"plate_nr": m.group(1), "confidence": float(m.group(2))})
return best_results
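# Editor's note: recognize_image shells out to the `alpr` CLI and returns a
# list of candidate readings such as
#   [{"plate_nr": "12ABC3", "confidence": 87.3}, ...]
# (the plate value shown here is a made-up placeholder).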
def request_rdw(car_data):
if car_data['plate_nr'] in rdw_data_dict:
car_data["exists_in_rdw"] = rdw_data_dict[car_data['plate_nr']]["exists_in_rdw"]
car_data["color"] = rdw_data_dict[car_data['plate_nr']]["color"]
car_data["brand"] = rdw_data_dict[car_data['plate_nr']]["brand"]
else:
url = "http://api.datamarket.azure.com/opendata.rdw/VRTG.Open.Data/v1/KENT_VRTG_O_DAT(\'%s\')?$format=json" % car_data['plate_nr']
r = requests.get(url).json()
rdw_data_dict[car_data['plate_nr']] = {}
if "error" in r:
car_data["exists_in_rdw"] = "Does not exist"
car_data["color"] = "-"
car_data["brand"] = "-"
rdw_data_dict[car_data['plate_nr']]["exists_in_rdw"] = "Does not exist"
rdw_data_dict[car_data['plate_nr']]["color"] = "-"
rdw_data_dict[car_data['plate_nr']]["brand"] = "-"
else:
car_data["exists_in_rdw"] = "Exists"
car_data["color"] = r["d"]["Eerstekleur"]
car_data["brand"] = r["d"]["Merk"]
rdw_data_dict[car_data['plate_nr']]["exists_in_rdw"] = "Exists"
rdw_data_dict[car_data['plate_nr']]["color"] = r["d"]["Eerstekleur"]
rdw_data_dict[car_data['plate_nr']]["brand"] = r["d"]["Merk"]
return car_data
def get_edges(image):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.bilateralFilter(gray, 11, 17, 17)
edged = cv2.Canny(gray, 30, 200)
# find contours in the edged image, keep only the largest
# ones, and initialize our screen contour
(_,cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = sorted(cnts, key = cv2.contourArea, reverse = True)
screenCnt = None
# loop over our contours
for c in cnts:
# approximate the contour
peri = cv2.arcLength(c, True)
approx = cv2.approxPolyDP(c, 0.02 * peri, True)
# if our approximated contour has four points, then
# we can assume that we have found our screen
if len(approx) == 4:
screenCnt = approx
return screenCnt
return None
def process_rect(screenCnt):
pts = screenCnt.reshape(4, 2)
rect = np.zeros((4, 2), dtype = "float32")
# the top-left point has the smallest sum whereas the
# bottom-right has the largest sum
s = pts.sum(axis = 1)
rect[0] = pts[np.argmin(s)]
rect[2] = pts[np.argmax(s)]
# compute the difference between the points -- the top-right
# will have the minimum difference and the bottom-left will
# have the maximum difference
diff = np.diff(pts, axis = 1)
rect[1] = pts[np.argmin(diff)]
rect[3] = pts[np.argmax(diff)]
# multiply the rectangle by the original ratio
#rect *= ratio
# now that we have our rectangle of points, let's compute
# the width of our new image
(tl, tr, br, bl) = rect
widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
# ...and now for the height of our new image
heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
# take the maximum of the width and height values to reach
# our final dimensions
maxWidth = max(int(widthA), int(widthB))
maxHeight = max(int(heightA), int(heightB))
# construct our destination points which will be used to
# map the screen to a top-down, "birds eye" view
dst = np.array([
[0, 0],
[maxWidth - 1, 0],
[maxWidth - 1, maxHeight - 1],
[0, maxHeight - 1]], dtype = "float32")
# calculate the perspective transform matrix and warp
# the perspective to grab the screen
M = cv2.getPerspectiveTransform(rect, dst)
warp = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
return warp
rdw_data_dict = {}
results_file = 'results_image_processing5.csv'
video_dir = 'Movies2'
image_dir = 'analyze_images'
if not os.path.exists(image_dir):
os.makedirs(image_dir)
with open(results_file, 'w') as fout:
header = ['video_file', 'is_valid_file', 'frame', 'plate_nr', 'confidence', 'exists_in_rdw', 'car_color', 'car_brand', 'prediction_rank', 'processed']
fout.write(",".join(header))
fout.write("\n")
for video_file in glob.glob("%s/*" % video_dir):
print(video_file)
filename = os.path.basename(video_file)
file_basename, file_extension = os.path.splitext(filename)
current_image_dir = os.path.join(image_dir, file_basename)
if not os.path.exists(current_image_dir):
os.makedirs(current_image_dir)
try:
# file is an image file (os.path.splitext keeps the leading dot)
if file_extension in ['.jpg', '.jpeg', '.png']:
# convert to png if necessary (alpr works better with png than with jpg)
if file_extension != ".png":
subprocess.check_output(['mogrify', '-format', 'png', '-g1', video_file])
filename = video_file.replace(file_extension, ".png")
subprocess.check_output(['cp', filename, current_image_dir])
# file is a video file
else:
# extract frames from video using ffmpeg
save_frames(video_file, current_image_dir)
results_found = False
confident_results_found = False
for img_file in glob.glob("%s/*" % current_image_dir):
# try alpr without processing
results = recognize_image(img_file)
for i, result in enumerate(results):
if len(result['plate_nr']) == 6:
results_found = True
result = request_rdw(result)
fout.write(",".join([file_basename, "True",
os.path.basename(img_file),
result['plate_nr'], str(result['confidence']),
result['exists_in_rdw'], result['color'],
result['brand'], str(i), "False"]))
fout.write("\n")
# try replacing "J" with "4"
if result['exists_in_rdw'] != "Exists" and "J" in result['plate_nr']:
result['plate_nr'] = result['plate_nr'].replace("J", "4")
result = request_rdw(result)
if result['exists_in_rdw'] == "Exists":
fout.write(",".join([file_basename, "True",
os.path.basename(img_file),
result['plate_nr'], str(result['confidence']),
result['exists_in_rdw'], result['color'],
result['brand'], str(i)+"a", "False"]))
fout.write("\n")
if result['exists_in_rdw'] == "Exists" and result['confidence'] > 80:
confident_results_found = True
# process image
image = cv2.imread(img_file)
screenCnt = get_edges(image)
if screenCnt is None:
print("No 4-cornered contour found")
else:
warp = process_rect(screenCnt)
warp_padded = cv2.copyMakeBorder(warp, 50, 50, 50, 50, cv2.BORDER_CONSTANT)
cv2.imwrite('warped.png',warp_padded)
# alpr
results = recognize_image('warped.png')
for i, result in enumerate(results):
if len(result['plate_nr']) == 6:
results_found = True
result = request_rdw(result)
fout.write(",".join([os.path.basename(video_file)[:-4], "True",
os.path.basename(img_file),
result['plate_nr'], str(result['confidence']),
result['exists_in_rdw'], result['color'],
result['brand'], str(i), "True"]))
fout.write("\n")
# try replacing "J" with "4"
if result['exists_in_rdw'] != "Exists" and "J" in result['plate_nr']:
result['plate_nr'] = result['plate_nr'].replace("J", "4")
result = request_rdw(result)
if result['exists_in_rdw'] == "Exists":
fout.write(",".join([file_basename, "True",
os.path.basename(img_file),
result['plate_nr'], str(result['confidence']),
result['exists_in_rdw'], result['color'],
result['brand'], str(i)+"a", "True"]))
fout.write("\n")
if result['exists_in_rdw'] == "Exists" and result['confidence'] > 80:
confident_results_found = True
if confident_results_found:
break
if not results_found:
fout.write(",".join([os.path.basename(video_file)[:-4], "True",
'NA', 'NA', 'NA', 'NA', 'NA', 'NA', 'NA', 'NA'
]))
fout.write("\n")
except subprocess.CalledProcessError:
fout.write(",".join([os.path.basename(video_file)[:-4], "False",
'NA', 'NA', 'NA', 'NA', 'NA', 'NA', 'NA', 'NA'
]))
fout.write("\n")
|
{"hexsha": "41956f12c1d61b44025ec041ad3c2d91ce82e4ad", "size": 11218, "ext": "py", "lang": "Python", "max_stars_repo_path": "analysis/analyze_with_image_processing.py", "max_stars_repo_name": "annitrolla/car-insurance-tool", "max_stars_repo_head_hexsha": "ac54f9f61e154afc8cb1aa5cf5bd6dec3a7991bb", "max_stars_repo_licenses": ["ImageMagick"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "analysis/analyze_with_image_processing.py", "max_issues_repo_name": "annitrolla/car-insurance-tool", "max_issues_repo_head_hexsha": "ac54f9f61e154afc8cb1aa5cf5bd6dec3a7991bb", "max_issues_repo_licenses": ["ImageMagick"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "analysis/analyze_with_image_processing.py", "max_forks_repo_name": "annitrolla/car-insurance-tool", "max_forks_repo_head_hexsha": "ac54f9f61e154afc8cb1aa5cf5bd6dec3a7991bb", "max_forks_repo_licenses": ["ImageMagick"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.4170040486, "max_line_length": 157, "alphanum_fraction": 0.5058834017, "include": true, "reason": "import numpy", "num_tokens": 2561}
|
from os.path import *
import glob
import json
import numpy as np
from util.plot_utils import plot_curves, plot_multi_loss_distribution
TMPJPG = expanduser("~/Pictures/")
def plot_multi_logs(exp_name, keys, save_name, epoch, addition_len):
root_path = expanduser("/raid/dataset/detection/detr_exp")
folder_candidate = glob.glob(join(root_path, "*"))
folders = []
for name in exp_name:
for folder in folder_candidate:
if folder[-len(name):] == name:
folders.append(folder)
break
assert len(exp_name) == len(folders)
exp_data = np.stack(get_experiment_logs(folders, keys, epoch, addition_len)).transpose((1, 0, 2))
if len(addition_len) > 0 and "test_coco_eval_bbox" in keys:
idx = keys.index("test_coco_eval_bbox")
addition_len.extend(keys[idx + 1:])
keys = keys[:idx] + addition_len
plot_multi_loss_distribution(
multi_line_data=exp_data,
multi_line_labels=[exp_name] * len(keys),
save_path=TMPJPG, window=5, name=save_name,
titles=keys, fig_size=(12, 3 * len(keys)), legend_loc="upper left"
)
def get_experiment_logs(folders, keys, epoch, addition_len):
exp_data = []
for folder in folders:
print(folder)
contents = np.array(load_log(join(folder, "log.txt"), keys, addition_len))
if contents.shape[-1] >= epoch:
contents = contents[:, :epoch]
else:
zeros = np.zeros((contents.shape[0], epoch - contents.shape[1]), dtype=contents.dtype)
contents = np.concatenate((contents, zeros), axis = 1)
exp_data.append(contents)
return exp_data
def load_log(path, keys, addition=()):  # addition: names of the per-metric columns pulled out of "test_coco_eval_bbox"
if "test_coco_eval_bbox" in keys:
contents = [[] for _ in range(len(keys) + len(addition) - 1)]
else:
contents = [[] for _ in range(len(keys))]
with open(path, "r") as txt:
for line in txt.readlines():
data = json.loads(line)
j = 0
for i, key in enumerate(keys):
if key == "test_coco_eval_bbox":
for j in range(len(addition)):
contents[i + j].append(data[key][j])
else:
contents[i + j].append(data[key])
return contents
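# Editor's note: load_log assumes each line of log.txt is a JSON object that
# contains every requested key; for "test_coco_eval_bbox" the value is a list
# whose leading entries are the COCO AP/AR metrics, and one column is kept per
# name passed in `addition`.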
if __name__ == '__main__':
exp_name = ["be", "be_768", "be_1024", "be_mid_layer_only", "origin"]
keys = ["train_loss_bbox", "train_loss_ce", "train_loss_giou", "test_coco_eval_bbox"]
eval_name = ["AP", "AP50", "AP75", "AP_small", "AP_mid", "AP_Big",
"AR", "AR50", "AR75", "AR_small", "AR_mid", "AR_Big"]
plot_multi_logs(exp_name, keys, save_name="loss", epoch=50, addition_len=eval_name[:6])
|
{"hexsha": "81daacebc9755ed9fad67d0bb9146bb8f488fc5d", "size": 2728, "ext": "py", "lang": "Python", "max_stars_repo_path": "util/visualize_loss.py", "max_stars_repo_name": "whq-hqw/detr_change", "max_stars_repo_head_hexsha": "142f75cc5e0b59ca6e07928ddcbed3e461816611", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-07-17T15:09:47.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-20T13:52:48.000Z", "max_issues_repo_path": "util/visualize_loss.py", "max_issues_repo_name": "whq-hqw/detr_change", "max_issues_repo_head_hexsha": "142f75cc5e0b59ca6e07928ddcbed3e461816611", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "util/visualize_loss.py", "max_forks_repo_name": "whq-hqw/detr_change", "max_forks_repo_head_hexsha": "142f75cc5e0b59ca6e07928ddcbed3e461816611", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.4225352113, "max_line_length": 101, "alphanum_fraction": 0.6129032258, "include": true, "reason": "import numpy", "num_tokens": 687}
|
from copy import deepcopy
import itertools
import os
import numpy as np
import scipy
import torch
import torch.nn as nn
import torch.nn.functional as F
from bootstrap.lib.options import Options
from bootstrap.lib.logger import Logger
import block
from block.models.networks.vqa_net import factory_text_enc
from block.models.networks.mlp import MLP
from .utils import mask_softmax
from torch.nn.utils.weight_norm import weight_norm
class UpDnNet(nn.Module):
def __init__(self,
txt_enc={},
self_q_att=False,
agg={},
classif={},
wid_to_word={},
word_to_wid={},
aid_to_ans=[],
ans_to_aid={},
fusion={},
residual=False,
q_single=False,
):
super().__init__()
self.self_q_att = self_q_att
self.agg = agg
assert self.agg['type'] in ['max', 'mean']
self.classif = classif
self.wid_to_word = wid_to_word
self.word_to_wid = word_to_wid
self.aid_to_ans = aid_to_ans
self.ans_to_aid = ans_to_aid
self.fusion = fusion
self.residual = residual
# Modules
self.txt_enc = self.get_text_enc(self.wid_to_word, txt_enc)
if self.self_q_att:
self.q_att_linear0 = nn.Linear(2400, 512)
self.q_att_linear1 = nn.Linear(512, 2)
if q_single:
self.txt_enc_single = self.get_text_enc(self.wid_to_word, txt_enc)
if self.self_q_att:
self.q_att_linear0_single = nn.Linear(2400, 512)
self.q_att_linear1_single = nn.Linear(512, 2)
else:
self.txt_enc_single = None
if self.classif['mlp']['dimensions'][-1] != len(self.aid_to_ans):
Logger()(f"Warning, the classif_mm output dimension ({self.classif['mlp']['dimensions'][-1]})"
f"doesn't match the number of answers ({len(self.aid_to_ans)}). Modifying the output dimension.")
self.classif['mlp']['dimensions'][-1] = len(self.aid_to_ans)
self.classif_module = MLP(**self.classif['mlp'])
# UpDn
q_dim = self.fusion['input_dims'][0]
v_dim = self.fusion['input_dims'][1]
output_dim = self.fusion['output_dim']
self.v_att = Attention(v_dim, q_dim, output_dim)
self.q_net = FCNet([q_dim, output_dim])
self.v_net = FCNet([v_dim, output_dim])
Logger().log_value('nparams',
sum(p.numel() for p in self.parameters() if p.requires_grad),
should_print=True)
Logger().log_value('nparams_txt_enc',
self.get_nparams_txt_enc(),
should_print=True)
def get_text_enc(self, vocab_words, options):
"""
returns the text encoding network.
"""
return factory_text_enc(self.wid_to_word, options)
def get_nparams_txt_enc(self):
params = [p.numel() for p in self.txt_enc.parameters() if p.requires_grad]
if self.self_q_att:
params += [p.numel() for p in self.q_att_linear0.parameters() if p.requires_grad]
params += [p.numel() for p in self.q_att_linear1.parameters() if p.requires_grad]
return sum(params)
def forward(self, batch):
v = batch['visual']
q = batch['question']
l = batch['lengths'].data
c = batch['norm_coord']
nb_regions = batch.get('nb_regions')
out = {}
q_emb = self.process_question(q, l,)
out['v_emb'] = v.mean(1)
out['q_emb'] = q_emb
# single txt encoder
if self.txt_enc_single is not None:
out['q_emb'] = self.process_question(q, l, self.txt_enc_single, self.q_att_linear0_single, self.q_att_linear1_single)
# New
att = self.v_att(v, q_emb)
v_emb = (att * v).sum(1)
q_repr = self.q_net(q_emb)
v_repr = self.v_net(v_emb)
joint_repr = q_repr * v_repr
logits = self.classif_module(joint_repr)
out['logits'] = logits
return out
def process_question(self, q, l, txt_enc=None, q_att_linear0=None, q_att_linear1=None):
if txt_enc is None:
txt_enc = self.txt_enc
if q_att_linear0 is None:
q_att_linear0 = self.q_att_linear0
if q_att_linear1 is None:
q_att_linear1 = self.q_att_linear1
q_emb = txt_enc.embedding(q)
q, _ = txt_enc.rnn(q_emb)
if self.self_q_att:
q_att = q_att_linear0(q)
q_att = F.relu(q_att)
q_att = q_att_linear1(q_att)
q_att = mask_softmax(q_att, l)
#self.q_att_coeffs = q_att
if q_att.size(2) > 1:
q_atts = torch.unbind(q_att, dim=2)
q_outs = []
for q_att in q_atts:
q_att = q_att.unsqueeze(2)
q_att = q_att.expand_as(q)
q_out = q_att*q
q_out = q_out.sum(1)
q_outs.append(q_out)
q = torch.cat(q_outs, dim=1)
else:
q_att = q_att.expand_as(q)
q = q_att * q
q = q.sum(1)
else:
# l contains the number of words for each question
# in case of multi-gpus it must be a Tensor
# thus we convert it into a list during the forward pass
l = list(l.data[:,0])
q = txt_enc._select_last(q, l)
return q
def process_answers(self, out, key=''):
batch_size = out[f'logits{key}'].shape[0]
_, pred = out[f'logits{key}'].data.max(1)
pred.squeeze_()
if batch_size != 1:
out[f'answers{key}'] = [self.aid_to_ans[pred[i].item()] for i in range(batch_size)]
out[f'answer_ids{key}'] = [pred[i].item() for i in range(batch_size)]
else:
out[f'answers{key}'] = [self.aid_to_ans[pred.item()]]
out[f'answer_ids{key}'] = [pred.item()]
return out
class Attention(nn.Module):
def __init__(self, v_dim, q_dim, num_hid, dropout=0.2):
super(Attention, self).__init__()
self.v_proj = FCNet([v_dim, num_hid])
self.q_proj = FCNet([q_dim, num_hid])
self.dropout = nn.Dropout(dropout)
self.linear = weight_norm(nn.Linear(num_hid, 1), dim=None)
def forward(self, v, q):
"""
v: [batch, k, vdim]
q: [batch, qdim]
"""
logits = self.logits(v, q)
w = nn.functional.softmax(logits, 1)
return w
def logits(self, v, q):
batch, k, _ = v.size()
v_proj = self.v_proj(v) # [batch, k, qdim]
q_proj = self.q_proj(q).unsqueeze(1).repeat(1, k, 1)
joint_repr = v_proj * q_proj
joint_repr = self.dropout(joint_repr)
logits = self.linear(joint_repr)
return logits
class FCNet(nn.Module):
"""Simple class for non-linear fully connect network
"""
def __init__(self, dims):
super(FCNet, self).__init__()
layers = []
for i in range(len(dims)-2):
in_dim = dims[i]
out_dim = dims[i+1]
layers.append(weight_norm(nn.Linear(in_dim, out_dim), dim=None))
layers.append(nn.ReLU())
layers.append(weight_norm(nn.Linear(dims[-2], dims[-1]), dim=None))
layers.append(nn.ReLU())
self.main = nn.Sequential(*layers)
def forward(self, x):
return self.main(x)
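# ---------------------------------------------------------------------------
# Illustrative shape check (not part of the original network; the dimensions
# below are placeholders, not the values used by the actual configuration):
#
#   att = Attention(v_dim=2048, q_dim=2400, num_hid=1024)
#   v = torch.randn(8, 36, 2048)    # [batch, k, vdim]
#   q = torch.randn(8, 2400)        # [batch, qdim]
#   w = att(v, q)                   # attention weights, shape [8, 36, 1]
#   v_emb = (w * v).sum(1)          # attended visual feature, shape [8, 2048]
# ---------------------------------------------------------------------------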
|
{"hexsha": "1db91d64ffc5561787ad34980a1814b13da9c425", "size": 7498, "ext": "py", "lang": "Python", "max_stars_repo_path": "cfvqa/models/networks/updn_net.py", "max_stars_repo_name": "Mike4Ellis/VQA-Based-CF-VQA", "max_stars_repo_head_hexsha": "18b61010af551f8077bcc309f6290c7c9d251e00", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 83, "max_stars_repo_stars_event_min_datetime": "2021-03-02T07:49:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T03:07:26.000Z", "max_issues_repo_path": "cfvqa/models/networks/updn_net.py", "max_issues_repo_name": "Mike4Ellis/VQA-Based-CF-VQA", "max_issues_repo_head_hexsha": "18b61010af551f8077bcc309f6290c7c9d251e00", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2021-03-14T05:36:56.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-16T04:56:53.000Z", "max_forks_repo_path": "cfvqa/cfvqa/models/networks/updn_net.py", "max_forks_repo_name": "yuleiniu/introd", "max_forks_repo_head_hexsha": "a40407c7efee9c34e3d4270d7947f5be2f926413", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2021-03-12T04:30:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-10T13:20:39.000Z", "avg_line_length": 33.6233183857, "max_line_length": 129, "alphanum_fraction": 0.5705521472, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1893}
|
#! /usr/bin/env python3
import sympy as sp
import dataclasses
import cgeneration as cg
r"""
# Lawson scheme
In this module we present some Lawson methods. Each function
represents a Lawson method, and returns an object that contains every
stages of the method.
"""
@dataclasses.dataclass
class lawson:
stages = []
is_embeded:bool = False
un = None
    def __init__(self,computed_stages,expr_stages,dt_stages,is_embeded,un=None):
        # use a per-instance list so schemes do not share the class-level `stages`
        self.stages = []
for _lhs,_rhs,dt in zip(computed_stages,expr_stages,dt_stages):
lhs,rhs = (cg.Uhs(),cg.Uhs())
lhs.dt = dt
lhs[:] = [ str(ui) for ui in _lhs ]
rhs[:] = _rhs
self.stages.append( (lhs,rhs) )
self.is_embeded = is_embeded
if is_embeded:
self.un = cg.Uhs()
self.un[:] = [ str(ui) for ui in un ]
def LRK44 ( t , dt , eLt , N ):
Un , U1 , U2 , U3 = [ cg.vector_stage_idx(sname) for sname in ",1,2,3".split(',') ]
print("+ stage 1")
stage_U1 = eLt.subs(t,dt/2)*Un + dt/2*eLt.subs(t,dt/2)*N(Un)
print("+ stage 2")
stage_U2 = eLt.subs(t,dt/2)*Un + dt/2*N(U1)
print("+ stage 3")
stage_U3 = eLt.subs(t,dt)*Un + dt*eLt.subs(t,dt/2)*N(U2)
print("+ stage n+1")
stage_Un1 = -eLt.subs(t,dt)*Un/3 + eLt.subs(t,dt/2)*U1/3 + 2*eLt.subs(t,dt/2)*U2/3 + U3/3 + dt/6*N(U3)
expr_stages = [stage_U1,stage_U2,stage_U3,stage_Un1]
computed_stages = [ cg.vector_stage(sname) for sname in "1,2,3,".split(",") ]
dt_stages = [ 0. , 0.5 , 0.5 , 1.0 ]
return lawson( computed_stages , expr_stages , dt_stages , is_embeded=False )
def LDP43 ( t , dt , eLt , N ):
Un , U1 , U2 , U3 , U4 = [ cg.vector_stage_idx(sname) for sname in ",1,2,3,4".split(',') ]
print("+ stage 1")
stage_U1 = eLt.subs(t,dt/2)*Un + dt/2*eLt.subs(t,dt/2)*N(Un)
print("+ stage 2")
stage_U2 = eLt.subs(t,dt/2)*Un + dt/2*N(U1)
print("+ stage 3")
stage_U3 = eLt.subs(t,dt)*Un + dt*eLt.subs(t,dt/2)*N(U2)
print("+ stage 4")
stage_U4 = -eLt.subs(t,dt)*Un/3 + eLt.subs(t,dt/2)*U1/3 + 2*eLt.subs(t,dt/2)*U2/3 + U3/3 + dt/6*N(U3)
print("+ stage 5")
stage_U5 = -eLt.subs(t,dt)*Un/5 + eLt.subs(t,dt/2)*U1/5 + 2*eLt.subs(t,dt/2)*U2/5 + U3/5 + 2*U4/5 + dt/10*N(U4)
expr_stages = [stage_U1,stage_U2,stage_U3,stage_U4,stage_U5]
computed_stages = [ cg.vector_stage(sname) for sname in "1,2,3,4,5".split(",") ]
dt_stages = [ 0. , 0.5 , 0.5 , 1.0 , 1.0 ]
return lawson( computed_stages , expr_stages , dt_stages , is_embeded=True , un=cg.vector_stage("") )
def LRK33 ( t , dt , eLt , N ):
Un , U1 , U2 = [ cg.vector_stage_idx(sname) for sname in ",1,2".split(',') ]
print("+ stage 1")
stage_U1 = eLt.subs(t,dt)*Un + dt*eLt.subs(t,dt)*N(Un)
print("+ stage 2")
stage_U2 = 0.75*eLt.subs(t,dt/2)*Un + 0.25*eLt.subs(t,-dt/2)*U1 + 0.25*dt*eLt.subs(t,-dt/2)*N(U1)
print("+ stage n+1")
stage_Un1 = eLt.subs(t,dt)*Un/3 + 2*eLt.subs(t,dt/2)*U2/3 + 2*dt/3*eLt.subs(t,dt/2)*N(U2)
expr_stages = [stage_U1,stage_U2,stage_Un1]
computed_stages = [ cg.vector_stage(sname) for sname in "1,2,".split(",") ]
dt_stages = [ 0. , 1.0 , 0.5 ]
return lawson( computed_stages , expr_stages , dt_stages , is_embeded=False )
methods = { "RK44":LRK44, "DP43":LDP43 , "RK33":LRK33, }
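# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original module). It assumes the
# cgeneration helpers used above are importable; the linear part `lam` and the
# nonlinearity `N` below are placeholders chosen only for illustration:
#
#   t, dt, lam = sp.symbols("t dt lambda")
#   eLt = sp.exp(lam * t)          # e^{tL} for a scalar linear operator L
#   N = lambda u: u                # placeholder nonlinear term
#   scheme = methods["RK44"](t, dt, eLt, N)
#   for lhs, rhs in scheme.stages:
#       print(lhs.dt, rhs)
# ---------------------------------------------------------------------------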
|
{"hexsha": "f1755bb2ddef01af3880bd0c89df8d35d84ef520", "size": 3174, "ext": "py", "lang": "Python", "max_stars_repo_path": "script/schemes.py", "max_stars_repo_name": "kivvix/vlasovpp", "max_stars_repo_head_hexsha": "123072d42ddcceef9278e0cd3ac18d5b3fa4b3c0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "script/schemes.py", "max_issues_repo_name": "kivvix/vlasovpp", "max_issues_repo_head_hexsha": "123072d42ddcceef9278e0cd3ac18d5b3fa4b3c0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "script/schemes.py", "max_forks_repo_name": "kivvix/vlasovpp", "max_forks_repo_head_hexsha": "123072d42ddcceef9278e0cd3ac18d5b3fa4b3c0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.6629213483, "max_line_length": 113, "alphanum_fraction": 0.6165721487, "include": true, "reason": "import sympy", "num_tokens": 1308}
|
import os
import numpy as np
import functools
from . import dpath, chunks, resolve_symbols, namespace_dir, group_blocks_into_fills, write_fill
def check_bounds(voxels):
"""gives the bounds for a list of Voxels"""
bounds = functools.reduce(
lambda bounds, voxel: (
min(bounds[0], voxel[0]), max(bounds[1], voxel[0]),
min(bounds[2], voxel[1]), max(bounds[3], voxel[1]),
min(bounds[4], voxel[2]), max(bounds[5], voxel[2]),
),
voxels, (0, 0, 0, 0, 0, 0)
)
    print(bounds)
    return bounds
def generate(settings):
"""
Generates functions that create domes,
one for each combination of `radiuses` and `blocks_and_tags`.
"""
namespace = namespace_dir(settings)
max_commands = dpath.get(settings, '/max_commands')
print('Generating multiple points')
radiuses = dpath.get(settings, '/radiuses')
    # Generate multiple points on the dome based on the largest radius.
step = 0.5 / functools.reduce(lambda a, b: max(a, b), radiuses)
points = [
(
np.sin(azimuth) * np.cos(elevation),
np.sin(elevation),
np.cos(azimuth) * np.cos(elevation),
)
# full circle
for azimuth in np.arange(-np.pi, np.pi, step)
# from just below the ground to the apex
for elevation in np.arange(-np.pi/4, np.pi/2, step)
]
def create_dome_fills(radius):
"""
Closure on `points` that creates a list of fills for a dome with a given radius.
"""
print(f'preparing dome: {radius}')
# convert `points` to `voxels` and remove duplicates
voxels = (np.array(points) * radius).astype(np.int16)
# remove duplicates
uniqueVoxels = {(x, y, z) for x, y, z in voxels}
# group blocks into fills
print(f'grouping dome: {radius}')
blocks = []
min_x, min_y, min_z = (0, 0, 0)
max_x, max_y, max_z = (0, 0, 0)
for x, y, z in uniqueVoxels:
min_x = min(min_x, x)
min_y = min(min_y, y)
min_z = min(min_z, z)
max_x = max(max_x, x + 1)
max_y = max(max_y, y + 1)
max_z = max(max_z, z + 1)
blocks.append((x, y, z, True))
return group_blocks_into_fills(
blocks, (max_x, max_y, max_z), (min_x, min_y, min_z)
)
def write_dome_function(radius, block, tag, fills):
"""
Closure that creates a dome function from a list of fills for given radius and block.
"""
# minecraft functions can only execute MAX_COMMANDS commands,
# so we may have to split functions
for i, fills_chunk in enumerate(chunks(fills, max_commands)):
            # suffix the tag for additional chunks without mutating `tag` across iterations
            chunk_tag = tag if i == 0 else f'{tag}_{i}'
            file_name = os.path.join(namespace, f'{radius}_{chunk_tag}.mcfunction')
print(f'writing {file_name}')
with open(file_name, 'w') as file:
for min_voxel, max_voxel, _ in fills_chunk:
write_fill(file, min_voxel, max_voxel, block)
# create a dome function for each combination of `radiuses` and `blocks_and_tags`
blocks_and_tags = [
(resolve_symbols(settings, block), tag)
for block, tag in dpath.get(settings, '/blocks_and_tags')
]
for radius in radiuses:
fills = create_dome_fills(radius)
for block, tag in blocks_and_tags:
write_dome_function(radius, block, tag, fills)
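# ---------------------------------------------------------------------------
# Hypothetical settings sketch (for illustration only; the exact schema is
# defined by the surrounding package and read through `dpath.get`):
#
#   settings = {
#       'max_commands': 10000,
#       'radiuses': [16, 32],
#       'blocks_and_tags': [('minecraft:glass', 'glass')],
#       # ... plus whatever namespace_dir/resolve_symbols expect ...
#   }
#   generate(settings)
# ---------------------------------------------------------------------------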
|
{"hexsha": "2cf0a9dae316ce461c00f5c02fe1b797715b965a", "size": 3494, "ext": "py", "lang": "Python", "max_stars_repo_path": "functions/dome.py", "max_stars_repo_name": "msb/minecraft-functions", "max_stars_repo_head_hexsha": "d9fa2d73a9038c29e4be0aa03e4286a33d0eda46", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "functions/dome.py", "max_issues_repo_name": "msb/minecraft-functions", "max_issues_repo_head_hexsha": "d9fa2d73a9038c29e4be0aa03e4286a33d0eda46", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "functions/dome.py", "max_forks_repo_name": "msb/minecraft-functions", "max_forks_repo_head_hexsha": "d9fa2d73a9038c29e4be0aa03e4286a33d0eda46", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.0550458716, "max_line_length": 96, "alphanum_fraction": 0.5838580424, "include": true, "reason": "import numpy", "num_tokens": 900}
|
import numpy as np
class Sampler(object):
def __init__(self, data_source):
pass
def __iter__(self):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
class SequentialSampler(Sampler):
def __init__(self, data_source):
self.data_source = data_source
def __iter__(self):
return iter(range(len(self.data_source)))
def __len__(self):
return len(self.data_source)
class RandomSampler(Sampler):
def __init__(self, data_source, replacement=False, num_samples=None):
self.data_source = data_source
self.replacement = replacement
self._num_samples = num_samples
@property
def num_samples(self):
if self._num_samples is None:
return len(self.data_source)
return self._num_samples
    def __iter__(self):
        n = len(self.data_source)
        if self.replacement:
            # sample indices with replacement
            return iter(np.random.randint(low=0, high=n, size=self.num_samples))
        k = np.arange(start=0, stop=n)
        np.random.shuffle(k)
        return iter(k[:self.num_samples])
def __len__(self):
return self.num_samples
class BatchSampler(Sampler):
def __init__(self, sampler, batch_size=3, drop_last=True):
self.sampler = sampler
self.batch_size = batch_size
self.drop_last = drop_last
def __iter__(self):
batch = []
for idx in self.sampler:
batch.append(idx)
if len(batch) == self.batch_size:
yield batch
batch = []
if len(batch) > 0 and not self.drop_last:
yield batch
def __len__(self):
if self.drop_last:
return len(self.sampler) // self.batch_size
else:
return (len(self.sampler) + self.batch_size - 1) // self.batch_size
|
{"hexsha": "ef9126c0b9b9ac1ad967e3ab809a4a6ed62ac71a", "size": 1745, "ext": "py", "lang": "Python", "max_stars_repo_path": "codes/data_m/sampler.py", "max_stars_repo_name": "mengdongwei/ai4khdr_mmsr", "max_stars_repo_head_hexsha": "5f16b667f5735b530e2c88c05b125e57a3cd2aa2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "codes/data_m/sampler.py", "max_issues_repo_name": "mengdongwei/ai4khdr_mmsr", "max_issues_repo_head_hexsha": "5f16b667f5735b530e2c88c05b125e57a3cd2aa2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "codes/data_m/sampler.py", "max_forks_repo_name": "mengdongwei/ai4khdr_mmsr", "max_forks_repo_head_hexsha": "5f16b667f5735b530e2c88c05b125e57a3cd2aa2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-12-15T02:56:49.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-15T02:56:49.000Z", "avg_line_length": 24.5774647887, "max_line_length": 79, "alphanum_fraction": 0.6183381089, "include": true, "reason": "import numpy", "num_tokens": 388}
|
import logging
import argparse
import numpy as np
from gensim.models import Word2Vec
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
def main(args):
walk_path = args.walk_path
embed_size = args.embed_size
window_size = args.window_size
negative = args.negative
workers = args.workers
out_dir = args.out_dir
epochs = args.epochs
walks = np.load(walk_path).tolist()
skipgram = Word2Vec(sentences=walks, vector_size=embed_size, window=window_size, epochs=epochs, negative=negative, sg=1, workers=workers, min_count=1)
keys = list(map(int, skipgram.wv.index_to_key))
keys.sort()
vectors = [skipgram.wv[idx] for idx in keys]
embeddings = np.array(vectors)
np.save(out_dir, embeddings)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='train embedding with skipgram')
parser.add_argument('--walk_path', type=str, required=True, help='numpy file containing walks')
parser.add_argument('--embed_size', type=int, default=128, help='embedding dimension. Default: 128')
parser.add_argument('--window_size', type=int, default=5, help='skipgram window')
parser.add_argument('--negative', type=int, default=5, help='number of negative samples')
parser.add_argument('--epochs', type=int, default=1, help='skip-gram epochs')
parser.add_argument('--workers', type=int, default=2, help='number of cpu workers')
    parser.add_argument('--out_dir', type=str, required=True, help='output path (.npy file) to store the embedding')
args = parser.parse_args()
main(args)
|
{"hexsha": "6ef52063e44feb5c04f952d48b87df1abdccf8e2", "size": 1609, "ext": "py", "lang": "Python", "max_stars_repo_path": "train_embedding.py", "max_stars_repo_name": "abidikhairi/embedding_with_lrw", "max_stars_repo_head_hexsha": "8152b3e4b61b197857d1293d5a50bff118804109", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "train_embedding.py", "max_issues_repo_name": "abidikhairi/embedding_with_lrw", "max_issues_repo_head_hexsha": "8152b3e4b61b197857d1293d5a50bff118804109", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "train_embedding.py", "max_forks_repo_name": "abidikhairi/embedding_with_lrw", "max_forks_repo_head_hexsha": "8152b3e4b61b197857d1293d5a50bff118804109", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2340425532, "max_line_length": 154, "alphanum_fraction": 0.7141081417, "include": true, "reason": "import numpy", "num_tokens": 387}
|
%----------------------------------------------------------------------------
% Magic tutorial number S-2
%----------------------------------------------------------------------------
\NeedsTeXFormat{LaTeX2e}[1994/12/01]
\documentclass[letterpaper,twoside,12pt]{article}
\usepackage{epsfig,times}
\setlength{\textwidth}{8.5in}
\addtolength{\textwidth}{-2.0in}
\setlength{\textheight}{11.0in}
\addtolength{\textheight}{-2.0in}
\setlength{\oddsidemargin}{0in}
\setlength{\evensidemargin}{0pt}
\setlength{\topmargin}{-0.5in}
\setlength{\headheight}{0.2in}
\setlength{\headsep}{0.3in}
\setlength{\topskip}{0pt}
\def\hinch{\hspace*{0.5in}}
\def\starti{\begin{center}\begin{tabbing}\hinch\=\hinch\=\hinch\=\hinch\=\hinch\=\kill}
\def\endi{\end{tabbing}\end{center}}
\def\ii{\>\>\>}
\def\q{\special{ps:(") show}\hspace*{0.6em}}
\def\mytitle{Magic Tutorial \#S-2: Boxes and labels}
%----------------------------------------------------------------------------
\begin{document}
\makeatletter
\newcommand{\ps@magic}{%
\renewcommand{\@oddhead}{\mytitle\hfil\today}%
\renewcommand{\@evenhead}{\today\hfil\mytitle}%
\renewcommand{\@evenfoot}{\hfil\textrm{--{\thepage}--}\hfil}%
\renewcommand{\@oddfoot}{\@evenfoot}}
\newcommand{\ps@mplain}{%
\renewcommand{\@oddhead}{}%
\renewcommand{\@evenhead}{}%
\renewcommand{\@evenfoot}{\hfil\textrm{--{\thepage}--}\hfil}%
\renewcommand{\@oddfoot}{\@evenfoot}}
\makeatother
\pagestyle{magic}
\thispagestyle{mplain}
\begin{center}
{\bfseries \Large \mytitle} \\
\vspace*{0.5in}
{\itshape Rajit Manohar} \\
\vspace*{0.5in}
Department of Computer Science \\
California Institute of Technology \\
Pasadena, CA 91125 \\
\vspace*{0.25in}
This tutorial corresponds to Magic version 7. \\
\end{center}
\vspace*{0.5in}
{\noindent\bfseries\large Tutorials to read first:}
\starti
\> Magic Tutorial \#S-1: The scheme command-line interpreter
\endi
{\noindent\bfseries\large Commands introduced in this tutorial:}
\starti
\> :getbox, :box.push, :box.pop, :box.move, :label.vert, :label.horiz, \\
\> :label.rename, :label.search, :label.find-next
\endi
{\noindent\bfseries\large Macros introduced in this tutorial:}
\starti
\> {\itshape (None)}
\endi
\vspace*{0.25in}
\section{The current box}
The fundamental way scheme programs interact with magic layout is by
using magic's {\bfseries box} command. For instance,
\starti
\ii {\bfseries (box 1 1 2 2)}
\endi
changes the current box to the rectangle defined by the coordinates
(1,1) and (2,2) in the current edit cell. This is the standard magic
{\bfseries :box} command. After moving the box to a particular position in
the layout, the area can be painted, erased, selected, etc.
The scheme function {\bfseries getbox} returns the current box as a list of
four integers. For instance,
\starti
\ii {\bfseries (box 1 1 2 2)} \\
\ii {\bfseries (define x (getbox))}
\endi
will bind the list {\bfseries (1 1 2 2)} to variable {\bfseries x}.
\section{Saving and restoring the box}
If a scheme function moves the current box around, it is good practice
to restore the box back to its original position. This is especially
useful when writing a function that the user is likely to type on the
command line.
{\bfseries box.push} can be used to push a box onto the current stack of
boxes. {\bfseries box.pop} restores the box to the one on the top of the box
stack. The sequence
\starti
\ii {\bfseries (box.push (getbox))} \\
\ii {\bfseries (box 1 1 5 4)} \\
\ii {\bfseries (paint poly)} \\
\ii {\bfseries (box.pop)}
\endi
will paint a rectangle of polysilicon from (1,1) to (5,4), restoring
the original position of the box.
\section{Moving the box}
Magic's built-in {\bfseries move} command is not entirely
reliable. Sometimes move commands are ignored, with disastrous
effects. (Think about what might happen if a move command was ignored
in the middle of drawing a stack of twenty transistors . . .) The
scheme function {\bfseries box.move} moves the box relative to the current
position.
\starti
\ii {\bfseries (box.move 5 3)}
\endi
will move the box right 5 lambda and up 3 lambda.
\section{Labelling vertical and horizontal wires}
Datapaths are usually designed by designing cells for a single bit of
the datapath, and then arraying those cells to obtain the complete
datapath. When simulating such designs, it is usually desirable to
label wires in the datapath with names like ``name0'', ``name1'', up to
``nameN.''
There are two functions that can be used to perform such a task. The
function {\bfseries label.vert} returns a function that can be used as a
labeller for vertically arrayed nodes. {\bfseries label.horiz} returns a
function that can be used as a labeller for horizontally arrayed
nodes.
\starti
\ii {\bfseries (define lbl (label.vert {\q}name{\q} 6)}
\endi
The command above defines a new function {\bfseries lbl} that can be used to
generate labels beginning with {\q}name0{\q} for nodes that are vertically
spaced by 6 lambda. The simplest way to use this function is to bind
it to a macro as follows:
\starti
\ii {\bfseries (macro 1 {\q}lbl{\q})}
\endi
Place the box over the lowest node. Every time key ``1'' is pressed, a
new label ``nameM'' is created and the box is moved up by 6
lambda. {\bfseries label.horiz} can be used in a similar fashion for
labelling nodes that are horizontally arrayed.
\section{Finding and renaming existing labels}
The label macros provide functionality to search for all labels
that match a particular string. Place the box over the region of
interest. Type:
\starti
\ii {\bfseries (label.search {\q}label{\q})}
\endi
To place the box over the first occurrence of the label you searched
for, type:
\starti
\ii {\bfseries (label.find-next)}
\endi
Repeatedly executing this function causes the box to move to all the
labels that match the search pattern. Typically, one would bind
{\bfseries label.find-next} to a macro.
The command {\bfseries label.rename} can be used to rename all labels with a
particular name. To use this command, place the box over the region of
interest. Then type
\starti
\ii {\bfseries (label.rename {\q}label1{\q} {\q}label2{\q})}
\endi
All occurrences of label ``label1'' in the current box will be
renamed to ``label2''.
\section{Writing these functions}
The functions discussed in this tutorial are not built-in. They are
user-defined functions in the default scheme file loaded in when magic
starts.
As you begin to use magic with the scheme command-line interpreter,
you will observe that commands for drawing paint on the screen are
extremely slow. This time interval is not normally noticeable because
editing is interactive. However, when one can write a scheme program
to draw twenty transistors on the screen, this delay becomes
noticeable. It is worthwhile to minimize the number of magic commands
executed, even if this involves writing more scheme code. The
{\bfseries box.pop} command has been tuned a little to not execute the
{\bfseries box} command if the box would not move as a result.
\bfseries
\starti
\> (define box.list ()) \\ \\
\> (define box.move \\
\>\> (lambda (dx dy) \\
\ii (let* ((x (getbox)) \\
\ii\> (nllx (+ dx (car x))) \\
\ii\> (nlly (+ dy (cadr x))) \\
\ii\> (nurx (+ dx (caddr x))) \\
\ii\> (nury (+ dy (cadddr x)))) \\
\ii (box nllx nlly nurx nury) \\
\ii ) \\
\>\> ) \\
\> ) \\ \\
\> (define box.=? \\
\>\> (lambda (b1 b2) \\
\ii (and (and (=? (car b1) (car b2)) (=? (cadr b1) (cadr b2))) \\
\ii\> (and (=? (caddr b1) (caddr b2)) (=? (cadddr b1) (cadddr b2))) \\
\ii ) \\
\>\> ) \\
\> ) \\ \\
\> (define box.push \\
\>\> (lambda (pos) \\
\ii (set! box.list (cons pos box.list)) \\
\>\> ) \\
\> )
\endi
\starti
\> (define box.pop \\
\>\> (lambda () \\
\ii (if (null? box.list) \\
\ii\> (echo {\q}Box list is empty{\q}) \\
\ii\> (let ((x (car box.list))) \\
\ii\>\> (begin \\
\ii\>\> (set! box.list (cdr box.list)) \\
\ii\>\> (if (box.=? x (getbox)) \#t (eval (cons 'box x))) \\
\ii\>\> ) \\
\ii\> ) \\
\ii ) \\
\>\> ) \\
\> )
\endi
\end{document}
|
{"hexsha": "6d8531ee800818e6da8a71dd44ee97c485bdefde", "size": 8173, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/latexfiles/tutscm2.tex", "max_stars_repo_name": "wisehackermonkey/magic", "max_stars_repo_head_hexsha": "fb85e97b9233cff352d964823173c18527c714aa", "max_stars_repo_licenses": ["TCL", "X11", "MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "doc/latexfiles/tutscm2.tex", "max_issues_repo_name": "wisehackermonkey/magic", "max_issues_repo_head_hexsha": "fb85e97b9233cff352d964823173c18527c714aa", "max_issues_repo_licenses": ["TCL", "X11", "MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "doc/latexfiles/tutscm2.tex", "max_forks_repo_name": "wisehackermonkey/magic", "max_forks_repo_head_hexsha": "fb85e97b9233cff352d964823173c18527c714aa", "max_forks_repo_licenses": ["TCL", "X11", "MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.2703703704, "max_line_length": 86, "alphanum_fraction": 0.6700110119, "num_tokens": 2453}
|
// Copyright (c) 2013, Manuel Blum
// All rights reserved.
#include <Eigen/Dense>
#include <iostream>
#include <cstdio>
#include "nn.h"
int main (int argc, const char* argv[]) {
// input dimensionality
int n_input = 2;
// output dimensionality
int n_output = 1;
// number of training samples
int m = 4;
// number of layers
int k = 3;
// number of optimization steps
int max_steps = 50;
// regularization parameter
double lambda = 0.000001;
// training inputs
matrix_t X(m, n_input);
matrix_t Y(m, n_output);
// XOR problem
X << 0, 0, 0, 1, 1, 0, 1, 1;
Y << 0, 1, 1, 0;
std::cout << "training input: " << std::endl << X << std::endl;
std::cout << "training output: " << std::endl << Y << std::endl;
// specify network topology
Eigen::VectorXi topo(k);
topo << n_input, 6, n_output;
std::cout << "topology: " << std::endl << topo << std::endl;
// initialize a neural network with given topology
NeuralNet nn(topo);
nn.autoscale(X,Y);
// train the network
std::cout << "starting training" << std::endl;
double err;
for (int i = 0; i < max_steps; ++i) {
err = nn.loss(X, Y, lambda);
nn.rprop();
printf("%3i %4.4f\n", i, err);
}
// write model to disk
nn.write("example.nn");
// read model from disk
NeuralNet nn2("example.nn");
// testing
nn2.forward_pass(X);
matrix_t Y_test = nn2.get_activation();
std::cout << "test input:" << std::endl << X << std::endl;
std::cout << "test output:" << std::endl << Y_test << std::endl;
return 0;
}
|
{"hexsha": "301bde4d19088e88a2852c889fe4b85888f79ce0", "size": 1549, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tutorial.cpp", "max_stars_repo_name": "mblum/nn", "max_stars_repo_head_hexsha": "f5fbba4ad93ce72798828d03b9b7d34dfb48a10f", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 11.0, "max_stars_repo_stars_event_min_datetime": "2015-05-27T11:59:14.000Z", "max_stars_repo_stars_event_max_datetime": "2019-08-12T14:57:31.000Z", "max_issues_repo_path": "tutorial.cpp", "max_issues_repo_name": "mblum/nn", "max_issues_repo_head_hexsha": "f5fbba4ad93ce72798828d03b9b7d34dfb48a10f", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tutorial.cpp", "max_forks_repo_name": "mblum/nn", "max_forks_repo_head_hexsha": "f5fbba4ad93ce72798828d03b9b7d34dfb48a10f", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2017-08-25T11:04:43.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-15T04:36:25.000Z", "avg_line_length": 22.1285714286, "max_line_length": 66, "alphanum_fraction": 0.6010329245, "num_tokens": 495}
|
from tensorflow.keras.models import Sequential, model_from_json
from tensorflow.keras.layers import Conv3D, Conv2D
from tensorflow.keras.layers import ConvLSTM2D
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras import losses
import numpy as np
import pandas as pd
import random
import pandasql as ps
import pickle
from scipy.stats import entropy
from numpy import percentile
import tensorflow.keras as keras
import gc
########## Create ConvLSTM network ##############
from tensorflow.keras.layers import LayerNormalization
def create_model(pixel,filters,channel,hiddenlayers = 4):
seq = Sequential()
#seq.add(BatchNormalization(trainable=False))
seq.add(ConvLSTM2D(filters=filters, kernel_size=(3, 3),
input_shape=(None, pixel, pixel, channel),
padding='same', return_sequences=True))#activation = 'tanh', recurrent_activation = 'tanh')),activation = 'elu'
#seq.add(BatchNormalization(trainable=False))
for layer in range(hiddenlayers-1):
seq.add(ConvLSTM2D(filters=filters, kernel_size=(3, 3),
padding='same', return_sequences=True))# activation = 'tanh', recurrent_activation = 'tanh'))
seq.add(ConvLSTM2D(filters=filters, kernel_size=(3, 3),
padding='same', return_sequences=False)) #activation = 'tanh', recurrent_activation = 'tanh'))
seq.add(Conv2D(filters=1, kernel_size=(3, 3),
activation='elu',
padding='same', data_format='channels_last'))
#seq.add(BatchNormalization(trainable=False))
seq.compile(loss='mean_squared_error', optimizer='adam',metrics=['mae'])
return seq
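# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original pipeline): building the
# ConvLSTM network for a 16x16 grid with 2 input channels, matching the
# defaults used by train_country_model further below.
#
#   demo_model = create_model(pixel=16, filters=32, channel=2, hiddenlayers=4)
#   demo_model.summary()
# ---------------------------------------------------------------------------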
import pandas as pd
import statsmodels.formula.api as sm
def get_localdist(trainX,spatialboundary,ST,boundmargin,span,channel):
trainx_dist = []
for day in range(span):
if day <= boundmargin:
_trainx_dist = trainX[0:ST,spatialboundary[0]:spatialboundary[1],spatialboundary[2]:spatialboundary[3],::]
elif day >= span - boundmargin-1:
_trainx_dist = trainX[span-ST:span,spatialboundary[0]:spatialboundary[1],spatialboundary[2]:spatialboundary[3],::]
else:
_trainx_dist = trainX[day-boundmargin:day+boundmargin+1,spatialboundary[0]:spatialboundary[1],spatialboundary[2]:spatialboundary[3],::]
_trainx_dist = _trainx_dist.reshape(ST**3,channel)
_trainx_dist = np.std(_trainx_dist, axis = 0)
trainx_dist.append(_trainx_dist)
trainx_dist = np.array(trainx_dist)
return (trainx_dist)
def get_localranddist(trainx_dist,span,channel,spatial):
randomlist = np.array(random.sample(range(-5, 5), span))[::,np.newaxis]
for j in range(1,channel):
if j in spatial:
a = random.randint(-5,5)
_randomlist = np.array([a for i in range(10)])[::,np.newaxis]
else:
_randomlist = np.array(random.sample(range(-5, 5), span))[::,np.newaxis]
randomlist = np.concatenate((randomlist,_randomlist),axis = 1)
randomlist[randomlist == 0 ] =1
return (trainx_dist/randomlist)
import statsmodels.api as sm
def run_ST_lime_pixel(model,trainX,trainx_dist,samp,span,channel,spatial,ST,r,c,channellist,incubation):
trainx = []
trainy = []
#print(r,c)
incubation_span = span - incubation
for i in range(samp):
rand_trainx_dist = get_localranddist(trainx_dist,span,channel,spatial)
_trainx = pickle.loads(pickle.dumps(trainX , -1))
#if (r,c) == (5,6):
# print(_trainx[::,r,c,4])
temp = _trainx[::,r,c,::]+rand_trainx_dist
rand_trainx_dist[np.where((temp <0) | (temp >1) )] = rand_trainx_dist[np.where((temp <0) | (temp >1) )] * -1
_trainx[(incubation_span - ST):incubation_span,r,c,channellist] = _trainx[(incubation_span - ST):incubation_span,r,c,channellist]+rand_trainx_dist[(incubation_span - ST):incubation_span,channellist]
#print(_trainx[::,r,c,4])
for C in spatial:
_trainx[::,::,::,C] = _trainx[incubation_span-1,::,::,C]
_trainy = model.predict(_trainx[np.newaxis,::,::,::,::])
_trainy = _trainy[0,::,::,0]
trainx.append(_trainx)
trainy.append(_trainy)
trainx = np.array(trainx)[::,::,r,c,::]
#print(trainx[::,::,4].shape)
trainy = np.array(trainy)[::,r,c]
traindata = pd.DataFrame()
for C in channellist:
if C in spatial:
traindata['C'+str(C)] = trainx[::,span-1,C].flatten()
else:
for T in range(incubation+1,incubation+ST+1):
traindata['C'+str(C)+'_T'+str(T)] = trainx[::,span-T,C].flatten()
traindata['Y'] = trainy.flatten()
traindata = traindata[traindata.sum(axis=1)>0]
X=list(traindata.columns)
X.remove('Y')
#X.remove('index')
_traindata = pickle.loads(pickle.dumps(traindata,-1))
for x in X:
_traindata[x] = (_traindata[x] - _traindata[x].mean())/_traindata[x].std()
_traindata['Y'] = (_traindata['Y'] - _traindata['Y'].mean())/_traindata['Y'].std()
try:
res = sm.OLS(_traindata['Y'],_traindata[X]).fit()
except:
print(channellist)
print(traindata.iloc[0]) #trainx[::,span-4,4].flatten()) #trainx[::,span-1,2].flatten())
raise
return(res,traindata)
import itertools
def run_regression(model,grid,train,train_gridday,frames_grid,exclude_channel = [0],spatial = [1],start=0,ST=3,margin = 4,samp= 500, incubation = 3,offset=10):
trainsamp = []
maxday = max(frames_grid['day'])
span = train.shape[1]
channel = train.shape[-1]
channellist = list(set(range(channel)) - set(exclude_channel))
pix = np.int(np.sqrt(max(frames_grid['pixno'])))
_gridpix = np.flip(np.array(range(1,max(frames_grid['pixno'])+1)).reshape(pix,pix),0)
gridpix = _gridpix[margin:pix-margin,margin:pix-margin].flatten()
allowedgridpix = frames_grid[(frames_grid['no_pat']>10) & (frames_grid['grid'] == grid)].groupby(['grid','pixno'])['day'].count().reset_index()
allowedgridpix = allowedgridpix[allowedgridpix.day > 30 ][['grid','pixno']]
gridpix = np.intersect1d(gridpix,np.array(allowedgridpix['pixno']))
train_xplain = pd.DataFrame()
gridtraindata_xplain= pd.DataFrame()
for k,(_grid,T) in train_gridday.items():
if _grid == grid:
trainsamp.append(k)
for T_span in itertools.islice(trainsamp[0:span], None, None, ST):# trainsamp[start:start+ST]:
trainX = train[T_span,::,::,::,::]
g,day = train_gridday[T_span]
for pixno in gridpix:
(r,c) = np.array((np.where(_gridpix==pixno))).reshape(2)
_boundmargin = np.int((ST-1)/2)
spatialboundary = (r-_boundmargin,r+_boundmargin+1,c - _boundmargin, c+_boundmargin+1)
trainx_dist = get_localdist(trainX,spatialboundary,ST,_boundmargin,span,channel)
print("pixno",pixno,"Tspan",T_span)
res,traindata_explain = run_ST_lime_pixel(model,trainX,trainx_dist,samp,span,channel,spatial,ST,r,c, channellist,incubation)
traindata_explain['grid'] = grid; traindata_explain['pixno'] = pixno; traindata_explain['day'] = maxday - day;
gridtraindata_xplain = gridtraindata_xplain.append(traindata_explain, ignore_index = True)
#print(res.summary())
fnames = list(res.params.index.values); coef = list(res.params); pvalue = list(res.pvalues)
fnames.append('beta');coef.append(np.mean(trainX[span-ST:,r,c,0])); pvalue.append(0)
for C in channellist:
fnames.append('act_C_'+str(C))
coef.append(np.mean(trainX[span-ST:,r,c,C]))
pvalue.append(0)
temp_df = pd.DataFrame({'fnames':fnames,'coef':coef,'pvalue':pvalue})
temp_df['grid'] = grid; temp_df['pixno'] = pixno; temp_df['day'] = maxday - day;
train_xplain = train_xplain.append(temp_df, ignore_index = True)
return(train_xplain,gridtraindata_xplain)
# """Compute softmax values for each sets of scores in x."""
def softmax(x):
if np.max(x) > 1:
e_x = np.exp(x/np.max(x))
else:
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum()
########## Convert image pixel values to number of infection cases ########
def convert_image_to_data(image,margin,sus_pop):
frame = image
frame[frame<0.001] = 0
pix = frame.shape[0]
frame = frame[margin:pix-margin,margin:pix-margin]
_sus_pop = np.log(sus_pop +2)
frame = np.multiply(frame,_sus_pop)
popexists_size = len(sus_pop[sus_pop>0])
frame = np.exp(frame) -1
frame = np.round(frame,0)
return (frame,popexists_size)
def forecast(model,input_sequence,frames_grid,test_gridday,span,qt,in_grid=-1,epsilon_T = -1,margin=4,spatial_channel=[],calculate_channel={},pixno=-1):
pix = np.int(np.sqrt(max(frames_grid['pixno'])))
_gridpix = np.flip(np.array(range(1,max(frames_grid['pixno'])+1)).reshape(pix,pix),0)
gridpix = _gridpix[margin:pix-margin,margin:pix-margin]
forecastframe = pd.DataFrame()
channels = input_sequence.shape[-1]
_span = 10
#forecast_frames_grid = pickle.loads(pickle.dumps(frames_grid[frames_grid['day'] <= max(frames_grid['day'])-_span],-1))
forecast_frames_grid_array = []; colnames = frames_grid.columns
print(max(frames_grid['day'])-_span)
for k,(grid,_filler) in test_gridday.items():
if in_grid >-1 and in_grid != grid:
continue
grid_forecast_frames_grid = pickle.loads(pickle.dumps(frames_grid[(frames_grid.grid == grid) & (frames_grid['day'] <= max(frames_grid['day'])-_span)],-1))
track = input_sequence[k]
totpop = track[0,::,::,1]
pix = totpop.shape[0]
print(grid)
I_0 = np.log(np.array(grid_forecast_frames_grid[(grid_forecast_frames_grid.day == max(grid_forecast_frames_grid.day))].sort_values(['pixno'])['I'])+1)
I_0 = np.flip(I_0.reshape(pix,pix),0)
_forecast_frames_grid = pickle.loads(pickle.dumps(grid_forecast_frames_grid[grid_forecast_frames_grid['day']==max(grid_forecast_frames_grid['day'])],-1))
popexists = pickle.loads(pickle.dumps(totpop[::,::],-1))
popexists[popexists>0] = 1
######## for each prediction day
for i in range(span):
new_pos = model.predict(track[np.newaxis, ::, ::, ::, ::])
new = new_pos[::, ::, ::, ::]
new = np.multiply(new[0,::,::,0],popexists)[np.newaxis,::,::,np.newaxis]
I_0 = np.multiply(I_0,popexists)
new[new<0] = 0
new[new>1] = 1
if epsilon_T > 1 and i > 0:
#pass
sum_beta_gamma = grid_forecast_frames_grid[(grid_forecast_frames_grid.day >41 )][['pixno','beta','gamma']].groupby(['pixno']).sum()
sum_beta = np.flip(np.array(sum_beta_gamma.beta).reshape(pix,pix),0)
sum_gamma =np.flip(np.array(sum_beta_gamma.gamma).reshape(pix,pix),0);
Iperc = pickle.loads(pickle.dumps(track[-1,::,::,4],-1)); Iperc[Iperc==0]=1
gamma1 = I_0*(i+1)/epsilon_T+ new[0,::,::,0]*qt/Iperc + sum_beta -sum_gamma; gamma1[gamma1>0.2] = 0.2
else:
gamma = forecast_gamma(grid_forecast_frames_grid,grid,5)
if pixno > -1 and i > 0:
gamma = forecast_gamma(grid_forecast_frames_grid,grid,5)
pixcor = np.where(_gridpix == pixno)
gamma[pixcor] = gamma[pixcor]
elif i > 0 and epsilon_T>1:
gamma = gamma
_forecast_frames_grid = calculate_future_SIR(_forecast_frames_grid,grid,forecastbeta = new[0,::,::,0],forecastgamma = gamma,qt = qt)
if len(forecast_frames_grid_array) != 0:
forecast_frames_grid_array = np.concatenate((forecast_frames_grid_array,_forecast_frames_grid.values),axis = 0)
else:
forecast_frames_grid_array = _forecast_frames_grid.values
#print(span, max( forecast_frames_grid[(forecast_frames_grid.grid == grid)]['day']))
########### append channels
newtrack = new
for channel in range(1,channels):
if channel in spatial_channel:
channel_data = track[0,::,::,channel]
newtrack = np.concatenate((newtrack,channel_data[np.newaxis,::,::,np.newaxis]),axis = 3)
elif channel in calculate_channel:
channel2 = np.flip(np.array(_forecast_frames_grid[calculate_channel[channel]]).reshape(pix, pix), 0)
newtrack = np.concatenate((newtrack,channel2[np.newaxis,::,::,np.newaxis]),axis = 3)
track = np.concatenate((track, newtrack), axis=0)
predictframe = np.squeeze(new,0)[::,::,0][margin:pix-margin,margin:pix-margin]
#_forecastframe = pd.DataFrame({'pixno':gridpix[totpop[margin:pix-margin,margin:pix-margin]>0].flatten(),
#'predict':predictframe[totpop[margin:pix-margin,margin:pix-margin]>0].flatten()})
#_forecastframe['day'] = i
#_forecastframe['grid'] = grid
#forecastframe = forecastframe.append(_forecastframe)
forecast_frames_grid = pd.DataFrame(forecast_frames_grid_array)
forecast_frames_grid.columns = colnames
return (forecast_frames_grid)
from statsmodels.tsa.arima_model import ARIMA
def forecast_gamma_model(frames_grid,span):
gamma_model = {}
T = max(frames_grid['day']) - span
pix = max(frames_grid['pixno'])
for grid in frames_grid['grid'].unique():
for pixno in range(1,pix+1):
t_series = np.array(frames_grid[(frames_grid['grid'] == grid) & (frames_grid['pixno'] == pixno) & (frames_grid['no_pat'] >0)]['gamma'])
if len(t_series) > 10 :
                # keep the fitted results instead of discarding the return value of fit()
                gamma_model[(grid,pixno)] = ARIMA(t_series, (2,1,2)).fit()
return gamma_model
def forecast_gamma(forecast_frames_grid,grid,span):
_forecast_frames_grid = forecast_frames_grid[forecast_frames_grid['grid'] == grid]
_forecast_frames_grid = _forecast_frames_grid[_forecast_frames_grid['day'] >= max(_forecast_frames_grid['day']) - span]
gamma = np.array(_forecast_frames_grid.groupby(['pixno'])['gamma'].mean())
pix = np.int(np.sqrt(max(_forecast_frames_grid['pixno'])))
gamma = np.flip(gamma.reshape(pix,pix),0)
return gamma
def validate(ensemble,test,testout,test_gridday,frames_grid,margin, qt, spatial_channel = [], forecast_channel=[], calculate_channel = {}):
errorsum = 0
averagetotalerror = 0
cnt = 1
channels = test.shape[-1]
predicttotal = pd.DataFrame()
pix = np.int(np.sqrt(max(frames_grid['pixno'])))
gridpix = np.flip(np.array(range(1,max(frames_grid['pixno'])+1)).reshape(pix,pix),0)
gridpix = gridpix[margin:pix-margin,margin:pix-margin]
errorframe = pd.DataFrame()
minpop = min(frames_grid['norm_pop'])
span = test_gridday[0][1]
forecast_frames_grid = frames_grid[frames_grid['day'] <= max(frames_grid['day'])-span]
forecast_frames_grid_array = []; colnames = frames_grid.columns
for k,(grid,span) in test_gridday.items():
######## for each test grid
grid_forecast_frames_grid = pickle.loads(pickle.dumps(forecast_frames_grid[forecast_frames_grid.grid == grid],-1))
_forecast_frames_grid = pickle.loads(pickle.dumps(grid_forecast_frames_grid[grid_forecast_frames_grid['day']==max(grid_forecast_frames_grid['day'])],-1))
track = test[k]
totpop = track[0,::,::,1]
pix = totpop.shape[0]
print(grid)
popexists = pickle.loads(pickle.dumps(totpop[::,::],-1))
popexists[popexists>0] = 1
popexists_size = len(popexists[popexists>0].flatten())
out = testout[k]
######## for each prediction day
for i in range(span):
new_pos = ensemble.predict(track[np.newaxis, ::, ::, ::, ::])
#new_pos = ensemble_predict(ensemble,track[np.newaxis, ::, ::, ::, ::])
new = new_pos[::, ::, ::, ::]
new = np.multiply(new[0,::,::,0],popexists)[np.newaxis,::,::,np.newaxis]
new[new<0] = 0
new[new>1] = 1
gamma = forecast_gamma(grid_forecast_frames_grid,grid,span)
_forecast_frames_grid = calculate_future_SIR(_forecast_frames_grid,grid,forecastbeta = new[0,::,::,0],forecastgamma = gamma,qt = qt)
if len(forecast_frames_grid_array) != 0:
forecast_frames_grid_array = np.concatenate((forecast_frames_grid_array,_forecast_frames_grid.values),axis = 0)
else:
forecast_frames_grid_array = _forecast_frames_grid.values
#print("forecast done")
########### append channels
newtrack = new
for channel in range(1,channels):
if channel in spatial_channel:
channel_data = track[i,::,::,channel]
newtrack = np.concatenate((newtrack,channel_data[np.newaxis,::,::,np.newaxis]),axis = 3)
elif channel in forecast_channel:
channel_data = out[i,::,::,channel]
newtrack = np.concatenate((newtrack,channel_data[np.newaxis,::,::,np.newaxis]),axis = 3)
elif channel in calculate_channel:
channel2 = np.flip(np.array(_forecast_frames_grid[calculate_channel[channel]]).reshape(pix, pix), 0)
newtrack = np.concatenate((newtrack,channel2[np.newaxis,::,::,np.newaxis]),axis = 3)
#print(channels,spatialorforecast_channel,newtrack.shape,track.shape)
track = np.concatenate((track, newtrack), axis=0)
predictframe = np.squeeze(new,0)[::,::,0][margin:pix-margin,margin:pix-margin]
actualframe = out[i,::,::,0][margin:pix-margin,margin:pix-margin]
notzeroframe = pickle.loads(pickle.dumps(actualframe, -1))
notzeroframe[notzeroframe == 0] =1
_errorframe = pd.DataFrame({'pixno':gridpix[totpop[margin:pix-margin,margin:pix-margin]>0].flatten(),
'predict':predictframe[totpop[margin:pix-margin,margin:pix-margin]>0].flatten(),
'actual':actualframe[totpop[margin:pix-margin,margin:pix-margin]>0].flatten()})
_errorframe['day'] = i
_errorframe['grid'] = grid
errorframe = errorframe.append(_errorframe)
error = np.sum(np.absolute((predictframe - actualframe)/notzeroframe))/(popexists_size+1)
averagetotalerror += np.sum(np.absolute((predictframe - actualframe)))/(popexists_size+1)
errorsum +=error
cnt +=1
averageerror = errorsum/cnt
averagetotalerror /= cnt
forecast_frames_grid = pd.DataFrame(forecast_frames_grid_array)
forecast_frames_grid.columns = colnames
return (averageerror,averagetotalerror,forecast_frames_grid)
def calculate_future_SIR(forecast_frames_grid,grid,forecastbeta,forecastgamma,qt):
_forecast_frames_grid = forecast_frames_grid[forecast_frames_grid['grid'] == grid]
_forecast_frames_grid = _forecast_frames_grid[_forecast_frames_grid['day'] == max(_forecast_frames_grid['day'])].sort_values(['pixno'])
beta = np.flip(forecastbeta,0).flatten()
gamma = np.flip(forecastgamma,0).flatten()
_forecast_frames_grid.loc[:,'pixel'] = beta
_forecast_frames_grid.loc[:,'beta'] = beta*qt/_forecast_frames_grid.Iperc
_forecast_frames_grid.loc[:,'gamma'] = gamma
_forecast_frames_grid.loc[:,'new_pat'] = np.round(qt*beta *_forecast_frames_grid['SI']/(_forecast_frames_grid['I']+1)) #(_forecast_frames_grid['I']+1))
_forecast_frames_grid.loc[:,'no_pat'] = _forecast_frames_grid['new_pat'] + _forecast_frames_grid['no_pat']
_forecast_frames_grid.loc[:,'S'] = _forecast_frames_grid['S'] - _forecast_frames_grid['new_pat']
_forecast_frames_grid.loc[:,'new_removed_pat'] = np.round(gamma * (_forecast_frames_grid['I']+1))
_forecast_frames_grid.loc[_forecast_frames_grid['new_removed_pat']>_forecast_frames_grid['I'],['new_removed_pat']] = 0
_forecast_frames_grid.loc[:,'I'] = _forecast_frames_grid['I'] + _forecast_frames_grid['new_pat'] - _forecast_frames_grid['new_removed_pat']
_forecast_frames_grid.loc[:,'SI'] = _forecast_frames_grid['I'] * _forecast_frames_grid['S']
temp_I = np.array(_forecast_frames_grid['I'])
temp_I[temp_I<1] = 1
_forecast_frames_grid.loc[:,'Iperc'] = _forecast_frames_grid['I']/(_forecast_frames_grid['pop']+1) #1/temp_I
_forecast_frames_grid.loc[:,'Sperc'] = _forecast_frames_grid['S']/(_forecast_frames_grid['pop']+1)
_forecast_frames_grid.loc[:,'day'] = _forecast_frames_grid['day'] +1
return _forecast_frames_grid
############ Test ensemble model foor Italy ####################
############ Test ensemble model foor Italy ####################
############ Test ensemble model foor Italy ####################
############ Test ensemble model foor Italy ####################
def test_model(model,test,testoutput,test_gridday,frames_grid,qt,spatial_channel,forecast_channel,calculate_channel = {},span=5,margin=4):
test_gridday_span = {}
pix = np.int(np.sqrt(max(frames_grid['pixno'])))
gridpix = np.flip(np.array(range(1,max(frames_grid['pixno'])+1)).reshape(pix,pix),0)
gridpix = gridpix[margin:pix-margin,margin:pix-margin].flatten()
for i,v in test_gridday.items():
if True: #v[0] == grid:
test_gridday_span[i] = (v[0],span)
#print(test_gridday_span)
(averageerror,averagetotalerror,forecast_frames_grid) = validate(model,test,testoutput,test_gridday_span,frames_grid,margin=4,qt=qt, spatial_channel = spatial_channel, forecast_channel = forecast_channel, calculate_channel = calculate_channel)
predict = forecast_frames_grid[(forecast_frames_grid['day']>max(forecast_frames_grid['day'])-span) ][['grid','day','pixno','new_pat','no_pat','S','I','beta','new_removed_pat']]
predict = predict[predict.pixno.isin(gridpix)]
actual = frames_grid[(frames_grid['day']>max(frames_grid['day'])-span)][['grid','day','pixno','new_pat','no_pat','S','I','beta','new_removed_pat','pop']]
actual = actual[actual.pixno.isin(gridpix)]
errorframe = pd.merge(predict,actual,on=['grid','pixno','day'])
errorframe = errorframe[errorframe['pop'] > 0 ]
#errorframe['beta_xx'] = errorframe['beta_x']*errorframe['pop_x']/errorframe['I_x'];errorframe['beta_yy'] = errorframe['beta_y']*errorframe['pop_y']/errorframe['I_y'];
KL_div = entropy( softmax(errorframe['beta_x']), softmax(errorframe['beta_y']) )
total_errorframe = errorframe.groupby(['day']).sum().reset_index()
grid_total_errorframe = errorframe.groupby(['grid','day']).sum().reset_index()
MAPE_countrytotal = np.mean(np.absolute((total_errorframe['no_pat_x'] - total_errorframe['no_pat_y'])/(total_errorframe['no_pat_y'])))
MAPE_grid = np.mean(np.absolute((grid_total_errorframe['no_pat_x'] - grid_total_errorframe['no_pat_y'])/(grid_total_errorframe['no_pat_y'])))
return(KL_div,MAPE_grid,MAPE_countrytotal,averageerror,errorframe)
def train_country_model(src_dir,model_dir,country,epochs = 20,hiddenlayers=4,batch_size = 50, channel = 2 , pixel = 16, filters = 32):
with open(src_dir+country+'prepdata.pkl', 'rb') as filehandler:
(train,output,test,testoutput,test_gridday,train_gridday) = pickle.load(filehandler)
frames_grid = pd.read_csv(src_dir+country+"framesgrid.csv")
frames_grid = pickle.loads(pickle.dumps(reduce_mem_usage(frames_grid),-1))
qt = percentile(frames_grid[frames_grid['no_pat']>0]['beta'],95)
with open(src_dir+country+'testprepdata.pkl', 'wb') as filehandler:
pickle.dump((test,testoutput,test_gridday,qt), filehandler)
print(country+" test data have been saved in "+src_dir+country+'testprepdata.pkl')
if country == 'USA':
pass
else:
out = output[::,-1,::,::,0]
out = out[::,::,::,np.newaxis]
model = create_model(pixel=pixel,filters=filters,channel=channel,hiddenlayers=hiddenlayers)
hist = model.fit(train, out, batch_size=batch_size,epochs=epochs, validation_split=0.05)
model.save(model_dir+country+"model.h5py")
print(country+" model have been generated and saved")
return
def test_country_model(src_dir,model_dir,country,span,margin=4):
with open(src_dir+country+'testprepdata.pkl', 'rb') as filehandler:
(test,testoutput,test_gridday,qt) = pickle.load(filehandler)
frames_grid = pd.read_csv(src_dir+country+'framesgrid.csv',usecols=['grid','day','pixno','Date','pop','no_pat','new_pat','new_removed_pat','S','I','Iperc','Sperc','invI','SI','beta','gamma','pixel','norm_pop'])
frames_grid = pickle.loads(pickle.dumps(reduce_mem_usage(frames_grid),-1))
gc.collect()
model = keras.models.load_model(model_dir+country+"model.h5py")
if span > test_gridday[0][1]:
print("span should be less than ",test_gridday[0][1]+1)
raise
KL_div,MAPE_grid,MAPE_countrytotal,averageerror,errorframe = test_model(model,test,testoutput,test_gridday,frames_grid,qt,spatial_channel = [1],forecast_channel = [], calculate_channel = {},span=span)
errorframe.to_csv(src_dir+country+"errorframe.csv",index = False)
return (KL_div,MAPE_grid,MAPE_countrytotal,averageerror)
def forecast_country_cases(src_dir,country,span=100,margin=4):
with open(src_dir+country+'testprepdata.pkl', 'rb') as filehandler:
(test,testoutput,test_gridday,qt) = pickle.load(filehandler)
frames_grid = pd.read_csv(src_dir+country+'framesgrid.csv',usecols=['grid','day','pixno','Date','pop','no_pat','new_pat','new_removed_pat','S','I','Iperc','Sperc','invI','SI','beta','gamma','pixel','norm_pop'])
frames_grid = pickle.loads(pickle.dumps(reduce_mem_usage(frames_grid),-1))
model = keras.models.load_model(src_dir+country+"model.h5py")
forecast_frames_grid=forecast(model,test,frames_grid,test_gridday,span=span,qt=qt,spatial_channel=[1])
forecast_frames_grid=forecast_frames_grid[['grid','day','pixno','no_pat','pop','new_pat','new_removed_pat']]
forecast_frames_grid = pickle.loads(pickle.dumps(reduce_mem_usage(forecast_frames_grid),-1))
df_pixel_county= pd.read_csv(src_dir+country+"pixel_counties.csv")
df_pixel_county = df_pixel_county[['grid','pixno','District','State','ratio']]
forecast_frames_county = pd.merge(forecast_frames_grid,df_pixel_county,left_on=['grid','pixno'],right_on=['grid','pixno'])
forecast_frames_county['no_pat'] = forecast_frames_county['no_pat']*forecast_frames_county['ratio']
forecast_frames_county['pop'] = forecast_frames_county['pop']*forecast_frames_county['ratio']
forecast_frames_county['new_pat'] = forecast_frames_county['new_pat']*forecast_frames_county['ratio']
forecast_frames_county['new_removed_pat'] = forecast_frames_county['new_removed_pat']*forecast_frames_county['ratio']
#forecast_frame.loc[:,['total_pat']] = forecast_frame['total_pat'] +forecast_frame['no_pat']
forecast_frames_county.to_csv(src_dir+country+"forecastcases.csv", index=False)
return forecast_frames_county
def reduce_mem_usage(df):
""" iterate through all the columns of a dataframe and modify the data type
to reduce memory usage.
"""
start_mem = df.memory_usage().sum() / 1024**2
print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))
for col in df.columns:
col_type = df[col].dtype
if col_type != object:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
else:
df[col] = df[col].astype('category')
end_mem = df.memory_usage().sum() / 1024**2
print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
return df
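# ---------------------------------------------------------------------------
# Minimal usage sketch for reduce_mem_usage (illustrative only; column names
# are hypothetical):
#
#   demo = pd.DataFrame({'day': np.arange(100, dtype=np.int64),
#                        'beta': np.random.rand(100)})
#   demo = reduce_mem_usage(demo)   # 'day' downcast to int8, 'beta' to float16
# ---------------------------------------------------------------------------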
|
{"hexsha": "59079e0f74e8b8f489e827252f0d4fcf6fac4711", "size": 26849, "ext": "py", "lang": "Python", "max_stars_repo_path": "model.py", "max_stars_repo_name": "swarna-kpaul/indiacovidforecast", "max_stars_repo_head_hexsha": "bfd2e000ef1ae338f313ea8e9d3ad5e972a3cf94", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-06-24T04:35:00.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-24T04:35:00.000Z", "max_issues_repo_path": "model.py", "max_issues_repo_name": "swarna-kpaul/indiacovidforecast", "max_issues_repo_head_hexsha": "bfd2e000ef1ae338f313ea8e9d3ad5e972a3cf94", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "model.py", "max_forks_repo_name": "swarna-kpaul/indiacovidforecast", "max_forks_repo_head_hexsha": "bfd2e000ef1ae338f313ea8e9d3ad5e972a3cf94", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 53.484063745, "max_line_length": 245, "alphanum_fraction": 0.7028194719, "include": true, "reason": "import numpy,from numpy,from scipy,import statsmodels,from statsmodels", "num_tokens": 7679}
|
type ClusterInfo
meta::Dict
locked::Bool
cloudname::AbstractString
skipticks::Bool
version::AbstractString
cloudsize::Int
healthy::Bool
badnodes::Int
excludefields::AbstractString
nodes::Vector
clouduptimemillis::Int
nodeidx::Int
consensus::Bool
isclient::Bool
end
|
{"hexsha": "75fb5c0d224290ec4aa8bfd0ba6e3ae2a5dedafc", "size": 278, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/types/clusterinfo.jl", "max_stars_repo_name": "drewgendreau/H2O.jl", "max_stars_repo_head_hexsha": "559c7f924965a9634dd53b692be5391dc2be2161", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-12-14T16:18:59.000Z", "max_stars_repo_stars_event_max_datetime": "2016-12-14T16:18:59.000Z", "max_issues_repo_path": "src/types/clusterinfo.jl", "max_issues_repo_name": "drewgendreau/H2O.jl", "max_issues_repo_head_hexsha": "559c7f924965a9634dd53b692be5391dc2be2161", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/types/clusterinfo.jl", "max_forks_repo_name": "drewgendreau/H2O.jl", "max_forks_repo_head_hexsha": "559c7f924965a9634dd53b692be5391dc2be2161", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-02-08T13:43:16.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-08T13:43:16.000Z", "avg_line_length": 17.375, "max_line_length": 30, "alphanum_fraction": 0.7913669065, "num_tokens": 89}
|
// smooth: Lie Theory for Robotics
// https://github.com/pettni/smooth
//
// Licensed under the MIT License <http://opensource.org/licenses/MIT>.
//
// Copyright (c) 2021 Petter Nilsson
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#ifndef SMOOTH__INTERNAL__SE3_HPP_
#define SMOOTH__INTERNAL__SE3_HPP_
#include <Eigen/Core>
#include "common.hpp"
#include "smooth/derivatives.hpp"
#include "so3.hpp"
namespace smooth {
/**
* @brief SE(3) Lie Group represented as S^3 ⋉ R3
*
* Memory layout
* -------------
* Group: x y z qx qy qz qw
* Tangent: vx vy vz Ωx Ωy Ωz
*
* Lie group Matrix form
* ---------------------
* [ R T ]
* [ 0 1 ]
*
* where R ∈ SO(3) and T = [x y z] ∈ R3
*
* Lie algebra Matrix form
* -----------------------
* [ 0 -Ωz Ωy vx]
* [ Ωz 0 -Ωx vy]
* [ -Ωy Ωx 0 vz]
* [ 0 0 0 0]
*
* Constraints
* -----------
* Group: qx * qx + qy * qy + qz * qz + qw * qw = 1
* Tangent: -pi < Ωx Ωy Ωz <= pi
*/
template<typename _Scalar>
class SE3Impl
{
public:
using Scalar = _Scalar;
static constexpr Eigen::Index RepSize = 7;
static constexpr Eigen::Index Dim = 4;
static constexpr Eigen::Index Dof = 6;
static constexpr bool IsCommutative = false;
SMOOTH_DEFINE_REFS;
static void setIdentity(GRefOut g_out)
{
g_out.template head<6>().setZero();
g_out(6) = Scalar(1);
}
static void setRandom(GRefOut g_out)
{
g_out.template head<3>().setRandom();
SO3Impl<Scalar>::setRandom(g_out.template tail<4>());
}
static void matrix(GRefIn g_in, MRefOut m_out)
{
m_out.setIdentity();
SO3Impl<Scalar>::matrix(g_in.template tail<4>(), m_out.template topLeftCorner<3, 3>());
m_out.template topRightCorner<3, 1>() = g_in.template head<3>();
}
static void composition(GRefIn g_in1, GRefIn g_in2, GRefOut g_out)
{
SO3Impl<Scalar>::composition(
g_in1.template tail<4>(), g_in2.template tail<4>(), g_out.template tail<4>());
Eigen::Matrix<Scalar, 3, 3> R1;
SO3Impl<Scalar>::matrix(g_in1.template tail<4>(), R1);
g_out.template head<3>().noalias() = R1 * g_in2.template head<3>() + g_in1.template head<3>();
}
static void inverse(GRefIn g_in, GRefOut g_out)
{
Eigen::Matrix<Scalar, 4, 1> so3inv;
SO3Impl<Scalar>::inverse(g_in.template tail<4>(), so3inv);
Eigen::Matrix<Scalar, 3, 3> Rinv;
SO3Impl<Scalar>::matrix(so3inv, Rinv);
g_out.template head<3>().noalias() = -Rinv * g_in.template head<3>();
g_out.template tail<4>() = so3inv;
}
static void log(GRefIn g_in, TRefOut a_out)
{
using SO3TangentMap = Eigen::Matrix<Scalar, 3, 3>;
SO3Impl<Scalar>::log(g_in.template tail<4>(), a_out.template tail<3>());
SO3TangentMap M_dr_expinv, M_ad;
SO3Impl<Scalar>::dr_expinv(a_out.template tail<3>(), M_dr_expinv);
SO3Impl<Scalar>::ad(a_out.template tail<3>(), M_ad);
a_out.template head<3>().noalias() = (-M_ad + M_dr_expinv) * g_in.template head<3>();
}
static void Ad(GRefIn g_in, TMapRefOut A_out)
{
SO3Impl<Scalar>::matrix(g_in.template tail<4>(), A_out.template topLeftCorner<3, 3>());
SO3Impl<Scalar>::hat(g_in.template head<3>(), A_out.template topRightCorner<3, 3>());
A_out.template topRightCorner<3, 3>() *= A_out.template topLeftCorner<3, 3>();
A_out.template bottomRightCorner<3, 3>() = A_out.template topLeftCorner<3, 3>();
A_out.template bottomLeftCorner<3, 3>().setZero();
}
static void exp(TRefIn a_in, GRefOut g_out)
{
using SO3TangentMap = Eigen::Matrix<Scalar, 3, 3>;
SO3Impl<Scalar>::exp(a_in.template tail<3>(), g_out.template tail<4>());
SO3TangentMap M_dr_exp, M_Ad;
SO3Impl<Scalar>::dr_exp(a_in.template tail<3>(), M_dr_exp);
SO3Impl<Scalar>::Ad(g_out.template tail<4>(), M_Ad);
g_out.template head<3>().noalias() = M_Ad * M_dr_exp * a_in.template head<3>();
}
static void hat(TRefIn a_in, MRefOut A_out)
{
A_out.setZero();
SO3Impl<Scalar>::hat(a_in.template tail<3>(), A_out.template topLeftCorner<3, 3>());
A_out.template topRightCorner<3, 1>() = a_in.template head<3>();
}
static void vee(MRefIn A_in, TRefOut a_out)
{
SO3Impl<Scalar>::vee(A_in.template topLeftCorner<3, 3>(), a_out.template tail<3>());
a_out.template head<3>() = A_in.template topRightCorner<3, 1>();
}
static void ad(TRefIn a_in, TMapRefOut A_out)
{
SO3Impl<Scalar>::hat(a_in.template tail<3>(), A_out.template topLeftCorner<3, 3>());
SO3Impl<Scalar>::hat(a_in.template head<3>(), A_out.template topRightCorner<3, 3>());
A_out.template bottomRightCorner<3, 3>() = A_out.template topLeftCorner<3, 3>();
A_out.template bottomLeftCorner<3, 3>().setZero();
}
static Eigen::Matrix<Scalar, 3, 3> calculate_q(TRefIn a)
{
using std::abs, std::sqrt, std::cos, std::sin;
const Scalar th2 = a.template tail<3>().squaredNorm();
const auto [A, B, C] = [&]() -> std::array<Scalar, 3> {
if (th2 < Scalar(eps2)) {
return {
// https://www.wolframalpha.com/input/?i=series+%28x+-+sin+x%29+%2F+x%5E3+at+x%3D0
Scalar(1) / Scalar(6) - th2 / Scalar(120),
// https://www.wolframalpha.com/input/?i=series+%28cos+x+-+1+%2B+x%5E2%2F2%29+%2F+x%5E4+at+x%3D0
Scalar(1) / Scalar(24) - th2 / Scalar(720),
// https://www.wolframalpha.com/input/?i=series+%28x+-+sin+x+-+x%5E3%2F6%29+%2F+x%5E5+at+x%3D0
-Scalar(1) / Scalar(120) + th2 / Scalar(5040),
};
} else {
const Scalar th = sqrt(th2), th_4 = th2 * th2, cTh = cos(th), sTh = sin(th);
return {
(th - sTh) / (th * th2),
(cTh - Scalar(1) + th2 / Scalar(2)) / th_4,
(th - sTh - th * th2 / Scalar(6)) / (th_4 * th),
};
}
}();
Eigen::Matrix<Scalar, 3, 3> V, W;
SO3Impl<Scalar>::hat(a.template head<3>(), V);
SO3Impl<Scalar>::hat(a.template tail<3>(), W);
const Scalar vdw = a.template tail<3>().dot(a.template head<3>());
const Eigen::Matrix<Scalar, 3, 3> WV = W * V, VW = V * W, WW = W * W;
// clang-format off
return Scalar(0.5) * V + A * (WV + VW - vdw * W) + B * (W * WV + VW * W + vdw * (3 * W - WW)) - C * 3 * vdw * WW;
// clang-format on
}
static std::pair<Eigen::Matrix3<Scalar>, Eigen::Matrix<Scalar, 3, 18>> calculate_Q_dQ(TRefIn a)
{
const Eigen::Vector3<Scalar> v = a.template head<3>();
const Eigen::Vector3<Scalar> w = a.template tail<3>();
const Scalar th2 = w.squaredNorm();
const auto [A, B, C, dA_over_th, dB_over_th, dC_over_th] = [&]() -> std::array<Scalar, 6> {
if (th2 < Scalar(eps2)) {
return {
Scalar(1) / Scalar(6) - th2 / Scalar(120),
Scalar(1) / Scalar(24) - th2 / Scalar(720),
-Scalar(1) / Scalar(120) + th2 / Scalar(5040),
-Scalar(1) / 60,
-Scalar(1) / 360,
Scalar(1) / 2520,
};
} else {
const Scalar th = sqrt(th2);
const Scalar th3 = th2 * th;
const Scalar th4 = th2 * th2;
const Scalar th5 = th3 * th2;
const Scalar th6 = th3 * th3;
const Scalar th7 = th4 * th3;
const Scalar sTh = sin(th);
const Scalar cTh = cos(th);
return {
(th - sTh) / (th3),
(cTh - Scalar(1) + th2 / Scalar(2)) / th4,
(th - sTh - th * th2 / Scalar(6)) / th5,
-cTh / th4 - 2 / th4 + 3 * sTh / th5,
-1 / th4 - sTh / th5 - 4 * cTh / th6 + 4 / th6,
1 / (3 * th4) - cTh / th6 - 4 / th6 + 5 * sTh / th7,
};
}
}();
Eigen::Matrix<Scalar, 3, 3> V, W;
SO3Impl<Scalar>::hat(a.template head<3>(), V);
SO3Impl<Scalar>::hat(a.template tail<3>(), W);
const Scalar vdw = v.dot(w);
const Eigen::Matrix3<Scalar> WV = W * V, VW = V * W, WW = W * W, PA = WV + VW - vdw * W,
PB = W * WV + VW * W + vdw * (3 * W - WW), PC = -3 * vdw * WW;
Eigen::Matrix3<Scalar> Q = V / 2 + A * PA + B * PB + C * PC;
// part with derivatives from matrices
// clang-format off
Eigen:: Matrix<Scalar, 3, 18> dQ {{ w.x()*(B + 3*C)*(w.y()*w.y() + w.z()*w.z()),
w.y()*(-2*A + B*(w.y()*w.y() + w.z()*w.z()) + 3*C*(w.y()*w.y() + w.z()*w.z())),
w.z()*(-2*A + B*(w.y()*w.y() + w.z()*w.z()) + 3*C*(w.y()*w.y() + w.z()*w.z())),
v.x()*(B + 3*C)*(w.y()*w.y() + w.z()*w.z()),
-2*A*v.y() + B*v.y()*(w.y()*w.y() + w.z()*w.z()) + 2*B*w.y()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z()) + 3*C*(v.y()*w.y()*w.y() + v.y()*w.z()*w.z() + 2*w.y()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z())),
-2*A*v.z() + B*v.z()*(w.y()*w.y() + w.z()*w.z()) + 2*B*w.z()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z()) + 3*C*(v.z()*w.y()*w.y() + v.z()*w.z()*w.z() + 2*w.z()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z())),
-A*(w.x()*w.z() - w.y()) - B*w.x()*(w.x()*w.y() - 2*w.z()) - 3*C*w.x()*w.x()*w.y(),
A*(w.x() - w.y()*w.z()) - B*w.y()*(w.x()*w.y() - 2*w.z()) - 3*C*w.x()*w.y()*w.y(),
-A*w.z()*w.z() - B*w.x()*w.x() - B*w.x()*w.y()*w.z() - B*w.y()*w.y() + B*w.z()*w.z() - 3*C*w.x()*w.y()*w.z() + Scalar(0.5),
-A*(v.x()*w.z() - v.y()) - B*(v.x()*w.z() + v.x()*(w.x()*w.y() - 3*w.z()) + 2*v.z()*w.x() + w.y()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z())) - 3*C*v.x()*w.x()*w.y() - 3*C*w.y()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z()),
A*(v.x() - v.y()*w.z()) - B*(v.y()*w.z() + v.y()*(w.x()*w.y() - 3*w.z()) + 2*v.z()*w.y() + w.x()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z())) - 3*C*v.y()*w.x()*w.y() - 3*C*w.x()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z()),
-A*(v.x()*w.x() + v.y()*w.y() + 2*v.z()*w.z()) + B*(2*v.x()*w.x() + 2*v.y()*w.y() - v.z()*w.z() - v.z()*(w.x()*w.y() - 3*w.z())) - 3*C*v.z()*w.x()*w.y(),
A*(w.x()*w.y() + w.z()) - B*w.x()*(w.x()*w.z() + 2*w.y()) - 3*C*w.x()*w.x()*w.z(),
A*w.y()*w.y() + B*w.x()*w.x() - B*w.x()*w.y()*w.z() - B*w.y()*w.y() + B*w.z()*w.z() - 3*C*w.x()*w.y()*w.z() - Scalar(0.5),
A*(w.x() + w.y()*w.z()) - B*w.z()*(w.x()*w.z() + 2*w.y()) - 3*C*w.x()*w.z()*w.z(),
A*(v.x()*w.y() + v.z()) + B*(v.x()*w.y() - v.x()*(w.x()*w.z() + 3*w.y()) + 2*v.y()*w.x() - w.z()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z())) - 3*C*v.x()*w.x()*w.z() - 3*C*w.z()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z()),
A*(v.x()*w.x() + 2*v.y()*w.y() + v.z()*w.z()) - B*(2*v.x()*w.x() - v.y()*w.y() + v.y()*(w.x()*w.z() + 3*w.y()) + 2*v.z()*w.z()) - 3*C*v.y()*w.x()*w.z(),
A*(v.x() + v.z()*w.y()) + B*(2*v.y()*w.z() + v.z()*w.y() - v.z()*(w.x()*w.z() + 3*w.y()) - w.x()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z())) - 3*C*v.z()*w.x()*w.z() - 3*C*w.x()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z())
}, {
A*(w.x()*w.z() + w.y()) - B*w.x()*(w.x()*w.y() + 2*w.z()) - 3*C*w.x()*w.x()*w.y(),
A*(w.x() + w.y()*w.z()) - B*w.y()*(w.x()*w.y() + 2*w.z()) - 3*C*w.x()*w.y()*w.y(),
A*w.z()*w.z() + B*w.x()*w.x() - B*w.x()*w.y()*w.z() + B*w.y()*w.y() - B*w.z()*w.z() - 3*C*w.x()*w.y()*w.z() - Scalar(0.5),
A*(v.x()*w.z() + v.y()) + B*(v.x()*w.z() - v.x()*(w.x()*w.y() + 3*w.z()) + 2*v.z()*w.x() - w.y()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z())) - 3*C*v.x()*w.x()*w.y() - 3*C*w.y()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z()),
A*(v.x() + v.y()*w.z()) + B*(v.y()*w.z() - v.y()*(w.x()*w.y() + 3*w.z()) + 2*v.z()*w.y() - w.x()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z())) - 3*C*v.y()*w.x()*w.y() - 3*C*w.x()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z()),
A*(v.x()*w.x() + v.y()*w.y() + 2*v.z()*w.z()) - B*(2*v.x()*w.x() + 2*v.y()*w.y() - v.z()*w.z() + v.z()*(w.x()*w.y() + 3*w.z())) - 3*C*v.z()*w.x()*w.y(),
w.x()*(-2*A + B*(w.x()*w.x() + w.z()*w.z()) + 3*C*(w.x()*w.x() + w.z()*w.z())),
w.y()*(B + 3*C)*(w.x()*w.x() + w.z()*w.z()),
w.z()*(-2*A + B*(w.x()*w.x() + w.z()*w.z()) + 3*C*(w.x()*w.x() + w.z()*w.z())),
-2*A*v.x() + B*v.x()*(w.x()*w.x() + w.z()*w.z()) + 2*B*w.x()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z()) + 3*C*(v.x()*w.x()*w.x() + v.x()*w.z()*w.z() + 2*w.x()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z())),
v.y()*(B + 3*C)*(w.x()*w.x() + w.z()*w.z()),
-2*A*v.z() + B*v.z()*(w.x()*w.x() + w.z()*w.z()) + 2*B*w.z()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z()) + 3*C*(v.z()*w.x()*w.x() + v.z()*w.z()*w.z() + 2*w.z()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z())),
-A*w.x()*w.x() + B*w.x()*w.x() - B*w.x()*w.y()*w.z() - B*w.y()*w.y() - B*w.z()*w.z() - 3*C*w.x()*w.y()*w.z() + Scalar(0.5),
-A*(w.x()*w.y() - w.z()) + B*w.y()*(2*w.x() - w.y()*w.z()) - 3*C*w.y()*w.y()*w.z(),
-A*(w.x()*w.z() - w.y()) + B*w.z()*(2*w.x() - w.y()*w.z()) - 3*C*w.y()*w.z()*w.z(),
-A*(2*v.x()*w.x() + v.y()*w.y() + v.z()*w.z()) + B*(-v.x()*w.x() + v.x()*(3*w.x() - w.y()*w.z()) + 2*v.y()*w.y() + 2*v.z()*w.z()) - 3*C*v.x()*w.y()*w.z(),
-A*(v.y()*w.x() - v.z()) - B*(2*v.x()*w.y() + v.y()*w.x() - v.y()*(3*w.x() - w.y()*w.z()) + w.z()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z())) - 3*C*v.y()*w.y()*w.z() - 3*C*w.z()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z()),
A*(v.y() - v.z()*w.x()) - B*(2*v.x()*w.z() + v.z()*w.x() - v.z()*(3*w.x() - w.y()*w.z()) + w.y()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z())) - 3*C*v.z()*w.y()*w.z() - 3*C*w.y()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z())
}, {
-A*(w.x()*w.y() - w.z()) - B*w.x()*(w.x()*w.z() - 2*w.y()) - 3*C*w.x()*w.x()*w.z(),
-A*w.y()*w.y() - B*w.x()*w.x() - B*w.x()*w.y()*w.z() + B*w.y()*w.y() - B*w.z()*w.z() - 3*C*w.x()*w.y()*w.z() + Scalar(0.5),
A*(w.x() - w.y()*w.z()) - B*w.z()*(w.x()*w.z() - 2*w.y()) - 3*C*w.x()*w.z()*w.z(),
-A*(v.x()*w.y() - v.z()) - B*(v.x()*w.y() + v.x()*(w.x()*w.z() - 3*w.y()) + 2*v.y()*w.x() + w.z()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z())) - 3*C*v.x()*w.x()*w.z() - 3*C*w.z()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z()),
-A*(v.x()*w.x() + 2*v.y()*w.y() + v.z()*w.z()) + B*(2*v.x()*w.x() - v.y()*w.y() - v.y()*(w.x()*w.z() - 3*w.y()) + 2*v.z()*w.z()) - 3*C*v.y()*w.x()*w.z(),
A*(v.x() - v.z()*w.y()) - B*(2*v.y()*w.z() + v.z()*w.y() + v.z()*(w.x()*w.z() - 3*w.y()) + w.x()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z())) - 3*C*v.z()*w.x()*w.z() - 3*C*w.x()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z()),
A*w.x()*w.x() - B*w.x()*w.x() - B*w.x()*w.y()*w.z() + B*w.y()*w.y() + B*w.z()*w.z() - 3*C*w.x()*w.y()*w.z() - Scalar(0.5),
A*(w.x()*w.y() + w.z()) - B*w.y()*(2*w.x() + w.y()*w.z()) - 3*C*w.y()*w.y()*w.z(),
A*(w.x()*w.z() + w.y()) - B*w.z()*(2*w.x() + w.y()*w.z()) - 3*C*w.y()*w.z()*w.z(),
A*(2*v.x()*w.x() + v.y()*w.y() + v.z()*w.z()) - B*(-v.x()*w.x() + v.x()*(3*w.x() + w.y()*w.z()) + 2*v.y()*w.y() + 2*v.z()*w.z()) - 3*C*v.x()*w.y()*w.z(),
A*(v.y()*w.x() + v.z()) + B*(2*v.x()*w.y() + v.y()*w.x() - v.y()*(3*w.x() + w.y()*w.z()) - w.z()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z())) - 3*C*v.y()*w.y()*w.z() - 3*C*w.z()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z()),
A*(v.y() + v.z()*w.x()) + B*(2*v.x()*w.z() + v.z()*w.x() - v.z()*(3*w.x() + w.y()*w.z()) - w.y()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z())) - 3*C*v.z()*w.y()*w.z() - 3*C*w.y()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z()),
w.x()*(-2*A + B*(w.x()*w.x() + w.y()*w.y()) + 3*C*(w.x()*w.x() + w.y()*w.y())),
w.y()*(-2*A + B*(w.x()*w.x() + w.y()*w.y()) + 3*C*(w.x()*w.x() + w.y()*w.y())),
w.z()*(B + 3*C)*(w.x()*w.x() + w.y()*w.y()),
-2*A*v.x() + B*v.x()*(w.x()*w.x() + w.y()*w.y()) + 2*B*w.x()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z()) + 3*C*(v.x()*w.x()*w.x() + v.x()*w.y()*w.y() + 2*w.x()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z())),
-2*A*v.y() + B*v.y()*(w.x()*w.x() + w.y()*w.y()) + 2*B*w.y()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z()) + 3*C*(v.y()*w.x()*w.x() + v.y()*w.y()*w.y() + 2*w.y()*(v.x()*w.x() + v.y()*w.y() + v.z()*w.z())),
v.z()*(B + 3*C)*(w.x()*w.x() + w.y()*w.y()) }};
// clang-format on
// parts with dA, dB, dC
for (auto i = 0u; i < 3; ++i) {
const Scalar dA_dwi = dA_over_th * w(i);
const Scalar dB_dwi = dB_over_th * w(i);
const Scalar dC_dwi = dC_over_th * w(i);
for (auto j = 0u; j < 3; ++j) {
dQ.col(3 + i + 6 * j) += dA_dwi * PA.row(j).transpose() + dB_dwi * PB.row(j).transpose()
+ dC_dwi * PC.row(j).transpose();
}
}
return {Q, dQ};
}
static void dr_exp(TRefIn a_in, TMapRefOut A_out)
{
SO3Impl<Scalar>::dr_exp(a_in.template tail<3>(), A_out.template topLeftCorner<3, 3>());
A_out.template topRightCorner<3, 3>() = calculate_q(-a_in);
A_out.template bottomRightCorner<3, 3>() = A_out.template topLeftCorner<3, 3>();
A_out.template bottomLeftCorner<3, 3>().setZero();
}
static void dr_expinv(TRefIn a_in, TMapRefOut A_out)
{
SO3Impl<Scalar>::dr_expinv(a_in.template tail<3>(), A_out.template topLeftCorner<3, 3>());
A_out.template topRightCorner<3, 3>().noalias() = -A_out.template topLeftCorner<3, 3>()
* calculate_q(-a_in)
* A_out.template topLeftCorner<3, 3>();
A_out.template bottomRightCorner<3, 3>() = A_out.template topLeftCorner<3, 3>();
A_out.template bottomLeftCorner<3, 3>().setZero();
}
static void d2r_exp(TRefIn a_in, THessRefOut H_out)
{
H_out.setZero();
// DERIVATIVES OF SO3 JACOBIAN
Eigen::Matrix<Scalar, 3, 9> Hso3;
SO3Impl<Scalar>::d2r_exp(a_in.template tail<3>(), Hso3);
for (auto i = 0u; i < 3; ++i) {
H_out.template block<3, 3>(0, 6 * i + 3) = Hso3.template block<3, 3>(0, 3 * i);
H_out.template block<3, 3>(3, 18 + 6 * i + 3) = Hso3.template block<3, 3>(0, 3 * i);
}
// DERIVATIVE OF Q TERM
const auto [Q, dQ] = calculate_Q_dQ(-a_in);
H_out.template block<3, 18>(3, 0) = -dQ;
}
static void d2r_expinv(TRefIn a_in, THessRefOut H_out)
{
H_out.setZero();
// DERIVATIVES OF SO3 JACOBIAN
Eigen::Matrix<Scalar, 3, 9> Hso3;
SO3Impl<Scalar>::d2r_expinv(a_in.template tail<3>(), Hso3);
for (auto i = 0u; i < 3; ++i) {
H_out.template block<3, 3>(0, 6 * i + 3) = Hso3.template block<3, 3>(0, 3 * i);
H_out.template block<3, 3>(3, 18 + 6 * i + 3) = Hso3.template block<3, 3>(0, 3 * i);
}
// DERIVATIVE OF -J Q J TERM
auto [Q, dQ] = calculate_Q_dQ(-a_in);
dQ *= -1; // account for -a_in
Eigen::Matrix3<Scalar> Jso3;
SO3Impl<Scalar>::dr_expinv(a_in.template tail<3>(), Jso3);
// Hso3 contains derivatives w.r.t. w, we extend for derivatives w.r.t. [v, w]
Eigen::Matrix<Scalar, 3, 18> Hso3_exp = Eigen::Matrix<Scalar, 3, 18>::Zero();
for (auto i = 0u; i < 3; ++i) {
Hso3_exp.template middleCols<3>(6 * i + 3) = Hso3.template middleCols<3>(3 * i);
}
const Eigen::Matrix3<Scalar> Jtmp = Jso3 * Q;
const Eigen::Matrix<Scalar, 3, 18> Htmp = d_matrix_product(Jso3, Hso3_exp, Q, dQ);
H_out.template block<3, 18>(3, 0) = -d_matrix_product(Jtmp, Htmp, Jso3, Hso3_exp);
}
};
} // namespace smooth
#endif // SMOOTH__INTERNAL__SE3_HPP_
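As a standalone illustration of the tangent layout and Lie-algebra matrix form documented in the header comment above, the following numpy sketch (not part of the smooth library; a hedged, illustrative restatement only) builds the 4x4 hat matrix from a tangent vector ordered as [vx, vy, vz, Ωx, Ωy, Ωz]:
import numpy as np

def se3_hat(a):
    """Hat map for se(3), mirroring the documented layout: a = [vx, vy, vz, wx, wy, wz]."""
    v, w = a[:3], a[3:]
    W = np.array([[0.0, -w[2], w[1]],
                  [w[2], 0.0, -w[0]],
                  [-w[1], w[0], 0.0]])
    A = np.zeros((4, 4))
    A[:3, :3] = W   # rotational part: skew-symmetric matrix of [wx, wy, wz]
    A[:3, 3] = v    # translational part in the last column
    return A

print(se3_hat(np.array([0.1, -0.2, 0.3, 0.0, 0.0, np.pi / 4])))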
|
{"hexsha": "ddae25f2407b2652618305eee36c66b11ef979e7", "size": 19928, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/smooth/internal/se3.hpp", "max_stars_repo_name": "tgurriet/smooth", "max_stars_repo_head_hexsha": "c19e35e23c8e0084314726729d0cf6729192240f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/smooth/internal/se3.hpp", "max_issues_repo_name": "tgurriet/smooth", "max_issues_repo_head_hexsha": "c19e35e23c8e0084314726729d0cf6729192240f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/smooth/internal/se3.hpp", "max_forks_repo_name": "tgurriet/smooth", "max_forks_repo_head_hexsha": "c19e35e23c8e0084314726729d0cf6729192240f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.82, "max_line_length": 225, "alphanum_fraction": 0.4777699719, "num_tokens": 7808}
|
import os
import shutil
import matplotlib.pyplot as plt
import numpy as np
from armor_py.options import args_parser
from armor_py.utils import alter_re, alter, del_blank_line, dict_avg, test_remove
def asr_per_process():
client_num_in_total = args.client_num_in_total
path = dataset_path + "client_num_{}/".format(client_num_in_total)
file_path = path + prefix_pgd + model_name + ".out"
pure_acc_file_path = path + prefix_pgd + model_name + "_pure_acc.out"
shutil.copyfile(file_path, pure_acc_file_path)
acc_file_path = path + prefix_pgd + model_name + "_acc.out"
shutil.copyfile(file_path, acc_file_path)
asr_file_path = path + prefix_pgd + model_name + "_asr.out"
shutil.copyfile(file_path, asr_file_path)
### Pure Acc ###
alter_re(pure_acc_file_path, "eps=.*", "")
alter(pure_acc_file_path, "################################ Attack begin ################################", "")
alter(pure_acc_file_path, "##############################################################################", "")
alter_re(pure_acc_file_path, "Adversary Examples Generated on Client .*", "")
alter_re(pure_acc_file_path, "Model Acc of Client .*: ", "")
alter_re(pure_acc_file_path, "Test on Client .*", "")
alter_re(pure_acc_file_path, "\(%\).*", "")
del_blank_line(pure_acc_file_path)
### Acc ###
alter_re(acc_file_path, "eps=.*", "")
alter(acc_file_path, "################################ Attack begin ################################", "")
alter(acc_file_path, "##############################################################################", "")
alter(acc_file_path, "Adversary Examples Generated on Client ", "")
alter_re(acc_file_path, "Test on Client .* Generated", "")
alter_re(acc_file_path, "Test on Client .* Acc: ", "")
alter_re(acc_file_path, "Model Acc of Client .*", "")
alter_re(acc_file_path, "\(%\).*", "")
del_blank_line(acc_file_path)
### ASR ###
alter_re(asr_file_path, "eps=.*", "")
alter(asr_file_path, "################################ Attack begin ################################", "")
alter(asr_file_path, "##############################################################################", "")
alter(asr_file_path, "Adversary Examples Generated on Client ", "")
alter_re(asr_file_path, "Test on Client .* Generated", "")
alter_re(asr_file_path, "Test on Client .* ASR: ", "")
alter_re(asr_file_path, "Model Acc of Client .*", "")
alter_re(asr_file_path, "\(%\).*", "")
del_blank_line(asr_file_path)
result_path = dataset_path + "ASR/"
if not os.path.exists(result_path):
os.makedirs(result_path)
result_file = result_path + args.dataset + "_client_num_{}".format(client_num_in_total) + model_name + ".out"
file_data = "Client\tPure Acc\n"
pure_acc_file = open(pure_acc_file_path)
pure_acc = []
for i in pure_acc_file:
pure_acc.append(float(i))
for corrupted_idx in range(client_num_in_total):
file_data += "{}\t{:.2f}%\n".format(corrupted_idx, pure_acc[corrupted_idx])
pure_acc_avg = np.average(pure_acc)
file_data += "Average Pure Acc {:.2f}%\n".format(pure_acc_avg)
file_data += "\n\nClient\tAcc of AE\n"
acc_file = open(acc_file_path)
acc, acc_avg = {}, {}
i_idx = 0
for i in acc_file:
if i_idx % (client_num_in_total) == 0:
corrupted_idx = int(i)
acc[corrupted_idx] = []
else:
acc[corrupted_idx].append(float(i))
i_idx = i_idx + 1
for corrupted_idx in range(client_num_in_total):
acc_avg[corrupted_idx] = np.average(acc[corrupted_idx])
file_data += "{}\t{:.2f}%\n".format(corrupted_idx, acc_avg[corrupted_idx])
acc_avg_avg = dict_avg(acc_avg)
file_data += "Average Acc of AE {:.2f}%\n".format(acc_avg_avg)
file_data += "\n\nClient\tASR of AE\n"
asr_file = open(asr_file_path)
asr, asr_avg = {}, {}
i_idx = 0
for i in asr_file:
if i_idx % (client_num_in_total) == 0:
corrupted_idx = int(i)
asr[corrupted_idx] = []
else:
asr[corrupted_idx].append(float(i))
i_idx = i_idx + 1
for corrupted_idx in range(client_num_in_total):
asr_avg[corrupted_idx] = np.average(asr[corrupted_idx])
file_data += "{}\t{:.2f}%\n".format(corrupted_idx, asr_avg[corrupted_idx])
asr_avg_avg = dict_avg(asr_avg)
file_data += "Average ASR of AE {:.2f}%\n".format(asr_avg_avg)
with open(result_file, "w", encoding="utf-8") as f:
f.write(file_data)
test_remove(pure_acc_file_path)
test_remove(acc_file_path)
test_remove(asr_file_path)
return pure_acc_avg, acc_avg_avg, asr_avg_avg
def atr_per_process():
client_num_in_total = args.client_num_in_total
path = dataset_path + "client_num_{}/".format(client_num_in_total)
file_path = path + prefix_attack_list + model_name + ".out"
processed_file_path = path + prefix_attack_list + model_name + "_processed.out"
shutil.copyfile(file_path, processed_file_path)
alter_re(processed_file_path, "eps=.*", "")
alter(processed_file_path, "################################ Attack begin ################################", "")
alter(processed_file_path, "##############################################################################", "")
alter(processed_file_path, "Adversary Examples Generated on Client ", "")
del_blank_line(processed_file_path)
result_path = dataset_path + "ATR/out/"
if not os.path.exists(result_path):
os.makedirs(result_path)
result_file = result_path + args.dataset + "_client_num_{}".format(client_num_in_total) + model_name + ".out"
file_data = "Client\tATR\n"
file_attack_list = open(processed_file_path)
# 0: predicted incorrectly
# 1: predicted correctly & attack failed
# 2: predicted correctly & attack succeeded
num_items = {}
raw_arr = {}
i_idx = 0
for i in file_attack_list:
if i_idx % (client_num_in_total + 1) == 0:
corrupted_idx = int(i)
elif i_idx % (client_num_in_total + 1) == 1:
num_items[corrupted_idx] = len(i.split())
raw_arr[corrupted_idx] = np.zeros((client_num_in_total, num_items[corrupted_idx]))
raw_arr[corrupted_idx][i_idx % (client_num_in_total + 1) - 1] = i.split()
else:
raw_arr[corrupted_idx][i_idx % (client_num_in_total + 1) - 1] = i.split()
i_idx = i_idx + 1
num_incorrect, num_attack_fail, num_attack_succeed, num_predict_correct = {}, {}, {}, {}
TR, ATR, AATR = {}, {}, {}
for corrupted_idx in range(client_num_in_total):
num_image_used = num_items[corrupted_idx]
num_incorrect[corrupted_idx], num_attack_fail[corrupted_idx], num_attack_succeed[corrupted_idx], \
num_predict_correct[corrupted_idx], TR[corrupted_idx] = [], [], [], [], []
for image_idx_used in range(num_image_used):
num_incorrect[corrupted_idx].append(
np.equal(raw_arr[corrupted_idx][:, image_idx_used], np.zeros(client_num_in_total)).sum())
num_attack_fail[corrupted_idx].append(
np.equal(raw_arr[corrupted_idx][:, image_idx_used], np.ones(client_num_in_total)).sum())
num_attack_succeed[corrupted_idx].append(
np.equal(raw_arr[corrupted_idx][:, image_idx_used], 2 * np.ones(client_num_in_total)).sum())
num_predict_correct[corrupted_idx].append(
num_attack_fail[corrupted_idx][image_idx_used] + num_attack_succeed[corrupted_idx][image_idx_used])
TR[corrupted_idx].append(
num_attack_succeed[corrupted_idx][image_idx_used] / num_predict_correct[corrupted_idx][image_idx_used])
ATR[corrupted_idx] = np.average(TR[corrupted_idx])
file_data += "{}\t{:.2f}%\n".format(corrupted_idx, ATR[corrupted_idx] * 100)
AATR = dict_avg(ATR)
file_data += "\nAATR\t{:.2f}%\n".format(AATR * 100)
TR_array = []
for TR_idx in range(len(TR)):
TR_array.append(np.array(TR[TR_idx]))
TR_flatten = np.hstack(TR_array)
fontsize_ticks = 22
fontsize_label = 26
fontsize_legend = 18
linewidth = 1.5
plt.figure()
bins = 10
plt.xlabel("ATR on benign", fontsize=fontsize_label)
plt.ylabel("Cumulative probability", fontsize=fontsize_label)
plt.xticks(fontsize=fontsize_ticks)
plt.yticks(fontsize=fontsize_ticks)
plt.ylim(0, 1.1)
plt.grid(True, linestyle='-.')
plt.tight_layout()
plt.hist(TR_flatten, bins, range=(0, 1), density=True, histtype='step', cumulative=True, linewidth=linewidth,
label="ATR on benign")
plt.legend(loc='lower right', fontsize=fontsize_legend)
fig_path = dataset_path + "ATR/cdf/"
if not os.path.exists(fig_path):
os.makedirs(fig_path)
plt.savefig(fig_path + "cdf_client_num_{}".format(client_num_in_total) + model_name + ".pdf")
plt.close()
weights = np.zeros_like(TR_flatten) + 1. / TR_flatten.size
plt.figure()
bins = 10
plt.xlabel("ATR on benign", fontsize=fontsize_label)
plt.ylabel("Frequency of samples", fontsize=fontsize_label)
plt.ylim(0, 0.6)
plt.xticks(fontsize=fontsize_ticks)
plt.yticks(fontsize=fontsize_ticks)
plt.grid(True, linestyle='-.')
plt.tight_layout()
plt.hist(TR_flatten, bins, range=(0, 1), density=False, weights=weights, alpha=0.6, label="ATR on benign")
plt.legend(loc='upper right', fontsize=fontsize_legend)
fig_path = dataset_path + "ATR/pdf/"
if not os.path.exists(fig_path):
os.makedirs(fig_path)
plt.savefig(fig_path + "pdf_client_num_{}".format(client_num_in_total) + model_name + ".pdf")
plt.close()
with open(result_file, "w", encoding="utf-8") as f:
f.write(file_data)
test_remove(processed_file_path)
return AATR
if __name__ == '__main__':
args = args_parser()
prefix_pgd = "pgd"
prefix_attack_list = "attack_list"
model_name = "_sub_{:.2f}_eta_{}_epoch_1000_p_{:.2f}".format(args.percent_sub, args.eta, args.p)
path = "./at_model/"
dataset_path = path + args.dataset + "/"
# rcd_path = path + "rcd_" + args.dataset + model_name + "_num_{}".format(args.client_num_in_total) + ".out"
# rcd_data = "num\tall_acc\tclean_acc\tasr\taatr\n"
pure_acc_avg, acc_avg_avg, asr_avg_avg = asr_per_process()
aatr = atr_per_process()
print("num={} all_acc={:.2f}% clean_acc={:.2f}% asr={:.2f}% aatr={:.2f}%".format(args.client_num_in_total, pure_acc_avg,
acc_avg_avg, asr_avg_avg, aatr * 100))
# rcd_data += "{}\t{:.2f}%\t{:.2f}%\t{:.2f}%\t{:.2f}%\n".format(args.client_num_in_total, pure_acc_avg,
# acc_avg_avg, asr_avg_avg, aatr * 100)
# print("dataset = " + args.dataset + ", num of client = {}, model{} completed!".format(args.client_num_in_total,
# model_name))
# with open(rcd_path, "w", encoding="utf-8") as f:
# f.write(rcd_data)
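# For reference, the per-image transfer-rate arithmetic in atr_per_process() reduces
# to the toy sketch below. The 0/1/2 matrix is made up for illustration only and is
# not read from any real attack_list output (encoding: 0 = misclassified,
# 1 = attack failed, 2 = attack succeeded).
toy_raw = np.array([[2, 1, 0, 2],
                    [2, 2, 1, 0],
                    [1, 2, 2, 2]])               # rows: client models, columns: images
toy_succeed = (toy_raw == 2).sum(axis=0)
toy_correct = (toy_raw == 1).sum(axis=0) + toy_succeed
toy_TR = toy_succeed / toy_correct               # per-image transfer rate
print("toy per-image TR:", toy_TR, "toy ATR:", toy_TR.mean())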
|
{"hexsha": "410f7a60a9fc8ed7f0f48f723a8541304b9e746a", "size": 11167, "ext": "py", "lang": "Python", "max_stars_repo_path": "process_exp.py", "max_stars_repo_name": "ARMOR-FL/ARMOR", "max_stars_repo_head_hexsha": "c2ec73dbc436c9f478a789a49fbb40e9c465b0d9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-01-28T11:00:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-19T08:23:43.000Z", "max_issues_repo_path": "process_exp.py", "max_issues_repo_name": "ARMOR-FL/ARMOR", "max_issues_repo_head_hexsha": "c2ec73dbc436c9f478a789a49fbb40e9c465b0d9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "process_exp.py", "max_forks_repo_name": "ARMOR-FL/ARMOR", "max_forks_repo_head_hexsha": "c2ec73dbc436c9f478a789a49fbb40e9c465b0d9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-22T16:58:48.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-22T16:58:48.000Z", "avg_line_length": 45.5795918367, "max_line_length": 128, "alphanum_fraction": 0.6156532641, "include": true, "reason": "import numpy", "num_tokens": 2862}
|
\appendix
\chapter{Analytical Modelling of Buckled Beam Mechanism}\label{chap:appendixA}
The bistable mechanism consists of an initially flat beam which, when compressed by a distance $\Delta l$, buckles and forms a structure that exists in two stable positions. The actuator, which consists of a pair of SMA coils and the buckled beam itself, is considered to require an input torque $M_\mathrm{in}$ at the input pivot to switch between its two stable states.
As the entire kinematic stage is composed of flexure-based mechanisms, as shown in \cref{fig:bistable-mechanism}, the pivots that support the buckled beam exhibit an inherent angular stiffness, $K_\mathrm{in}$ and $K_\mathrm{out}$ at the input and output pivot, respectively. The buckled beam is considered to have a flexural rigidity of $EI$ and an initial length before compression of $L$. The centre of the flexural pivots is offset from the beam by a distance $p$, as shown in \cref{fig:buckled-beam-schematic}.
Based on the hypothesis described in the work by \cite{tivot2021} and the Euler-Bernoulli beam theory, the beam deflection can be described using the following equation:
\begin{equation}\label{eq:deflection_A}
y(x) = \left(A\sin{kx}+B(\cos{kx}-1)+C\frac{x}{l}\right) l {\theta }_\textrm{in}
\end{equation}
with $k=\sqrt{P/(EI)}$. The boundary conditions of the supported beam are as follows:
\[y(0)=0\]
\[y'(0)\cong{\theta }_\textrm{out}\]
\[M_0\cong K_\textrm{out}\theta_\textrm{out}+Vp-Pp\theta_\textrm{out}\]
\[y(l)\cong -p(\theta_\textrm{out}+\theta_\textrm{in})\]
\[y'(l)\cong \theta_\textrm{in}\]
Furthermore, the deflection parameters of \cref{eq:deflection_A} are given by
\begin{equation}\label{A_norm}
A = \frac{(1+2\overline{p})kl+{\varepsilon }_0\left(\overline{p} \sin{kl}-\frac{\cos{kl}-1}{kl}\right)}
{kl\left( kl \cos{kl}-\sin{kl}-\left({\overline{p}}^2+\overline{p}\right){(kl)}^2\sin{kl}+{\varepsilon }_0\left(\sin{kl}+2\frac{\cos{kl}-1}{kl}\right)\right)}
\end{equation}
\begin{equation} \label{B_norm}
B = \frac{ \overline{p}(1+2\overline{p}){(kl)}^2+{\varepsilon }_0 \left(\overline{p} \left(\cos{kl}-1\right) + \frac{\sin{kl}}{kl} -1\right)}
{kl\left( kl \cos{kl}-\sin{kl}-\left({\overline{p}}^2+\overline{p}\right){(kl)}^2\sin{kl}+{\varepsilon }_0\left(\sin{kl}+2\frac{\cos{kl}-1}{kl}\right)\right)}
\end{equation}
\begin{equation} \label{C_norm}
C = \frac{ {\overline{p}}^2{(kl)}^2\sin{kl} -2\overline{p} kl\cos{kl} -\sin{kl} - {\varepsilon }_0\left(\overline{p}\sin{kl}-\frac{\cos{kl}-1}{kl}\right)}
{ kl \cos{kl}-\sin{kl}-\left({\overline{p}}^2+\overline{p}\right){(kl)}^2\sin{kl}+{\varepsilon }_0\left(\sin{kl}+2\frac{\cos{kl}-1}{kl}\right)}
\end{equation}
where $\overline{p} = p/l $ and $\varepsilon_0=K_\textrm{out}/(EI/l)$. When the beam’s arc length is considered constant, the end-shortening, $\Delta l$, can be approximated using the following expression
\begin{equation}\label{eq:delta_l}
\Delta l\cong \frac{p}{2}({\theta }^2_\textrm{in}+\theta ^2_\textrm{out})+\int^l_0{\frac{y'(x)^2}{2}dx}=H l{\theta }^2_\textrm{in}
\end{equation}
Here, the coefficient $H$ is expressed as
\begin{multline}\label{eq:H-smabb}
H = \frac{\left({A}^2+{B}^2\right){\left(kl\right)}^2}{4} + \frac{\left({A}^2-{B}^2\right)kl\sin{2kl} }{8} + \frac{AB kl\left(\cos{2kl}-1\right)}{4}\\
+AC\sin{kl} + BC\left(\cos{kl}-1\right) + \frac{{C}^2}{2} +\frac{\overline{p}}{2}\left({\left( Akl+C\right)}^2+1\right)
\end{multline}
By rearranging \cref{eq:delta_l}, the input angle can be expressed as
\begin{equation}\label{eq:theta_in_A}
{\theta}_\textrm{in}=\pm \sqrt{\frac{\Delta l}{l}}\sqrt{\frac{1}{H}}
\end{equation}
Finally, the input moment is given by \cref{eq:M_in_A}:
\begin{equation}
\begin{split}
M_\textrm{in} &\cong M_l+K_\textrm{in}{\theta }_\textrm{in}+Vp-Pp{\theta}_\textrm{in}\\
&=\frac{EI}{l}\left({\left(kl\right)}^2\left(\overline{p}\left(C-1\right)-A\sin{kl}-B\cos{kl}\right)+{\varepsilon }_l\right){\theta}_\textrm{in}
\label{eq:M_in_A}
\end{split}
\end{equation}
\noindent{where ${\varepsilon }_l=K_\textrm{in}/(EI/l)$.}
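Combining \cref{eq:theta_in_A} and \cref{eq:M_in_A}, the input moment can also be written directly in terms of the imposed end-shortening,
\begin{equation}
M_\textrm{in}\cong \pm\frac{EI}{l}\left({\left(kl\right)}^2\left(\overline{p}\left(C-1\right)-A\sin{kl}-B\cos{kl}\right)+{\varepsilon }_l\right)\sqrt{\frac{\Delta l}{l H}}
\end{equation}
which gives the moment required at the input pivot for a prescribed compression $\Delta l$.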
These equations, as developed by Loic Tissot-Daguette, are used to obtain the moment and angular stroke requirements of the bistable element when sizing the SMA elements for the bistable gripper.
|
{"hexsha": "b6268f2c3965c335a8bec9666baec416c66cfb71", "size": 4323, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "tail/appendix.tex", "max_stars_repo_name": "seanthomas0409/sethomas_EPFL_thesis", "max_stars_repo_head_hexsha": "5cc1b082be09da01e7545b7da93d1b113edc77b6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tail/appendix.tex", "max_issues_repo_name": "seanthomas0409/sethomas_EPFL_thesis", "max_issues_repo_head_hexsha": "5cc1b082be09da01e7545b7da93d1b113edc77b6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tail/appendix.tex", "max_forks_repo_name": "seanthomas0409/sethomas_EPFL_thesis", "max_forks_repo_head_hexsha": "5cc1b082be09da01e7545b7da93d1b113edc77b6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 78.6, "max_line_length": 561, "alphanum_fraction": 0.7032153597, "num_tokens": 1606}
|
[STATEMENT]
lemma card_surjective_functions_range_permutation:
assumes "finite A" "finite B"
shows "card ({f \<in> A \<rightarrow>\<^sub>E B. f ` A = B} // range_permutation A B) = Stirling (card A) (card B)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. card ({f \<in> A \<rightarrow>\<^sub>E B. f ` A = B} // range_permutation A B) = Stirling (card A) (card B)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. card ({f \<in> A \<rightarrow>\<^sub>E B. f ` A = B} // range_permutation A B) = Stirling (card A) (card B)
[PROOF STEP]
have "bij_betw (partitions_of A B) ({f \<in> A \<rightarrow>\<^sub>E B. f ` A = B} // range_permutation A B) {P. partition_on A P \<and> card P = card B}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. bij_betw (partitions_of A B) ({f \<in> A \<rightarrow>\<^sub>E B. f ` A = B} // range_permutation A B) {P. partition_on A P \<and> card P = card B}
[PROOF STEP]
using \<open>finite A\<close> \<open>finite B\<close>
[PROOF STATE]
proof (prove)
using this:
finite A
finite B
goal (1 subgoal):
1. bij_betw (partitions_of A B) ({f \<in> A \<rightarrow>\<^sub>E B. f ` A = B} // range_permutation A B) {P. partition_on A P \<and> card P = card B}
[PROOF STEP]
by (rule bij_betw_partitions_of)
[PROOF STATE]
proof (state)
this:
bij_betw (partitions_of A B) ({f \<in> A \<rightarrow>\<^sub>E B. f ` A = B} // range_permutation A B) {P. partition_on A P \<and> card P = card B}
goal (1 subgoal):
1. card ({f \<in> A \<rightarrow>\<^sub>E B. f ` A = B} // range_permutation A B) = Stirling (card A) (card B)
[PROOF STEP]
from this
[PROOF STATE]
proof (chain)
picking this:
bij_betw (partitions_of A B) ({f \<in> A \<rightarrow>\<^sub>E B. f ` A = B} // range_permutation A B) {P. partition_on A P \<and> card P = card B}
[PROOF STEP]
have "card ({f \<in> A \<rightarrow>\<^sub>E B. f ` A = B} // range_permutation A B) = card {P. partition_on A P \<and> card P = card B}"
[PROOF STATE]
proof (prove)
using this:
bij_betw (partitions_of A B) ({f \<in> A \<rightarrow>\<^sub>E B. f ` A = B} // range_permutation A B) {P. partition_on A P \<and> card P = card B}
goal (1 subgoal):
1. card ({f \<in> A \<rightarrow>\<^sub>E B. f ` A = B} // range_permutation A B) = card {P. partition_on A P \<and> card P = card B}
[PROOF STEP]
by (rule bij_betw_same_card)
[PROOF STATE]
proof (state)
this:
card ({f \<in> A \<rightarrow>\<^sub>E B. f ` A = B} // range_permutation A B) = card {P. partition_on A P \<and> card P = card B}
goal (1 subgoal):
1. card ({f \<in> A \<rightarrow>\<^sub>E B. f ` A = B} // range_permutation A B) = Stirling (card A) (card B)
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
card ({f \<in> A \<rightarrow>\<^sub>E B. f ` A = B} // range_permutation A B) = card {P. partition_on A P \<and> card P = card B}
goal (1 subgoal):
1. card ({f \<in> A \<rightarrow>\<^sub>E B. f ` A = B} // range_permutation A B) = Stirling (card A) (card B)
[PROOF STEP]
have "card {P. partition_on A P \<and> card P = card B} = Stirling (card A) (card B)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. card {P. partition_on A P \<and> card P = card B} = Stirling (card A) (card B)
[PROOF STEP]
using \<open>finite A\<close>
[PROOF STATE]
proof (prove)
using this:
finite A
goal (1 subgoal):
1. card {P. partition_on A P \<and> card P = card B} = Stirling (card A) (card B)
[PROOF STEP]
by (rule card_partition_on)
[PROOF STATE]
proof (state)
this:
card {P. partition_on A P \<and> card P = card B} = Stirling (card A) (card B)
goal (1 subgoal):
1. card ({f \<in> A \<rightarrow>\<^sub>E B. f ` A = B} // range_permutation A B) = Stirling (card A) (card B)
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
card ({f \<in> A \<rightarrow>\<^sub>E B. f ` A = B} // range_permutation A B) = Stirling (card A) (card B)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
card ({f \<in> A \<rightarrow>\<^sub>E B. f ` A = B} // range_permutation A B) = Stirling (card A) (card B)
goal (1 subgoal):
1. card ({f \<in> A \<rightarrow>\<^sub>E B. f ` A = B} // range_permutation A B) = Stirling (card A) (card B)
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
card ({f \<in> A \<rightarrow>\<^sub>E B. f ` A = B} // range_permutation A B) = Stirling (card A) (card B)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1770, "file": "Twelvefold_Way_Twelvefold_Way_Entry9", "length": 15}
|
from numpy.core.arrayprint import DatetimeFormat
from numpy.lib.function_base import piecewise
import taichi as ti
import numpy as np
from .sph_solver import SPHSolver
class SoilSPHSolver(SPHSolver):
def __init__(self, particle_system, TDmethod, gamma, coh, fric):
super().__init__(particle_system, TDmethod)
print("Hallo, class SOILSPH Solver 2D starts to serve!")
# Basic paras
self.density_0 = gamma # reference density of soil, kg/m3
self.cohesion = coh # the material cohesion, Pa
self.friction_deg = fric # the angle of internal friction, DEG
self.poisson = 0.3 # Poisson’s ratio
self.E = 8e7 # Young’s modulus, Pa
# Paras based on basic paras
self.mass = self.ps.m_V * self.density_0 # the mass of each particle, kg
self.friction = self.friction_deg / 180 * np.pi # the angle of internal friction, RAD
self.Depq = self.E / (1 + self.poisson) / (1 - 2 * self.poisson) * ti.Matrix(
[[1 - self.poisson, self.poisson, 0, self.poisson],
[self.poisson, 1 - self.poisson, 0, self.poisson],
[0, 0, (1 - 2 * self.poisson) / 2, 0],
[self.poisson, self.poisson, 0, 1 - self.poisson]])
self.alpha_fric = ti.tan(self.friction) / ti.sqrt(9 + 12 * (ti.tan(self.friction))**2)
self.kc = 3 * self.cohesion / ti.sqrt(9 + 12 * (ti.tan(self.friction))**2)
self.Gshear = self.E / 2 / (1 + self.poisson)
self.Kbulk = self.E / 3 / (1 - 2 * self.poisson)
# Allocate memories
self.f_stress = ti.Matrix.field(self.ps.dim, self.ps.dim, dtype=float)
self.f_u = ti.Matrix.field(self.ps.dim_stress, self.ps.dim, dtype=float)
self.f_stress_grad = ti.Vector.field(self.ps.dim, dtype=float)
self.f_u_grad = ti.Vector.field(self.ps.dim_stress, dtype=float)
self.f_ext = ti.Vector.field(self.ps.dim, dtype=float)
self.g_p = ti.Vector.field(self.ps.dim_stress, dtype=float) # item in constitutive equation
self.g_DP = ti.field(dtype=float) # the value of checking stress state
self.s = ti.Vector.field(self.ps.dim_stress, dtype=float) # the deviatoric stress
self.p = ti.field(dtype=float) # the hydrostatic pressure
self.I1 = ti.field(dtype=float) # the first invariant of the stress tensor
self.sJ2 = ti.field(dtype=float) # sqrt of the second invariant of the deviatoric stress tensor
self.r_sigma = ti.field(dtype=float) # the scaling factor
self.spin = ti.field(dtype=float) # the spin rate tensor
self.Jaumann = ti.Vector.field(self.ps.dim_stress, dtype=float) # the Jaumann stress rate, tilde σ
self.F1 = ti.Vector.field(self.ps.dim, dtype=float)
self.F2 = ti.Vector.field(self.ps.dim_stress, dtype=float)
self.u1234 = ti.Vector.field(self.ps.dim, dtype=float)
self.stress1234 = ti.Vector.field(self.ps.dim_stress, dtype=float)
particle_node = ti.root.dense(ti.i, self.ps.particle_max_num)
particle_node.place(self.f_stress, self.f_u, self.f_stress_grad, self.f_u_grad, self.f_ext, self.g_p, self.g_DP, self.s, self.p, self.I1, self.sJ2, self.spin, self.Jaumann, self.u1234, self.stress1234)
particle_node.dense(ti.j, 4).place(self.F1, self.F2)
@ti.kernel
def init_data(self):
for p_i in range(self.ps.particle_num[None]):
for m in range(4):
self.F1[p_i, m] = ti.Vector([0.0 for _ in range(self.ps.dim)])
self.F2[p_i, m] = ti.Vector([0.0 for _ in range(self.ps.dim_stress)])
@ti.kernel
def update_u_stress_1(self, m: int):
for p_i in range(self.ps.particle_num[None]):
if self.ps.material[p_i] != self.ps.material_soil:
continue
if m > 0:
print('m1 =', m, end='; ')
print('p_i =', p_i)
continue
# assert m > 0, 'My Error: m > 0 when it should be 0!'
self.u1234[p_i] = self.ps.v[p_i]
self.stress1234[p_i] = self.ps.stress[p_i]
@ti.kernel
def update_u_stress_234(self, m: int):
for p_i in range(self.ps.particle_num[None]):
if self.ps.material[p_i] != self.ps.material_soil:
continue
if m == 0 or m > 3:
print('m2 =', m, end='; ')
print('p_i =', p_i)
continue
# assert m == 0, 'My Error: m = 0 when it should be 1, 2, 3!'
# assert m > 3, 'My Error: m > 3 when it should be 1, 2, 3!'
self.u1234[p_i] = self.ps.v[p_i] + 0.5 * self.dt[None] * self.F1[p_i, m-1]
self.stress1234[p_i] = self.ps.stress[p_i] + 0.5 * self.dt[None] * self.F2[p_i, m-1]
# Assign constant density
@ti.kernel
def compute_densities(self):
for p_i in range(self.ps.particle_num[None]):
if self.ps.material[p_i] == self.ps.material_soil:
self.ps.density[p_i] = self.density_0
# Calculate term fσ and fu
@ti.kernel
def compute_term_f(self):
for p_i in range(self.ps.particle_num[None]):
self.f_stress[p_i] = ti.Matrix(
[[self.stress1234[p_i][0], self.stress1234[p_i][2]],
[self.stress1234[p_i][2], self.stress1234[p_i][1]]])
self.f_u[p_i] = ti.Matrix(
[[self.Depq[0, 0] * self.u1234[p_i][0], self.Depq[0, 1] * self.u1234[p_i][1]],
[self.Depq[1, 0] * self.u1234[p_i][0], self.Depq[1, 1] * self.u1234[p_i][1]],
[self.Depq[2, 2] * self.u1234[p_i][1], self.Depq[2, 2] * self.u1234[p_i][0]],
[self.Depq[3, 0] * self.u1234[p_i][0], self.Depq[3, 1] * self.u1234[p_i][1]]])
# TODO: Check stress state and adapt
@ti.kernel
def compute_g_DP(self):
for p_i in range(self.ps.particle_num[None]):
self.I1[p_i] = self.stress1234[p_i][0] + self.stress1234[p_i][1] + self.stress1234[p_i][3]
self.p[p_i] = -self.I1[p_i] / 3
self.s[p_i] = ti.Vector([self.stress1234[p_i][0] - self.p[p_i], self.stress1234[p_i][1] - self.p[p_i], self.stress1234[p_i][2], self.stress1234[p_i][3] - self.p[p_i]])
self.sJ2[p_i] = ti.sqrt(0.5 * (self.s[p_i][0]**2 + self.s[p_i][1]**2 + 2 * self.s[p_i][2]**2 + self.s[p_i][3]**2))
self.g_DP[p_i] = self.sJ2[p_i] + self.alpha_fric * self.I1[p_i] - self.kc
@ti.func
def adapt_stress(self, p_i):
flag_state = -self.alpha_fric * self.I1[p_i] + self.kc
return flag_state
@ti.kernel
def check_adapt_stress_DP(self):
pass
# Update boundary particles
@ti.kernel
def update_boundary(self):
pass
# Calculate gradients of fσ and fu
@ti.func
def compute_f_stress_grad(self, p_i, p_j, r):
tmp = self.mass * (self.f_stress[p_i] / self.ps.density[p_i]**2 + self.f_stress[p_j] / self.ps.density[p_j]**2)
tmp_ckd = self.cubic_kernel_derivative(r)
res = tmp@tmp_ckd
return res
@ti.func
def compute_f_u_grad(self, p_i, p_j, r):
tmp = self.mass / self.ps.density[p_j] * (self.f_u[p_j] - self.f_u[p_i])
tmp_ckd = self.cubic_kernel_derivative(r)
res = tmp@tmp_ckd
return res
@ti.func
def cal_d_BA(self, p_i, p_j):
x_i = self.ps.x[p_i]
x_j = self.ps.x[p_j]
boundary = ti.Vector([
self.ps.bound[1] - self.ps.padding, self.ps.padding,
self.ps.bound[0] - self.ps.padding, self.ps.padding])
db_i = ti.Vector([x_i[1] - boundary[0], x_i[1] - boundary[1], x_i[0] - boundary[2], x_i[0] - boundary[3]])
db_j = ti.Vector([x_j[1] - boundary[0], x_j[1] - boundary[1], x_j[0] - boundary[2], x_j[0] - boundary[3]])
flag_b = db_i * db_j
flag_dir = flag_b < 0
if sum(flag_dir) > 1:
flag_choose = abs(flag_dir * db_i)
tmp_max = 0
for i in ti.static(range(4)):
tmp_max = max(tmp_max, flag_choose[i])
flag_choose -= tmp_max
flag_choose = flag_choose == 0.0
flag_dir -= flag_choose # will cause a warning: Local store may lose precision & Atomic add (i32 to f32) may lose precision
d_A = abs(db_i.dot(flag_dir))
d_B = abs(db_j.dot(flag_dir))
return d_B / d_A
@ti.func
def update_boundary_particles(self, p_i, p_j):
self.ps.density[p_j] = self.density_0
d_BA = self.cal_d_BA(p_i, p_j)
beta_max = 1.5
beta = min(beta_max, 1 + d_BA)
self.u1234[p_j] = (1 - beta) * self.u1234[p_i]
self.stress1234[p_j] = self.stress1234[p_i]
self.f_stress[p_j] = ti.Matrix([[self.stress1234[p_i][0], self.stress1234[p_i][2]],
[self.stress1234[p_i][2], self.stress1234[p_i][1]]])
self.f_u[p_j] = ti.Matrix([[self.Depq[0, 0] * self.u1234[p_i][0], self.Depq[0, 1] * self.u1234[p_i][1]],
[self.Depq[1, 0] * self.u1234[p_i][0], self.Depq[1, 1] * self.u1234[p_i][1]],
[self.Depq[2, 2] * self.u1234[p_i][1], self.Depq[2, 2] * self.u1234[p_i][0]],
[self.Depq[3, 0] * self.u1234[p_i][0], self.Depq[3, 1] * self.u1234[p_i][1]]])
@ti.kernel
def compute_f_grad(self):
for p_i in range(self.ps.particle_num[None]):
if self.ps.material[p_i] != self.ps.material_soil:
continue
x_i = self.ps.x[p_i]
f_stress_grad_i = ti.Vector([0.0 for _ in range(self.ps.dim)])
f_u_grad_i = ti.Vector([0.0 for _ in range(self.ps.dim_stress)])
for j in range(self.ps.particle_neighbors_num[p_i]):
p_j = self.ps.particle_neighbors[p_i, j]
x_j = self.ps.x[p_j]
if self.ps.material[p_j] == self.ps.material_dummy:
self.update_boundary_particles(p_i, p_j)
f_stress_grad_i += self.compute_f_stress_grad(p_i, p_j, x_i - x_j)
f_u_grad_i += self.compute_f_u_grad(p_i, p_j, x_i - x_j)
self.f_stress_grad[p_i] = f_stress_grad_i
self.f_u_grad[p_i] = f_u_grad_i
# Assign external forces
@ti.func
def compute_f_ext(self, p_i):
self.f_ext[p_i] = ti.Vector([0.0, self.g] if self.ps.dim == 2 else [0.0, 0.0, self.g])
# TODO: Calculate plastic strain
@ti.func
def compute_g_p(self, p_i):
self.g_p[p_i] = ti.Vector([0.0 for _ in range(self.ps.dim_stress)])
# TODO: Calculate the Jaumann stress rate
@ti.func
def compute_Jaumann(self, p_i):
self.Jaumann[p_i] = ti.Vector([0.0 for _ in range(self.ps.dim_stress)])
# Compute F1 and F2
@ti.kernel
def compute_F(self, m: int):
for p_i in range(self.ps.particle_num[None]):
if self.ps.material[p_i] != self.ps.material_soil:
continue
self.compute_f_ext(p_i)
self.compute_Jaumann(p_i)
self.compute_g_p(p_i)
self.F1[p_i, m] = self.f_stress_grad[p_i] + self.f_ext[p_i]
self.F2[p_i, m] = self.Jaumann[p_i] + self.f_u_grad[p_i] - self.g_p[p_i]
# Update u, σ, x through RK4
@ti.kernel
def update_particle(self):
for p_i in range(self.ps.particle_num[None]):
if self.ps.material[p_i] != self.ps.material_soil:
continue
if self.ps.material[p_i] == self.ps.material_soil:
self.ps.v[p_i] += self.dt[None] / 6 * (
self.F1[p_i, 0] + 2 * self.F1[p_i, 1] +
2 * self.F1[p_i, 2] + self.F1[p_i, 3])
self.ps.stress[p_i] += self.dt[None] / 6 * (
self.F2[p_i, 0] + 2 * self.F2[p_i, 1] +
2 * self.F2[p_i, 2] + self.F2[p_i, 3])
self.ps.x[p_i] += self.dt[None] * self.ps.v[p_i]
def RK4_one_step(self, m):
# print('RK4 start to compute step', m)
self.update_boundary()
self.check_adapt_stress_DP()
self.compute_term_f()
self.compute_f_grad()
self.compute_F(m)
def advect_RK4(self):
for m in ti.static(range(4)):
if m == 0:
self.update_u_stress_1(m)
elif m < 4:
self.update_u_stress_234(m)
self.RK4_one_step(m)
self.update_particle()
def substep(self):
self.init_data()
self.compute_densities()
self.advect_RK4()
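# The advect_RK4()/update_particle() pair above applies the (1, 2, 2, 1)/6
# Runge-Kutta weighting to the staged F1/F2 evaluations. For comparison, a
# generic, library-free sketch of a classical fourth-order Runge-Kutta step is
# given below (note: the solver above advances every intermediate stage by a
# half step, so it is not exactly the textbook scheme sketched here).
if __name__ == "__main__":
    import numpy as np

    def rk4_step(f, t, y, dt):
        """One classical RK4 step for dy/dt = f(t, y)."""
        k1 = f(t, y)
        k2 = f(t + 0.5 * dt, y + 0.5 * dt * k1)
        k3 = f(t + 0.5 * dt, y + 0.5 * dt * k2)
        k4 = f(t + dt, y + dt * k3)
        return y + dt / 6.0 * (k1 + 2 * k2 + 2 * k3 + k4)

    # Example: one step of dy/dt = -y starting from y(0) = 1.
    print(rk4_step(lambda t, y: -y, 0.0, np.array([1.0]), 0.1))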
|
{"hexsha": "d6b3f8503c69d9feffa7c63fe1f8d67b997691dc", "size": 12674, "ext": "py", "lang": "Python", "max_stars_repo_path": "temp/eng - 20220411/soilsph.py", "max_stars_repo_name": "Rabmelon/tiSPHi", "max_stars_repo_head_hexsha": "8ffb0e505edd01cb31cb049bfe54f1f2b99cf121", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2022-01-03T12:14:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-11T01:22:52.000Z", "max_issues_repo_path": "temp/eng - 20220411/soilsph.py", "max_issues_repo_name": "Rabmelon/taichiCourse01_tiSPHi", "max_issues_repo_head_hexsha": "8ffb0e505edd01cb31cb049bfe54f1f2b99cf121", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "temp/eng - 20220411/soilsph.py", "max_forks_repo_name": "Rabmelon/taichiCourse01_tiSPHi", "max_forks_repo_head_hexsha": "8ffb0e505edd01cb31cb049bfe54f1f2b99cf121", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.2554744526, "max_line_length": 209, "alphanum_fraction": 0.5675398454, "include": true, "reason": "import numpy,from numpy", "num_tokens": 3779}
|
from itertools import product
from pyomo.core import *
class Model:
model = AbstractModel()
model.T = Set() # Index Set for time steps of optimization horizon
# Feasible charge powers to ESS under the given conditions
model.Feasible_ESS_Decisions = Set()
# Feasible charge powers to VAC under the given conditions
model.Feasible_VAC_Decisions = Set()
model.Value_Index = Set(dimen=3)
model.Value = Param(model.Value_Index, mutable=True, within=Reals)
model.P_PV = Param(model.T, within=NonNegativeReals) # PV PMPP forecast
model.PV_Inv_Max_Power = Param(within=PositiveReals) # PV inverter capacity
model.P_Load = Param(model.T, within=NonNegativeReals) # Active power demand
model.Initial_ESS_SoC = Param(within=Reals, default=0)
model.Initial_VAC_SoC = Param(within=Reals, default=0.0)
model.Number_of_Parked_Cars = Param(within=PositiveIntegers)
model.Unit_Consumption_Assumption = Param(within=PositiveReals)
model.Unit_Drop_Penalty = Param(within=PositiveReals)
model.ESS_Capacity = Param(within=PositiveReals)
model.VAC_Capacity = Param(within=PositiveReals)
model.Behavior_Model_Index = Set(dimen=2)
model.Behavior_Model = Param(model.Behavior_Model_Index)
model.dT = Param(within=PositiveIntegers)
model.Recharge = Param(within=Binary)
model.VAC_States_Min = Param(within=NonNegativeReals) #it should accept 0
model.Timestep = Param(within=NonNegativeIntegers)
model.final_ev_soc = Param(within=Reals, default=0.0)
model.P_Grid_Max_Export_Power = Param(within=NonNegativeReals) # Max active power export
model.ESS_Max_Charge_Power = Param(within=PositiveReals) # Max Charge Power of ESSs
model.ESS_Max_Discharge_Power = Param(within=PositiveReals) # Max Discharge Power of ESSs
model.Max_Charging_Power_kW = Param(within=NonNegativeReals)
####################################### Outputs #######################################################
# Combined decision
model.Decision = Var(model.Feasible_ESS_Decisions, model.Feasible_VAC_Decisions, within=Binary)
model.expected_future_cost = Var(model.Feasible_ESS_Decisions, model.Feasible_VAC_Decisions, within=Reals,
initialize=0.0)
model.P_ESS_OUTPUT = Var(within=Reals, bounds=(-model.ESS_Max_Charge_Power, model.ESS_Max_Discharge_Power))
model.P_VAC_OUTPUT = Var(within=NonNegativeReals, bounds=(0, model.Max_Charging_Power_kW))
model.P_PV_OUTPUT = Var(within=NonNegativeReals, bounds=(0, model.PV_Inv_Max_Power))
model.P_GRID_OUTPUT = Var(within=Reals, bounds=(-model.P_Grid_Max_Export_Power, model.P_Grid_Max_Export_Power))
model.P_PV_single = Var(within=NonNegativeReals, bounds=(0, model.PV_Inv_Max_Power))
model.P_Load_single = Var(within=NonNegativeReals)
model.future_cost = Var(within=Reals)
def combinatorics(model):
# only one of the feasible decisions can be taken
return 1 == sum(model.Decision[ess, vac] for ess, vac in
product(model.Feasible_ESS_Decisions, model.Feasible_VAC_Decisions))
model.const_integer = Constraint(rule=combinatorics)
def rule_iniPV(model):
for j in model.P_PV:
if j == model.Timestep:
return model.P_PV_single == model.P_PV[j]
model.con_ess_IniPV = Constraint(rule=rule_iniPV)
def rule_iniLoad(model):
for j in model.P_Load:
if j == model.Timestep:
return model.P_Load_single == model.P_Load[j]
model.con_ess_IniLoad = Constraint(rule=rule_iniLoad)
def con_rule_pv_potential(model):
return model.P_PV_OUTPUT <= model.P_PV_single
model.con_pv_pmax = Constraint(rule=con_rule_pv_potential)
def ess_chargepower(model):
return model.P_ESS_OUTPUT == sum(model.Decision[ess, vac] * ess for ess, vac in
product(model.Feasible_ESS_Decisions,
model.Feasible_VAC_Decisions)) * (model.ESS_Capacity * 3600) / (100 * model.dT)
model.const_esschargepw = Constraint(rule=ess_chargepower)
def vac_chargepower(model):
if model.Recharge == 1:
return model.P_VAC_OUTPUT == sum(model.Decision[ess, vac] * vac for ess, vac in
product(model.Feasible_ESS_Decisions,
model.Feasible_VAC_Decisions)) * (model.VAC_Capacity * 3600) / (
100 * model.dT)
else:
return model.P_VAC_OUTPUT == 0
model.const_evchargepw = Constraint(rule=vac_chargepower)
def home_demandmeeting(model):
return model.P_Load_single + model.P_VAC_OUTPUT == model.P_ESS_OUTPUT + model.P_PV_OUTPUT + model.P_GRID_OUTPUT
model.const_demand = Constraint(rule=home_demandmeeting)
def con_expected_future_value(model, p_ess, p_vac):
if model.Recharge == 1:
# model.powerFromEss = -p_ess / 100 * model.ESS_Capacity / model.dT
essSoC = -p_ess + model.Initial_ESS_SoC # Transition between ESS SOC states are always deterministic
vacSoC = p_vac + model.Initial_VAC_SoC # Transition between EV SOC states are deterministic when the car is at home now
valueOf_home = model.Value[(
essSoC, vacSoC, 1)] # Value of having fin_ess_soc,fin_ev_soc and home position in next time interval
valueOf_away = model.Value[(
essSoC, vacSoC, 0)] # Value of having fin_ess_soc,fin_ev_soc and away position in next time interval
# Expected future value = probability of switching to home state * value of having home state
#                        + probability of switching to away state * value of having away state
return model.expected_future_cost[p_ess, p_vac] == model.Behavior_Model[(1, 1)] * valueOf_home + \
model.Behavior_Model[(1, 0)] * valueOf_away
# print("value home "+str(valueOf_home)+" value away "+str(valueOf_away)+" future cost "+str(expected_future_cost) )
# immediate_cost = model.GlobalTargetWeight * model.G
# future_cost += model.Decision[p_ess, p_vac] * (immediate_cost + expected_future_cost)
elif model.Recharge == 0:
        # When the EV is away (Recharge == 0), it cannot be charged by the feasible decision 'p_vac'
# model.powerFromEss = -p_ess / 100 * model.ESS_Capacity / model.dT
        essSoC = -p_ess + model.Initial_ESS_SoC  # Transition between ESS SoC states is always deterministic
# vacSoC = p_vac + model.Initial_VAC_SoC # # Transition between EV SOC states are deterministic when the car is at home now
vacSoC = 0 + model.final_ev_soc
# Extra penalty for dropping below predefined ev_minSoC limit
penalty_for_negative_soc_home = (model.VAC_States_Min - model.final_ev_soc) / 100 * model.Unit_Drop_Penalty * model.VAC_Capacity if model.final_ev_soc < model.VAC_States_Min else 0
## penalty_for_negative_soc_away = (model.VAC_States_Min - model.final_ev_soc) / 100 * model.Unit_Drop_Penalty * model.VAC_Capacity if final_ev_soc < model.VAC_States_Min else 0
penalty_for_negative_soc_away = penalty_for_negative_soc_home
valueOf_home = model.Value[(essSoC, vacSoC,
1)] + penalty_for_negative_soc_home # Value of having fin_ess_soc,fin_ev_soc and home position in next time interval
valueOf_away = model.Value[(essSoC, vacSoC,
0)] + penalty_for_negative_soc_away # Value of having fin_ess_soc,fin_ev_soc and away position in next time interval
        # Expected future value = probability of switching to home state * value of having home state
        #                       + probability of switching to away state * value of having away state
return model.expected_future_cost[p_ess, p_vac] == model.Behavior_Model[(0, 1)] * valueOf_home + \
model.Behavior_Model[
(0, 0)] * valueOf_away
model.con_expected_future_value = Constraint(model.Feasible_ESS_Decisions, model.Feasible_VAC_Decisions,
rule=con_expected_future_value)
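# Worked numerical example for the rule above (hypothetical values): with
# Behavior_Model[(1, 1)] = 0.8, Behavior_Model[(1, 0)] = 0.2, valueOf_home = 10
# and valueOf_away = 25, the expected future cost for that (p_ess, p_vac) pair
# is 0.8 * 10 + 0.2 * 25 = 13.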
def con_future_cost(model):
return model.future_cost == sum(model.Decision[p_ess, p_vac] * model.expected_future_cost[p_ess, p_vac]
for p_ess, p_vac in
product(model.Feasible_ESS_Decisions, model.Feasible_VAC_Decisions))
model.rule_future_cost = Constraint(rule=con_future_cost)
def objrule1(model):
return model.P_PV_single - model.P_PV_OUTPUT + model.future_cost
model.obj = Objective(rule=objrule1, sense=minimize)
|
{"hexsha": "fc0ece31136e7b4f0fac9e5635d147e6434c0179", "size": 8896, "ext": "py", "lang": "Python", "max_stars_repo_path": "optimization/models/StochasticResidentialMaxPVSimulation.py", "max_stars_repo_name": "garagonc/optimization-framework", "max_stars_repo_head_hexsha": "1ca57699d6a3f2f98dcaea96430e75c3f847b49f", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "optimization/models/StochasticResidentialMaxPVSimulation.py", "max_issues_repo_name": "garagonc/optimization-framework", "max_issues_repo_head_hexsha": "1ca57699d6a3f2f98dcaea96430e75c3f847b49f", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "optimization/models/StochasticResidentialMaxPVSimulation.py", "max_forks_repo_name": "garagonc/optimization-framework", "max_forks_repo_head_hexsha": "1ca57699d6a3f2f98dcaea96430e75c3f847b49f", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.2598870056, "max_line_length": 192, "alphanum_fraction": 0.6692895683, "include": true, "reason": "from pyomo", "num_tokens": 2234}
|
// Copyright (c) 2013, German Neuroinformatics Node (G-Node)
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted under the terms of the BSD License. See
// LICENSE file in the root of the Project.
#include <nix/hydra/multiArray.hpp>
#include <nix.hpp>
#include <iostream>
#include <sstream>
#include <iterator>
#include <stdexcept>
#include <limits>
#include <vector>
#include <cppunit/TestFixture.h>
#include <cppunit/extensions/HelperMacros.h>
#include <cppunit/CompilerOutputter.h>
#include <cppunit/extensions/TestFactoryRegistry.h>
#include <cppunit/TestResult.h>
#include <cppunit/TestResultCollector.h>
#include <cppunit/TestRunner.h>
#include <cppunit/BriefTestProgressListener.h>
#include <boost/optional.hpp>
// Define a tag-like class with units- and unit-getters that allows compound units
struct tag_tmp {
std::vector<std::string> units_ref;
tag_tmp() : units_ref(std::vector<std::string>()) {}
tag_tmp(std::vector<std::string> units) : units_ref(units) {}
std::vector<std::string> units() const {
return units_ref;
}
std::string unit() const {
return units_ref.front();
}
boost::optional<std::string> unito() const {
boost::optional<std::string> ret = units_ref.front();
return ret;
}
};
class TestValidate : public CPPUNIT_NS::TestFixture {
private:
CPPUNIT_TEST_SUITE(TestValidate);
CPPUNIT_TEST(test);
CPPUNIT_TEST_SUITE_END ();
time_t startup_time;
nix::File file;
nix::Block block;
nix::DataArray array1;
nix::DataArray array2;
nix::DataArray array3;
nix::DataArray array4;
nix::DataArray array5;
nix::DataFrame frame1;
std::vector<nix::DataArray> refs;
std::vector<double> extent, position;
nix::DataArray positions;
nix::DataArray extents;
std::vector<std::string> atomic_units;
std::vector<std::string> compound_units;
std::vector<std::string> invalid_units;
nix::MultiTag mtag;
nix::Tag tag;
tag_tmp units_tmp;
nix::SetDimension dim_set1;
nix::SetDimension dim_set2;
nix::SetDimension dim_set3;
nix::SampledDimension dim_sample1;
nix::SampledDimension dim_sample2;
nix::SampledDimension dim_sample3;
nix::RangeDimension dim_range1;
nix::RangeDimension dim_range2;
nix::RangeDimension dim_range3;
nix::DataFrameDimension dim_frame1;
void setValid();
void setInvalid();
public:
void setUp();
void tearDown();
void test();
};
|
{"hexsha": "81828e15199b071420ecc654b5d16e9a8d02b032", "size": 2602, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "test/TestValidate.hpp", "max_stars_repo_name": "mpsonntag/nix", "max_stars_repo_head_hexsha": "3e2b874973355f51fcfbaee31eeeb5d9eccab943", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 53.0, "max_stars_repo_stars_event_min_datetime": "2015-02-10T01:04:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-24T14:26:04.000Z", "max_issues_repo_path": "test/TestValidate.hpp", "max_issues_repo_name": "mpsonntag/nix", "max_issues_repo_head_hexsha": "3e2b874973355f51fcfbaee31eeeb5d9eccab943", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 262.0, "max_issues_repo_issues_event_min_datetime": "2015-01-09T13:24:21.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-02T13:45:31.000Z", "max_forks_repo_path": "test/TestValidate.hpp", "max_forks_repo_name": "mpsonntag/nix", "max_forks_repo_head_hexsha": "3e2b874973355f51fcfbaee31eeeb5d9eccab943", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 34.0, "max_forks_repo_forks_event_min_datetime": "2015-03-27T16:41:14.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-27T06:47:59.000Z", "avg_line_length": 26.02, "max_line_length": 82, "alphanum_fraction": 0.6871637202, "num_tokens": 663}
|
import json
import os
import numbers
import datetime
import operator
# File description:
# Generates lightweight decision tree and ensemble models using SKLearn
# Then ports these models into JSON string so that the frontend can
# parse and evaluate the model.
# The ported version is still based on the sklearn internals,
# but includes significant amounts of metadata mapping variable names
# to feature orderings, and also provides maps for categorical data
# to convert them into one-hot strings.
# Furthermore, provides methods to *explicitly* convert input data with
# each record as dicts to a 2d array as is convention
# TODO: delineate structure of the output format
# Usage notes:
# This will not be shipped with prod. It is a script for generating
# and hardcoding learned ML models into the frontend. It will only
# ever be run in-house.
# Once xcalar-solutions is refactored this will likely be moved there.
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction import DictVectorizer
# Iris dataset for testing only
from sklearn.datasets import load_iris
# Hardcode path as script is for in-house dev use only.
XLRGUIDIR = os.getenv('XLRGUIDIR','/var/www/xcalar-gui')
thisPath = XLRGUIDIR + "/assets/js/suggest/"
def getOneHotEncodingFromValues(values):
"""Creates one hot mapping"""
# Literally creates an identity matrix at the moment.
# In the future, may switch this infrastructure to
# numpy vectorizer + numpy onehot encoding
oneHotArr = [[1 if j == i else 0 for j in range(len(values))]
for i in range(len(values))]
return oneHotArr
def getCategoryMapFromColumn(column):
# Column should be array-like of string values
"""Returns map from possible values to one hot repr.
e.g. for column with name "type" and possible values "string" and "int"
provides a map from "string" -> [0,1] and "int" -> [1,0]
"""
uniqueValues = np.unique(column)
oneHotMatrix = getOneHotEncodingFromValues(uniqueValues)
valueToOneHotMap = {value:arr for (value, arr)
in zip(uniqueValues,oneHotMatrix)}
return valueToOneHotMap
def getOneHotIndices(oneHotMaps):
# OneHotMap is array of maps to one hot reps
"""returned quantity is indices that features map to when convertedto one-hot.
E.g. if input data is features ["type", "color"]
and "type" has values that map to 3 length 1-hot arrays and
"color" has values that map to 2 length 1-hot arrays then
outputs [0,3,5]
"""
oneHotIndices = [0]
for idx, oneHotMap in enumerate(oneHotMaps):
oneHotIndices.append(oneHotIndices[idx] + len(oneHotMap.values()[0]))
return oneHotIndices
def getCategoryMapsFromAllData(catDataFrame, featureOrdering):
    # catDataFrame is a pandas dataframe
"""Returns map from feature to categoricalMaps"""
categoryMaps = {feature:getCategoryMapFromColumn(column)
for feature, column in catDataFrame.iteritems()}
return categoryMaps
def getOneHotRepr(catDataFrame, categoricalMapMeta):
indicesMap = categoricalMapMeta["oneHotIndicesMap"]
oneHotMatrix = np.zeros([catDataFrame.shape[0],
categoricalMapMeta["reprLength"]])
for feature, column in catDataFrame.iteritems():
lowerFieldIdx = indicesMap[feature][0]
upperFieldIdx = indicesMap[feature][1]
oneHotMap = categoricalMapMeta["categoricalMaps"][feature]
for recordIdx, entry in enumerate(column):
oneHotMatrix[recordIdx, lowerFieldIdx:upperFieldIdx] = \
oneHotMap[entry]
return oneHotMatrix
def getOneHotIndicesMap(categoryMaps, featureOrdering):
# Category maps map from feature to (feature value -> oneHotRepresentation)
# featureOrdering map from feature to (idx)
"""Returns map from featurename to (lower, upper) indices in final repr
See getOneHotIndices, similar return quantity.
"""
oneHotIndicesMap = {}
# Stores representation length
reprLength = 0
sortedFeatures = sorted(categoryMaps.keys(), key=featureOrdering.get)
for idx, feature in enumerate(sortedFeatures):
lowIdx = reprLength
highIdx = lowIdx + len(categoryMaps[feature].values()[0])
oneHotIndicesMap[feature] = (lowIdx, highIdx)
reprLength = highIdx
return oneHotIndicesMap, reprLength
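# Illustrative example for getOneHotIndicesMap (hypothetical feature names):
# with categoryMaps = {"type": {"int": [1, 0, 0], "str": [0, 1, 0], "bool": [0, 0, 1]},
#                      "color": {"red": [1, 0], "blue": [0, 1]}}
# and featureOrdering = {"type": 0, "color": 1}, the result is
# ({"type": (0, 3), "color": (3, 5)}, 5): "type" occupies columns 0-2 and "color"
# columns 3-4 of the one-hot block that is appended after the continuous features.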
# Currently: if string, assume categorical. Else assume cts, must be numeric
# NOTE: strings of numbers, e.g. "1", are considered categorical
def isContinuous(value):
return isinstance(value, numbers.Number)
def isCategorical(value):
return isinstance(value, basestring)
def getFeatureTypeMap(X):
# Assume that all features are present in all records (may change)
# Infer feature types of all from feature types of first record
"""Returns: dict mapping from feature name to featuretype"""
if not X:
# Empty dataset
return {}
if not X[0]:
# Empty features in first element
return {}
types = {}
for feature in set(X[0].keys()):
if isContinuous(X[0][feature]):
types[feature] = "continuous"
elif isCategorical(X[0][feature]):
types[feature] = "categorical"
else:
# Unrecognized type
types[feature] = None
return types
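# Illustrative example (hypothetical record): getFeatureTypeMap([{"age": 31, "kind": "str"}])
# returns {"age": "continuous", "kind": "categorical"}, since numeric values are treated
# as continuous and strings as categorical (see isContinuous/isCategorical above).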
def checkInputDataValid(X):
# Takes X as dict list.
# MUST be done before importing to pandas as pandas will
# silently gloss over malformatted, inputting NaN or empty,
# and converting types to lowest common denominator type
# Hard coded with reason, see large commment below
ArbitraryMaxCategoricalLabels = 31
if not X:
# Empty array
return True
types = getFeatureTypeMap(X)
if not types:
# TODO: Assert that every element of X is empty
pass
categoricalBuckets = {}
for feature, mlType in types.items():
if mlType == "continuous":
categoricalBuckets[feature] = None
elif mlType == "categorical":
categoricalBuckets[feature] = set({})
elif mlType == None:
print("Unrecognized type.")
return False
else:
# Should _never_ happen.
print("Type not cts, categorical, or invalid type.")
return False
featureSet = set(types.keys())
for recordDict in X:
curFeatures = set(recordDict.keys())
if not featureSet == curFeatures:
print("Not all records have same features.")
return False
for feature in curFeatures:
if (isContinuous(recordDict[feature]) and
(types[feature] == "continuous")):
# feature types match: continuous
continue
elif (isCategorical(recordDict[feature]) and
(types[feature] == "categorical")):
# feature types match: categorical
categoricalBuckets[feature].add(recordDict[feature])
else:
# feature types do not match
print("Feature type not consistent across records.")
return False
# Check that categorical features have small numbers of possible labels
# realistically, this should be < ~10, but RF begins to lose numerical
# stability at around 32 labels with one-hot encoding
# depending on implementation (see R RandomForest categorical implement)
# Math note: because RF picks best features from random feature subset
# to decide what feature to split on, and one-hot representation turns
# one feature into many features, naive RF with one-hot is biased towards
# categorical features with many possible labels.
    for bucket in categoricalBuckets.values():
        if bucket is None:
            # Continuous random variable
            continue
        else:
            if len(bucket) > ArbitraryMaxCategoricalLabels:
                print("Too many labels for category.")
                return False
return True
def sortAndOrderFeatures(X):
"""Creates an explicit ordering on all features
Additionally, puts categorical features last.
"""
# X here is list of dicts
if not checkInputDataValid(X):
print("Invalid input data.")
typeMap = getFeatureTypeMap(X)
categorical = set({})
continuous = set({})
orderMap = {}
for feature, mlType in typeMap.items():
if mlType == "continuous":
continuous.add(feature)
elif mlType == "categorical":
categorical.add(feature)
else:
print("Invalid type.")
return {}
for idx, feature in enumerate(continuous):
orderMap[feature] = idx
categoricalInitIdx = len(continuous)
for idx, feature in enumerate(categorical):
orderMap[feature] = categoricalInitIdx + idx
return orderMap, typeMap, continuous, categorical
def prepModelStr(X, modType):
# X is list of records represented as dicts
"""Include current time, model type, categorical variable map"""
timeOnCreate = str(datetime.datetime.now())
modelType = modType
orderMap, typeMap, continuous, categorical = sortAndOrderFeatures(X)
inputMeta = {
"orderMap" : orderMap,
"typeMap" : typeMap,
"categoricalMapMeta": None
}
# Use pandas to convert list of dict-records to dataframe
# for easy transposition
# Note, can use pandas earlier but at a loss of transparency.
# Purpose of dataframe: turns array of dicts into structure
# that allows for indexing columns by feature name dictionaries,
# i.e. can get whole column for a feature by providing the feature
# name as key.
dataFrame = pd.DataFrame(X)
ctsDataFrame = dataFrame[sorted(list(continuous), key=orderMap.get)]
catDataFrame = dataFrame[sorted(list(categorical), key=orderMap.get)]
if categorical:
categoricalMaps = getCategoryMapsFromAllData(catDataFrame,orderMap)
oneHotIndicesMap, reprLength = getOneHotIndicesMap(categoricalMaps,
orderMap)
categoricalMapMeta = {
"categoricalMaps" : categoricalMaps,
"oneHotIndicesMap": oneHotIndicesMap,
"reprLength" : reprLength
}
inputMeta["categoricalMapMeta"] = categoricalMapMeta
modelMeta = {
"timeOnCreate": timeOnCreate,
"modelType" : modelType,
"inputMeta" : inputMeta
}
# only return dataframe to save on computation, should be in sorted order
return modelMeta, ctsDataFrame, catDataFrame
def prepInputData(modelMeta, ctsDataFrame, catDataFrame):
overallValues = ctsDataFrame.values
if (modelMeta["inputMeta"]["categoricalMapMeta"]):
categoricalMapMeta = modelMeta["inputMeta"]["categoricalMapMeta"]
overallValues = np.concatenate((overallValues,
getOneHotRepr(catDataFrame,
categoricalMapMeta)),
axis = 1)
return overallValues
def exportDT(dtModel):
"""Creates barebones representation from sklearn internal repr"""
skTree = dtModel.tree_
skExportObj = {
"children_left" :skTree.children_left.tolist(),
"children_right":skTree.children_right.tolist(),
"feature" :skTree.feature.tolist(),
"threshold" :skTree.threshold.tolist(),
"value" :skTree.value.tolist(),
"node_count" :skTree.node_count
}
return skExportObj
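# Note on the exported arrays (sklearn's standard tree encoding, summarized here
# for reference): children_left[i] and children_right[i] give the node indices of
# node i's children (-1 for a leaf), feature[i] and threshold[i] define the split
# test "x[feature[i]] <= threshold[i]" (go left when true), and value[i] holds the
# (possibly weighted) class counts used for prediction at that node.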
def exportRF(rfModel):
"""Creates barebones representation from sklearn internal repr"""
skEstimators = []
for estimator in rfModel.estimators_:
skEstimators.append(exportDT(estimator))
skExportObj = {
"estimators_": skEstimators
}
return skExportObj
def hardcodeJSONStr(strIn, strOut, jsFileIn, jsFileOut):
with open(jsFileIn, 'r') as fileI, open(jsFileOut, 'w') as fileO:
for line in fileI:
if line.strip().startswith(strIn):
# Preserves the surrounding whitespace
amtWhiteSpaceBef = len(line) - len(line.lstrip())
amtWhiteSpaceAft = len(line) - len(line.rstrip())
fileO.write(line[:amtWhiteSpaceBef])
fileO.write(strOut)
if (amtWhiteSpaceAft > 0):
fileO.write(line[-amtWhiteSpaceAft:])
else:
fileO.write(line)
def makeDTStr(X, y):
modelMeta, ctsDataFrame, catDataFrame = prepModelStr(X, "DecisionTree")
XProcessed = prepInputData(modelMeta, ctsDataFrame, catDataFrame)
skModel = DecisionTreeClassifier(random_state=0).fit(XProcessed,y)
exportObj = {
"model" : exportDT(skModel),
"modelMeta": modelMeta
}
return json.dumps(exportObj)
def makeRFStr(X, y):
modelMeta, ctsDataFrame, catDataFrame = prepModelStr(X, "RandomForest")
XProcessed = prepInputData(modelMeta, ctsDataFrame, catDataFrame)
skModel = RandomForestClassifier(random_state=0).fit(XProcessed,y)
exportObj = {
"model" : exportRF(skModel),
"modelMeta": modelMeta
}
return json.dumps(exportObj)
def makeAndAppendModelsTemplate(X,y):
# X array of dicts where keys are features, values are
# feature values, y array of labels
rfStr = makeRFStr(X,y)
strIn = "joinModelStr:"
strOut = "joinModelStr: '" + \
rfStr + "',"
jsFileIn = thisPath + "skRFModels.js"
jsFileOut = thisPath + "skRFModelsTmp.js"
hardcodeJSONStr(strIn, strOut, jsFileIn, jsFileOut)
os.rename(jsFileOut, jsFileIn)
def irisToDictarray(iris):
dictArray = []
for row in iris.data:
tempDict = {}
for idx, field in enumerate(row):
tempDict[iris.feature_names[idx]] = field
tempDict["cat1"] = "hehe"
dictArray.append(tempDict)
return dictArray
def IrisTest():
iris = load_iris()
data = irisToDictarray(iris)
print makeRFStr(data, iris.target)
if __name__ == "__main__":
IrisTest()
|
{"hexsha": "bad38641d029396d12f891a62aa679c07d1d34cf", "size": 14253, "ext": "py", "lang": "Python", "max_stars_repo_path": "ts/shared/util/suggest/makeSKLearnModel.py", "max_stars_repo_name": "xcalar/xcalar-idl", "max_stars_repo_head_hexsha": "69aa08fb42cde6c905b3aa2129c365c4c3e575f9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ts/shared/util/suggest/makeSKLearnModel.py", "max_issues_repo_name": "xcalar/xcalar-idl", "max_issues_repo_head_hexsha": "69aa08fb42cde6c905b3aa2129c365c4c3e575f9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ts/shared/util/suggest/makeSKLearnModel.py", "max_forks_repo_name": "xcalar/xcalar-idl", "max_forks_repo_head_hexsha": "69aa08fb42cde6c905b3aa2129c365c4c3e575f9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-01-31T20:52:28.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-31T20:52:28.000Z", "avg_line_length": 37.5078947368, "max_line_length": 82, "alphanum_fraction": 0.6592997965, "include": true, "reason": "import numpy", "num_tokens": 3231}
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import unittest
import numpy as np
from tests.unit import utils
from tests.unit.hazmat import test_geometric_intersection
@utils.needs_speedup
class Test_speedup_bbox_intersect(
test_geometric_intersection.Test_bbox_intersect
):
@staticmethod
def _call_function_under_test(nodes1, nodes2):
from bezier import _speedup
return _speedup.bbox_intersect(nodes1, nodes2)
@utils.needs_speedup
class Test_speedup_all_intersections(
test_geometric_intersection.Test_all_intersections
):
@staticmethod
def _call_function_under_test(nodes_first, nodes_second, **kwargs):
from bezier import _speedup
return _speedup.curve_intersections(
nodes_first, nodes_second, **kwargs
)
@staticmethod
def reset_curves_workspace(workspace_size):
from bezier import _speedup
return _speedup.reset_curves_workspace(workspace_size)
@staticmethod
def curves_workspace_size():
from bezier import _speedup
return _speedup.curves_workspace_size()
def test_workspace_resize(self):
nodes1 = np.asfortranarray([[-3.0, 5.0], [0.0, 0.0]])
nodes2 = np.asfortranarray(
[[-7.0, 9.0, -7.0, 9.0], [-9.0, 13.0, -13.0, 9.0]]
)
# NOTE: These curves intersect 3 times, so a workspace of
# 2 is not large enough.
self.reset_curves_workspace(2)
intersections, coincident = self._call_function_under_test(
nodes1, nodes2
)
expected = np.asfortranarray([[0.5, 0.375, 0.625], [0.5, 0.25, 0.75]])
self.assertEqual(intersections, expected)
self.assertFalse(coincident)
# Make sure the workspace was resized.
self.assertEqual(self.curves_workspace_size(), 3)
def test_workspace_too_small(self):
from bezier import _speedup
nodes1 = np.asfortranarray([[-3.0, 5.0], [0.0, 0.0]])
nodes2 = np.asfortranarray(
[[-7.0, 9.0, -7.0, 9.0], [-9.0, 13.0, -13.0, 9.0]]
)
# NOTE: These curves intersect 3 times, so a workspace of
# 2 is not large enough.
self.reset_curves_workspace(2)
with self.assertRaises(ValueError) as exc_info:
self._call_function_under_test(nodes1, nodes2, allow_resize=False)
exc_args = exc_info.exception.args
expected = _speedup.TOO_SMALL_TEMPLATE.format(3, 2)
self.assertEqual(exc_args, (expected,))
# Make sure the workspace was **not** resized.
self.assertEqual(self.curves_workspace_size(), 2)
@utils.needs_speedup
class Test_reset_curves_workspace(unittest.TestCase):
@staticmethod
def _call_function_under_test(workspace_size):
from bezier import _speedup
return _speedup.reset_curves_workspace(workspace_size)
def test_it(self):
from bezier import _speedup
size = 5
return_value = self._call_function_under_test(size)
self.assertIsNone(return_value)
self.assertEqual(_speedup.curves_workspace_size(), size)
@unittest.expectedFailure
def test_threadsafe(self):
from bezier import _speedup
size_main = 3
self._call_function_under_test(size_main)
worker = WorkspaceThreadedAccess()
self.assertIsNone(worker.size1)
self.assertIsNone(worker.size2)
size1 = 7
size2 = 8
thread1 = threading.Thread(target=worker.task1, args=(size1,))
thread2 = threading.Thread(target=worker.task2, args=(size2,))
thread1.start()
thread2.start()
thread1.join()
thread2.join()
# This check demonstrates the **broken-ness** of the implementation.
# The sizes for each thread should be the sizes actually **set** in
# the given thread and the workspace in the main thread should be
# unchanged (i.e. should have ``size_main``). What we'll actually
# observe is ``(size2, size1, size2)``.
expected = (size1, size2, size_main)
actual = (worker.size1, worker.size2, _speedup.curves_workspace_size())
self.assertEqual(actual, expected)
@utils.needs_speedup
class Test_curves_workspace_size(unittest.TestCase):
@staticmethod
def _call_function_under_test():
from bezier import _speedup
return _speedup.curves_workspace_size()
def test_it(self):
from bezier import _speedup
size = 5
_speedup.reset_curves_workspace(size)
self.assertEqual(self._call_function_under_test(), size)
class WorkspaceThreadedAccess:
def __init__(self):
self.barrier1 = threading.Event()
self.barrier2 = threading.Event()
self.barrier3 = threading.Event()
self.size1 = None
self.size2 = None
def event1(self, size):
from bezier import _speedup
# NOTE: There is no need to ``wait`` since this is the first event.
_speedup.reset_curves_workspace(size)
self.barrier1.set()
def event2(self):
from bezier import _speedup
self.barrier1.wait()
result = _speedup.curves_workspace_size()
self.barrier2.set()
return result
def event3(self, size):
from bezier import _speedup
self.barrier2.wait()
_speedup.reset_curves_workspace(size)
self.barrier3.set()
def event4(self):
from bezier import _speedup
self.barrier3.wait()
# NOTE: There is no barrier to ``set`` since this is the last event.
return _speedup.curves_workspace_size()
def task1(self, size):
self.event1(size)
self.size1 = self.event4()
def task2(self, size):
self.size2 = self.event2()
self.event3(size)
|
{"hexsha": "ee8cf4e82f72c00d6b7a9496f357ac19b62912df", "size": 6313, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/unit/test__geometric_intersection.py", "max_stars_repo_name": "dibir-magomedsaygitov/bezier", "max_stars_repo_head_hexsha": "a3c408d11133aa1b97fb6dd673888cf56f03178e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 165, "max_stars_repo_stars_event_min_datetime": "2017-05-27T08:22:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T17:31:41.000Z", "max_issues_repo_path": "tests/unit/test__geometric_intersection.py", "max_issues_repo_name": "dibir-magomedsaygitov/bezier", "max_issues_repo_head_hexsha": "a3c408d11133aa1b97fb6dd673888cf56f03178e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 254, "max_issues_repo_issues_event_min_datetime": "2016-11-18T02:43:26.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-07T15:43:07.000Z", "max_forks_repo_path": "tests/unit/test__geometric_intersection.py", "max_forks_repo_name": "dibir-magomedsaygitov/bezier", "max_forks_repo_head_hexsha": "a3c408d11133aa1b97fb6dd673888cf56f03178e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 35, "max_forks_repo_forks_event_min_datetime": "2017-06-19T07:14:18.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-13T15:54:46.000Z", "avg_line_length": 32.2091836735, "max_line_length": 79, "alphanum_fraction": 0.6676698875, "include": true, "reason": "import numpy", "num_tokens": 1517}
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
[path]
cd /Users/brunoflaven/Documents/03_git/BlogArticlesExamples/extending_streamlit_usage/010_streamlit_design/
[file]
streamlit run streamlit_design_4.py
# source
https://github.com/Jcharis/Streamlit_DataScience_Apps/blob/master/Streamlit_Python_Crash_Course/docs_app.py
https://github.com/Jcharis/Streamlit_DataScience_Apps
# command
pip install streamlit --upgrade
"""
import time
import datetime
from PIL import Image
import streamlit as st
# Title
st.title("Streamlit Crash Course")
# Header
st.header("Simple Header")
# Subheader
st.subheader("Another sub header")
# Text
st.text("For a simple text")
# Markdown
st.markdown("#### A Markdown ")
# Error text
st.success("Successful")
st.info("This is an info alert ")
st.warning("This is a warning ")
st.error("This shows an error ")
# st.exception("NameError('name not defined')")
# Getting Help Info From Python
st.help(range)
# Writing Text/Super Fxn
st.write("Text with write")
st.write("Python Range with write", range(10))
# Images
img = Image.open("garden_mountain_and_river_20.jpg")
st.image(img, width=300, caption='Streamlit Images')
# Videos
video_file = open("drone_footage_valley.mp4", 'rb')
video_bytes = video_file.read()
st.video(video_bytes)
# Audio
audio_file = open("sample_house_lo.mp3",'rb')
audio_bytes = audio_file.read()
st.audio(audio_bytes,format='audio/mp3')
# Widget
# Checkbox
if st.checkbox("Show/Hide"):
st.text("Showing or Hiding Widget")
# Radio Button
status = st.radio("What is your status", ('Active', 'Inactive'))
if status == 'Active':
st.text("Status is Active")
else:
st.warning("Not Active Yet")
# SelectBox
occupation = st.selectbox(
"Your Occupation", ['Data Scientist', 'Programmer', 'Doctor', 'Businessman'])
st.write("You selected this option", occupation)
# MultiSelect
location = st.multiselect("Where do you stay", ("London",
"New York", "Accra", "Kiev", "Berlin", "New Delhi"))
st.write("You selected", len(location), "location")
# Slider
salary = st.slider("What is your salary", 1000, 10000)
# Buttons
st.button("Simple Button")
# Text Input
name = st.text_input("Enter Name", "Type Here...")
if st.button('Submit'):
result = name.title()
st.success(result)
else:
st.write("Press the above button..")
# Text Area
c_text = st.text_area("Enter Text", "Type Here...")
if st.button('Analyze'):
c_result = c_text.title()
st.success(c_result)
else:
st.write("Press the above button..")
# Date Input
today = st.date_input("Today is", datetime.datetime.now())
# Time Input
t = st.time_input("The time now is", datetime.time())
# SIDE Bar
st.sidebar.header("Side Bar Header")
st.sidebar.text("Hello")
# Display JSON
st.text("Display JSON")
st.json({'name': 'hello', 'age': 34})
# Display Raw Code
st.text("Display Raw Code")
st.code("import numpy as np")
st.text("Display Raw Code Alternative Method")
with st.echo():
# This will also be shown
import pandas as pd
df = pd.DataFrame()
# Progress Bar
# import time
# my_bar = st.progress(0)
# for p in range(10):
# my_bar.progress(p +1)
# Spinner
with st.spinner("Waiting .."):
time.sleep(5)
st.success("Finished!")
# Placeholder with empty
# age = st.empty()
# age.text("Your Age")
# Replace with image
# age.image(img)
# Cache For Performance
@st.cache
def run_multiple():
return range(100)
# Display the result of function
st.write(run_multiple())
|
{"hexsha": "63ef5e37b1a5fdd9050a2ee017ceeb6de259d69e", "size": 3483, "ext": "py", "lang": "Python", "max_stars_repo_path": "extending_streamlit_usage/010_streamlit_design/streamlit_design_4.py", "max_stars_repo_name": "bflaven/BlogArticlesExamples", "max_stars_repo_head_hexsha": "5df2dfc26170ffbbade78ba136bf3172391e3b2a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2018-05-03T08:16:02.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-04T03:44:24.000Z", "max_issues_repo_path": "extending_streamlit_usage/010_streamlit_design/streamlit_design_4.py", "max_issues_repo_name": "bflaven/BlogArticlesExamples", "max_issues_repo_head_hexsha": "5df2dfc26170ffbbade78ba136bf3172391e3b2a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-01-28T19:27:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-28T19:27:19.000Z", "max_forks_repo_path": "extending_streamlit_usage/010_streamlit_design/streamlit_design_4.py", "max_forks_repo_name": "bflaven/BlogArticlesExamples", "max_forks_repo_head_hexsha": "5df2dfc26170ffbbade78ba136bf3172391e3b2a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-09-10T13:33:27.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-09T11:07:38.000Z", "avg_line_length": 19.5674157303, "max_line_length": 107, "alphanum_fraction": 0.6982486362, "include": true, "reason": "import numpy", "num_tokens": 895}
|
import numpy as np
from locintel.core.datamodel.geo import GeoCoordinate
from locintel.graphs.datamodel.jurbey import Edge
from locintel.graphs.datamodel.types import (
EdgeType,
RoadClass,
RoadAccessibility,
VehicleType,
)
from typing import Sequence
def no_geometry(coord1, coord2):
return {}
def simple_node_geometry(coord1, coord2):
return [coord1, coord2]
def interpolated_geometry(coord1, coord2):
new_geometry = np.linspace(
*[(coord.lat, coord.lng) for coord in (coord1, coord2)], num=5
)
return [GeoCoordinate(point[0], point[1]) for point in new_geometry]
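# Illustrative example (hypothetical coordinates): interpolating between
# GeoCoordinate(0.0, 0.0) and GeoCoordinate(1.0, 4.0) yields 5 evenly spaced points
# whose (lat, lng) pairs are (0.0, 0.0), (0.25, 1.0), (0.5, 2.0), (0.75, 3.0) and (1.0, 4.0).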
def create_edge(**kwargs):
args = {
"edge_type": EdgeType.LANE_STRAIGHT,
"from_node": 0,
"to_node": 1,
"road_class": RoadClass.MajorRoad,
"road_accessibility": RoadAccessibility.NoRestriction,
"geometry": [],
"metadata": {"oneway": "yes", "highway": "primary"},
"vehicle_accessibility": [VehicleType.Car],
}
args.update(kwargs)
return Edge(
args["edge_type"],
args["from_node"],
args["to_node"],
args["road_class"],
args["road_accessibility"],
args["vehicle_accessibility"],
args["geometry"],
metadata=args["metadata"],
)
def requires(requirements):
def _needs(func):
def _needs_wrapper(*args, **kwargs):
return func(*args, **kwargs)
_needs_wrapper.__annotations__ = {
"requirements": requirements
if isinstance(requirements, Sequence)
else [requirements]
}
_needs_wrapper.__name__ = func.__name__
return _needs_wrapper
return _needs
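# Minimal usage sketch for the decorator above (names are hypothetical): the wrapped
# function behaves exactly like the original but records its requirements in
# __annotations__ for later inspection.
#
# @requires(["geometry"])
# def build_fixture():
#     ...
#
# build_fixture.__annotations__  # -> {"requirements": ["geometry"]}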
def find_midpoint(geometry):
start_coord = geometry[0]
end_coord = geometry[-1]
return GeoCoordinate(
        (start_coord.lat + end_coord.lat) / 2, (start_coord.lng + end_coord.lng) / 2
)
|
{"hexsha": "ddb0f3cff1424c908ba8bd575ceda138c3aec2cc", "size": 1910, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/graphs/synthetic/utils.py", "max_stars_repo_name": "pedrofreitascampospro/locintel", "max_stars_repo_head_hexsha": "eb9c56cdc308660c31d90abe9fe62bd3634ba273", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/graphs/synthetic/utils.py", "max_issues_repo_name": "pedrofreitascampospro/locintel", "max_issues_repo_head_hexsha": "eb9c56cdc308660c31d90abe9fe62bd3634ba273", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/graphs/synthetic/utils.py", "max_forks_repo_name": "pedrofreitascampospro/locintel", "max_forks_repo_head_hexsha": "eb9c56cdc308660c31d90abe9fe62bd3634ba273", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.8051948052, "max_line_length": 84, "alphanum_fraction": 0.635078534, "include": true, "reason": "import numpy", "num_tokens": 448}
|
open import Level using (0ℓ)
open import Relation.Binary.PropositionalEquality using (_≡_; _≢_; cong; cong₂; isEquivalence; setoid)
open import Relation.Binary.PropositionalEquality.WithK using (≡-irrelevant)
open import Data.Unit using (⊤; tt)
open import Agda.Builtin.FromNat using (Number)
open import Data.Product using (_,_)
module AKS.Rational.Properties where
open import AKS.Rational.Base using (ℚ; _+_; _*_; -_; _/_; _≟_)
open import Algebra.Structures {A = ℚ} _≡_ using
( IsCommutativeRing; IsRing; IsAbelianGroup
; IsGroup; IsMonoid; IsSemigroup; IsMagma
)
open import Algebra.Definitions {A = ℚ} _≡_ using
( _DistributesOver_; _DistributesOverʳ_; _DistributesOverˡ_
; RightIdentity; LeftIdentity; Identity; Associative; Commutative
; RightInverse; LeftInverse; Inverse
)
open import AKS.Algebra.Structures ℚ _≡_ using (IsNonZeroCommutativeRing; IsIntegralDomain; IsGCDDomain; IsDecField)
open import Algebra.Bundles using (Ring; CommutativeRing)
open import AKS.Algebra.Bundles using (NonZeroCommutativeRing; DecField)
open import AKS.Unsafe using (BOTTOM; ≢-irrelevant)
+-isMagma : IsMagma _+_
+-isMagma = record
{ isEquivalence = isEquivalence
; ∙-cong = cong₂ _+_
}
+-assoc : Associative _+_
+-assoc = BOTTOM
+-isSemigroup : IsSemigroup _+_
+-isSemigroup = record
{ isMagma = +-isMagma
; assoc = +-assoc
}
+-comm : Commutative _+_
+-comm = BOTTOM
+-identityˡ : LeftIdentity 0 _+_
+-identityˡ = BOTTOM
open import Algebra.FunctionProperties.Consequences.Propositional using (comm+idˡ⇒idʳ; comm+invˡ⇒invʳ; comm+distrˡ⇒distrʳ)
+-identityʳ : RightIdentity 0 _+_
+-identityʳ = BOTTOM -- comm+idˡ⇒idʳ +-comm +-identityˡ
+-identity : Identity 0 _+_
+-identity = +-identityˡ , +-identityʳ
+-isMonoid : IsMonoid _+_ 0
+-isMonoid = record
{ isSemigroup = +-isSemigroup
; identity = +-identity
}
-‿inverseˡ : LeftInverse 0 -_ _+_
-‿inverseˡ = BOTTOM
-‿inverseʳ : RightInverse 0 -_ _+_
-‿inverseʳ = BOTTOM -- comm+invˡ⇒invʳ +-comm -‿inverseˡ
-‿inverse : Inverse 0 -_ _+_
-‿inverse = -‿inverseˡ , -‿inverseʳ
+-isGroup : IsGroup _+_ 0 -_
+-isGroup = record
{ isMonoid = +-isMonoid
; inverse = -‿inverse
; ⁻¹-cong = cong -_
}
+-isAbelianGroup : IsAbelianGroup _+_ 0 -_
+-isAbelianGroup = record
{ isGroup = +-isGroup
; comm = +-comm
}
*-isMagma : IsMagma _*_
*-isMagma = record
{ isEquivalence = isEquivalence
; ∙-cong = cong₂ _*_
}
*-assoc : Associative _*_
*-assoc = BOTTOM
*-isSemigroup : IsSemigroup _*_
*-isSemigroup = record
{ isMagma = *-isMagma
; assoc = *-assoc
}
*-comm : Commutative _*_
*-comm x y = BOTTOM
*-identityˡ : LeftIdentity 1 _*_
*-identityˡ x = BOTTOM
*-identityʳ : RightIdentity 1 _*_
*-identityʳ = BOTTOM -- comm+idˡ⇒idʳ *-comm *-identityˡ
*-identity : Identity 1 _*_
*-identity = *-identityˡ , *-identityʳ
*-isMonoid : IsMonoid _*_ 1
*-isMonoid = record
{ isSemigroup = *-isSemigroup
; identity = *-identity
}
*-distribˡ-+ : _*_ DistributesOverˡ _+_
*-distribˡ-+ = BOTTOM
*-distribʳ-+ : _*_ DistributesOverʳ _+_
*-distribʳ-+ = BOTTOM -- comm+distrˡ⇒distrʳ *-comm *-distribˡ-+
*-distrib-+ : _*_ DistributesOver _+_
*-distrib-+ = *-distribˡ-+ , *-distribʳ-+
+-*-isRing : IsRing _+_ _*_ -_ 0 1
+-*-isRing = record
{ +-isAbelianGroup = +-isAbelianGroup
; *-isMonoid = *-isMonoid
; distrib = *-distrib-+
}
+-*-isCommutativeRing : IsCommutativeRing _+_ _*_ -_ 0 1
+-*-isCommutativeRing = record
{ isRing = +-*-isRing
; *-comm = *-comm
}
+-*-isNonZeroCommutativeRing : IsNonZeroCommutativeRing _+_ _*_ -_ 0 1
+-*-isNonZeroCommutativeRing = record
{ isCommutativeRing = +-*-isCommutativeRing
; 0#≉1# = λ ()
}
+-*-nonZeroCommutativeRing : NonZeroCommutativeRing 0ℓ 0ℓ
+-*-nonZeroCommutativeRing = record { isNonZeroCommutativeRing = +-*-isNonZeroCommutativeRing }
/-inverse : ∀ x y {y≢0} → x ≡ y * (x / y) {y≢0}
/-inverse x y {y≢0} = BOTTOM
open import AKS.Algebra.Consequences +-*-nonZeroCommutativeRing using (module Inverse⇒Field)
open Inverse⇒Field _≟_ ≡-irrelevant ≢-irrelevant _/_ /-inverse
using (gcd)
renaming (isField to +-*-/-isField; [field] to +-*-/-field) public
+-*-/-isDecField : IsDecField _≟_ _+_ _*_ -_ 0 1 _/_ gcd
+-*-/-isDecField = record { isField = +-*-/-isField }
+-*-/-decField : DecField 0ℓ 0ℓ
+-*-/-decField = record { isDecField = +-*-/-isDecField }
|
{"hexsha": "935be09790206cdd540adc00726d7f0546c5d774", "size": 4321, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "proofs/AKS/Rational/Properties.agda", "max_stars_repo_name": "mckeankylej/thesis", "max_stars_repo_head_hexsha": "ddad4c0d5f384a0219b2177461a68dae06952dde", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-12-01T22:38:27.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-01T22:38:27.000Z", "max_issues_repo_path": "proofs/AKS/Rational/Properties.agda", "max_issues_repo_name": "mckeankylej/thesis", "max_issues_repo_head_hexsha": "ddad4c0d5f384a0219b2177461a68dae06952dde", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "proofs/AKS/Rational/Properties.agda", "max_forks_repo_name": "mckeankylej/thesis", "max_forks_repo_head_hexsha": "ddad4c0d5f384a0219b2177461a68dae06952dde", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.3475609756, "max_line_length": 122, "alphanum_fraction": 0.6873408933, "num_tokens": 1608}
|
#include "graph-properties-convert-mysql.h"
#include <mysql.h>
#include <unistd.h>
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <deque>
#include <fstream>
#include <iostream>
#include <limits>
#include <map>
#include <optional>
#include <random>
#include <sstream>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include <boost/algorithm/string.hpp>
#include <boost/lexical_cast.hpp>
#include "katana/ErrorCode.h"
#include "katana/Galois.h"
#include "katana/GraphMLSchema.h"
#include "katana/Logging.h"
#include "katana/PropertyGraph.h"
#include "katana/SharedMemSys.h"
#include "katana/Threads.h"
using katana::GraphComponents;
using katana::ImportData;
using katana::ImportDataType;
using katana::LabelRule;
using katana::PropertyKey;
namespace {
struct MysqlRes {
MYSQL_RES* res;
MysqlRes(MYSQL_RES* res_) : res(res_) {}
~MysqlRes() { mysql_free_result(res); }
};
struct Relationship {
std::string label;
std::string source_table;
std::string source_field;
size_t source_index;
std::string target_table;
std::string target_field;
Relationship(
std::string source_table_, std::string source_field_,
std::string target_table_, std::string target_field_)
: label(source_table_ + "_" + target_table_ + "_" + source_field_),
source_table(std::move(source_table_)),
source_field(std::move(source_field_)),
source_index(0),
target_table(std::move(target_table_)),
target_field(std::move(target_field_)) {}
};
struct TableData {
std::string name;
bool is_node;
int64_t primary_key_index;
std::vector<Relationship> out_references;
std::vector<Relationship> in_references;
std::vector<std::string> field_names;
std::vector<size_t> field_indexes;
std::unordered_set<std::string> ignore_list;
TableData(std::string name_)
: name(std::move(name_)),
is_node(true),
primary_key_index(-1),
out_references(std::vector<Relationship>{}),
in_references(std::vector<Relationship>{}),
field_names(std::vector<std::string>{}),
field_indexes(std::vector<size_t>{}),
ignore_list(std::unordered_set<std::string>{}) {}
void ResolveOutgoingKeys(const std::string& field, size_t field_index) {
for (auto& relation : this->out_references) {
if (relation.source_field == field) {
relation.source_index = field_index;
}
}
}
bool IsValidEdge() {
if (this->out_references.size() != 2) {
return false;
}
if (!this->in_references.empty()) {
return false;
}
return true;
}
};
template <typename T>
ImportData
Resolve(ImportDataType type, bool is_list, T val) {
ImportData data{type, is_list};
data.value = val;
return data;
}
ImportData
ResolveBool(const std::string& val) {
if (val.empty()) {
return ImportData{ImportDataType::kUnsupported, false};
}
ImportData data{ImportDataType::kBoolean, false};
bool res = val[0] > '0' && val[0] <= '9';
if (res) {
data.value = res;
return data;
}
res = val[0] == 't' || val[0] == 'T';
if (res) {
data.value = res;
return data;
}
res = val[0] == 'y' || val[0] == 'Y';
if (res) {
data.value = res;
return data;
}
data.value = false;
return data;
}
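// Illustrative behaviour of the checks above: "1", "true" and "Yes" all resolve to a
// boolean true; "0", "false" and "no" resolve to false; an empty string is reported
// as kUnsupported.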
ImportData
ResolveValue(const std::string& val, ImportDataType type, bool is_list) {
if (is_list) {
return ImportData{ImportDataType::kUnsupported, is_list};
}
try {
switch (type) {
case ImportDataType::kString:
return Resolve(type, is_list, val);
case ImportDataType::kInt64:
return Resolve(type, is_list, boost::lexical_cast<int64_t>(val));
case ImportDataType::kInt32:
return Resolve(type, is_list, boost::lexical_cast<int32_t>(val));
case ImportDataType::kDouble:
return Resolve(type, is_list, boost::lexical_cast<double>(val));
case ImportDataType::kFloat:
return Resolve(type, is_list, boost::lexical_cast<float>(val));
case ImportDataType::kBoolean:
return ResolveBool(val);
case ImportDataType::kTimestampMilli:
return ImportData{ImportDataType::kUnsupported, false};
default:
return ImportData{ImportDataType::kUnsupported, false};
}
} catch (const boost::bad_lexical_cast&) {
return ImportData{ImportDataType::kUnsupported, false};
}
}
ImportDataType
ExtractTypeMysql(enum_field_types type) {
switch (type) {
case MYSQL_TYPE_TINY:
return ImportDataType::kBoolean;
case MYSQL_TYPE_SHORT:
return ImportDataType::kInt32;
case MYSQL_TYPE_INT24:
return ImportDataType::kInt32;
case MYSQL_TYPE_LONG:
return ImportDataType::kInt32;
case MYSQL_TYPE_LONGLONG:
return ImportDataType::kInt64;
case MYSQL_TYPE_FLOAT:
return ImportDataType::kFloat;
case MYSQL_TYPE_DOUBLE:
return ImportDataType::kDouble;
case MYSQL_TYPE_STRING:
return ImportDataType::kString;
case MYSQL_TYPE_VAR_STRING:
return ImportDataType::kString;
case MYSQL_TYPE_BLOB:
return ImportDataType::kString;
default:
return ImportDataType::kString;
}
}
std::string
GenerateFetchForeignKeyQuery(const std::string& table) {
return std::string{
"SELECT DISTINCT "
"TABLE_NAME, "
"COLUMN_NAME, "
"CONSTRAINT_NAME, "
"REFERENCED_TABLE_NAME, "
"REFERENCED_COLUMN_NAME "
"FROM "
"INFORMATION_SCHEMA.KEY_COLUMN_USAGE "
"WHERE "
"REFERENCED_TABLE_NAME IS NOT NULL AND "
"TABLE_NAME = '" +
table + "';"};
}
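// For a hypothetical table "orders" the generated statement is:
//   SELECT DISTINCT TABLE_NAME, COLUMN_NAME, CONSTRAINT_NAME,
//          REFERENCED_TABLE_NAME, REFERENCED_COLUMN_NAME
//   FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE
//   WHERE REFERENCED_TABLE_NAME IS NOT NULL AND TABLE_NAME = 'orders';
// Each returned row maps a foreign-key column of "orders" to the table and column it
// references; PreprocessForeignKeys below consumes rows in exactly this column order.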
std::string
GenerateFetchRowQuery(const std::string& table) {
return std::string{"SELECT * FROM " + table + " LIMIT 1;"};
}
std::string
GenerateFetchTableQuery(const std::string& table) {
return std::string{"SELECT * FROM " + table + ";"};
}
std::vector<std::string>
FetchTableNames(MYSQL* con) {
std::vector<std::string> table_names;
MysqlRes tables{mysql_list_tables(con, NULL)};
auto num_fields = mysql_num_fields(tables.res);
MYSQL_ROW row;
while ((row = mysql_fetch_row(tables.res))) {
auto lengths = mysql_fetch_lengths(tables.res);
for (size_t i = 0; i < num_fields; i++) {
table_names.emplace_back(std::string(row[i], lengths[i]));
}
}
return table_names;
}
/*
std::vector<std::string> FetchFieldNames(MysqlRes* table) {
std::vector<std::string> field_names;
MYSQL_FIELD* field;
while ((field = mysql_fetch_field(table->res))) {
field_names.emplace_back(std::string(field->name, field->name_length));
}
return field_names;
}*/
MysqlRes
RunQuery(MYSQL* con, const std::string& query) {
if (mysql_real_query(con, query.c_str(), query.size())) {
KATANA_LOG_FATAL("Could not run query {}: {}", query, mysql_error(con));
}
return MysqlRes(mysql_use_result(con));
}
void
AddNodeTable(
katana::PropertyGraphBuilder* builder, MYSQL* con,
const TableData& table_data) {
MysqlRes table = RunQuery(con, GenerateFetchTableQuery(table_data.name));
MYSQL_ROW row;
while ((row = mysql_fetch_row(table.res))) {
auto lengths = mysql_fetch_lengths(table.res);
builder->StartNode();
builder->AddLabel(table_data.name);
// if table has a primary key, add it as node's ID
auto primary_index = table_data.primary_key_index;
if (primary_index >= 0) {
std::string primary_key{row[primary_index], lengths[primary_index]};
builder->AddNodeID(table_data.name + primary_key);
}
// add data fields
for (size_t i = 0; i < table_data.field_names.size(); i++) {
auto index = table_data.field_indexes[i];
// if the data is null then do not add it
if (row[index] != NULL) {
std::string value{row[index], lengths[index]};
builder->AddValue(
table_data.field_names[i],
[]() {
return PropertyKey{
"invalid", ImportDataType::kUnsupported, false};
},
[&value](ImportDataType type, bool is_list) {
return ResolveValue(value, type, is_list);
});
}
}
// if table has outgoing edges, add them
for (auto relation : table_data.out_references) {
auto foreign_index = relation.source_index;
// if the target is null then do not add an edge
if (row[foreign_index] != NULL) {
std::string foreign_key{row[foreign_index], lengths[foreign_index]};
std::string edge_id = relation.target_table + foreign_key;
builder->AddOutgoingEdge(edge_id, relation.label);
}
}
builder->FinishNode();
}
}
void
AddEdgeTable(
katana::PropertyGraphBuilder* builder, MYSQL* con,
const TableData& table_data) {
MysqlRes table = RunQuery(con, GenerateFetchTableQuery(table_data.name));
MYSQL_ROW row;
while ((row = mysql_fetch_row(table.res))) {
auto lengths = mysql_fetch_lengths(table.res);
builder->StartEdge();
builder->AddLabel(table_data.name);
bool adding_source = true;
// if the source or target is null then add a placeholder node
for (auto relation : table_data.out_references) {
auto foreign_index = relation.source_index;
std::string foreign_key{row[foreign_index], lengths[foreign_index]};
std::string edge_id = relation.target_table + foreign_key;
if (adding_source) {
builder->AddEdgeSource(edge_id);
adding_source = false;
} else {
builder->AddEdgeTarget(edge_id);
}
}
// add data fields
for (size_t i = 0; i < table_data.field_names.size(); i++) {
auto index = table_data.field_indexes[i];
// if the data is null then do not add it
if (row[index] != NULL) {
std::string value{row[index], lengths[index]};
builder->AddValue(
table_data.field_names[i],
[]() {
return PropertyKey{
"invalid", ImportDataType::kUnsupported, false};
},
[&value](ImportDataType type, bool is_list) {
return ResolveValue(value, type, is_list);
});
}
}
builder->FinishEdge();
}
}
/************************************/
/* Functions for getting user input */
/************************************/
bool
GetUserBool(const std::string& prompt) {
while (true) {
std::cout << prompt << " (y/n): ";
std::string res;
std::getline(std::cin, res);
if (res.empty()) {
std::cout << "Please enter yes or no\n";
} else if (res[0] == 'y' || res[0] == 'Y') {
return true;
} else if (res[0] == 'n' || res[0] == 'N') {
return false;
} else {
std::cout << "Please enter yes or no\n";
}
}
}
// TODO support multiple labels per collection
void
GetUserInputForLabels(
xmlTextWriterPtr writer, const std::map<std::string, TableData>& table_data,
bool for_node) {
for (auto [name, data] : table_data) {
if (for_node == data.is_node) {
std::cout << "Choose label for " << name << " (" << name << "): ";
std::string res;
std::getline(std::cin, res);
std::string existing_key;
if (res.empty()) {
LabelRule rule{name, for_node, !for_node, name};
katana::graphml::WriteGraphmlRule(writer, rule);
} else {
LabelRule rule{name, for_node, !for_node, res};
katana::graphml::WriteGraphmlRule(writer, rule);
}
}
}
}
// TODO support multiple labels per collection
void
GetUserInputForLabels(
xmlTextWriterPtr writer,
const std::map<std::string, LabelRule>& foreign_labels) {
for (auto [name, rule] : foreign_labels) {
std::cout << "Choose label for " << name << " (" << name << "): ";
std::string res;
std::getline(std::cin, res);
std::string existing_key;
if (res.empty()) {
katana::graphml::WriteGraphmlRule(writer, rule);
} else {
rule.label = res;
katana::graphml::WriteGraphmlRule(writer, rule);
}
}
}
void
GetUserInputForFields(
xmlTextWriterPtr writer, std::map<std::string, PropertyKey>* fields) {
std::cout << "Total Detected Fields: " << fields->size() << "\n";
for (auto& [name, key] : (*fields)) {
std::cout << "Choose property name for field " << name << " (" << name
<< "): ";
std::string res;
std::getline(std::cin, res);
if (!res.empty()) {
key.name = res;
}
bool done = false;
auto type_name = katana::graphml::TypeName(key.type);
while (!done) {
std::cout << "Choose type for field " << name << " (" << type_name;
if (key.is_list) {
std::cout << " array";
}
std::cout << "): ";
std::getline(std::cin, res);
if (!res.empty()) {
std::istringstream iss(res);
std::vector<std::string> tokens{
std::istream_iterator<std::string>{iss},
std::istream_iterator<std::string>{}};
if (tokens.size() <= 2) {
auto new_type = katana::graphml::ParseType(tokens[0]);
if (new_type != ImportDataType::kUnsupported) {
if (tokens.size() == 2) {
if (new_type == ImportDataType::kStruct) {
std::cout << "Arrays of structs are not supported\n";
} else if (
boost::to_lower_copy<std::string>(tokens[1]) == "array") {
key.type = new_type;
key.is_list = true;
done = true;
} else {
std::cout
<< "Second argument could not be recognized, to specify an "
"array use the format: \"double array\"\n";
}
} else {
key.type = new_type;
key.is_list = false;
done = true;
}
} else {
std::cout << "Inputted datatype could not be recognized, valid "
"datatypes:\n";
std::cout << "\"string\", \"string array\"\n";
std::cout << "\"int64\", \"int64 array\"\n";
std::cout << "\"int32\", \"int32 array\"\n";
std::cout << "\"double\", \"double array\"\n";
std::cout << "\"float\", \"float array\"\n";
std::cout << "\"bool\", \"bool array\"\n";
std::cout << "\"timestamp\", \"timestamp array\"\n";
std::cout << "\"struct\"\n";
}
} else {
std::cout << "Too many arguments\n";
}
} else {
done = true;
}
}
katana::graphml::WriteGraphmlKey(writer, key);
}
}
/***********************************************/
/* Functions for preprocessing MySQL databases */
/***********************************************/
void
ExhaustResultSet(MysqlRes* res) {
while (mysql_fetch_row(res->res))
;
}
bool
ContainsRelation(
const std::vector<LabelRule>& rules, const std::string& label) {
for (auto rule : rules) {
if (rule.for_edge && rule.id == label) {
return true;
}
}
return false;
}
bool
ContainsKey(
const std::vector<PropertyKey>& keys, const std::string& id,
bool for_node) {
for (auto key : keys) {
if (key.for_node == for_node && key.for_edge == !for_node && key.id == id) {
return true;
}
}
return false;
}
PropertyKey
ProcessField(MYSQL_FIELD* field) {
std::string id{field->name, field->name_length};
bool for_node = false;
bool for_edge = false;
std::string attr_name = id;
ImportDataType type = ExtractTypeMysql(field->type);
bool is_list = false;
return PropertyKey{
id, for_node, for_edge, attr_name, type, is_list,
};
}
template <typename T>
void
PreprocessForeignKeys(
MysqlRes* foreign_keys, T* table_data, const std::string& table_name) {
TableData data{table_name};
MYSQL_ROW row;
// each row consists of:
// Source Table, Source Column, Constraint Name, Target Table, Target Column
while ((row = mysql_fetch_row(foreign_keys->res))) {
auto lengths = mysql_fetch_lengths(foreign_keys->res);
std::string source_table{row[0], lengths[0]};
std::string source_field{row[1], lengths[1]};
std::string target_table{row[3], lengths[3]};
std::string target_field{row[4], lengths[4]};
data.ignore_list.insert(source_field);
Relationship relation{
std::move(source_table),
std::move(source_field),
std::move(target_table),
std::move(target_field),
};
data.out_references.emplace_back(std::move(relation));
}
table_data->insert(std::pair<std::string, TableData>(table_name, data));
}
template <typename T>
void
PreprocessForeignKeys(
MysqlRes* foreign_keys, T* table_data, const std::vector<LabelRule>& rules,
const std::string& table_name) {
TableData data{table_name};
data.is_node = !ContainsRelation(rules, table_name);
MYSQL_ROW row;
// each row consists of:
// Source Table, Source Column, Constraint Name, Target Table, Target Column
while ((row = mysql_fetch_row(foreign_keys->res))) {
auto lengths = mysql_fetch_lengths(foreign_keys->res);
std::string source_table{row[0], lengths[0]};
std::string source_field{row[1], lengths[1]};
std::string target_table{row[3], lengths[3]};
std::string target_field{row[4], lengths[4]};
data.ignore_list.insert(source_field);
Relationship relation{
std::move(source_table),
std::move(source_field),
std::move(target_table),
std::move(target_field),
};
if (!data.is_node || ContainsRelation(rules, relation.label)) {
data.out_references.emplace_back(std::move(relation));
}
}
table_data->insert(std::pair<std::string, TableData>(table_name, data));
}
template <typename T>
void
FillForeignKeyRelations(T* table_data) {
for (auto& iter : (*table_data)) {
for (auto relation : iter.second.out_references) {
auto& dest = table_data->find(relation.target_table)->second;
dest.in_references.emplace_back(relation);
}
}
}
template <typename T>
void
SetEdges(T* table_data) {
for (auto& iter : (*table_data)) {
if (iter.second.IsValidEdge()) {
iter.second.is_node = !GetUserBool("Treat " + iter.first + " as an edge");
}
}
}
template <typename T>
void
PreprocessFields(
MysqlRes* table_row, T* table_data,
std::map<std::string, PropertyKey>* property_fields,
const std::string& table_name) {
auto table_iter = table_data->find(table_name);
MYSQL_FIELD* field;
size_t index = 0;
while ((field = mysql_fetch_field(table_row->res))) {
auto key = ProcessField(field);
// if this field is a primary key, do not add it for now
if (IS_PRI_KEY(field->flags)) {
table_iter->second.primary_key_index = static_cast<int64_t>(index);
} else if (
table_iter->second.ignore_list.find(key.id) ==
table_iter->second.ignore_list.end()) {
// if this field will be added to the database
key.for_node = table_iter->second.is_node;
key.for_edge = !table_iter->second.is_node;
property_fields->insert(std::pair<std::string, PropertyKey>(key.id, key));
table_iter->second.field_names.emplace_back(key.id);
table_iter->second.field_indexes.emplace_back(index);
} else {
// if this field is a foreign key, resolve its local field indexes
table_iter->second.ResolveOutgoingKeys(key.id, index);
}
index++;
}
}
template <typename T>
void
PreprocessFields(
MysqlRes* table_row, T* table_data, const std::vector<PropertyKey>& keys,
const std::string& table_name) {
auto table_iter = table_data->find(table_name);
MYSQL_FIELD* field;
size_t index = 0;
while ((field = mysql_fetch_field(table_row->res))) {
auto key = ProcessField(field);
// if this field is a primary key, do not add it for now
if (IS_PRI_KEY(field->flags)) {
table_iter->second.primary_key_index = static_cast<int64_t>(index);
} else if (
table_iter->second.ignore_list.find(key.id) !=
table_iter->second.ignore_list.end()) {
// if this field is a foreign key, resolve its local field indexes
table_iter->second.ResolveOutgoingKeys(key.id, index);
}
// if this field will be added to the database
if (ContainsKey(keys, key.id, table_iter->second.is_node)) {
table_iter->second.field_names.emplace_back(key.id);
table_iter->second.field_indexes.emplace_back(index);
}
index++;
}
}
std::unordered_map<std::string, TableData>
PreprocessTables(
MYSQL* con, katana::PropertyGraphBuilder* builder,
const std::vector<std::string>& table_names) {
std::unordered_map<std::string, TableData> table_data;
std::map<std::string, PropertyKey> node_fields;
std::map<std::string, PropertyKey> edge_fields;
// first process tables for primary and foreign keys
for (auto table_name : table_names) {
MysqlRes foreign_keys =
RunQuery(con, GenerateFetchForeignKeyQuery(table_name));
PreprocessForeignKeys(&foreign_keys, &table_data, table_name);
}
FillForeignKeyRelations(&table_data);
SetEdges(&table_data);
for (auto table_name : table_names) {
MysqlRes table_row = RunQuery(con, GenerateFetchRowQuery(table_name));
if (table_data.find(table_name)->second.is_node) {
PreprocessFields(&table_row, &table_data, &node_fields, table_name);
} else {
PreprocessFields(&table_row, &table_data, &edge_fields, table_name);
}
ExhaustResultSet(&table_row);
}
for (auto [name, data] : table_data) {
LabelRule rule{name, data.is_node, !data.is_node, name};
builder->AddLabelBuilder(rule);
}
for (auto iter : node_fields) {
builder->AddBuilder(iter.second);
}
for (auto iter : edge_fields) {
builder->AddBuilder(iter.second);
}
return table_data;
}
std::unordered_map<std::string, TableData>
PreprocessTables(
MYSQL* con, katana::PropertyGraphBuilder* builder,
const std::vector<std::string>& table_names,
const std::vector<LabelRule>& rules, const std::vector<PropertyKey>& keys) {
std::unordered_map<std::string, TableData> table_data;
// first process tables for primary and foreign keys
for (auto table_name : table_names) {
MysqlRes foreign_keys =
RunQuery(con, GenerateFetchForeignKeyQuery(table_name));
PreprocessForeignKeys(&foreign_keys, &table_data, rules, table_name);
}
FillForeignKeyRelations(&table_data);
for (auto table_name : table_names) {
MysqlRes table_row = RunQuery(con, GenerateFetchRowQuery(table_name));
// node and edge tables are preprocessed identically here: the supplied keys
// already record (via for_node/for_edge) whether each property belongs to
// nodes or to edges, so a single call suffices
PreprocessFields(&table_row, &table_data, keys, table_name);
ExhaustResultSet(&table_row);
}
for (auto rule : rules) {
builder->AddLabelBuilder(rule);
}
for (auto key : keys) {
builder->AddBuilder(key);
}
return table_data;
}
void
GetMappingInput(
MYSQL* con, const std::vector<std::string>& table_names,
const std::string& outfile) {
std::map<std::string, TableData> table_data;
std::map<std::string, PropertyKey> node_fields;
std::map<std::string, PropertyKey> edge_fields;
std::map<std::string, LabelRule> foreign_rules;
std::vector<PropertyKey> keys;
std::vector<LabelRule> rules;
size_t nodes = 0;
size_t edges = 0;
// first process tables for primary and foreign keys
for (auto table_name : table_names) {
MysqlRes foreign_keys =
RunQuery(con, GenerateFetchForeignKeyQuery(table_name));
PreprocessForeignKeys(&foreign_keys, &table_data, table_name);
}
FillForeignKeyRelations(&table_data);
SetEdges(&table_data);
for (auto table_name : table_names) {
MysqlRes table_row = RunQuery(con, GenerateFetchRowQuery(table_name));
if (table_data.find(table_name)->second.is_node) {
PreprocessFields(&table_row, &table_data, &node_fields, table_name);
} else {
PreprocessFields(&table_row, &table_data, &edge_fields, table_name);
}
ExhaustResultSet(&table_row);
}
for (auto iter : node_fields) {
keys.emplace_back(iter.second);
}
for (auto iter : edge_fields) {
keys.emplace_back(iter.second);
}
// add tables that are nodes
for (auto [name, data] : table_data) {
if (data.is_node) {
nodes++;
rules.emplace_back(name, data.is_node, !data.is_node, name);
// find foreign key edges
for (auto relation : data.out_references) {
LabelRule rule{relation.label, false, true, relation.label};
foreign_rules.insert(
std::pair<std::string, LabelRule>(relation.label, rule));
}
}
}
// add tables that are edges
for (auto [name, data] : table_data) {
if (!data.is_node) {
edges++;
rules.emplace_back(name, data.is_node, !data.is_node, name);
}
}
// add edges that are foreign keys
for (auto iter : foreign_rules) {
rules.emplace_back(iter.second);
}
if (GetUserBool("Generate default mapping now")) {
katana::graphml::ExportSchemaMapping(outfile, rules, keys);
return;
}
auto writer = katana::graphml::CreateGraphmlFile(outfile);
// finalize labels for nodes and edges mappings
std::cout << "Nodes: " << nodes << "\n";
GetUserInputForLabels(writer, table_data, true);
std::cout << "Edges: " << edges << "\n";
GetUserInputForLabels(writer, table_data, false);
std::cout << "Edges: " << foreign_rules.size() << "\n";
GetUserInputForLabels(writer, foreign_rules);
// finalize field names and types
std::cout << "Node Fields:\n";
GetUserInputForFields(writer, &node_fields);
std::cout << "Edge Fields:\n";
GetUserInputForFields(writer, &edge_fields);
xmlTextWriterStartElement(writer, BAD_CAST "graph");
xmlTextWriterEndElement(writer);
katana::graphml::FinishGraphmlFile(writer);
}
} // end of unnamed namespace
GraphComponents
katana::ConvertMysql(
const std::string& db_name, const std::string& mapping,
const size_t chunk_size, const std::string& host, const std::string& user) {
katana::PropertyGraphBuilder builder{chunk_size};
std::string password{getpass("MySQL Password: ")};
MYSQL* con = mysql_init(NULL);
if (con == nullptr) {
KATANA_LOG_FATAL("mysql_init() failed");
}
if (mysql_real_connect(
con, host.c_str(), user.c_str(), password.c_str(), db_name.c_str(), 0,
NULL, 0) == NULL) {
KATANA_LOG_FATAL(
"Could not establish mysql connection: {}", mysql_error(con));
}
std::vector<std::string> table_names = FetchTableNames(con);
std::unordered_map<std::string, TableData> table_data;
if (!mapping.empty()) {
auto res = katana::graphml::ProcessSchemaMapping(mapping);
std::vector<LabelRule> rules = res.first;
std::vector<PropertyKey> keys = res.second;
table_data = PreprocessTables(con, &builder, table_names, rules, keys);
} else {
table_data = PreprocessTables(con, &builder, table_names);
}
for (auto table : table_data) {
if (table.second.is_node) {
AddNodeTable(&builder, con, table.second);
} else {
AddEdgeTable(&builder, con, table.second);
}
}
mysql_close(con);
auto out_result = builder.Finish();
if (!out_result) {
KATANA_LOG_FATAL("Failed to construct graph: {}", out_result.error());
}
katana::GraphComponents out = std::move(out_result.value());
out.Dump();
return out;
}
void
katana::GenerateMappingMysql(
const std::string& db_name, const std::string& outfile,
const std::string& host, const std::string& user) {
std::string password{getpass("MySQL Password: ")};
MYSQL* con = mysql_init(NULL);
if (con == nullptr) {
KATANA_LOG_FATAL("mysql_init() failed");
}
if (mysql_real_connect(
con, host.c_str(), user.c_str(), password.c_str(), db_name.c_str(), 0,
NULL, 0) == NULL) {
KATANA_LOG_FATAL(
"Could not establish mysql connection: {}", mysql_error(con));
}
std::vector<std::string> table_names = FetchTableNames(con);
// get user input on node/edge mappings, label names, property names and
// values
GetMappingInput(con, table_names, outfile);
mysql_close(con);
}
|
{"hexsha": "4b20d2865f7e20280b470e6b79e623a21d0bc2f9", "size": 27984, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tools/graph-convert/graph-properties-convert-mysql.cpp", "max_stars_repo_name": "chakpongchung/katana", "max_stars_repo_head_hexsha": "3278a39b504e0aeaec30d06cf629ab97dfeb3f22", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 64.0, "max_stars_repo_stars_event_min_datetime": "2020-05-22T23:32:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T10:42:45.000Z", "max_issues_repo_path": "tools/graph-convert/graph-properties-convert-mysql.cpp", "max_issues_repo_name": "chakpongchung/katana", "max_issues_repo_head_hexsha": "3278a39b504e0aeaec30d06cf629ab97dfeb3f22", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 705.0, "max_issues_repo_issues_event_min_datetime": "2020-02-17T20:50:38.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:28:09.000Z", "max_forks_repo_path": "tools/graph-convert/graph-properties-convert-mysql.cpp", "max_forks_repo_name": "chakpongchung/katana", "max_forks_repo_head_hexsha": "3278a39b504e0aeaec30d06cf629ab97dfeb3f22", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 93.0, "max_forks_repo_forks_event_min_datetime": "2020-03-18T17:34:07.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T02:11:09.000Z", "avg_line_length": 30.1551724138, "max_line_length": 80, "alphanum_fraction": 0.6456903945, "num_tokens": 7059}
|
import numpy as np
import tensorflow as tf
data = np.load("./cifar-10-test-data.npz")
labels = data.f.labels
data = data.f.data
w = tf.io.TFRecordWriter("./cifar-10-test-data.tfrecords")
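# write one tf.train.Example per image: the raw pixel bytes (assumed uint8, matching the
# decode step below) under "data" and the integer class id under "label";
# 10000 records corresponds to the CIFAR-10 test split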
for i in range(10000):
example = tf.train.Example(
features=tf.train.Features(
feature={
"data": tf.train.Feature(bytes_list=tf.train.BytesList(value=[data[i].tobytes()])),
"label": tf.train.Feature(int64_list=tf.train.Int64List(value=[labels[i]])),
}
)
)
w.write(example.SerializeToString())
w.close()
def map_func(example):
# parse one serialized Example back into an image tensor and a label
# (tf.io.* names keep the parsing consistent with the tf.io.TFRecordWriter used above)
feature_map = {
'data': tf.io.FixedLenFeature((), tf.string),
'label': tf.io.FixedLenFeature((), tf.int64)
}
parsed_example = tf.io.parse_single_example(example, features=feature_map)
data = tf.io.decode_raw(parsed_example["data"], out_type=tf.uint8)
data = tf.reshape(data, [32, 32, 3])
label = parsed_example["label"]
return data, label
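# A minimal consumption sketch (an assumption, not part of the original script),
# feeding the parser above through the tf.data API:
# dataset = tf.data.TFRecordDataset("./cifar-10-test-data.tfrecords").map(map_func).batch(32)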
|
{"hexsha": "14aebe27c20c8b388b9334f6049c3c0318a25994", "size": 976, "ext": "py", "lang": "Python", "max_stars_repo_path": "elastic_demos/record.py", "max_stars_repo_name": "AlanFokCo/compensation-tools", "max_stars_repo_head_hexsha": "e3fbf2f583ff370d32ffa0e2b6a0c57c20ca9eb0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "elastic_demos/record.py", "max_issues_repo_name": "AlanFokCo/compensation-tools", "max_issues_repo_head_hexsha": "e3fbf2f583ff370d32ffa0e2b6a0c57c20ca9eb0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "elastic_demos/record.py", "max_forks_repo_name": "AlanFokCo/compensation-tools", "max_forks_repo_head_hexsha": "e3fbf2f583ff370d32ffa0e2b6a0c57c20ca9eb0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.1111111111, "max_line_length": 99, "alphanum_fraction": 0.631147541, "include": true, "reason": "import numpy", "num_tokens": 240}
|
SUBROUTINE MD03BB( COND, N, IPAR, LIPAR, R, LDR, IPVT, DIAG, QTB,
$ DELTA, PAR, RANKS, X, RX, TOL, DWORK, LDWORK,
$ INFO )
C
C SLICOT RELEASE 5.7.
C
C Copyright (c) 2002-2020 NICONET e.V.
C
C PURPOSE
C
C To determine a value for the parameter PAR such that if x solves
C the system
C
C A*x = b , sqrt(PAR)*D*x = 0 ,
C
C in the least squares sense, where A is an m-by-n matrix, D is an
C n-by-n nonsingular diagonal matrix, and b is an m-vector, and if
C DELTA is a positive number, DXNORM is the Euclidean norm of D*x,
C then either PAR is zero and
C
C ( DXNORM - DELTA ) .LE. 0.1*DELTA ,
C
C or PAR is positive and
C
C ABS( DXNORM - DELTA ) .LE. 0.1*DELTA .
C
C It is assumed that a QR factorization, with column pivoting, of A
C is available, that is, A*P = Q*R, where P is a permutation matrix,
C Q has orthogonal columns, and R is an upper triangular matrix
C with diagonal elements of nonincreasing magnitude.
C The routine needs the full upper triangle of R, the permutation
C matrix P, and the first n components of Q'*b (' denotes the
C transpose). On output, MD03BB also provides an upper triangular
C matrix S such that
C
C P'*(A'*A + PAR*D*D)*P = S'*S .
C
C Matrix S is used in the solution process.
C
C This routine is an interface to SLICOT Library routine MD03BY,
C for solving standard nonlinear least squares problems using SLICOT
C routine MD03BD.
C
C ARGUMENTS
C
C Mode Parameters
C
C COND CHARACTER*1
C Specifies whether the condition of the matrices R and S
C should be estimated, as follows:
C = 'E' : use incremental condition estimation for R and S;
C = 'N' : do not use condition estimation, but check the
C diagonal entries of R and S for zero values;
C = 'U' : use the rank already stored in RANKS (for R).
C
C Input/Output Parameters
C
C N (input) INTEGER
C The order of the matrix R. N >= 0.
C
C IPAR (input) INTEGER array, dimension (LIPAR)
C The integer parameters describing the structure of the
C matrix R. IPAR and LIPAR are not used by this routine,
C but are provided for compatibility with SLICOT Library
C routine MD03BD.
C
C LIPAR (input) INTEGER
C The length of the array IPAR. LIPAR >= 0.
C
C R (input/output) DOUBLE PRECISION array, dimension (LDR, N)
C On entry, the leading N-by-N upper triangular part of this
C array must contain the upper triangular matrix R.
C On exit, the full upper triangle is unaltered, and the
C strict lower triangle contains the strict upper triangle
C (transposed) of the upper triangular matrix S.
C
C LDR INTEGER
C The leading dimension of array R. LDR >= MAX(1,N).
C
C IPVT (input) INTEGER array, dimension (N)
C This array must define the permutation matrix P such that
C A*P = Q*R. Column j of P is column IPVT(j) of the identity
C matrix.
C
C DIAG (input) DOUBLE PRECISION array, dimension (N)
C This array must contain the diagonal elements of the
C matrix D. DIAG(I) <> 0, I = 1,...,N.
C
C QTB (input) DOUBLE PRECISION array, dimension (N)
C This array must contain the first n elements of the
C vector Q'*b.
C
C DELTA (input) DOUBLE PRECISION
C An upper bound on the Euclidean norm of D*x. DELTA > 0.
C
C PAR (input/output) DOUBLE PRECISION
C On entry, PAR must contain an initial estimate of the
C Levenberg-Marquardt parameter. PAR >= 0.
C On exit, it contains the final estimate of this parameter.
C
C RANKS (input or output) INTEGER array, dimension (1)
C On entry, if COND = 'U' and N > 0, this array must contain
C the numerical rank of the matrix R.
C On exit, this array contains the numerical rank of the
C matrix S.
C RANKS is defined as an array for compatibility with SLICOT
C Library routine MD03BD.
C
C X (output) DOUBLE PRECISION array, dimension (N)
C This array contains the least squares solution of the
C system A*x = b, sqrt(PAR)*D*x = 0.
C
C RX (output) DOUBLE PRECISION array, dimension (N)
C This array contains the matrix-vector product -R*P'*x.
C
C Tolerances
C
C TOL DOUBLE PRECISION
C If COND = 'E', the tolerance to be used for finding the
C rank of the matrices R and S. If the user sets TOL > 0,
C then the given value of TOL is used as a lower bound for
C the reciprocal condition number; a (sub)matrix whose
C estimated condition number is less than 1/TOL is
C considered to be of full rank. If the user sets TOL <= 0,
C then an implicitly computed, default tolerance, defined by
C TOLDEF = N*EPS, is used instead, where EPS is the machine
C precision (see LAPACK Library routine DLAMCH).
C This parameter is not relevant if COND = 'U' or 'N'.
C
C Workspace
C
C DWORK DOUBLE PRECISION array, dimension (LDWORK)
C On exit, the first N elements of this array contain the
C diagonal elements of the upper triangular matrix S.
C
C LDWORK INTEGER
C The length of the array DWORK.
C LDWORK >= 4*N, if COND = 'E';
C LDWORK >= 2*N, if COND <> 'E'.
C
C Error Indicator
C
C INFO INTEGER
C = 0: successful exit;
C < 0: if INFO = -i, the i-th argument had an illegal
C value.
C
C METHOD
C
C This routine calls SLICOT Library routine MD03BY to perform the
C calculations.
C
C FURTHER COMMENTS
C
C For efficiency, the arguments are not checked. This is done in
C the routine MD03BY (except for LIPAR).
C
C CONTRIBUTORS
C
C V. Sima, Research Institute for Informatics, Bucharest, Dec. 2001.
C
C REVISIONS
C
C -
C
C KEYWORDS
C
C Linear system of equations, matrix operations, plane rotations.
C
C ******************************************************************
C
C .. Scalar Arguments ..
CHARACTER COND
INTEGER INFO, LDR, LDWORK, LIPAR, N
DOUBLE PRECISION DELTA, PAR, TOL
C .. Array Arguments ..
INTEGER IPAR(*), IPVT(*), RANKS(*)
DOUBLE PRECISION DIAG(*), DWORK(*), QTB(*), R(LDR,*), RX(*), X(*)
C .. External Subroutines ..
EXTERNAL MD03BY
C ..
C .. Executable Statements ..
C
CALL MD03BY( COND, N, R, LDR, IPVT, DIAG, QTB, DELTA, PAR,
$ RANKS(1), X, RX, TOL, DWORK, LDWORK, INFO )
RETURN
C
C *** Last line of MD03BB ***
END
|
{"hexsha": "108a3e49ec186eb86361048bfb31578b0de0e246", "size": 7054, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/MD03BB.f", "max_stars_repo_name": "bnavigator/SLICOT-Reference", "max_stars_repo_head_hexsha": "7b96b6470ee0eaf75519a612d15d5e3e2857407d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2020-11-10T23:47:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T04:34:43.000Z", "max_issues_repo_path": "src/MD03BB.f", "max_issues_repo_name": "RJHKnight/slicotr", "max_issues_repo_head_hexsha": "a7332d459aa0867d3bc51f2a5dd70bd75ab67ec0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-02-07T22:26:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T11:01:07.000Z", "max_forks_repo_path": "src/MD03BB.f", "max_forks_repo_name": "RJHKnight/slicotr", "max_forks_repo_head_hexsha": "a7332d459aa0867d3bc51f2a5dd70bd75ab67ec0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-11-26T11:06:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T00:37:21.000Z", "avg_line_length": 37.1263157895, "max_line_length": 72, "alphanum_fraction": 0.599801531, "num_tokens": 1932}
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
from gen_random import random_gaussian
import numpy as np
from akg.utils import kernel_exec as utils
from test_op import vector_matmul
logging.basicConfig(level=logging.DEBUG)
def np_matmul(matrix_a, matrix_b, trans_a=False, trans_b=False):
if trans_a:
matrix_a = matrix_a.transpose(1, 0)
if trans_b:
matrix_b = matrix_b.transpose(1, 0)
m, k_a = matrix_a.shape
k_b, n = matrix_b.shape
if k_a != k_b:
raise RuntimeError("matrix_a: %d %d vs matrix_b: %d %d" % (m, k_a, k_b, n))
result = np.dot(matrix_a, matrix_b)
return result
def gen_data(m, n, k, trans_a, trans_b, dtype):
shape_x, shape_y = vector_matmul.get_shape(m, n, k, trans_a, trans_b)
matrix_a = random_gaussian(shape_x, miu=0.5, sigma=0.01).astype(dtype)
# matrix_b = random_gaussian(shape_y, miu=0.5, sigma=0.01).astype(dtype)
# matrix_a = np.ones(shape_x, dtype=dtype)
matrix_b = np.ones(shape_y, dtype=dtype)
res = np_matmul(matrix_a, matrix_b, trans_a, trans_b)
return matrix_a, matrix_b, res
def get_name(caseIndex=1, name="leftMatrix", m=0, n=0, k=0, trans_a=False, trans_b=False):
res = "{}_{}_{}_{}_{}_{}_{}.bin".format(caseIndex, name, m, n, k, trans_a, trans_b)
return res
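# e.g. get_name(1, "leftMatrix", 32, 32, 32, False, False) returns "1_leftMatrix_32_32_32_False_False.bin"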
def read_from_file(case_index, m, n, k, trans_a, trans_b, dtype):
cur_path = os.path.abspath('.')
benchmark_path, tmp = cur_path.split("ci_test")
benchmark_path += "ci_test/AT-Benchmark/poly_benchmark/vector_matmul_benchmark/"
# print benchmark_path
left_matrix_name = get_name(case_index, "leftMatrix", m, n, k, trans_a, trans_b)
right_matrix_name = get_name(case_index, "rightMatrix", m, n, k, trans_a, trans_b)
result_name = get_name(case_index, "result", m, n, k, trans_a, trans_b)
m_a_shape, m_b_shape = vector_matmul.get_shape(m, n, k, trans_a, trans_b)
m_a = np.fromfile(benchmark_path + left_matrix_name, dtype=dtype).reshape(m_a_shape)
m_b = np.fromfile(benchmark_path + right_matrix_name, dtype=dtype).reshape(m_b_shape)
res_shape = (m, n)
res = np.fromfile(benchmark_path + result_name, dtype=dtype).reshape(res_shape)
return m_a, m_b, res
def vector_matmul_data(case_index, m, n, k, trans_a, trans_b, read_data, dump_data, dtype, debug_logging=False):
m_a = ()
m_b = ()
bench_mark = ()
if read_data:
logging.debug("read from file!")
m_a, m_b, bench_mark = read_from_file(case_index, m, n, k, trans_a, trans_b, dtype)
else:
m_a, m_b, bench_mark = gen_data(m, n, k, trans_a, trans_b, dtype)
if dump_data:
left_matrix_name = get_name(case_index, "leftMatrix", m, n, k, trans_a, trans_b)
right_matrix_name = get_name(case_index, "rightMatrix", m, n, k, trans_a, trans_b)
result_name = get_name(case_index, "result", m, n, k, trans_a, trans_b)
m_a.tofile(left_matrix_name)
m_b.tofile(right_matrix_name)
bench_mark.tofile(result_name)
if debug_logging:
logging.debug("m_a shape:{}".format(m_a.shape))
logging.debug("m_b shape:{}".format(m_b.shape))
logging.debug(type(m_a))
return m_a, m_b, bench_mark
def result_compare(actual, bench_mark, batch_tuple, M, N, K, r_tol=5e-3):
output_shape = (M, N)
error = 0
count = 0
lastErr = -2
continueErr = 0
maxContinue = -1
maxEnd = 0
logging.debug(actual.shape)
logging.debug(bench_mark.shape)
for m in range(output_shape[0]):
for n in range(output_shape[1]):
a = actual[m, n]
b = bench_mark[m, n]
if(abs(a - b) > abs(b) * r_tol):
error += 1
if lastErr + 1 == count:
continueErr += 1
else:
if continueErr > maxContinue:
maxContinue = continueErr
maxEnd = lastErr
continueErr = 1
lastErr = count
# if a != 0.0:
logging.debug("count: %6d expect: %20f actual: %20f %20.2f%%" % (count, b, a, abs(b - a) / b * 100))
count += 1
if continueErr > maxContinue:
maxContinue = continueErr
maxEnd = lastErr
logging.debug("error num: %d/%d (%.2f%%)" % (error, count, 100.0 * error / count))
logging.debug("longest error range: [%d, %d]" % (maxEnd - maxContinue + 1, maxEnd))
if maxContinue >= 16:
return False
logging.debug("\n\n******************** test ok *****************\n\n")
return True
def vector_matmul_run(case_index, m, n, k, trans_a, trans_b, read_data, dump_data, dtype, kernel_name, attrs):
batch_tuple = (1, )
# m = (m+15)//16*16
# n = (n+15)//16*16
# k = (k+15)//16*16
mod, out_shape = vector_matmul.vector_matmul(m, n, k, trans_a, trans_b, dtype, kernel_name, attrs)
utils.create_code(kernel_name, "./", mod.imported_modules[0].get_source())
# Generate data
m_a, m_b, bench_mark = vector_matmul_data(case_index, m, n, k, trans_a, trans_b, read_data, dump_data, dtype)
# mod launch
output = np.full(out_shape, np.nan, dtype=dtype)
output = utils.mod_launch(mod, (m_a, m_b, output), expect=batch_tuple)
# compare result
compare_result = result_compare(output, bench_mark, batch_tuple, m, n, k, r_tol=1e-2)
return (m_a, m_b), output, bench_mark, compare_result
|
{"hexsha": "8a504a829ffc05c07637092fabef53b45b84adfc", "size": 5968, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/common/test_run/vector_matmul_run.py", "max_stars_repo_name": "laekov/akg", "max_stars_repo_head_hexsha": "5316b8cb2340bbf71bdc724dc9d81513a67b3104", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-31T02:43:43.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-31T02:43:43.000Z", "max_issues_repo_path": "tests/common/test_run/vector_matmul_run.py", "max_issues_repo_name": "laekov/akg", "max_issues_repo_head_hexsha": "5316b8cb2340bbf71bdc724dc9d81513a67b3104", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/common/test_run/vector_matmul_run.py", "max_forks_repo_name": "laekov/akg", "max_forks_repo_head_hexsha": "5316b8cb2340bbf71bdc724dc9d81513a67b3104", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.3, "max_line_length": 116, "alphanum_fraction": 0.6456099196, "include": true, "reason": "import numpy", "num_tokens": 1693}
|
import cv2
import numpy as np
import alglib.colour_space as colour
def hsv_mask(frame, lower=np.array([2, 35, 128], np.uint8), upper=np.array([30, 124, 255], np.uint8)):
colour_space = colour.hsv(frame)
mask = cv2.inRange(colour_space, lower, upper)
return mask
def auto_canny(image, sigma=0.33):
# compute the median of the single channel pixel intensities
v = np.median(image)
# apply automatic Canny edge detection using the computed median
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edged = cv2.Canny(image, lower, upper)
# return the edged image
return edged
def contour_hsv(frame, lower=np.array([2, 35, 128], np.uint8), upper=np.array([30, 124, 255], np.uint8)):
mask = hsv_mask(frame, lower, upper)
return cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
def contour_canny(frame, delta=0.33):
canny = auto_canny(frame, delta)
return cv2.findContours(canny, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]
def blob_detect(frame):
# Set up the detector with default parameters.
detector = cv2.SimpleBlobDetector_create()
return detector.detect(frame)
def filter_contours(contours, area=50):
rtn_contours = []
for cnt in contours:
if area < cv2.contourArea(cnt):
rtn_contours.append(cnt)
return rtn_contours
def hand_contours():
return
hsv_map = np.zeros((180, 256, 3), np.uint8)
h, s = np.indices(hsv_map.shape[:2])
hsv_map[:, :, 0] = h
hsv_map[:, :, 1] = s
hsv_map[:, :, 2] = 255
hsv_map = cv2.cvtColor(hsv_map, cv2.COLOR_HSV2BGR)
hist_scale = 10
def hsv_histogram(hsv):
#hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
dark = hsv[..., 2] < 32
hsv[dark] = 0
h = cv2.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
h = np.clip(h * 0.005 * hist_scale, 0, 1)
return hsv_map * h[:, :, np.newaxis] / 255.0
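# A minimal usage sketch (hypothetical driver code, assuming a BGR frame from cv2.VideoCapture):
# ok, frame = cv2.VideoCapture(0).read()
# if ok:
#     contours = filter_contours(contour_hsv(frame), area=100)
#     cv2.drawContours(frame, contours, -1, (0, 255, 0), 2)
#     hist_img = hsv_histogram(colour.hsv(frame))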
|
{"hexsha": "8eaa6068f254d6d582959722e45ae67bbe9398d1", "size": 1941, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/alglib/processing.py", "max_stars_repo_name": "zeryter-xyz/OpenHandTrack", "max_stars_repo_head_hexsha": "c619bd87c48fc8c64fa8855394369520f7931f7f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-10-15T04:04:19.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-15T04:04:19.000Z", "max_issues_repo_path": "src/alglib/processing.py", "max_issues_repo_name": "zeryter-xyz/OpenHandTrack", "max_issues_repo_head_hexsha": "c619bd87c48fc8c64fa8855394369520f7931f7f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/alglib/processing.py", "max_forks_repo_name": "zeryter-xyz/OpenHandTrack", "max_forks_repo_head_hexsha": "c619bd87c48fc8c64fa8855394369520f7931f7f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.5697674419, "max_line_length": 105, "alphanum_fraction": 0.6543019062, "include": true, "reason": "import numpy", "num_tokens": 608}
|
[STATEMENT]
lemma aadd_two_negg[simp]:"\<lbrakk>a < (0::ant); b < 0\<rbrakk> \<Longrightarrow> a + b < 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>a < 0; b < 0\<rbrakk> \<Longrightarrow> a + b < 0
[PROOF STEP]
by auto
|
{"llama_tokens": 111, "file": "Valuation_Valuation1", "length": 1}
|
# coding: utf-8
# # Fomalhaut A's vertical structure
# Multiple pointings... See splits.py for splits, statwt, and uv table creation
# In[1]:
import os
import numpy as np
import emcee
import scipy.optimize
import scipy.signal
import matplotlib.pyplot as plt
import corner
import pymultinest as pmn
import galario.double as gd
from galario import arcsec
import alma.image
#get_ipython().run_line_magic('load_ext', 'autoreload')
#get_ipython().run_line_magic('autoreload', '2')
#get_ipython().run_line_magic('matplotlib', 'notebook')
# In[2]:
# this may be needed to avoid emcee hanging when using multiple threads
gd.threads(num=1)
# In[3]:
# import the data, this assumes we're getting the output from uvplot
uvdata = []
all_weights = np.array([])
wavelength = []
fw = []
nfield = 7
nspw = 4
for i in range(nfield):
for j in range(nspw):
f = 'uv-field{}-spw{}.txt'.format(i,j)
u, v, Re, Im, w = np.require( np.loadtxt(f, unpack=True),requirements=["C_CONTIGUOUS"])
# meaning we can get the mean wavelength like so
with open(f) as tmp:
_ = tmp.readline()
tmp = tmp.readline()
wavelength_tmp = float(tmp.strip().split('=')[1])
u /= wavelength_tmp
v /= wavelength_tmp
wavelength.append(wavelength_tmp)
# estimate re-weighting factor (so that chi^2 for null model would be 1, and d.o.f = 2*len(w))
# weights would need to be multiplied by this number
fw.append( 2*len(w) / np.sum( (Re**2.0 + Im**2.0) * w ) )
# print('{}, {} rows, \twave {:9.7f}mm,'
# '\treweight factor {:g}'.format(os.path.basename(f),len(w),
# wavelength_tmp*1e3,
# fw[-1]))
uvdata.append( (u, v, Re, Im, w) )
all_weights = np.append(all_weights, w)
# In[4]:
# set image properties, take greatest resolution needed
nxy = 0
dxy = 1
for vis in uvdata:
u, v, _, _, _ = vis
nxy_tmp, dxy_tmp = gd.get_image_size(u, v, verbose=False)
if nxy_tmp > nxy and dxy_tmp < dxy:
nxy = nxy_tmp
dxy = dxy_tmp
dxy_arcsec = dxy / arcsec
#print('Final nxy:{}, dxy:{}, dxy arcsec:{}'.format(nxy, dxy, dxy_arcsec))
# In[5]:
# decide what density model we want to use
model_name = 'peri_glow'
# In[6]:
# make the image object, one will be used for all fields
# use an empirical pb from CASA, since it may matter here
ii = alma.image.Image(arcsec_pix=dxy_arcsec, image_size=(nxy, nxy), model='los_image',
dens_model=model_name, z_fact=1, wavelength=wavelength[0],
star=True, pb_fits='tmp.pb.fits')
# In[7]:
# drop-in function for ii.image, with fixed image parameters
# star_fwhm is small since images are centered on 0,0
ii.image = lambda p: alma.image.eccentric_ring_image(p, nxy, dxy_arcsec, n=10000000, star_fwhm=0.1, da_gauss=False)
# In[8]:
# create offset primary beam
def get_pb(ii, x0, y0):
return ii.primary_beam_image(x0=x0, y0=y0)
# In[9]:
# add weight factor to parameter list
ii.params += ['$f_{w}$']
ii.p_ranges += [[0,10]]
ii.n_params += 1
# In[10]:
# need finite ranges for multinest, modify for problem at hand
ii.p_ranges[0] = [-0.1,0.1]
ii.p_ranges[1] = [-0.1,0.1]
ii.p_ranges[2] = [140,170]
ii.p_ranges[3] = [-20,60]
ii.p_ranges[4] = [50,80]
ii.p_ranges[5] = [0.01,0.1]
ii.p_ranges[6] = [10,25]
ii.p_ranges[7] = [0,3]
#ii.p_ranges[9] = [0,0.005] #
#ii.p_ranges[11] = [0,0.005] # for flat MacGregor model
#ii.p_ranges[12] = [0,0.005] #
ii.p_ranges[13] = [0,0.0015]
# In[11]:
# offsets in x, y (i.e. -ra, dec), delta RA are (a-b)*15*cos(dec)
# we assume the pointing is good and that these are fixed
# (but can all move relative to some center together)
off = [0.0, 0.0, # field 0
-8.898409, -19.98620, # field 1 relative to 0
+8.898396, +19.98620, # 2 relative etc.
+10.22100, +7.42670, # 3
-1.32100, +12.56580, # 4
+1.32100, -10.93879, # 5
-10.22100, -7.42670] # 6
# pericentre glow model
p0 = [-0.06542785156837794,0.048660868203270105,156.37496611018472,40.74382055684831,66.64319590251502,
0.026595648373342343,18.173138876150325,1.5,0.1238431709670846,0.0025,
0.06,0.0025,0.0025,0.000673553452336449, 1.1]
p0 = np.array(p0)
#print('parameters and ranges for {}'.format(model_name))
#for i in range(ii.n_params):
# print('{}\t{}\t{}\t{}'.format(i,p0[i],ii.p_ranges[i],ii.params[i]))
# In[12]:
# set size of cutout used to generate images, which is based on the
# initial parameters. The tolerance in compute_rmax might be
# varied if the crop size turns out too large. We set 'zero_node'
# to True because we'll generate unrotated images, and let galario
# do the rest
# ii.compute_rmax(np.append([0, 0], p0[14:]), tol=1e-2, expand=10, zero_node=False)
# this gives an idea of how long an mcmc might take
# %timeit ii.image(p0)
# show an image and the primary beam
#im = ii.image(p0)
#fig,ax = plt.subplots(1,3, figsize=(9.5,5), sharey=True, sharex=True)
#ax[0].imshow(im, origin='bottom', vmax=np.percentile(im, 99.9))
#ax[1].imshow(get_pb(ii,0,0), origin='bottom')
#ax[2].imshow(im*get_pb(ii,0,0), origin='bottom', vmax=np.percentile(im, 99.9))
#fig.tight_layout()
# In[13]:
# sanity check on field offsets
#fig,ax = plt.subplots(2,4, figsize=(9.5,5), sharey=True, sharex=True)
#for i in range(nfield):
# tmp = np.append([(p0[0]-off[2*i]), (p0[1]-off[2*i+1])],p0[2:])
# im = ii.image(tmp)
# ax[np.unravel_index(i,ax.shape)].imshow(im * get_pb(ii,0,0), origin='bottom', vmax=np.percentile(im,99))
# ax[np.unravel_index(i,ax.shape)].contour(im, origin='lower', alpha=0.5)
# ax[np.unravel_index(i,ax.shape)].set_title('field {}'.format(i))
# ax[np.unravel_index(i,ax.shape)].plot(nxy/2, nxy/2, '+')
#
#fig.tight_layout()
# In[14]:
# sanity check on pb offsets
#fig,ax = plt.subplots(2,4, figsize=(9.5,5), sharey=True, sharex=True)
#tmp = np.append([0,0],p0[2:])
#image = ii.image(tmp)
#for i in range(nfield):
# x0, y0 = p0[0]-off[2*i], p0[1]-off[2*i+1]
# pb = get_pb(ii, -x0, -y0)
# ax[np.unravel_index(i,ax.shape)].imshow(image * pb, origin='bottom', vmax=np.percentile(image,99))
# ax[np.unravel_index(i,ax.shape)].set_title('field {}:{:5.2f},{:5.2f}'.format(i,x0,y0))
# ax[np.unravel_index(i,ax.shape)].plot(nxy/2-x0/dxy_arcsec, nxy/2-y0/dxy_arcsec, '+w')
#
#fig.tight_layout()
# In[21]:
def lnpostfn(p):
""" Log of posterior probability function """
for x,r in zip(p,ii.p_ranges):
if x < r[0] or x > r[1]:
return -np.inf
# images, star-centered with offset primary beam (i.e. second e.g. above), shifted by galario
chi2 = 0.0
tmp = np.append([0,0],p[2:-1])
image = ii.image(tmp)
for i in range(nfield):
x0, y0 = p[0]-off[2*i], p[1]-off[2*i+1]
pb = get_pb(ii, -x0, -y0)
for j in range(nspw):
u, v, Re, Im, w = uvdata[i*nspw + j]
chi2_tmp = gd.chi2Image(image*pb , dxy, u, v, Re, Im, w, origin='lower',
dRA = -x0*arcsec, dDec = y0*arcsec)
chi2 += chi2_tmp
# we include a weight factor to force reasonable uncertainties
return -0.5 * ( chi2*p[-1] + np.sum(2*np.log(2*np.pi/(all_weights*p[-1]))) )
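# (the value returned above is the full Gaussian log-likelihood: the chi^2 term plus the
# normalisation sum over the real and imaginary parts, with sigma^2 = 1/(w * f_w) where
# f_w = p[-1] is the fitted re-weighting factor)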
nlnpostfn = lambda p: -lnpostfn(p)
# In[22]:
# check it works
#lnpostfn(p0)
# ### multinest fitting
# In[15]:
# where results go
pmn_out = 'multinest-da-full/'
model_name = pmn_out[:-1]
def mn_prior(cube, ndim, nparam):
pars = ii.p_ranges
for i in range(ndim):
cube[i] = pars[i][0] + cube[i] * (pars[i][1]-pars[i][0])
def mn_lnlike(cube, ndim, nparam):
param = np.array([])
for i in range(ndim):
param = np.append(param,cube[i])
return lnpostfn(param)
# In[ ]:
# run it (call python script of this notebook with >nice -5 mpiexec -n 40 python3 vis_model.py)
pmn.run(mn_lnlike, mn_prior, ii.n_params, n_live_points=75, verbose=True,
outputfiles_basename=pmn_out, multimodal=True)
# In[16]:
# output, start here if multinest was run outside this notebook
#a = pmn.Analyzer(outputfiles_basename=pmn_out, n_params=ii.n_params)
## print(a.get_stats())
#
#p = [a.get_stats()['marginals'][i]['median'] for i in range(ii.n_params)]
#print(p)
#
#for i in range(ii.n_params):
# print(ii.params[i], '\t',
# a.get_stats()['marginals'][i]['median'], '\t',
# p[i]-a.get_stats()['marginals'][i]['1sigma'][0], '\t',
# a.get_stats()['marginals'][i]['1sigma'][1]-p[i], '\t',
# a.get_stats()['marginals'][i]['3sigma'][1])
#
#
## In[17]:
#
#
## corner plot
#d = a.get_data()
#mask = d[:,0] > 1e-8
#fig = corner.corner(d[mask,2:], weights=d[mask,0], labels=ii.params, show_titles=True)
#fig.savefig('{}corner.pdf'.format(pmn_out))
#
#
## ### emcee fitting
#
## In[25]:
#
#
## set up and run mcmc fitting
#ndim = ii.n_params # number of dimensions
#nwalkers = 28 # number of walkers
#nsteps = 10 # total number of MCMC steps
#nthreads = 4 # CPU threads that emcee should use
#
#sampler = emcee.EnsembleSampler(nwalkers, ndim, lnpostfn, threads=nthreads)
#
## initialize the walkers with an ndim-dimensional Gaussian ball
#pos = [p0 + p0*0.05*np.random.randn(ndim) for i in range(nwalkers)]
#
## execute the MCMC
#pos, prob, state = sampler.run_mcmc(pos, nsteps)
#
#
## In[27]:
#
#
#print(sampler.acceptance_fraction)
#
#
## In[29]:
#
#
## save the chains to file
#model_name = 'emcee_full'
#np.savez_compressed(model_name+'/chains-'+model_name+'.npz', sampler.chain, sampler.lnprobability)
#
#
## In[16]:
#
#
## load chains, start here if emcee was run outside this notebook
#with np.load(model_name+'/chains-'+model_name+'.npz') as data:
# chain = data['arr_0']
# lnprobability = data['arr_1']
#
#
## In[17]:
#
#
#nwalkers, nsteps, ndim = chain.shape
#print(chain.shape)
#
#
## In[18]:
#
#
## see what the chains look like, skip a burn in period if desired
#burn = 900
#fig,ax = plt.subplots(ndim+1,2,figsize=(9.5,9),sharex='col',sharey=False)
#
#for j in range(nwalkers):
# ax[-1,0].plot(lnprobability[j,:burn])
# for i in range(ndim):
# ax[i,0].plot(chain[j,:burn,i])
# ax[i,0].set_ylabel(ii.params[i])
#
#for j in range(nwalkers):
# ax[-1,1].plot(lnprobability[j,burn:])
# for i in range(ndim):
# ax[i,1].plot(chain[j,burn:,i])
# ax[i,1].set_ylabel(ii.params[i])
#
#ax[-1,0].set_xlabel('burn in')
#ax[-1,1].set_xlabel('sampling')
#fig.savefig(model_name+'/chains-'+model_name+'.pdf')
#
#
## In[19]:
#
#
## make the corner plot
#fig = corner.corner(chain[:,burn:,:].reshape((-1,ndim)), labels=ii.params,
# show_titles=True)
#
#fig.savefig(model_name+'/corner-'+model_name+'.pdf')
#
#
## In[19]:
#
#
## get the median parameters
#p = np.median(chain[:,burn:,:].reshape((-1,ndim)),axis=0)
#s = np.std(chain[:,burn:,:].reshape((-1,ndim)),axis=0)
#print(','.join(p.astype(str)))
#print(s)
#
#
## ### post-fitting stuff (regardless of fitting method)
#
## In[20]:
#
#
## see what it looks like
#im = ii.image(p)
#fig,ax = plt.subplots()
#ax.imshow(im, origin='bottom', vmax=np.percentile(im, 99.9))
#fig.tight_layout()
#fig.savefig(model_name+'/best-'+model_name+'.pdf', dpi=500)
#
#
## In[21]:
#
#
## save the visibilities for subtraction from the data
#tmp = np.append([0,0],p[2:])
#image = ii.image(tmp)
#for i in range(nfield):
# x0, y0 = p[0]-off[2*i], p[1]-off[2*i+1]
# pb = get_pb(ii, -x0, -y0)
# for j in range(nspw):
# u, v, Re, Im, w = uvdata[i*nspw + j]
# vis_mod = gd.sampleImage(image * pb, dxy, u, v, origin='lower', dRA = -x0*arcsec, dDec = y0*arcsec)
# np.save(model_name+'/vis-{}-field{}-spw{}.npy'.format(model_name, i, j), vis_mod)
#
#
## ## Creating a map of the residuals
## See splits.py
|
{"hexsha": "dc68680792464dfc436dc4bd8c08ff427a2a8fc2", "size": 11899, "ext": "py", "lang": "Python", "max_stars_repo_path": "fomalhaut/vis_model_mn.py", "max_stars_repo_name": "drgmk/eccentric-width", "max_stars_repo_head_hexsha": "4506bb0a856c62a106c4105147121e818802efd0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "fomalhaut/vis_model_mn.py", "max_issues_repo_name": "drgmk/eccentric-width", "max_issues_repo_head_hexsha": "4506bb0a856c62a106c4105147121e818802efd0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "fomalhaut/vis_model_mn.py", "max_forks_repo_name": "drgmk/eccentric-width", "max_forks_repo_head_hexsha": "4506bb0a856c62a106c4105147121e818802efd0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.4422222222, "max_line_length": 115, "alphanum_fraction": 0.6186234137, "include": true, "reason": "import numpy,import scipy", "num_tokens": 3911}
|
import json
import argparse
from typing import Dict, List
from collections import namedtuple
from enum import IntFlag
import networkx as nx
from networkx import DiGraph
# BBNode is used for the nodes in the networkx digraph
BBNode = namedtuple("BBNode", ["index"])
# EdgeData is used to store edge metadata in the networkx digraph
EdgeData = namedtuple("EdgeData", ["flags", "type"])
# enum class for gcc edge flags, see below links for references
# https://gcc.gnu.org/onlinedocs/gccint/Edges.html#Edges
# https://github.com/gcc-mirror/gcc/blob/master/gcc/cfg-flags.def
class GccEdgeFlag(IntFlag):
"""enum to store gcc edge flags.
It is a subclass of `IntFlag` to allow bitwise operations
e.g. one can have an int variable `flag` and do
flag & GccEdgeFlag.FALLTHROUGH
"""
FALLTHROUGH = 2**0
ABNORMAL = 2**1
EH = 2**3
TRUE_VALUE = 2**8
FALSE_VALUE = 2**9
def edge_flags_to_str(flags: int):
to_return = ""
for flag in GccEdgeFlag:
if flag & flags:
to_return += flag.name + " "
return to_return[:-1]
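# A minimal sketch of the helper above (flag values follow the enum defined in this file):
# >>> edge_flags_to_str(int(GccEdgeFlag.FALLTHROUGH | GccEdgeFlag.TRUE_VALUE))
# 'FALLTHROUGH TRUE_VALUE'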
def basic_blocks_to_digraph(basic_blocks: List, return_edges_data = False):
"""
Parameters:
`basic_blocks` should be a list of basic_blocks obtained from
the gcc AST plugin. For example, it could be the list of basic blocks
obtained from a function definition.
`return_edges_data` is a boolean. If it is True, a string
representing the digraph's collective edge data will be returned.
Returns:
returns a networkx digraph where the nodes are `BBNode`s and the edge relationship
is defined by the `edges` field in the `basic_blocks` list. The returned graph also
stores `EdgeData` instances at each edge using edge objects.
Optionally returns a string of the digraph's collective edge data if
`return_edges_data` is True.
"""
digraph = nx.DiGraph()
collective_edges_data = ""
# we complete two passes to make the digraph
# on the first pass, we add the BBNodes, and cache them within a dict
bb_cache = {}
for bb in basic_blocks:
bb_node = make_bbnode(bb)
bb_cache[bb["index"]] = bb_node
digraph.add_node(bb_node)
# on the second pass, we add in the edges
for bb in basic_blocks:
index = bb["index"]
collective_edges_data += f"\nEdges for BB{index}"
for e in bb["edges"]:
src = bb_cache[e["source"]]
tgt = bb_cache[e["target"]]
flags = e["flags"]
edge_data = EdgeData(flags=flags, type=edge_flags_to_str(flags))
digraph.add_edge(src, tgt, object=edge_data)
collective_edges_data += f"\n\t{src} --> {tgt} with data: {edge_data}"
# prune of first '\n' of collective_edges_data
collective_edges_data = collective_edges_data[1:]
if return_edges_data:
return digraph, collective_edges_data
# otherwise
return digraph
def make_bbnode(bb: Dict):
"""
Parameters:
bb: the dict storing the basic block data from the json output of gcc plugin
Returns:
returns a BBNode encompassing the data stored in `bb`
"""
return BBNode(index=bb["index"])
def digraph_to_pdf(digraph: DiGraph, filename: str):
"""
Convert the digraph to a PyGraphviz AGraph, and then
save it to a pdf with filename `filename`
"""
agraph = nx.nx_agraph.to_agraph(digraph)
agraph.graph_attr.update(
{"dpi": 227, "fontsize": 20, "fontname": "Menlo", "rankdir": "TB"}
)
agraph.node_attr.update({"fontname": "Menlo"})
agraph.draw(f"{filename}--basic_blocks.pdf", prog="dot")
def json_ast_to_bb_graphs(gcc_ast: Dict):
"""
Given a gcc AST json, create the networkx basic block digraphs for each function in it.
Generates the digraphs' PDFs, and also prints the edge data to the console
"""
input_file = gcc_ast["mainInputFilename"]
input_file_stripped = input_file.split("/")[-1]
functions = gcc_ast["functions"]
for f in functions:
basic_blocks = f["basicBlocks"]
digraph, output = basic_blocks_to_digraph(basic_blocks, return_edges_data=True)
print(f"\nCollective Edge Data for function {f['name']}")
print(f"{30*'-'}")
print(output)
filename = f"{input_file_stripped}.{f['name']}"
digraph_to_pdf(digraph, filename)
def main():
parser = argparse.ArgumentParser(description=("Creates networkx digraphs for the "
"basic blocks in each function from the provided gcc ast json file. "
"The edge data for each digraph is printed to the console, and a "
"pdf of the graph is generated in the cwd."))
parser.add_argument("json_file", nargs=1,
help="the gcc ast json file to be read")
json_file = parser.parse_args().json_file[0]
print(f"Loaded json_file: {json_file}")
ast_json = json.load(open(json_file))
json_ast_to_bb_graphs(ast_json)
if __name__ == "__main__":
main()
|
{"hexsha": "f5d00976b119de32e6fc13239f8193cf544e68fd", "size": 5067, "ext": "py", "lang": "Python", "max_stars_repo_path": "automates/program_analysis/GCC2GrFN/gcc_basic_blocks_to_digraph.py", "max_stars_repo_name": "ml4ai/automates", "max_stars_repo_head_hexsha": "3bb996be27e9ee9f99e931b885707dae2c2ac567", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2018-12-19T16:32:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-05T07:58:15.000Z", "max_issues_repo_path": "automates/program_analysis/GCC2GrFN/gcc_basic_blocks_to_digraph.py", "max_issues_repo_name": "ml4ai/automates", "max_issues_repo_head_hexsha": "3bb996be27e9ee9f99e931b885707dae2c2ac567", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 183, "max_issues_repo_issues_event_min_datetime": "2018-12-20T17:03:01.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-23T22:21:42.000Z", "max_forks_repo_path": "automates/program_analysis/GCC2GrFN/gcc_basic_blocks_to_digraph.py", "max_forks_repo_name": "ml4ai/automates", "max_forks_repo_head_hexsha": "3bb996be27e9ee9f99e931b885707dae2c2ac567", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-01-04T22:37:49.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-19T17:34:16.000Z", "avg_line_length": 33.3355263158, "max_line_length": 92, "alphanum_fraction": 0.6615354253, "include": true, "reason": "import networkx,from networkx", "num_tokens": 1265}
|
# encoding: utf-8
"""
@author : zhirui zhou
@contact: evilpsycho42@gmail.com
@time : 2020/5/20 17:17
"""
import pytest
from deepseries.dataset import create_seq2seq_data_loader
import numpy as np
def test_create_seq2seq_data_loader():
x = np.random.rand(30, 1, 24)
dl = create_seq2seq_data_loader(x, 12, 12, np.arange(x.shape[-1]), batch_size=4, num_iteration_per_epoch=30, seq_last=True)
for i, batch in enumerate(dl):
pass
assert i == 30-1
|
{"hexsha": "4ccdfad7ecc3a817afc0304488bcf0cb4b0738ec", "size": 473, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/test_dataset.py", "max_stars_repo_name": "EvilPsyCHo/Deep-Time-Series-Prediction", "max_stars_repo_head_hexsha": "f6a6da060bb3f7d07f2a61967ee6007e9821064e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 334, "max_stars_repo_stars_event_min_datetime": "2019-11-01T01:39:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T08:10:17.000Z", "max_issues_repo_path": "test/test_dataset.py", "max_issues_repo_name": "luxixiang/Deep-Time-Series-Prediction", "max_issues_repo_head_hexsha": "f6a6da060bb3f7d07f2a61967ee6007e9821064e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2019-12-30T08:01:32.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-06T05:27:29.000Z", "max_forks_repo_path": "test/test_dataset.py", "max_forks_repo_name": "luxixiang/Deep-Time-Series-Prediction", "max_forks_repo_head_hexsha": "f6a6da060bb3f7d07f2a61967ee6007e9821064e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 57, "max_forks_repo_forks_event_min_datetime": "2020-01-13T13:20:15.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T08:10:20.000Z", "avg_line_length": 22.5238095238, "max_line_length": 127, "alphanum_fraction": 0.7061310782, "include": true, "reason": "import numpy", "num_tokens": 148}
|
module asflowf_cube_to_vtk
use asflowf_crystal, only : write_xyz
use asflowf_cube, only : cube
use asflowf_constants
implicit none
contains
subroutine cube_to_vtk(cube_file_in, vtk_file_out)
integer :: i, j, k
integer :: ngridx, ngridy, ngridz
type(cube) :: cube_i
real(kind=dp) :: cell_volume, cell_volume_per_unit, tmp, tmp_vec(3)
real(kind=dp) :: a, b, c, x, y, z, total_electron
! character, allocatable :: cube_file_in
!character(len=128), intent(in) :: cube_file_in, vtk_file_out
character(len=*), intent(in) :: cube_file_in, vtk_file_out
! read cube file
write(*, *) "On getting the command line argument:"
if ( cube_file_in == "" .or. vtk_file_out == "") then
write(*, *) "You should provide the name for the input cube file and output vtk file!"
stop
else
write(*, *) "The input cube file name is: ", cube_file_in
write(*, *) "The output vtk file name is: ", vtk_file_out
end if
call cube_i%read_cube_file(cube_file_in)
write(*, *) "Successfully read the cube file!"
! values in the cube file are the electron density rho(r) in units of e/Bohr^3,
! i.e. the number of electrons per Bohr^3,
! so we have to convert it to e/Angstrom^3 by dividing by bohr_to_angstrom**3
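! (assuming bohr_to_angstrom is approximately 0.529177 in the constants module,
! bohr_to_angstrom**3 is approximately 0.148, so the division scales the density up by about 6.75)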
call cross_3(cube_i%cube_crystal%cell(1, :), cube_i%cube_crystal%cell(2, :), tmp_vec)
call dot_3(tmp_vec, cube_i%cube_crystal%cell(3, :), cell_volume)
ngridx = cube_i%ngridx
ngridy = cube_i%ngridy
ngridz = cube_i%ngridz
cell_volume_per_unit = cell_volume / ngridx / ngridy / ngridz
total_electron = sum(cube_i%data) * cell_volume_per_unit / bohr_to_angstrom**3
write(*, *) "-----------------------------------------------------------------"
write(*, *) " Out put collected information "
write(*, *) "-----------------------------------------------------------------"
write(*, *) "ngridx: ", ngridx
write(*, *) "ngridy: ", ngridy
write(*, *) "ngridz: ", ngridz
write(*, *) "cell volume: ", cell_volume
write(*, *) "total number of electrons: ", total_electron
write(*, *) "-----------------------------------------------------------------"
call dot_3(cube_i%cube_crystal%cell(1, :), cube_i%cube_crystal%cell(1, :), a)
a = sqrt(a)
call dot_3(cube_i%cube_crystal%cell(2, :), cube_i%cube_crystal%cell(2, :), b)
b = sqrt(b)
call dot_3(cube_i%cube_crystal%cell(3, :), cube_i%cube_crystal%cell(3, :), c)
c = sqrt(c)
!write(*, *) a, b ,c
! output data to vtk file
open(11, file=vtk_file_out, status="replace", action="write")
write(11, "(A)") "# vtk DataFile Version 5.1"
write(11, "(A)") cube_file_in
write(11, "(A)") "ASCII"
write(11, "(A)") "DATASET STRUCTURED_POINTS"
write(11, "(A, 3I10)") "DIMENSIONS ", ngridx, ngridy, ngridz
write(11, "(A, 3F15.6)") "SPACING", a/ngridx, b/ngridy, c/ngridz
!write(11, "(A, I10, A)") "POINTS ", ngridx * ngridy * ngridz, ' float'
!do i = 1, ngridx
! do j = 1, ngridy
! do k = 1, ngridz
! !x = real(i-1) / real(ngridx) * a
! !y = real(j-1) / real(ngridy) * b
! !z = real(k-1) / real(ngridz) * c
! x = real(i) / real(ngridx) * a
! y = real(j) / real(ngridy) * b
! z = real(k) / real(ngridz) * c
! write(11, *) x, y, z
! end do
! end do
!end do
! write grid point values:
write(11, "(A, I10)") 'POINT_DATA ', ngridx * ngridy * ngridz
write(11, "(A)") 'SCALARS CON float 1'
write(11, "(A)") 'LOOKUP_TABLE default'
do k = 1, ngridz
do j = 1, ngridy
do i = 1, ngridx
write(11, *) cube_i%data(i, j, k)
end do
end do
end do
close(11)
! output the total structure
call write_xyz(cube_i%cube_crystal, "cube-structure.xyz")
end subroutine cube_to_vtk
subroutine cross_3(x, y, z)
implicit none
real(kind=dp), dimension(3), intent(in) :: x, y
real(kind=dp), dimension(3), intent(out) :: z
z(1) = x(2) * y(3) - x(3) * y(2)
z(2) = x(3) * y(1) - x(1) * y(3)
z(3) = x(1) * y(2) - x(2) * y(1)
end subroutine
subroutine dot_3(x, y, z)
implicit none
real(kind=dp), dimension(3), intent(in) :: x, y
real(kind=dp), intent(out) :: z
z = x(1) * y(1) + x(2) * y(2) + x(3) * y(3)
end subroutine dot_3
end module asflowf_cube_to_vtk
|
{"hexsha": "7644fbbea96b4d2c45433b2f136d7c6712e3a006", "size": 5060, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "fortran/atomsciflowf/src/cube_to_vtk.f90", "max_stars_repo_name": "DeqiTang/build-test-atomsciflow", "max_stars_repo_head_hexsha": "6fb65c79e74993e2100fbbca31b910d495076805", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-25T01:44:32.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-25T01:44:32.000Z", "max_issues_repo_path": "fortran/atomsciflowf/src/cube_to_vtk.f90", "max_issues_repo_name": "DeqiTang/build-test-atomsciflow", "max_issues_repo_head_hexsha": "6fb65c79e74993e2100fbbca31b910d495076805", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "fortran/atomsciflowf/src/cube_to_vtk.f90", "max_forks_repo_name": "DeqiTang/build-test-atomsciflow", "max_forks_repo_head_hexsha": "6fb65c79e74993e2100fbbca31b910d495076805", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.045112782, "max_line_length": 99, "alphanum_fraction": 0.493083004, "num_tokens": 1504}
|
##
# @file casmo.py
# @package openmoc.compatible.casmo
# @brief The parsing module provides utility functions to parse in data
# necessary to construct assembly geometries in OpenMOC
# @author Davis Tran (dvtran@mit.edu)
# @date April 24, 2014
import numpy
import h5py
import os
import openmoc.log as log
##
# @class casmo.py 'openmoc/compatible/casmo.py'
# @brief Contains data parsed from casmo output file
class Casmo(object):
##
# @brief Casmo object class constructor
def __init__(self):
self._assembly_name = None
self._filename = None
self._directory = None
self._is_symmetric = True
self._energy_groups = None
self._num_micro_regions = None
self._fuel_pin_rad = None
self._lattice_pitch = None
self._siga = None
self._sigd = None
self._sigt = None
self._sigf = None
self._signf = None
self._sigs = None
self._chi = None
self._width = None
self._min_microregions = None
self._max_microregions = None
self._kinf = None
self._pin_powers = None
self._cell_types = {}
self._cell_type_array = None
self._string_cell_type_array = None
self._average_cross_sections = None
##
# @brief Returns assembly type as string
# @return assembly type (string)
def getAssemblyName(self):
return self._assembly_name
##
# @brief Sets assembly type
# @param assembly_name a string that indicates assembly type
def setAssemblyName(self, assembly_name):
self._assembly_name = assembly_name
##
# @brief Returns name of casmo output file to be parsed
# @return name of casmo output file to be parsed
def getFilename(self):
return self._filename
##
# @brief Sets file name of casmo output file to be parsed
# @param filename the name of the casmo output file to be parsed (string)
def setFilename(self, filename):
self._filename = filename
##
# @brief Returns directory of casmo output file being parsed
# @return directory of casmo output file being parsed
def getDirectory(self):
return self._directory
##
# @brief Sets directory of casmo output file to be parsed
# @param directory directory of the casmo output file to be parsed (string)
def setDirectory(self, directory):
self._directory = directory
##
# @brief Returns whether the assembly for the casmo output file is symmetric
# @return True if symmetric, else False
def isSymmetric(self):
return self._is_symmetric
##
# @brief Sets whether the assembly for the casmo output file is symmetric
# @param is_symmetric boolean indicating whether the geometry is symmetric
def setSymmetric(self, is_symmetric):
self._is_symmetric = is_symmetric
##
# @brief Checks to see if assembly for casmo output file is symmetric
# @param f casmo output file
def checkSymmetry(self, f):
sym_counter = 0
for sym_line in f:
if 'LPI' in sym_line:
sym_counter += 1
continue
if sym_counter ==1:
sym_tokens = sym_line.split()
if len(sym_tokens) > 2:
self._is_symmetric = False
break
else:
self._is_symmetric = True
break
##
# @brief This method parses the casmo output file for the number of
# energy groups
# @return number of energy groups directly from casmo output file
def parseEnergyGroups(self):
f = open(self._directory + self._filename,'r')
for line in f:
if '[Usage Note]' in line:
tokens = line.split()
energy_groups = int(tokens[5])
break
f.close()
return energy_groups
##
# @brief Returns number of energy groups
# @return number of energy groups
def getEnergyGroups(self):
return self._energy_groups
##
# @brief Sets number of energy groups
# @param energy_groups number of energy groups (int)
def setEnergyGroups(self, energy_groups):
self._energy_groups = energy_groups
##
# @brief parses and sets number of energy groups from casmo output file
def importEnergyGroups(self):
self.setEnergyGroups(self.parseEnergyGroups())
##
# @brief This method parses the casmo output file for the number of
# microregions in the assembly
# @return number of microregions directly from casmo output file
def parseNumRegions(self):
f = open(self._directory + self._filename, 'r')
#check for symmetry
self.checkSymmetry(f)
counter = 0
newcounter = 0
num_micro_regions = 0
if self._is_symmetric:
for line in f:
if 'Micro-region number ' in line:
counter += 1
continue
if counter == 1:
tokens = line.split()
num_micro_regions = int(tokens[1])
break
else:
for newline in f:
if '--- ---- --------------- ------------ ' in newline:
newcounter += 1
continue
if newcounter == 1:
newtokens = newline.split()
num_micro_regions = int(newtokens[0])
break
f.close()
return num_micro_regions
##
# @brief Returns number of microregions in assembly
# @return number of microregions
def getNumRegions(self):
return self._num_micro_regions
##
# @brief Sets the number of microregions
# @param num_micro_regions the number of microregions in the assembly
def setNumRegions(self, num_micro_regions):
self._num_micro_regions = num_micro_regions
##
# @brief parses and sets number of microregions from casmo output file
def importNumRegions(self):
self.setNumRegions(self.parseNumRegions())
##
# @brief This method parses the casmo output file for the thermally
# expanded fuel pin radii
# @return fuel pin radii (float)
def parseFuelPinRadii(self):
f = open(self._directory + self._filename, 'r')
for line in f:
if 'Average fuel pellet diam.' in line:
tokens = line.split()
diameter = tokens[5]
break
f.close()
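# Example: a diameter string such as '0.81915E+00' (hypothetical value for
# illustration) yields a radius of 0.5 * 0.81915 * 10**0 = 0.409575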
E = diameter.index('E')
radii = (0.5 * float(diameter[0:E]) * 10 ** int(diameter[E+1:]))
return radii
##
# @brief Returns fuel pin radii of the assembly
# @return fuel pin radii (float)
def getFuelPinRadii(self):
return self._fuel_pin_rad
##
# @brief Sets fuel pin radii of the assembly
# @param fuel_pin_rad fuel pin radii to be set for assembly (float)
def setFuelPinRadii(self, fuel_pin_rad):
self._fuel_pin_rad = fuel_pin_rad
##
# @brief parses and sets fuel pin radii of the assembly
def importFuelPinRadii(self):
self.setFuelPinRadii(self.parseFuelPinRadii())
##
# @brief This method parses the casmo output file for the thermally
# expanded lattice pitch
# @return lattice pitch (float)
def parseLatticePitch(self):
f = open(self._directory + self._filename, 'r')
for line in f:
if 'Bundle pitch' in line:
tokens = line.split()
raw_pitch = tokens[3]
break
f.close()
E = raw_pitch.index('E')
pitch = (float(raw_pitch[0:E]) * 10 ** int(raw_pitch[E+1:]))
return pitch
##
# @brief Returns lattice pitch of the assembly
# @return lattice pitch (float)
def getLatticePitch(self):
return self._lattice_pitch
##
# @brief Sets lattice pitch of the assembly
# @param lattice_pitch lattice pitch to be set for assembly (float)
def setLatticePitch(self, lattice_pitch):
self._lattice_pitch = lattice_pitch
##
# @brief parses and sets lattice pitch of the assembly
def importLatticePitch(self):
self.setLatticePitch(self.parseLatticePitch())
##
# @brief This method parses the casmo output file for the materials
# cross sections for every microregion in the assembly
# @param xs_name the name of cross section type (string in all CAPS)
# @return numpy array of cross sections
def parseXS(self, xs_name):
# Parses for cross sections that are not the scattering matrix
if xs_name != 'SIGS' and xs_name!='CHI':
xs_array = numpy.zeros((self._num_micro_regions, self._energy_groups))
f = open(self._directory + self._filename, 'r')
counter = 0
for line in f:
if xs_name in line:
tokens = line.split()
xs_array[counter, :] = [float(xs) for xs in tokens[2:2+self._energy_groups]]
counter += 1
if counter == self._num_micro_regions:
break
f.close()
# Parses for scattering matrix cross sections
if xs_name == 'SIGS':
xs_array = numpy.zeros((self._num_micro_regions, self._energy_groups, self._energy_groups))
f = open(self._directory + self._filename, 'r')
cur_region = 0
cur_group = 0
for line in f:
if xs_name in line:
words = line.split()
xs_array[cur_region, cur_group, :] = [float(xs) for xs in words[2:2+self._energy_groups]]
cur_group += 1
if cur_group == self._energy_groups:
cur_region += 1
cur_group = 0
if cur_region == self._num_micro_regions:
break
f.close()
return xs_array
##
# @brief Returns a specific cross section numpy array
# @param xs_name the name of a type of cross section (string)
# @return a cross section numpy array
def getXS(self, xs_name):
'''Retrieves cross-section attribute.'''
if xs_name == 'SIGA':
return self._siga
if xs_name == 'SIGD':
return self._sigd
if xs_name == 'SIGT':
return self._sigt
if xs_name == 'SIGF':
return self._sigf
if xs_name == 'SIGNF':
return self._signf
if xs_name == 'SIGS':
return self._sigs
if xs_name == 'CHI':
return self._chi
##
# @brief Sets a specific cross section
# @param xs_name the name of a type of cross section (string)
# @param xs_array a numpy array of cross section values
def setXS(self, xs_name, xs_array):
if xs_name == 'SIGA':
self._siga = xs_array
if xs_name == 'SIGD':
self._sigd = xs_array
if xs_name == 'SIGT':
self._sigt = xs_array
if xs_name == 'SIGF':
self._sigf = xs_array
if xs_name == 'SIGNF':
self._signf = xs_array
if xs_name == 'SIGS':
self._sigs = xs_array
if xs_name == 'CHI':
self._chi = xs_array
##
# @brief parses and sets a specific cross section type from casmo output file
# @param xs_name the name of a type of cross section (string)
def importXS(self, xs_name):
self.setXS(xs_name, self.parseXS(xs_name))
##
# @brief calls importXS for all types of cross sections needed by OpenMOC
def importAllXS(self):
xs_list = ['SIGA', 'SIGD', 'SIGT', 'SIGF', 'SIGNF', 'SIGS']
for xs_name in xs_list:
self.importXS(xs_name)
##
# @brief This method parses the casmo output file for the dimensions of
# the assembly. The width equals the number of fuel pins in a row
# or column of an assembly.
# @return width of the assembly
def parseWidth(self):
half_width = -1
f = open(self._directory + self._filename, 'r')
#check for symmetry
self.checkSymmetry(f)
for line in f:
if 'Layout' in line:
half_width += 1
continue
if half_width>=0 and line == '\n':
break
if half_width>=0:
half_width += 1
f.close()
if self._is_symmetric:
return half_width*2-1
else:
return half_width
##
# @brief Returns width of the assembly
# @return width of the assembly (int)
def getWidth(self):
return self._width
##
# @brief Sets width of the assembly
# @param width the width to be set for the assembly
def setWidth(self, width):
self._width = width
##
# @brief parses and sets the width of the assembly from the casmo output file
def importWidth(self):
self.setWidth(self.parseWidth())
##
# @brief This method parses the casmo output file for microregion ranges
# and returns a tuple of two numpy arrays: one holds the minimum values
# and the other the maximum values of those microregion ranges, each
# located within its specific macroregion
# @return numpy array tuple (min microregion values, max microregion values)
def parseMicroregions(self):
half_width = (self._width+1)/2
min_array = numpy.zeros((self._width,self._width), dtype=numpy.int32)
max_array = numpy.zeros((self._width,self._width), dtype=numpy.int32)
min_quadrant4 = numpy.zeros((half_width,half_width), dtype=numpy.int32)
max_quadrant4 = numpy.zeros((half_width,half_width), dtype=numpy.int32)
min_values = []
max_values = []
f = open(self._directory + self._filename, 'r')
counter = 0
#check for symmetry
self.checkSymmetry(f)
if self._is_symmetric:
for line in f:
if counter >= 1 and '1_________' in line:
break
if 'Micro-region' in line:
counter += 1
continue
if counter >= 1:
tokens = line.split()
for index, token in enumerate(tokens):
token = token.strip('*')
token = token.strip('-')
if index%2 ==0:
min_quadrant4[counter-1, index/2] = float(token)
min_quadrant4[index/2, counter-1] = float(token)
else:
max_quadrant4[counter-1, (index-1)/2] = float(token)
max_quadrant4[(index-1)/2, counter-1] = float(token)
counter += 1
f.close()
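# Build the full assembly arrays by mirroring quadrant 4 left-right and
# up-down into the other three quadrants (the center row/column is shared)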
min_array[(half_width-1):,(half_width-1):] = min_quadrant4
min_array[(half_width-1):, 0:(half_width)] = numpy.fliplr(min_quadrant4)
min_array[0:(half_width), (half_width-1):] = numpy.flipud(min_quadrant4)
min_array[0:(half_width), 0:(half_width)] = numpy.flipud(numpy.fliplr(min_quadrant4))
max_array[(half_width-1):,(half_width-1):] = max_quadrant4
max_array[(half_width-1):, 0:(half_width)] = numpy.fliplr(max_quadrant4)
max_array[0:(half_width), (half_width-1):] = numpy.flipud(max_quadrant4)
max_array[0:(half_width), 0:(half_width)] = numpy.flipud(numpy.fliplr(max_quadrant4))
else:
counter = 0
for line in f:
if 'Micro :' in line:
newline = line.lstrip('Micro :')
xline = newline.translate(None, '-')
tokens = xline.split()
for index, token in enumerate(tokens):
if index%2 ==0:
min_values.append(token)
else:
max_values.append(token)
for index, value in enumerate(min_values):
min_array[int(counter)/int(self._width), index%self._width] = float(value)
counter += 1
continue
counter = 0
for index, value in enumerate(max_values):
max_array[int(counter)/int(self._width), index%self._width] = float(value)
counter += 1
continue
f.close()
return min_array, max_array
##
# @brief Returns numpy array of minimum values of microregion range within
# each macroregion
# @return numpy array of minimum values of microregion ranges
def getMinMicroregions(self):
return self._min_microregions
##
# @brief Sets minimum values of microregion ranges within each macroregion
# @param min_array numpy array of minimum values of microregion ranges
def setMinMicroregions(self, min_array):
self._min_microregions = min_array
##
# @brief Returns numpy array of maximum values of microregion ranges within
# each macroregion
# @return numpy array of maximum values of microregion ranges
def getMaxMicroregions(self):
return self._max_microregions
##
# @brief Sets maximum values of microregion ranges within each macroregion
# @param max_array numpy array of maximum values of microregion ranges
def setMaxMicroregions(self, max_array):
self._max_microregions = max_array
##
# @brief parses and sets microregion value numpy arrays
def importMicroregions(self):
min_array, max_array = self.parseMicroregions()
self.setMinMicroregions(min_array)
self.setMaxMicroregions(max_array)
##
# @brief This method parses the casmo output file for reference eigenvalue
# @return reference eigenvalue of assembly (float)
def parseKinf(self):
f = open(self._directory + self._filename, 'r')
for line in f:
if 'k-infinity' in line:
tokens = line.split()
kinf = float(tokens[2])
break
f.close()
return kinf
##
# @brief Returns reference eigenvalue of assembly from casmo output file
# @return reference eigenvalue of assembly (float)
def getKinf(self):
return self._kinf
##
# @brief Sets reference eigenvalue of assembly
# @param kinf the reference eigenvalue to be set for the assembly
def setKinf(self, kinf):
self._kinf = kinf
##
# @brief parses and sets eigenvalue of assembly
def importKinf(self):
self.setKinf(self.parseKinf())
##
# @brief This method parses the casmo output file for reference pin powers
# @return numpy array of float-valued reference pin powers of assembly
def parsePinPowers(self):
f = open(self._directory + self._filename, 'r')
half_width = (self._width+1)/2
pin_power_array = numpy.zeros((self._width,self._width), dtype=numpy.float32)
quadrant4 = numpy.zeros((half_width,half_width), dtype=numpy.float32)
counter = 0
#check for symmetry
self.checkSymmetry(f)
for line in f:
if counter >= 1 and line == '\n':
break
if 'Power Distribution' in line:
counter += 1
continue
if self._is_symmetric:
if counter >= 1:
powers = line.split()
for index, power in enumerate(powers):
power = power.strip('*')
quadrant4[counter-1, index] = float(power)
quadrant4[index, counter-1] = float(power)
counter += 1
# Arranges section of pin powers into larger array by symmetry
pin_power_array[(half_width-1):,(half_width-1):] = quadrant4
pin_power_array[(half_width-1):, 0:(half_width)] = numpy.fliplr(quadrant4)
pin_power_array[0:(half_width), (half_width-1):] = numpy.flipud(quadrant4)
pin_power_array[0:(half_width), 0:(half_width)] = numpy.flipud(numpy.fliplr(quadrant4))
else:
if counter >= 1:
powers = line.split()
for index, power in enumerate(powers):
power = power.strip('*')
pin_power_array[counter-1, index] = float(power)
counter+=1
f.close()
return pin_power_array
##
# @brief Returns reference pin powers of assembly from casmo output file
# @return numpy array of float valued reference pin powers of assembly
def getPinPowers(self):
return self._pin_powers
##
# @brief Sets reference pin powers of assembly
# @param pin_power_array numpy array of float-valued reference pin powers
def setPinPowers(self, pin_power_array):
self._pin_powers = pin_power_array
##
# @brief parses and sets pin powers of assembly
def importPinPowers(self):
self.setPinPowers(self.parsePinPowers())
##
# @brief Returns dictionary of cell types associated with each id number
# @return dictionary of cell types keyed by id number, int --> string
def getCellTypes(self):
return self._cell_types
##
# @brief Sets a cell type and cell type id key-value pair
# @param cell_types_id id number for a certain cell type (int)
# @param name name of a specific cell type associated with an id number (string)
def setCellType(self, cell_types_id, name):
self._cell_types[cell_types_id] = name
##
# @brief This method parses the casmo output file for the type of material in
# each cell
# @return numpy array of int-valued cell types
def parseCellTypeArray(self):
half_width = (self._width+1)/2
full_width = self._width
cell_type_array = numpy.zeros((full_width,full_width), dtype=numpy.int32)
quadrant4 = numpy.zeros((half_width,half_width), dtype=numpy.int32)
counter = 0
f = open(self._directory + self._filename, 'r')
#check for symmetry
self.checkSymmetry(f)
for line in f:
if counter >=1 and line == '\n':
break
if 'Layout' in line:
counter += 1
continue
if counter >= 1:
cell_types = line.split()
for index, cell_type in enumerate(cell_types):
cell_type = cell_type.strip('*')
if self._is_symmetric:
quadrant4[counter-1, index] = int(cell_type)
else:
cell_type_array[counter-1, index] = int(cell_type)
counter += 1
f.close()
if self._is_symmetric:
# Arranges section of cell types into larger array by symmetry
cell_type_array[(half_width-1):,(half_width-1):] = quadrant4
cell_type_array[(half_width-1):, 0:(half_width)] = numpy.fliplr(quadrant4)
cell_type_array[0:(half_width), (half_width-1):] = numpy.flipud(quadrant4)
cell_type_array[0:(half_width), 0:(half_width)] = numpy.flipud(numpy.fliplr(quadrant4))
cell_type_array[half_width-1,half_width-1] = 2
return cell_type_array
##
# @brief Returns array of cell type ids for assembly
# @return array of cell types for every cell in assembly
def getCellTypeArray(self):
return self._cell_type_array
##
# @brief Sets array of cell type ids for assembly
# @param cell_type_array numpy array of int-valued cell type ids
def setCellTypeArray(self, cell_type_array):
self._cell_type_array = cell_type_array
##
# @brief parses and sets cell type ids for assembly
def importCellTypeArray(self):
self.setCellTypeArray(self.parseCellTypeArray())
##
# @brief This method converts the numerical cell type array to strings that
# indicate the cell type in clearer language
# @return numpy array of cell types as strings
def stringCellTypeArray(self):
#id of 1 corresponds to fuel (string of fuel)
#id of 2 corresponds to guide tube (string of gt)
#id of 3 corresponds to burnable poison (string of bp)
string_cell_type_array = numpy.zeros((self._width,self._width), dtype=numpy.str)
for i, row in enumerate(self._cell_type_array):
for j, cell in enumerate(row):
if self._cell_type_array[i,j] in self._cell_types.keys():
string_cell_type_array[i,j] = self._cell_types[self._cell_type_array[i,j]]
else:
log.py_printf('WARNING', 'Cell type id %d does not exist. Call'
' setCellTypes to set cell name for id.', self._cell_type_array[i,j])
return string_cell_type_array
##
# @brief Returns array of cell types as strings for assembly
# @return array of cell types as strings for assembly
def getStringCellTypeArray(self):
return self._string_cell_type_array
##
# @brief Sets array of cell types as strings for assembly
# @param string_cell_type_array array of cell types as strings
def setStringCellTypeArray(self, string_cell_type_array):
self._string_cell_type_array = string_cell_type_array
##
# @brief This method calls the Casmo import methods necessary to construct
# the geometry of an assembly in OpenMOC
# @param filename filename of casmo output file to be parsed
# @param directory directory of casmo output file to be parsed
def importFromCasmo(self, filename, directory):
self._filename = filename
self._directory = directory
self.importEnergyGroups()
self.importNumRegions()
self.importAllXS()
self.importWidth()
self.importMicroregions()
self.importKinf()
self.importPinPowers()
self.importCellTypeArray()
self.importFuelPinRadii()
self.importLatticePitch()
##
# @brief This method exports all data contained within member variables
# of the Casmo object to an hdf5 data file; array-valued members are stored as datasets
# @param filename filename of hdf5 data file
# @param directory directory where hdf5 data file will be stored
def export(self, directory = 'casmo-data/', filename = 'casmo-data.h5'):
if not os.path.exists(directory):
os.makedirs(directory)
f = h5py.File(directory + filename, 'w')
f.attrs['Energy Groups'] = self._energy_groups
f.attrs['Assembly Width'] = self._width
f.attrs['Num Microregions'] = self._num_micro_regions
f.attrs['Fuel Pin Radii'] = self._fuel_pin_rad
f.attrs['Lattice Pitch'] = self._lattice_pitch
big_data = f.create_group('Casmo Data')
big_data.create_dataset('K-Infinity', data=self._kinf)
big_data.create_dataset('Total XS', data=self._sigt)
big_data.create_dataset('Absorption XS', data=self._siga)
big_data.create_dataset('Fission XS', data=self._sigf)
big_data.create_dataset('Nu Fission XS', data=self._signf)
big_data.create_dataset('Scattering XS', data=self._sigs)
big_data.create_dataset('Dif Coefficient', data=self._sigd)
big_data.create_dataset('Chi', data=self._chi)
big_data.create_dataset('Pin Powers', data=self._pin_powers)
big_data.create_dataset('Cell Types', data=self._cell_type_array)
big_data.create_dataset('String Cell Types', data=self._string_cell_type_array)
big_data.create_dataset('Min Microregions', data=self._min_microregions)
big_data.create_dataset('Max Microregions', data=self._max_microregions)
f.close()
##
# @brief This method imports data from an hdf5 data file and assigns it
# to the corresponding member variables
# @param filename filename of hdf5 data file
# @param directory directory where hdf5 data file is stored
def importFromHDF5(self, directory = 'casmo-data/', filename = 'casmo-data.h5'):
f = h5py.File(directory + filename, 'r')
self._directory = directory
self._filename = filename
self._energy_groups = f.attrs['Energy Groups']
self._kinf = f.attrs['K-Infinity']
self._width = f.attrs['Assembly Width']
self._num_micro_regions = f.attrs['Num Microregions']
self._fuel_pin_rad = f.attrs['Fuel Pin Radii']
self._lattice_pitch = f.attrs['Lattice Pitch']
self._sigt = f['Casmo Data']['Total XS'][...]
self._siga = f['Casmo Data']['Absorption XS'][...]
self._sigf = f['Casmo Data']['Fission XS'][...]
self._signf = f['Casmo Data']['Nu Fission XS'][...]
self._sigs = f['Casmo Data']['Scattering XS'][...]
self._sigd = f['Casmo Data']['Dif Coefficient'][...]
self._chi = f['Casmo Data']['Chi'][...]
self._pin_powers = f['Casmo Data']['Pin Powers'][...]
self._cell_type_array = f['Casmo Data']['Cell Types'][...]
self._min_microregions = f['Casmo Data']['Min Microregions'][...]
self._max_microregions = f['Casmo Data']['Max Microregions'][...]
f.close()
##
# @brief This method exports only the cross-section arrays contained within
# member variables of the Casmo object to an hdf5 data file
# @param assembly_name name of assembly for materials being exported
# @param directory directory where hdf5 data file will be stored
def exportAllXSToHDF5(self, assembly_name, directory = 'casmo-data'):
if not os.path.exists(directory):
os.makedirs(directory)
f = h5py.File(directory + '/' + assembly_name + '-all-materials.hdf5','w')
f.attrs['Energy Groups'] = self._energy_groups
for region in range(self._num_micro_regions):
material = f.create_group('microregion-' + str((region + 1)))
material.create_dataset('Total XS', data=self._sigt[region, :])
material.create_dataset('Absorption XS', data=self._siga[region, :])
material.create_dataset('Fission XS', data=self._sigf[region, :])
material.create_dataset('Nu Fission XS', data=self._signf[region, :])
material.create_dataset('Scattering XS', data=numpy.ravel(self._sigs[region, :, :]))
material.create_dataset('Dif Coefficient', data=self._sigd[region, :])
material.create_dataset('Chi', data=self._chi[region, :])
f.close()
##
# @brief This method exports the material-averaged cross-section arrays contained within
# member variables of the Casmo object to an hdf5 data file
# @param assembly_name name of assembly for materials being exported
# @param directory directory where hdf5 data file will be stored
def exportAvgXSToHDF5(self, assembly_name, directory = 'casmo-data'):
#check if cross sections have been computed
if not self._average_cross_sections:
log.py_printf('WARNING', 'Average Cross Sections do not exist. Call'
' averageXSGenerator to compute them.')
else:
#create/set directory in which to store hdf5 file
if not os.path.exists(directory):
os.makedirs(directory)
f = h5py.File(directory + '/' + assembly_name + '-avg-materials.hdf5','w')
f.attrs['Energy Groups'] = self._energy_groups
#create an hdf5 dataset to store each average cross section
for material in self._average_cross_sections.keys():
material_group = f.create_group(material)
for xs_type in self._average_cross_sections[material].keys():
material_group.create_dataset(xs_type,data=self._average_cross_sections[material][xs_type])
f.close()
##
# @brief This method computes material-averaged cross sections (an unweighted
# arithmetic mean over the microregions assigned to each material) from the
# cross sections parsed from the output file
def averageXSGenerator(self):
materials = ['fuel','water','cladding','helium']
#check for burnable poisons
if 'b' in self._string_cell_type_array:
materials.extend(['bp','ss304'])
#create dictionary of variables
variable_dict = {'Absorption XS':self._siga,'Dif Coefficient':self._sigd,
'Total XS':self._sigt,'Fission XS':self._sigf,'Nu Fission XS':self._signf,
'Scattering XS':self._sigs,'Chi':self._chi}
#create dictionary of values
val_dict = {}
#compute average cross section for each material
for material in materials:
val_dict[material] = {}
for xs_type in variable_dict.keys():
val_dict[material][xs_type] = []
for i in range(len(self._string_cell_type_array)):
for j in range(len(self._string_cell_type_array[i])):
for xs_type in variable_dict.keys():
#if pin cell is guide tube
if self._string_cell_type_array[i][j]=='g':
val_dict['water'][xs_type].append(variable_dict[xs_type][self._min_microregions[i][j]-1])
val_dict['cladding'][xs_type].append(variable_dict[xs_type][self._min_microregions[i][j]])
for k in range(self._min_microregions[i][j]+1,self._max_microregions[i][j]):
val_dict['water'][xs_type].append(variable_dict[xs_type][k])
#if pin cell is fuel
elif self._string_cell_type_array[i][j]=='f':
val_dict['fuel'][xs_type].append(variable_dict[xs_type][self._min_microregions[i][j]-1])
val_dict['helium'][xs_type].append(variable_dict[xs_type][self._min_microregions[i][j]])
val_dict['cladding'][xs_type].append(variable_dict[xs_type][self._min_microregions[i][j]+1])
for k in range(self._min_microregions[i][j]+2,self._max_microregions[i][j]):
val_dict['water'][xs_type].append(variable_dict[xs_type][k])
#if pin cell is burnable poison
elif self._string_cell_type_array[i][j]=='b':
val_dict['helium'][xs_type].append(variable_dict[xs_type][self._min_microregions[i][j]-1])
val_dict['ss304'][xs_type].append(variable_dict[xs_type][self._min_microregions[i][j]])
val_dict['helium'][xs_type].append(variable_dict[xs_type][self._min_microregions[i][j]+1])
val_dict['bp'][xs_type].append(variable_dict[xs_type][self._min_microregions[i][j]+2])
val_dict['helium'][xs_type].append(variable_dict[xs_type][self._min_microregions[i][j]+3])
val_dict['ss304'][xs_type].append(variable_dict[xs_type][self._min_microregions[i][j]+4])
val_dict['water'][xs_type].append(variable_dict[xs_type][self._min_microregions[i][j]+5])
val_dict['cladding'][xs_type].append(variable_dict[xs_type][self._min_microregions[i][j]+6])
for k in range(self._min_microregions[i][j]+7,self._max_microregions[i][j]):
val_dict['water'][xs_type].append(variable_dict[xs_type][k])
avg_dict = {}
#add avg cross sections to dictionary
for material in materials:
avg_dict[material] = {}
for xs_type in variable_dict.keys():
avg_dict[material][xs_type] = []
for group in range(self._energy_groups):
numerator = sum([e[group] for e in val_dict[material][xs_type]])
denominator = float(len(val_dict[material][xs_type]))
if xs_type == 'Scattering XS':
avg_dict[material][xs_type].extend(numerator/denominator)
else:
avg_dict[material][xs_type].append(numerator/denominator)
self._average_cross_sections = avg_dict
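##
# Example usage (illustrative sketch, not invoked on import): parse a casmo
# output file, label the cell type ids, and export the parsed data to hdf5.
# The assembly name, file name, and directory below are placeholders.
if __name__ == '__main__':
    casmo = Casmo()
    casmo.setAssemblyName('assembly')                        # placeholder name
    casmo.importFromCasmo('assembly.out', 'casmo-output/')   # placeholder file/directory
    # Map cell type ids to the single-character names used by averageXSGenerator
    casmo.setCellType(1, 'f')  # fuel
    casmo.setCellType(2, 'g')  # guide tube
    casmo.setCellType(3, 'b')  # burnable poison
    casmo.setStringCellTypeArray(casmo.stringCellTypeArray())
    casmo.export()                                           # writes casmo-data/casmo-data.h5
    casmo.exportAllXSToHDF5(casmo.getAssemblyName())
    casmo.averageXSGenerator()
    casmo.exportAvgXSToHDF5(casmo.getAssemblyName())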
|
{"hexsha": "e18058b718c694eb84a9ffb485e1cd13f6f45e10", "size": 32974, "ext": "py", "lang": "Python", "max_stars_repo_path": "openmoc/compatible/casmo.py", "max_stars_repo_name": "AI-Pranto/OpenMOC", "max_stars_repo_head_hexsha": "7f6ce4797aec20ddd916981a56a4ba54ffda9a06", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 97, "max_stars_repo_stars_event_min_datetime": "2015-01-02T02:13:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T14:12:45.000Z", "max_issues_repo_path": "openmoc/compatible/casmo.py", "max_issues_repo_name": "AI-Pranto/OpenMOC", "max_issues_repo_head_hexsha": "7f6ce4797aec20ddd916981a56a4ba54ffda9a06", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 325, "max_issues_repo_issues_event_min_datetime": "2015-01-07T17:43:14.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-21T17:22:00.000Z", "max_forks_repo_path": "openmoc/compatible/casmo.py", "max_forks_repo_name": "AI-Pranto/OpenMOC", "max_forks_repo_head_hexsha": "7f6ce4797aec20ddd916981a56a4ba54ffda9a06", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 73, "max_forks_repo_forks_event_min_datetime": "2015-01-17T19:11:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T16:31:37.000Z", "avg_line_length": 35.7248104009, "max_line_length": 104, "alphanum_fraction": 0.6643719294, "include": true, "reason": "import numpy", "num_tokens": 8437}
|
function fig()
figure
|
{"author": "Sable", "repo": "mcbench-benchmarks", "sha": "ba13b2f0296ef49491b95e3f984c7c41fccdb6d8", "save_path": "github-repos/MATLAB/Sable-mcbench-benchmarks", "path": "github-repos/MATLAB/Sable-mcbench-benchmarks/mcbench-benchmarks-ba13b2f0296ef49491b95e3f984c7c41fccdb6d8/18016-video-demonstration-of-how-to-use-matlab-excel-builder/fig.m"}
|
!------------------------------------------------------------------------!
! The Community Multiscale Air Quality (CMAQ) system software is in !
! continuous development by various groups and is based on information !
! from these groups: Federal Government employees, contractors working !
! within a United States Government contract, and non-Federal sources !
! including research institutions. These groups give the Government !
! permission to use, prepare derivative works of, and distribute copies !
! of their work in the CMAQ system to the public and to permit others !
! to do so. The United States Environmental Protection Agency !
! therefore grants similar permission to use the CMAQ system software, !
! but users are requested to provide copies of derivative works or !
! products designed to operate in the CMAQ system to the United States !
! Government without restrictions as to use by others. Software !
! that is used with the CMAQ system but distributed under the GNU !
! General Public License or the GNU Lesser General Public License is !
! subject to their copyright restrictions. !
!------------------------------------------------------------------------!
C RCS file, release, date & time of last delta, author, state, [and locker]
C $Header: /project/work/rep/STENEX/src/noop_f90/noop_global_sum_module.f,v 1.1.1.1 2000/04/12 17:40:55 yoj Exp $
C what(1) key, module and SID; SCCS file; date and time of last delta:
C %W% %P% %G% %U%
C --------------------------------------------------------------------------
C Purpose:
C
C use F90 interface feature to achieve "faked" polymorphism for noop global
C sum routine
C
C Revision history:
C
C Original version: 11/05/99 by David Wong
C -----------------------------------------------------------------------------
module noop_global_sum_module
implicit none
interface noop_global_sum
module procedure noop_global_isum, noop_global_rsum
end interface
contains
C -----------------------------------------------------------------------------
C Purpose: a noop counterpart of se_global_isum which determines the global
C integer sum
C
C Revision history:
C
C Original version: 11/16/98 by David Wong
C 11/05/99 by David Wong
C -- recode using F90 syntax
C
C Parameter List:
C
C In: var -- sum variable
C -----------------------------------------------------------------------------
function noop_global_isum (var) result (noop_global_isum_result)
implicit none
integer, intent(in) :: var
integer :: noop_global_isum_result
noop_global_isum_result = var
end function noop_global_isum
C -----------------------------------------------------------------------------
C Purpose: a noop counterpart of se_global_rsum which determines the global
C real sum
C
C Revision history:
C
C Original version: 11/16/98 by David Wong
C 11/05/99 by David Wong
C -- recode using F90 syntax
C
C Parameter List:
C
C In: var -- sum variable
C -----------------------------------------------------------------------------
function noop_global_rsum (var) result (noop_global_rsum_result)
implicit none
real, intent(in) :: var
real :: noop_global_rsum_result
noop_global_rsum_result = var
end function noop_global_rsum
end module noop_global_sum_module
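C -----------------------------------------------------------------------------
C Usage sketch (illustrative only): the generic interface noop_global_sum
C resolves to the integer or real routine and simply returns its argument
C unchanged. The program name and literal values below are placeholders.
C -----------------------------------------------------------------------------
      program demo_noop_global_sum
      use noop_global_sum_module
      implicit none
      integer :: itotal
      real :: rtotal
      itotal = noop_global_sum (7)
      rtotal = noop_global_sum (2.5)
      write (*, *) itotal, rtotal
      end program demo_noop_global_sum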
|
{"hexsha": "e2e2c2c840c8459d2c6b39865e242875949a835d", "size": 3577, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "CCTM/src/STENEX/noop/noop_global_sum_module.f", "max_stars_repo_name": "Simeng-unique/CMAQ-changed", "max_stars_repo_head_hexsha": "cb83401728ed7ea1bb19a6986c0acc84dabe11a4", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 203, "max_stars_repo_stars_event_min_datetime": "2017-02-04T18:01:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T09:09:00.000Z", "max_issues_repo_path": "CCTM/src/STENEX/noop/noop_global_sum_module.f", "max_issues_repo_name": "Simeng-unique/CMAQ-changed", "max_issues_repo_head_hexsha": "cb83401728ed7ea1bb19a6986c0acc84dabe11a4", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": 54, "max_issues_repo_issues_event_min_datetime": "2017-01-03T21:40:27.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-04T19:03:53.000Z", "max_forks_repo_path": "CCTM/src/STENEX/noop/noop_global_sum_module.f", "max_forks_repo_name": "Simeng-unique/CMAQ-changed", "max_forks_repo_head_hexsha": "cb83401728ed7ea1bb19a6986c0acc84dabe11a4", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 170, "max_forks_repo_forks_event_min_datetime": "2016-11-09T22:30:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T03:21:59.000Z", "avg_line_length": 35.77, "max_line_length": 113, "alphanum_fraction": 0.5672351132, "num_tokens": 745}
|