!> \brief \b DLASSQ updates a sum of squares represented in scaled form.
!
! =========== DOCUMENTATION ===========
!
! Online html documentation available at
! http://www.netlib.org/lapack/explore-html/
!
!> \htmlonly
!> Download DLASSQ + dependencies
!> <a href="http://www.netlib.org/cgi-bin/netlibfiles.tgz?format=tgz&filename=/lapack/lapack_routine/dlassq.f90">
!> [TGZ]</a>
!> <a href="http://www.netlib.org/cgi-bin/netlibfiles.zip?format=zip&filename=/lapack/lapack_routine/dlassq.f90">
!> [ZIP]</a>
!> <a href="http://www.netlib.org/cgi-bin/netlibfiles.txt?format=txt&filename=/lapack/lapack_routine/dlassq.f90">
!> [TXT]</a>
!> \endhtmlonly
!
! Definition:
! ===========
!
! SUBROUTINE DLASSQ( N, X, INCX, SCALE, SUMSQ )
!
! .. Scalar Arguments ..
! INTEGER INCX, N
! DOUBLE PRECISION SCALE, SUMSQ
! ..
! .. Array Arguments ..
! DOUBLE PRECISION X( * )
! ..
!
!
!> \par Purpose:
! =============
!>
!> \verbatim
!>
!> DLASSQ returns the values scl and smsq such that
!>
!> ( scl**2 )*smsq = x( 1 )**2 +...+ x( n )**2 + ( scale**2 )*sumsq,
!>
!> where x( i ) = X( 1 + ( i - 1 )*INCX ). The value of sumsq is
!> assumed to be non-negative.
!>
!> scale and sumsq must be supplied in SCALE and SUMSQ and
!> scl and smsq are overwritten on SCALE and SUMSQ respectively.
!>
!> If scale * sqrt( sumsq ) > tbig then
!> we require: scale >= sqrt( TINY*EPS ) / sbig on entry,
!> and if 0 < scale * sqrt( sumsq ) < tsml then
!> we require: scale <= sqrt( HUGE ) / ssml on entry,
!> where
!> tbig -- upper threshold for values whose square is representable;
!> sbig -- scaling constant for big numbers; \see la_constants.f90
!> tsml -- lower threshold for values whose square is representable;
!> ssml -- scaling constant for small numbers; \see la_constants.f90
!> and
!> TINY*EPS -- tiniest representable number;
!> HUGE -- biggest representable number.
!>
!> \endverbatim
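!>
!> For example, a safely scaled two-norm of x can be accumulated as follows
!> (an illustrative sketch, not part of the reference documentation):
!>
!> \verbatim
!>    scl   = 1.0d0
!>    sumsq = 0.0d0
!>    call DLASSQ( n, x, 1, scl, sumsq )
!>    nrm2  = scl*sqrt( sumsq )
!> \endverbatim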
!
! Arguments:
! ==========
!
!> \param[in] N
!> \verbatim
!> N is INTEGER
!> The number of elements to be used from the vector x.
!> \endverbatim
!>
!> \param[in] X
!> \verbatim
!> X is DOUBLE PRECISION array, dimension (1+(N-1)*abs(INCX))
!> The vector for which a scaled sum of squares is computed.
!> x( i ) = X( 1 + ( i - 1 )*INCX ), 1 <= i <= n.
!> \endverbatim
!>
!> \param[in] INCX
!> \verbatim
!> INCX is INTEGER
!> The increment between successive values of the vector x.
!> If INCX > 0, X(1+(i-1)*INCX) = x(i) for 1 <= i <= n
!> If INCX < 0, X(1-(n-i)*INCX) = x(i) for 1 <= i <= n
!> If INCX = 0, x isn't a vector so there is no need to call
!> this subroutine. If you call it anyway, it will count x(1)
!> in the vector norm N times.
!> \endverbatim
!>
!> \param[in,out] SCALE
!> \verbatim
!> SCALE is DOUBLE PRECISION
!> On entry, the value scale in the equation above.
!> On exit, SCALE is overwritten with scl , the scaling factor
!> for the sum of squares.
!> \endverbatim
!>
!> \param[in,out] SUMSQ
!> \verbatim
!> SUMSQ is DOUBLE PRECISION
!> On entry, the value sumsq in the equation above.
!> On exit, SUMSQ is overwritten with smsq , the basic sum of
!> squares from which scl has been factored out.
!> \endverbatim
!
! Authors:
! ========
!
!> \author Edward Anderson, Lockheed Martin
!
!> \par Contributors:
! ==================
!>
!> Weslley Pereira, University of Colorado Denver, USA
!> Nick Papior, Technical University of Denmark, DK
!
!> \par Further Details:
! =====================
!>
!> \verbatim
!>
!> Anderson E. (2017)
!> Algorithm 978: Safe Scaling in the Level 1 BLAS
!> ACM Trans Math Softw 44:1--28
!> https://doi.org/10.1145/3061665
!>
!> Blue, James L. (1978)
!> A Portable Fortran Program to Find the Euclidean Norm of a Vector
!> ACM Trans Math Softw 4:15--23
!> https://doi.org/10.1145/355769.355771
!>
!> \endverbatim
!
!> \ingroup OTHERauxiliary
!
! =====================================================================
subroutine DLASSQ( n, x, incx, scl, sumsq )
use LA_CONSTANTS, &
only: wp=>dp, zero=>dzero, one=>done, &
sbig=>dsbig, ssml=>dssml, tbig=>dtbig, tsml=>dtsml
use LA_XISNAN
!
! -- LAPACK auxiliary routine --
! -- LAPACK is a software package provided by Univ. of Tennessee, --
! -- Univ. of California Berkeley, Univ. of Colorado Denver and NAG Ltd..--
!
! .. Scalar Arguments ..
integer :: incx, n
real(wp) :: scl, sumsq
! ..
! .. Array Arguments ..
real(wp) :: x(*)
! ..
! .. Local Scalars ..
integer :: i, ix
logical :: notbig
real(wp) :: abig, amed, asml, ax, ymax, ymin
! ..
!
! Quick return if possible
!
if( LA_ISNAN(scl) .or. LA_ISNAN(sumsq) ) return
if( sumsq == zero ) scl = one
if( scl == zero ) then
scl = one
sumsq = zero
end if
if (n <= 0) then
return
end if
!
! Compute the sum of squares in 3 accumulators:
! abig -- sums of squares scaled down to avoid overflow
! asml -- sums of squares scaled up to avoid underflow
! amed -- sums of squares that do not require scaling
! The thresholds and multipliers are
! tbig -- values bigger than this are scaled down by sbig
! tsml -- values smaller than this are scaled up by ssml
!
notbig = .true.
asml = zero
amed = zero
abig = zero
ix = 1
if( incx < 0 ) ix = 1 - (n-1)*incx
do i = 1, n
ax = abs(x(ix))
if (ax > tbig) then
abig = abig + (ax*sbig)**2
notbig = .false.
else if (ax < tsml) then
if (notbig) asml = asml + (ax*ssml)**2
else
amed = amed + ax**2
end if
ix = ix + incx
end do
!
! Put the existing sum of squares into one of the accumulators
!
if( sumsq > zero ) then
ax = scl*sqrt( sumsq )
if (ax > tbig) then
! We assume scl >= sqrt( TINY*EPS ) / sbig
abig = abig + (scl*sbig)**2 * sumsq
else if (ax < tsml) then
! We assume scl <= sqrt( HUGE ) / ssml
if (notbig) asml = asml + (scl*ssml)**2 * sumsq
else
amed = amed + scl**2 * sumsq
end if
end if
!
! Combine abig and amed or amed and asml if more than one
! accumulator was used.
!
if (abig > zero) then
!
! Combine abig and amed if abig > 0.
!
if (amed > zero .or. LA_ISNAN(amed)) then
abig = abig + (amed*sbig)*sbig
end if
scl = one / sbig
sumsq = abig
else if (asml > zero) then
!
! Combine amed and asml if asml > 0.
!
if (amed > zero .or. LA_ISNAN(amed)) then
amed = sqrt(amed)
asml = sqrt(asml) / ssml
if (asml > amed) then
ymin = amed
ymax = asml
else
ymin = asml
ymax = amed
end if
scl = one
sumsq = ymax**2*( one + (ymin/ymax)**2 )
else
scl = one / ssml
sumsq = asml
end if
else
!
! Otherwise all values are mid-range or zero
!
scl = one
sumsq = amed
end if
return
end subroutine
|
{"hexsha": "fddd1bf38f0958891627f2bf452fb6829401ab0a", "size": 7189, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "SRC/dlassq.f90", "max_stars_repo_name": "quellyn/lapack", "max_stars_repo_head_hexsha": "79aa0f2e0641cd48b27c7fc9a96922bf033193fa", "max_stars_repo_licenses": ["BSD-3-Clause-Open-MPI"], "max_stars_count": 998, "max_stars_repo_stars_event_min_datetime": "2016-05-04T20:50:36.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T17:43:13.000Z", "max_issues_repo_path": "SRC/dlassq.f90", "max_issues_repo_name": "quellyn/lapack", "max_issues_repo_head_hexsha": "79aa0f2e0641cd48b27c7fc9a96922bf033193fa", "max_issues_repo_licenses": ["BSD-3-Clause-Open-MPI"], "max_issues_count": 603, "max_issues_repo_issues_event_min_datetime": "2016-06-04T07:28:21.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-21T21:16:50.000Z", "max_forks_repo_path": "SRC/dlassq.f90", "max_forks_repo_name": "quellyn/lapack", "max_forks_repo_head_hexsha": "79aa0f2e0641cd48b27c7fc9a96922bf033193fa", "max_forks_repo_licenses": ["BSD-3-Clause-Open-MPI"], "max_forks_count": 373, "max_forks_repo_forks_event_min_datetime": "2016-06-04T06:05:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T17:25:45.000Z", "avg_line_length": 28.3031496063, "max_line_length": 113, "alphanum_fraction": 0.5651690082, "num_tokens": 2334}
|
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 21 10:31:38 2021
@author: crisprhhx
"""
import os
import pandas as pd
import numpy as np
import tensorflow as tf
import keras
from keras import backend as K
from skimage import io
from keras.applications.vgg16 import VGG16
import matplotlib.pyplot as plt
from keras import Sequential
from keras.models import Model
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Dense,GlobalAveragePooling2D
def get_vgg16():
base_model = VGG16(weights=None,include_top=False,input_shape=(256,256,4))
globpool_layer = GlobalAveragePooling2D()
globpool_layer_tensor = globpool_layer(base_model.output)
densehidden_layer = Dense(256,activation='relu',name = 'featurelayer')
densehidden_layer_tensor = densehidden_layer(globpool_layer_tensor)
denseout_layer = Dense(2,activation='sigmoid')
denseout_layer_tensor = denseout_layer(densehidden_layer_tensor)
model = Model(inputs=base_model.input,outputs=denseout_layer_tensor)
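    # Freeze every layer, then unfreeze only the top three (the pooling layer and
    # the two dense layers; GlobalAveragePooling2D has no weights, so in effect
    # only the dense layers are fine-tuned).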
for layer in model.layers:
layer.trainable = False
model.layers[-1].trainable = True
model.layers[-2].trainable = True
model.layers[-3].trainable = True
return model
def rescale(x, Max=1, Min=0):
x_std = (x-x.min())/(x.max() - x.min())
x_scaled = x_std * (Max - Min) + Min
return x_scaled
def data_PathWrapper(image_data_root, mask_data_root):
    # Returns a DataFrame of each patient's MRI & mask paths and the corresponding label.
data_map = []
for sub_dir in os.listdir(image_data_root):
try:
label = sub_dir
sub_dir_path = image_data_root + sub_dir
            for patientfolder in os.listdir(sub_dir_path):
                for imagefile in os.listdir(sub_dir_path+'/'+patientfolder+'/'):
                    image_path = sub_dir_path+'/'+patientfolder+'/'+imagefile
                    mask_path = mask_data_root+label+'/'+patientfolder+'/'+imagefile[:-4]+'_mask.txt'
                    data_map.extend([patientfolder, image_path, mask_path, label])
except Exception as e:
print(e)
    # b = a[i:j:s], where s is the step. The flat data_map list is sliced with a
    # stride of 4 to recover each field, since the DataFrame is built from a dict.
df = pd.DataFrame({"patient_id":data_map[::4],
"image_path":data_map[1::4],
"mask_path":data_map[2::4],
"label":data_map[3::4]})
return df
class DataGenerator(tf.keras.utils.Sequence):
def __init__(self, img_path_list, mask_path_list, label_list, batch_size = 10, img_h = 256, img_w = 256, shuffle = True):
self.img_path_list = img_path_list
self.mask_path_list = mask_path_list
self.label_list = label_list
self.batch_size = batch_size
self.img_h = img_h
self.img_w = img_w
self.shuffle = shuffle
self.on_epoch_end()
def __len__(self):
'Get the number of batches per epoch'
        return int(np.floor(len(self.img_path_list) / self.batch_size)) # number of batches per epoch (4 for this dataset)
def on_epoch_end(self):
        'Update (and optionally shuffle) the indices, once at initialization and after each epoch'
#getting the array of indices based on the input dataframe
self.indexes = np.arange(len(self.img_path_list))
if self.shuffle:
np.random.shuffle(self.indexes)
def __getitem__(self, index):
        '''
        If a class defines __getitem__, an instance (say p) can be indexed as
        p[key]; evaluating p[key] calls the class's __getitem__ method. Define
        __getitem__(self, key) whenever index-based element access is wanted.
        https://blog.csdn.net/chituozha5528/article/details/78354833
        '''
#generate index of batch_size length
indexes = self.indexes[index* self.batch_size : (index+1) * self.batch_size]
#get the Image path corresponding to the indexes created above based on batch size
list_imgs = [self.img_path_list[i] for i in indexes]
#get the Mask path corresponding to the indexes created above based on batch size
list_masks = [self.mask_path_list[i] for i in indexes]
        #get the corresponding labels
list_labels = [self.label_list[i] for i in indexes]
#generate data for X and y
X, y = self.__data_generation(list_imgs, list_masks, list_labels)
return X, y
def __data_generation(self, list_imgs, list_masks, list_labels):
"generate the data corresponding the indexes in a given batch of images"
# create empty arrays of shape (batch_size,height,width,depth)
#Depth is 4 for input(3 for the RGB channels and 1 for the mask concatenated)
#and depth is taken as 1 for output becasue mask consist only of 1 channel.
X = np.empty((self.batch_size, self.img_h, self.img_w, 4))
y = []
#iterate through the data rows
for i in range(len(list_imgs)):
#get the corresponding path
img_path = list_imgs[i]
mask_path = list_masks[i]
#get raw data
img = io.imread(img_path)
mask = np.loadtxt(mask_path)
label = float(list_labels[i])
#convert img to numpy array of type float64
img = np.array(img, dtype = np.float64)
img = rescale(img)
mask = np.expand_dims(mask, axis = -1)
            #>>>>>DEBUG>>>>>
            # Check whether the img array values are distributed in 0-255.
            # Check whether the mask array values are distributed in 0-255.
            # Should the concatenated result be normalized?
            # Does VGG16 expect input in [0,1] or [-1,1]? Any other requirements?
#standardising
img -= img.mean()
img /= img.std()
mask -= mask.mean()
mask /= mask.std()
#concatenate the image and mask
data_concat = np.concatenate((img, mask), axis = -1)
#<<<<<DEBUG<<<<
X[i,] = data_concat
y.append(label)
y = np.array(y)
y = tf.one_hot(y, depth=2)
return X, y
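

# Illustrative wiring of the helpers above (the paths are hypothetical):
#   df = data_PathWrapper("./images/", "./masks/")
#   gen = DataGenerator(df["image_path"].tolist(), df["mask_path"].tolist(),
#                       df["label"].tolist(), batch_size=10)
#   model = get_vgg16()
#   model.compile(optimizer="adam", loss="categorical_crossentropy")
#   model.fit(gen, epochs=5)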
|
{"hexsha": "2af5933b831504190672cc6d90ccd940708447fd", "size": 6231, "ext": "py", "lang": "Python", "max_stars_repo_path": "xiangyaMedTask/Stage2/utils.py", "max_stars_repo_name": "satoshiSchubert/WorkSpace", "max_stars_repo_head_hexsha": "5558b3573e6b897b6684240ea5497cf08ae35145", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "xiangyaMedTask/Stage2/utils.py", "max_issues_repo_name": "satoshiSchubert/WorkSpace", "max_issues_repo_head_hexsha": "5558b3573e6b897b6684240ea5497cf08ae35145", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "xiangyaMedTask/Stage2/utils.py", "max_forks_repo_name": "satoshiSchubert/WorkSpace", "max_forks_repo_head_hexsha": "5558b3573e6b897b6684240ea5497cf08ae35145", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.5, "max_line_length": 125, "alphanum_fraction": 0.618199326, "include": true, "reason": "import numpy", "num_tokens": 1602}
|
from lenstronomy.LensModel.Optimizer.optimizer import Optimizer
import unittest
import numpy as np
import pytest
class TestSinglePlaneOptimizer(object):
np.random.seed(0)
x_pos_simple,y_pos_simple = np.array([ 0.69190974, -0.58959536, 0.75765166, -0.70329933]),\
np.array([-0.94251661, 1.01956872, 0.45230274, -0.43988017])
magnification_simple = [1., 0.9848458, 0.63069122, 0.54312452]
lens_model_list_simple = ['SPEP', 'SHEAR']
kwargs_lens_simple = [{'theta_E': 0.7, 'center_x': 0.0, 'center_y': 0, 'e1': 0.0185665252864011, 'gamma': 2.,
'e2': 0.08890716633399057}, {'gamma1': 0.00418890660015825, 'gamma2': -0.02908846518073248}]
lens_model_list_subs = lens_model_list_simple + ['NFW'] * 5
kwargs_lens_subs = kwargs_lens_simple + [{'alpha_Rs': 0.005, 'center_y': -0.82, 'center_x': 0.944, 'Rs': 0.13},
{'alpha_Rs': 0.003, 'center_y': -0.24, 'center_x': -1.8, 'Rs': 0.23},
{'alpha_Rs': 0.008, 'center_y': 0.44, 'center_x': -1.8, 'Rs': 0.33},
{'alpha_Rs': 0.0015, 'center_y': 1.04, 'center_x': 0.8, 'Rs': 0.2},
{'alpha_Rs': 0.011, 'center_y': -0.4, 'center_x': 0.18, 'Rs': 0.109}]
optimizer_simple = Optimizer(x_pos_simple, y_pos_simple, magnification_target=magnification_simple, redshift_list=[],
lens_model_list=lens_model_list_simple, kwargs_lens=kwargs_lens_simple, multiplane=False, verbose=True,
optimizer_routine='fixed_powerlaw_shear')
optimizer_subs = Optimizer(x_pos_simple, y_pos_simple, magnification_target=magnification_simple, redshift_list=[],
lens_model_list=lens_model_list_subs, kwargs_lens=kwargs_lens_subs, multiplane=False, verbose=True,
optimizer_routine='fixed_powerlaw_shear')
optimizer_image_plane = Optimizer(x_pos_simple, y_pos_simple, magnification_target=magnification_simple, redshift_list=[],
lens_model_list=lens_model_list_simple, kwargs_lens=kwargs_lens_simple, multiplane=False, verbose=True,
optimizer_routine='fixed_powerlaw_shear', chi2_mode='image', tol_image=0.006, pso_convergence_mean=100)
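    # Three configurations: a bare SPEP+shear model, the same model with five NFW
    # subhalos, and an image-plane chi^2 variant of the bare model.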
def test_single_plane_simple(self):
kwargs_lens, source, [x_image,y_image] = self.optimizer_simple.optimize(n_particles=30, n_iterations=30,restart=2)
mags = self.optimizer_simple._lensModel.magnification(x_image, y_image, kwargs_lens)
kwargs_lens, source, [x_image, y_image] = self.optimizer_simple.optimize(n_particles=5, n_iterations=1,
restart=1)
def test_single_plane_subs(self, tol=0.003, n_restart=2):
kwargs_lens, source, [x_image,y_image] = self.optimizer_subs.optimize(n_particles=30, n_iterations=30,restart=n_restart)
mags = self.optimizer_subs._lensModel.magnification(x_image, y_image, kwargs_lens)
def test_image_plane_chi2(self):
kwargs_lens, source, [x_image, y_image] = self.optimizer_image_plane.optimize(n_particles=20, n_iterations=150, restart=1)
class TestRaise(unittest.TestCase):
def test_raise(self):
test_class = TestSinglePlaneOptimizer()
with self.assertRaises(ValueError):
out = test_class.test_single_plane_subs(n_restart=0)
if __name__ == '__main__':
pytest.main()
|
{"hexsha": "e5b41e646da6e2054f6cd913956ee41163749987", "size": 3643, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/test_LensModel/test_Optimizer/test_single_plane.py", "max_stars_repo_name": "lucateo/lenstronomy", "max_stars_repo_head_hexsha": "3ab6cfd4adea2222f02d3f0f1a9cb5390c533aab", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/test_LensModel/test_Optimizer/test_single_plane.py", "max_issues_repo_name": "lucateo/lenstronomy", "max_issues_repo_head_hexsha": "3ab6cfd4adea2222f02d3f0f1a9cb5390c533aab", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/test_LensModel/test_Optimizer/test_single_plane.py", "max_forks_repo_name": "lucateo/lenstronomy", "max_forks_repo_head_hexsha": "3ab6cfd4adea2222f02d3f0f1a9cb5390c533aab", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 56.921875, "max_line_length": 136, "alphanum_fraction": 0.6415042547, "include": true, "reason": "import numpy", "num_tokens": 947}
|
%!TEX root = ../../main.tex
\subsection{Supervised deep anomaly detection}
\label{sec:supervisedDAD}
Supervised anomaly detection techniques are superior in performance to unsupervised anomaly detection techniques, since they make use of labeled samples~\cite{gornitz2013toward}. Supervised anomaly detection learns a separating boundary from a set of annotated data instances (training) and then classifies a test instance as either normal or anomalous with the learned model (testing).\\
\textbf{Assumptions:}
Deep supervised learning methods depend on separating data classes, whereas unsupervised techniques focus on explaining and understanding the characteristics of the data. Multi-class classification based anomaly detection techniques assume that the training data contains labeled instances of multiple normal classes~\cite{shilton2013combined,jumutc2014multi,kim2015deep,erfani2017shared}, and learn a classifier to distinguish the anomalous class from the rest of the classes. In general, supervised deep learning-based classification schemes for anomaly detection have two sub-networks: a feature extraction network followed by a classifier network. Deep models require an extremely large number of training samples (on the order of thousands or millions) to learn feature representations that effectively discriminate the various class instances. Due to the lack of availability of clean data labels, supervised deep anomaly detection techniques are less popular than semi-supervised and unsupervised methods.
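As a minimal illustration of this two-sub-network scheme (a sketch assuming a Keras-style API; the layer sizes and names are ours, not taken from any surveyed work):
\begin{verbatim}
import tensorflow as tf

def supervised_dad_classifier(input_dim, num_classes):
    # Feature extraction sub-network.
    feature_extractor = tf.keras.Sequential([
        tf.keras.layers.Dense(128, activation="relu",
                              input_shape=(input_dim,)),
        tf.keras.layers.Dense(64, activation="relu"),
    ])
    # Classifier sub-network on top of the learned representation.
    classifier = tf.keras.layers.Dense(num_classes, activation="softmax")
    return tf.keras.Sequential([feature_extractor, classifier])
\end{verbatim}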
\textbf{Computational Complexity:}
The computational complexity of deep supervised anomaly detection methods depends on the dimension of the input data and on the number of hidden layers trained using the back-propagation algorithm. High-dimensional data tend to require more hidden layers to ensure meaningful hierarchical learning of the input features. The computational complexity also increases linearly with the number of hidden layers, requiring greater model training and update time.
\textbf{Advantages and Disadvantages:}
The advantages of supervised DAD techniques are as follows:
\begin{itemize}
\item Supervised DAD methods are more accurate than semi-supervised and unsupervised models.
\item The testing phase of classification based techniques is fast, since each test instance
needs only to be evaluated against the pre-computed model.
\end{itemize}
The disadvantages of supervised DAD techniques are as follows:
\begin{itemize}
\item Multi-class supervised techniques require accurate labels for various normal classes and anomalous instances, which is often not available.
\item Deep supervised techniques fail to separate normal from anomalous data if the feature space is highly complex and non-linear.
\end{itemize}
|
{"hexsha": "2913b12f77f24693ce8115568cde64d0ec316243", "size": 2833, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "ARXIV_DAD_Survey/sections/models/supervised.tex", "max_stars_repo_name": "raghavchalapathy/Deep-Learning-for-Anomaly-Detection-A-Survey", "max_stars_repo_head_hexsha": "aa775990a4b23306885979c4ef8e8cb3ed00441b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 107, "max_stars_repo_stars_event_min_datetime": "2019-01-11T12:06:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-09T12:03:57.000Z", "max_issues_repo_path": "ARXIV_DAD_Survey/sections/models/supervised.tex", "max_issues_repo_name": "raghavchalapathy/Deep-Learning-for-Anomaly-Detection-A-Survey_Arxiv_WorkingDocument", "max_issues_repo_head_hexsha": "aa775990a4b23306885979c4ef8e8cb3ed00441b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ARXIV_DAD_Survey/sections/models/supervised.tex", "max_forks_repo_name": "raghavchalapathy/Deep-Learning-for-Anomaly-Detection-A-Survey_Arxiv_WorkingDocument", "max_forks_repo_head_hexsha": "aa775990a4b23306885979c4ef8e8cb3ed00441b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 27, "max_forks_repo_forks_event_min_datetime": "2019-01-15T02:42:12.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-06T07:59:29.000Z", "avg_line_length": 76.5675675676, "max_line_length": 949, "alphanum_fraction": 0.829509354, "num_tokens": 538}
|
import group_theory.quotient_group
import group_theory.order_of_element
import .simple_group .quotient_group
namespace subgroup
variables {G : Type*} [group G] [fintype G]
@[to_additive]
lemma card_pos : fintype.card G > 0 := fintype.card_pos_iff.mpr ⟨1⟩
variables {H : subgroup G} [decidable_pred (λ h, h ∈ H)]
@[to_additive]
lemma card_lt : H ≠ ⊤ → fintype.card H < fintype.card G :=
begin
contrapose!, intro h, apply eq_top_of_card_eq H (le_antisymm _ h),
apply fintype.card_subtype_le,
end
lemma eq_bot_of_card_eq_one : fintype.card H = 1 → H = ⊥ :=
λ h, le_antisymm (λ x hx, begin
rcases fintype.card_eq_one_iff.mp h with ⟨y, hy⟩, rw mem_bot,
simpa using (hy ⟨x, hx⟩).trans (hy ⟨(1 : G), H.one_mem⟩).symm,
end) bot_le
end subgroup
namespace add_subgroup
variables {G : Type*} [add_group G] [fintype G]
variables {H : add_subgroup G} [decidable_pred (λ h, h ∈ H)]
/- The to_additive attribute doesn't work in this case because it also changes the 1
in the conclusion to a 0 -/
lemma eq_bot_of_card_eq_one : fintype.card H = 1 → H = ⊥ :=
λ h, le_antisymm (λ x hx, begin
rcases fintype.card_eq_one_iff.mp h with ⟨y, hy⟩, rw mem_bot,
simpa using (hy ⟨x, hx⟩).trans (hy ⟨(0 : G), H.zero_mem⟩).symm,
end) bot_le
end add_subgroup
attribute [to_additive add_subgroup.eq_bot_of_card_eq_one] subgroup.eq_bot_of_card_eq_one
section add_lagrange
open add_subgroup
variables {α : Type*} [add_group α] [fintype α]
lemma card_eq_card_quotient_mul_card_add_subgroup (s : add_subgroup α) [fintype s]
[decidable_pred (λ a, a ∈ s)] :
fintype.card α = fintype.card (quotient_add_group.quotient s) * fintype.card s :=
by rw ← fintype.card_prod;
exact fintype.card_congr (add_subgroup.add_group_equiv_quotient_times_add_subgroup)
attribute [to_additive card_eq_card_quotient_mul_card_add_subgroup] card_eq_card_quotient_mul_card_subgroup
lemma card_add_subgroup_dvd_card (s : add_subgroup α) [fintype s] :
fintype.card s ∣ fintype.card α :=
by haveI := classical.prop_decidable; simp [card_eq_card_quotient_mul_card_add_subgroup s]
attribute [to_additive] card_subgroup_dvd_card
end add_lagrange
namespace quotient_add_group
open add_subgroup
variables {G : Type*} [add_group G] [fintype G]
variables {N : add_subgroup G} [add_subgroup.normal N]
[decidable_pred (λ a, a ∈ N)] [decidable_pred N.carrier]
lemma eq_bot_of_card_quotient_eq : fintype.card (quotient N) = fintype.card G → N = ⊥ :=
begin
intro h, rw card_eq_card_quotient_mul_card_add_subgroup N at h,
conv_lhs at h { rw ←nat.mul_one (fintype.card (quotient N)) },
apply add_subgroup.eq_bot_of_card_eq_one,
apply nat.eq_of_mul_eq_mul_left add_subgroup.card_pos h.symm,
end
end quotient_add_group
namespace quotient_group
variables {G : Type*} [group G] [fintype G]
variables {N : subgroup G} [subgroup.normal N] [decidable_pred (λ a, a ∈ N)] [decidable_pred N.carrier]
@[to_additive]
lemma eq_bot_of_card_quotient_eq : fintype.card (quotient N) = fintype.card G → N = ⊥ :=
begin
intro h, rw card_eq_card_quotient_mul_card_subgroup N at h,
conv_lhs at h { rw ←nat.mul_one (fintype.card (quotient N)) },
apply subgroup.eq_bot_of_card_eq_one,
apply nat.eq_of_mul_eq_mul_left subgroup.card_pos h.symm,
end
@[to_additive]
lemma card_quotient_lt :
N ≠ ⊥ → fintype.card (quotient N) < fintype.card G :=
begin
contrapose!, intro h, apply eq_bot_of_card_quotient_eq (le_antisymm _ h),
apply fintype.card_quotient_le,
end
end quotient_group
namespace fingroup
open fintype
@[to_additive add_fingroup.strong_rec_on_card]
def strong_rec_on_card (G : Type*) [group G] [fintype G]
{p : Π (G : Type*) [group G] [fintype G], Sort _} :
(Π (G : Type*) [group G] [fintype G],
(Π (H : Type*) [group H] [fintype H], by exactI card H < card G → p H) →
by exactI p G) →
p G :=
λ ih, suffices h : ∀ (n : ℕ) (G : Type*) [group G] [fintype G], by exactI card G = n → p G,
from h (card G) G rfl,
λ n, n.strong_rec_on $ begin
intros n ih' G, introsI _ _, intro hn,
apply ih G,
intro H, introsI _ _, intro hH,
exact ih' (card H) (hn ▸ hH) H rfl,
end
@[to_additive add_fingroup.strong_rec_on_card']
def strong_rec_on_card' (G : Group) [fintype G]
{p : Π (G : Group) [fintype G], Sort _} :
(Π (G : Group) [fintype G],
(Π (H : Group) [fintype H], by exactI card H < card G → p H) →
by exactI p G) →
p G :=
λ ih, suffices h : ∀ (n : ℕ) (G : Group) [fintype G], by exactI card G = n → p G,
from h (card G) G rfl,
λ n, n.strong_rec_on $ begin
intros n ih' G, introI, intro hn,
apply ih G, intro H, introI, intro hH,
exact ih' (card H) (hn ▸ hH) H rfl,
end
end fingroup
open add_subgroup
lemma add_subgroup.not_subsingleton_of_prime_card {G : Type*} [add_group G] [fintype G] :
nat.prime (fintype.card G) → ¬ subsingleton G :=
λ h1 h2,
have h : fintype.card G = 1 := fintype.card_eq_one_iff.mpr ⟨0, λ g, @subsingleton.elim _ h2 g 0⟩,
by { rw [h] at h1, exact nat.not_prime_one h1 }
local attribute [instance] classical.prop_decidable
lemma add_subgroup.is_simple_of_prime_card {G : Type*} [add_group G] [fintype G] :
nat.prime (fintype.card G) → is_simple_add G :=
λ h N _, begin
have hp := card_add_subgroup_dvd_card N,
rw nat.dvd_prime h at hp,
cases hp,
{ left, exact eq_bot_of_card_eq_one hp },
right, exact not_not.mp (not_imp_not.mpr card_lt (not_lt_of_ge $ ge_of_eq hp)),
end
namespace subgroup
open quotient_group
variables {G : Type*} [group G]
@[to_additive]
lemma not_subsingleton_of_prime_card [fintype G] : nat.prime (fintype.card G) → ¬ subsingleton G :=
λ h1 h2,
have h : fintype.card G = 1 := fintype.card_eq_one_iff.mpr ⟨1, λ g, @subsingleton.elim _ h2 g 1⟩,
by { rw [h] at h1, exact nat.not_prime_one h1 }
local attribute [instance] classical.prop_decidable
@[to_additive]
lemma is_simple_of_prime_card [fintype G] : nat.prime (fintype.card G) → is_simple G :=
λ h N _, begin
have hp := card_subgroup_dvd_card N,
rw nat.dvd_prime h at hp,
cases hp,
{ left, exact eq_bot_of_card_eq_one hp },
right, exact not_not.mp (not_imp_not.mpr card_lt (not_lt_of_ge $ ge_of_eq hp)),
end
@[to_additive]
lemma exists_maximal_normal_subgroup [fintype G] :
¬ subsingleton G → ∃ (N : subgroup G), maximal_normal_subgroup N :=
fingroup.strong_rec_on_card G begin
clear _inst_1 _inst_2 G, intro G, introsI _ _, intros ih hG,
by_cases h : is_simple G,
{ use [⊥, subgroup.bot_normal, λ h, hG (subsingleton_iff.mpr $ subsingleton_of_bot_eq_top h),
λ N hN _, h N hN] },
rcases not_is_simple.mp h with ⟨N, hN, hN'⟩, haveI := hN,
rcases ih (quotient N) (card_quotient_lt hN'.1)
(λ h, hN'.2 $ subsingleton_quotient_iff.mp h) with ⟨K, hK, hKtop, hKmax⟩,
use [comap (mk' N) K, hK.comap _,
comap_top (mk' N) ▸ (comap_injective (mk'_surjective N)).ne hKtop],
intro L, introI hL, intro hLK,
have hLK' := map_mono hLK, rw map_comap_eq (mk'_surjective N) at hLK',
have hNL : N ≤ L := le_trans le_comap_mk' hLK,
exact (@hKmax (map (mk' N) L) (map_mk'_normal hNL) hLK').imp
(λ h, le_antisymm ((gc_map_comap (mk' N)).le_u $ le_of_eq h) hLK)
(λ h, (map_mk'_eq_top hNL).mp h),
end
open quotient_group
@[to_additive]
lemma maximal_normal_subgroup_iff (N : subgroup G) [N.normal] :
maximal_normal_subgroup N ↔
is_simple (quotient N) ∧ ¬ subsingleton (quotient N) :=
⟨λ hN, ⟨begin
intro K, introI,
have : N ≤ comap (mk' N) K, { simp only [←ker_mk N], exact ker_le_comap },
refine (hN.2.2 (comap (mk' N) K) this).imp (λ h, _) (λ h, _),
{ conv_rhs at h { rw [←ker_mk N] }, exact comap_injective (mk'_surjective N) h },
{ rw [←comap_top (mk' N)] at h, exact comap_injective (mk'_surjective N) h },
end,
λ h, hN.2.1 (subsingleton_quotient_iff.mp h)⟩,
λ ⟨h1, h2⟩, ⟨infer_instance, λ h, h2 (subsingleton_quotient_iff.mpr h), begin
intro K, introI, intro hNK,
refine (h1 _ (map_mk'_normal hNK)).imp (λ h, _) (λ h, _),
{ exact le_antisymm (ker_mk N ▸ le_ker_iff_map.mpr h) hNK },
{ exact (map_mk'_eq_top hNK).mp h },
end⟩⟩
end subgroup
|
{"author": "AdrianDoM", "repo": "IMOinLEAN", "sha": "672faa5bc8dd42a26fb1540ad8b9a325362be361", "save_path": "github-repos/lean/AdrianDoM-IMOinLEAN", "path": "github-repos/lean/AdrianDoM-IMOinLEAN/IMOinLEAN-672faa5bc8dd42a26fb1540ad8b9a325362be361/src/jordanholder/fingroup.lean"}
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% CS624: Analysis of Algorithms
% Copyright 2015 Pejman Ghorbanzade <pejman@ghorbanzade.com>
% Creative Commons Attribution-ShareAlike 4.0 International License
% More info: https://github.com/ghorbanzade/beacon
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section*{Question 3}
Prove that a simple loop contains no edge more than once.
\subsection*{Solution}
By definition, the loop $v_0 \rightarrow v_1 \rightarrow \cdots \rightarrow v_k$ is simple if $k \geq 3$ and it contains no vertex more than once, except for the first and last vertices, which are the same and occur only twice.
The proof is thus given by contradiction.
We assume that a simple loop may contain an edge $v_i \rightarrow v_{i+1}$ more than once, where $0 \leq i < k$.
If $i \neq 0$, $v_i$ (and $v_{i+1}$) would occur twice while neither of them is the first or the last vertex.
The assumption would therefore violate the definition of a simple loop.
If $i = 0$, the path would be of the form $v_0 \rightarrow \cdots \rightarrow v_0 \rightarrow \cdots \rightarrow v_k$.
In this case, since $v_0$ and $v_k$ are the same, $v_0$ would occur more than twice, which again violates the definition.
Therefore the assumption is false and the proof is complete.
|
{"hexsha": "ee38661d4ca6f6fd0ebaf53b02e355a814f64559", "size": 1309, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "umb-cs624-2015s/src/tex/hw03/hw03q03.tex", "max_stars_repo_name": "ghorbanzade/beacon", "max_stars_repo_head_hexsha": "c36e3d1909b9e1e47b1ad3cda81f7f33b713adc4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-11-13T20:00:10.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-01T11:16:51.000Z", "max_issues_repo_path": "umb-cs624-2015s/src/tex/hw03/hw03q03.tex", "max_issues_repo_name": "ghorbanzade/beacon", "max_issues_repo_head_hexsha": "c36e3d1909b9e1e47b1ad3cda81f7f33b713adc4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "umb-cs624-2015s/src/tex/hw03/hw03q03.tex", "max_forks_repo_name": "ghorbanzade/beacon", "max_forks_repo_head_hexsha": "c36e3d1909b9e1e47b1ad3cda81f7f33b713adc4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-09-20T05:58:32.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-06T17:18:05.000Z", "avg_line_length": 56.9130434783, "max_line_length": 223, "alphanum_fraction": 0.679144385, "num_tokens": 330}
|
#!/usr/bin/env python
# pylint: disable=wrong-import-position,too-many-statements
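"""Greedy random search in a GAN's latent space: starting from a random noise
vector, repeatedly sample random directions, keep the move that most improves
the discriminator's score, and optionally shrink the step size each iteration."""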
import os
import time
import traceback
from argparse import ArgumentParser
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from evaluation import Evaluation
from utils import load_checkpoint, load_model, logistic
def parse_arguments():
parser = ArgumentParser()
parser.add_argument("--eval-dir", type=str, required=True,
help="Directory of the evaluation to test (output)")
parser.add_argument("--model-name", type=str, required=True,
help="Name of the model to instantiate")
parser.add_argument("--epoch", type=int, required=True,
help="The epoch of the model to load")
parser.add_argument("--description", type=str, default=None,
help="An optional description of the images")
parser.add_argument("--image-count", type=int, default=1,
help="The number of images to generate")
parser.add_argument("--rows", type=int, default=8,
help="The number of rows to generate")
parser.add_argument("--columns", type=int, default=8,
help="The number of columns to generate")
parser.add_argument("--noise-dimensions", type=int, default=100,
help="The number of dimensions of the noise vector")
parser.add_argument("--search-samples", type=int, default=4,
help="The number of samples to generate at each search step")
parser.add_argument("--step-size", type=float,
help="The distance to move in the various directions")
parser.add_argument("--size-factor", type=float, default=0.9,
help="The factor by which the step size is multiplied after each iteration")
parser.add_argument("--colored", action="store_true",
help="Specify if the model generates colored output")
parser.add_argument("--discriminator-classes", type=int, default=1,
help="Specify the number of classes the discriminator is predicting")
return parser.parse_args()
def main(start_time):
tf.enable_eager_execution()
# handle arguments and config
args = parse_arguments()
args.start_time = start_time
tf.logging.info("Args: {}".format(args))
args.has_colored_target = args.colored
args.checkpoint_dir = os.path.join("output", args.eval_dir, "checkpoints")
model = load_model(args)
generator = model.get_generator()
discriminator = model.get_discriminator()
load_checkpoint(args, checkpoint_number=args.epoch//25, generator=generator, discriminator=discriminator)
    gen_training = True
disc_training = False
for image_number in range(args.image_count):
tf.logging.info("Generating image {}/{}".format(image_number+1, args.image_count))
plt.figure(figsize=(32, 32))
inputs = tf.random_normal([args.search_samples, args.noise_dimensions])
samples = generator(inputs, training=gen_training)
predictions = logistic(discriminator(samples, training=disc_training))
best_index = tf.argmax(predictions)
best_index = best_index.numpy() if best_index.shape else best_index
previous_prediction = predictions[best_index]
plt.subplot(args.rows, args.columns, 1)
Evaluation.plot_image(samples[best_index], np.round(predictions[best_index].numpy(), 5))
previous_direction = None
improvements = 0
best_input = inputs[best_index]
if args.step_size is not None:
current_step_size = args.step_size
for i in range(1, args.rows*args.columns):
tf.logging.info("Looking for image {}/{}, previous prediction: {}{}".format(
i+1, args.rows*args.columns, previous_prediction,
"" if args.step_size is None else ", step: {:.3f}".format(current_step_size)))
# get new possible directions to move
directions = tf.random_normal([args.search_samples, args.noise_dimensions], stddev=0.1)
if previous_direction is not None:
directions = tf.concat([[previous_direction], directions[1:, :]], axis=0)
# obtain new inputs by moving previous input into the various directions
lengths = [tf.norm(direction).numpy() for direction in directions]
tf.logging.debug("Direction lengths: {}".format(",".join([str(l) for l in lengths])))
inputs = tf.reshape(tf.tile(best_input, [args.search_samples]), (-1, args.noise_dimensions))
if args.step_size is None:
inputs = inputs + directions
else:
directions = [direction * current_step_size / tf.norm(direction) for direction in directions]
inputs = inputs + directions
            # get new samples and predictions
samples = generator(inputs, training=gen_training)
predictions = logistic(discriminator(samples, training=disc_training))
best_index = tf.argmax(predictions)
best_index = best_index.numpy() if best_index.shape else best_index
tf.logging.debug("Best previous input: {}, input at best position: {}, direction: {}".format(
best_input[0], inputs[best_index, 0], directions[best_index][0]))
if previous_direction is not None and best_index == 0:
tf.logging.info("Going into the same direction again!")
if predictions[best_index].numpy() > previous_prediction.numpy():
previous_prediction = predictions[best_index]
previous_direction = directions[best_index]
best_input = inputs[best_index]
plt.subplot(args.rows, args.columns, i+1)
Evaluation.plot_image(samples[best_index], np.round(predictions[best_index].numpy(), 5))
improvements += 1
else:
previous_direction = None
tf.logging.info("No improvement found")
if args.step_size is not None:
current_step_size *= args.size_factor
tf.logging.info("Improved the original image {} times ({:.1f}%)".format(
improvements, 100. * improvements / (args.rows*args.columns-1)))
plt.tight_layout()
figure_file = os.path.join("output", args.eval_dir, "samples{}_{:03d}.png".format(
"_{}".format(args.description) if args.description else "", image_number+1))
plt.savefig(figure_file)
plt.close()
tf.logging.info("Finished generating {} images".format(args.image_count))
if __name__ == "__main__":
START_TIME = time.time()
# np.random.seed(42)
tf.logging.set_verbosity(tf.logging.INFO)
try:
main(START_TIME)
except Exception as ex:
tf.logging.fatal("Exception occurred: {}".format(traceback.format_exc()))
finally:
tf.logging.info("Finished eval after {:.1f}m".format((time.time() - START_TIME) / 60))
|
{"hexsha": "5c3cadc4153f80dac39dfd20006c1aef0a25ea56", "size": 6481, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/find_good_sample.py", "max_stars_repo_name": "furgerf/GAN-for-dermatologic-imaging", "max_stars_repo_head_hexsha": "e90b06c46c7693e984a4c5b067e18460113cd23b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/find_good_sample.py", "max_issues_repo_name": "furgerf/GAN-for-dermatologic-imaging", "max_issues_repo_head_hexsha": "e90b06c46c7693e984a4c5b067e18460113cd23b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2020-09-26T01:22:00.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-22T18:00:52.000Z", "max_forks_repo_path": "src/find_good_sample.py", "max_forks_repo_name": "furgerf/GAN-for-dermatologic-imaging", "max_forks_repo_head_hexsha": "e90b06c46c7693e984a4c5b067e18460113cd23b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.2802547771, "max_line_length": 107, "alphanum_fraction": 0.7102299028, "include": true, "reason": "import numpy", "num_tokens": 1440}
|
"""
Copyright (C) 2019 Patrick Schwab, ETH Zurich
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import print_function
import sys
import numpy as np
from bisect import bisect_right
from sklearn.metrics import roc_auc_score, accuracy_score, f1_score, recall_score, confusion_matrix, roc_curve, \
precision_recall_curve, auc
class ModelEvaluation(object):
@staticmethod
def evaluate_semisupervised_dnn(model, generator, num_steps,
with_auxiliary_tasks=False,
use_extra_auxiliary=False,
threshold=-1):
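        # threshold == -1 is a sentinel: choose the operating point closest to
        # the top-left corner of the ROC curve (see below); otherwise the given
        # threshold is used as-is.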
batch_size = 0
y_pred, y_true = [], []
for _ in range(num_steps):
generator_outputs = next(generator)
if len(generator_outputs) == 3:
batch_input, labels_batch, sample_weight = generator_outputs
else:
batch_input, labels_batch = generator_outputs
model_outputs = model.predict(batch_input)
if with_auxiliary_tasks:
batch_size = len(labels_batch[0])
else:
batch_size = len(labels_batch)
y_pred.append(model_outputs)
y_true.append(labels_batch)
if with_auxiliary_tasks:
y_true = map(lambda x: x[0], y_true)
if not use_extra_auxiliary and with_auxiliary_tasks:
y_pred = map(lambda x: x[0], y_pred)
y_pred = np.vstack(y_pred)
if y_pred.shape[-1] == 1:
y_pred, y_true = np.squeeze(y_pred), np.hstack(y_true)
else:
y_pred, y_true = np.vstack(y_pred)[:, :-1], np.vstack(y_true)[:, :-1]
y_pred = y_pred[:, 0]
y_true = y_true[:, 0]
assert y_true.shape[-1] == y_pred.shape[-1]
assert y_true.shape[-1] == num_steps * batch_size
try:
auc_score = roc_auc_score(y_true, y_pred)
fpr, tpr, thresholds = roc_curve(y_true, y_pred)
if threshold == -1.0:
# Choose optimal threshold based on closest-to-top-left selection on ROC curve.
optimal_threshold_idx = np.argmin(np.linalg.norm(np.stack((fpr, tpr)).T -
np.repeat([[0., 1.]], fpr.shape[0], axis=0), axis=1))
threshold = thresholds[optimal_threshold_idx]
            y_pred_thresholded = (y_pred > threshold).astype(int)
tn, fp, fn, tp = confusion_matrix(y_true, y_pred_thresholded).ravel()
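            # fpr is sorted ascending, so bisect_right finds the largest index with
            # fpr <= 0.05; the corresponding tpr is the sensitivity at 95% specificity.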
sens_at_95spec_idx = bisect_right(fpr, 0.05)
if sens_at_95spec_idx == 0:
# Report 0.0 if specificity goal can not be met.
sens_at_95spec = 0.0
else:
sens_at_95spec = tpr[sens_at_95spec_idx - 1]
precision, recall, _ = precision_recall_curve(y_true, y_pred)
auprc_score = auc(recall, precision, reorder=False)
print("INFO: Validated with AUC =", auc_score,
", with AUPR =", auprc_score,
", with accuracy =", accuracy_score(y_true, y_pred_thresholded),
", with mean = ", np.mean(y_true),
", with f1 =", f1_score(y_true, y_pred_thresholded),
", with specificity =", float(tn) / (tn+fp),
", with sensitivity = ", recall_score(y_true, y_pred_thresholded),
", with sens@95spec = ", sens_at_95spec,
", and n = ", len(y_true),
", and threshold = ", threshold,
file=sys.stderr)
        except Exception:
print("WARN: Score calculation failed. Most likely, there was only one class present in y_true.",
file=sys.stderr)
auc_score = 0
return auc_score, threshold
|
{"hexsha": "a32872e7951b3d9eb12f87a35d1f4de81d91ce3b", "size": 4837, "ext": "py", "lang": "Python", "max_stars_repo_path": "dsmt_nets/model/model_evaluation.py", "max_stars_repo_name": "d909b/DSMTNets", "max_stars_repo_head_hexsha": "17518e7f3dd3150469081b07899b771312cb9e3b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2018-04-02T00:21:31.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-05T13:27:42.000Z", "max_issues_repo_path": "dsmt_nets/model/model_evaluation.py", "max_issues_repo_name": "d909b/DSMTNets", "max_issues_repo_head_hexsha": "17518e7f3dd3150469081b07899b771312cb9e3b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-08-06T20:49:35.000Z", "max_issues_repo_issues_event_max_datetime": "2018-08-07T02:00:31.000Z", "max_forks_repo_path": "dsmt_nets/model/model_evaluation.py", "max_forks_repo_name": "d909b/DSMTNets", "max_forks_repo_head_hexsha": "17518e7f3dd3150469081b07899b771312cb9e3b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-06-12T11:28:10.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-13T15:28:23.000Z", "avg_line_length": 43.5765765766, "max_line_length": 118, "alphanum_fraction": 0.6119495555, "include": true, "reason": "import numpy", "num_tokens": 1046}
|
import pytest
import pandas as pd
import numpy as np
# --------------------------------------------------------------------------- #
# TEST DATA MOCKS
# --------------------------------------------------------------------------- #
@pytest.fixture(scope="module")
def reprice_data():
dates = pd.date_range(
"2021-12-22",
"2021-12-24",
freq="D",
).astype(str)
dates = list(dates)
samples = [np.random.randint(25, 75) for _ in range(len(dates))]
return dict(Y=samples, X=dates)
@pytest.fixture
def reprice_dataframe(reprice_data):
return pd.DataFrame.from_dict(reprice_data, orient="columns")
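

# Example usage (in a test module alongside this conftest; pytest injects the
# fixtures by parameter name):
#   def test_reprice_dataframe(reprice_dataframe):
#       assert list(reprice_dataframe.columns) == ["Y", "X"]
#       assert len(reprice_dataframe) == 3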
|
{"hexsha": "36d6316959d05dac84306bd39fa36703b445737c", "size": 641, "ext": "py", "lang": "Python", "max_stars_repo_path": "digging-into-python-testing/conftest.py", "max_stars_repo_name": "Tincre/technical-content", "max_stars_repo_head_hexsha": "7e10a65c1f46013b63a9d56391b4a248d92329db", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-31T05:20:30.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-31T05:20:30.000Z", "max_issues_repo_path": "digging-into-python-testing/conftest.py", "max_issues_repo_name": "Tincre/technical-content", "max_issues_repo_head_hexsha": "7e10a65c1f46013b63a9d56391b4a248d92329db", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "digging-into-python-testing/conftest.py", "max_forks_repo_name": "Tincre/technical-content", "max_forks_repo_head_hexsha": "7e10a65c1f46013b63a9d56391b4a248d92329db", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.8695652174, "max_line_length": 79, "alphanum_fraction": 0.5007800312, "include": true, "reason": "import numpy", "num_tokens": 137}
|
#ifndef WAVE_TYPES_HPP
#define WAVE_TYPES_HPP
#include <vector>

#include <Eigen/Eigen>
namespace wave {
template<typename T>
using Vec = std::vector<T>;
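// Vector of Eigen objects: fixed-size vectorizable Eigen types need aligned
// storage, hence the aligned allocator below.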
template<typename T>
using VecE = std::vector<T, Eigen::aligned_allocator<T>>;
}
#endif //WAVE_TYPES_HPP
|
{"hexsha": "cb6f51aa02855cc87d687ea7b7366c81a117e04e", "size": 246, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "wave_utils/include/wave/utils/types.hpp", "max_stars_repo_name": "Jebediah/libwave", "max_stars_repo_head_hexsha": "c04998c964f0dc7d414783c6e8cf989a2716ad54", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2019-06-13T13:47:18.000Z", "max_stars_repo_stars_event_max_datetime": "2019-06-13T14:54:35.000Z", "max_issues_repo_path": "wave_utils/include/wave/utils/types.hpp", "max_issues_repo_name": "Jebediah/libwave", "max_issues_repo_head_hexsha": "c04998c964f0dc7d414783c6e8cf989a2716ad54", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "wave_utils/include/wave/utils/types.hpp", "max_forks_repo_name": "Jebediah/libwave", "max_forks_repo_head_hexsha": "c04998c964f0dc7d414783c6e8cf989a2716ad54", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2020-02-13T02:27:29.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-13T02:27:29.000Z", "avg_line_length": 14.4705882353, "max_line_length": 57, "alphanum_fraction": 0.743902439, "num_tokens": 61}
|
# Copyright(c) 2014, The LIMIX developers (Christoph Lippert, Paolo Francesco Casale, Oliver Stegle)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tables
import numpy as np
import time, os
import pandas as pd
from datetime import datetime
def _depth_first_hdf5(dictionary, outfile, root=None, filters=None):
"""
performs a depth first search on a dict object and creates a HDF5 outfile
structure that mirrors the dictionary.
Supported types for leaf nodes are numpy.ndarray, scalars, regular lists, and pandas.DataFrame
Args:
dictionary: dict
outfile: pytables HDF5 outfile
root: the current root node (default "/")
filters: filters for chunked storage
"""
if root is None:
root=outfile.root
for child_key in list(dictionary.keys()):
if isinstance(dictionary[child_key],dict):
child_group = outfile.create_group(root, child_key)
_depth_first_hdf5(dictionary[child_key], outfile=outfile, root=child_group, filters=filters)
elif np.isscalar(dictionary[child_key]):
leaf = outfile.create_array(where=root, name=child_key, obj=dictionary[child_key])
elif isinstance(dictionary[child_key],list):
leaf = outfile.create_array(where=root, name=child_key, obj=dictionary[child_key])
elif isinstance(dictionary[child_key],np.ndarray):
atom = tables.Atom.from_dtype(dictionary[child_key].dtype)
            if filters is None:
                leaf = outfile.create_array(where=root, name=child_key, atom=atom, shape=dictionary[child_key].shape, obj=dictionary[child_key])
            else:
                # chunked, compressed storage requires a CArray; create_array does not accept filters
                leaf = outfile.create_carray(where=root, name=child_key, atom=atom, shape=dictionary[child_key].shape, obj=dictionary[child_key], filters=filters)
elif isinstance(dictionary[child_key],pd.core.frame.DataFrame):
            child_group = outfile.create_group(root, child_key) #The DataFrame is stored in a group of its own
            index = np.array(dictionary[child_key].index,dtype=type(dictionary[child_key].index[0]))
            leaf = outfile.create_array(where=child_group, name="index", obj=index) #Store the index
            columns = np.array(dictionary[child_key].columns,dtype=type(dictionary[child_key].columns[0]))
            leaf = outfile.create_array(where=child_group, name="columns", obj=columns) #store the columns
            values = dictionary[child_key].values
            leaf = outfile.create_array(where=child_group, name="values", obj=values) #store the values
else:
raise IOError("unsupported IO type in output dictionary: "+str(type(dictionary)))
def _depth_first_text(dictionary, outdir=".", delimiter=" ",float_format="%.6e"):
"""
performs a depth first search on a dict object and creates a directory
structure that mirrors the dictionaries. The leafs of the dictionaries are text files
Supported types for leaf nodes are numpy.ndarray, scalars, regular lists and pandas.DataFrame
Args:
dictionary: dict
outdir: directory to write in
delimiter: delimiter for root node text files
float_format: default format for floating point outputs.
"""
for child_key in list(dictionary.keys()):
if isinstance(dictionary[child_key],dict):
child_path = os.path.join(outdir, child_key)
_depth_first_text(dictionary[child_key], outdir=child_path,delimiter=delimiter,float_format=float_format)
else:
if not os.path.exists(outdir):
os.makedirs(outdir)
filename = os.path.join(outdir, child_key)+".txt"
if np.isscalar(dictionary[child_key]):
file = open(filename, "w")
if type(dictionary[child_key])==float:
outstr=float_format % dictionary[child_key]
else:
outstr=str(dictionary[child_key])
file.write(outstr)
file.close()
elif isinstance(dictionary[child_key],list):
outarray=np.array(dictionary[child_key])
_write_txt_array(filename,array=outarray,delimiter=delimiter,float_format=float_format)
elif isinstance(dictionary[child_key],np.ndarray):
_write_txt_array(filename,array=dictionary[child_key],delimiter=delimiter,float_format=float_format)
elif isinstance(dictionary[child_key],pd.core.frame.DataFrame):
dictionary[child_key].to_csv(filename,sep=delimiter,index=True,header=True,na_rep="nan",float_format=float_format)
else:
raise IOError("unsupported IO type in output dictionary: "+str(type(dictionary)))
def _write_txt_array(filename, array, delimiter=" ",float_format="%.6f"):
"""
stores an array with unknown dtype to a text file.
Args:
filename: name of the text file
array: array to be saved in the text file
delimiter: delimiter seperating values in the text file
float_format: formating string for floating point values
"""
    if np.issubdtype(array.dtype, np.floating):
        np.savetxt(filename,array,fmt=float_format,delimiter=delimiter)
    elif np.issubdtype(array.dtype, np.integer):
        np.savetxt(filename,array,fmt="%i",delimiter=delimiter)
    elif array.dtype.type==np.string_:
        np.savetxt(filename,array,fmt="%s",delimiter=delimiter)
else:
raise IOError("unsupported format")
class output_writer(object):
"""writes an output dictionary to disk"""
def __init__(self, output_dictionary,timestamp=None):
"""
Args:
output_dictionary: a dictionary holding either other dictionaries, numpy arrays, or scalars as members
timestamp: a timestamp from time.time() If None, the current time is used.
"""
self.output_dict = output_dictionary
self.filters = None
if timestamp is None:
self.timestamp = time.time()
else:
self.timestamp = timestamp
#here we could add filters for saving compressed chunked arrays:
#self.filters = tables.Filters(complib='blosc', complevel=5)
def get_timestamp(self):
"""
        Format the stored timestamp as "YYYY-MM-DD_HH-MM-SS".
Returns:
a string
"""
return str(datetime.fromtimestamp(self.timestamp))[0:10]+"_"+str(datetime.fromtimestamp(self.timestamp))[11:13]+"-"+str(datetime.fromtimestamp(self.timestamp))[14:16]+"-"+str(datetime.fromtimestamp(self.timestamp))[17:19]
def write_hdf5(self, filename, timestamp=False):
"""
Creates a HDF5 file that mirrors the dictionary.
Supported types for leaf nodes are numpy.ndarray, scalars, and regular lists
Args:
filename: name of the HDF5 output file
timestamp: Boolean indicator whether to append a timestap to the filename
"""
if timestamp:
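            # Splice the timestamp in before the file extension (e.g. ".h5"),
            # or append it when the filename has no recognized extension.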
stamp=self.get_timestamp()
if len(filename)>4 and filename[-4] == ".":
filename=filename[0:-4]+"_"+self.get_timestamp()+filename[-4:]
elif len(filename)>3 and filename[-3] == ".":
filename=filename[0:-3]+"_"+self.get_timestamp()+filename[-3:]
else:
filename=filename+self.get_timestamp()
outfile = tables.open_file(filename, mode = "w", title = "Output")
_depth_first_hdf5(self.output_dict,outfile=outfile,root=outfile.root,filters=self.filters)
outfile.close()
def write_txt(self, outdir=".", delimiter=" ",float_format="%.6f", timestamp=False):
"""
Creates a directory structure that mirrors the dictionary.
The leaf nodes are text files
Supported types for leaf nodes are numpy.ndarray, scalars, and regular lists
Args:
outdir: name of the HDF5 output file
delimiter: delimiter for values in text files
float_format: formating string for floating point values
timestamp: Boolean indicator whether to append a timestap to the filename
"""
        if timestamp:
            stamp = self.get_timestamp()
            if outdir.endswith(".") or outdir == "" or outdir.endswith(".."):
                outdir = os.path.join(outdir, stamp)
            else:
                outdir = outdir + "_" + stamp
_depth_first_text(dictionary=self.output_dict, outdir=outdir, delimiter=delimiter,float_format=float_format)
if __name__ == "__main__":
print(("last modified: %s" % time.ctime(os.path.getmtime(__file__))))
di={"a":{"B":np.ones((5,5)),"C":1},"B":["dfd","f"]}
writer = output_writer(output_dictionary=di)
writer.write_hdf5("test.h5",timestamp=True)
writer.write_txt("test",timestamp=True)
|
{"hexsha": "d642730c50d6b0cab51ee6899fe6558ab1e15712", "size": 9550, "ext": "py", "lang": "Python", "max_stars_repo_path": "svca_limix/limix/io/output_writer.py", "max_stars_repo_name": "DenisSch/svca", "max_stars_repo_head_hexsha": "bd029c120ca8310f43311253e4d7ce19bc08350c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 65, "max_stars_repo_stars_event_min_datetime": "2015-01-20T20:46:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-27T14:40:35.000Z", "max_issues_repo_path": "svca_limix/limix/io/output_writer.py", "max_issues_repo_name": "DenisSch/svca", "max_issues_repo_head_hexsha": "bd029c120ca8310f43311253e4d7ce19bc08350c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 29, "max_issues_repo_issues_event_min_datetime": "2015-02-01T22:35:17.000Z", "max_issues_repo_issues_event_max_datetime": "2017-08-07T08:18:23.000Z", "max_forks_repo_path": "svca_limix/limix/io/output_writer.py", "max_forks_repo_name": "DenisSch/svca", "max_forks_repo_head_hexsha": "bd029c120ca8310f43311253e4d7ce19bc08350c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 35, "max_forks_repo_forks_event_min_datetime": "2015-02-01T17:26:50.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-13T07:06:16.000Z", "avg_line_length": 48.7244897959, "max_line_length": 229, "alphanum_fraction": 0.6521465969, "include": true, "reason": "import numpy", "num_tokens": 1969}
|
function score = Task1_Min_value(Population,~)
% <min> <single> <real/integer/label/binary/permutation> <large/none> <constrained/none> <expensive/none> <sparse/none> <multitask>
% The minimum objective value of the first task (for multitask optimization)
%------------------------------- Copyright --------------------------------
% Copyright (c) 2023 BIMK Group. You are free to use the PlatEMO for
% research purposes. All publications which use this platform or any code
% in the platform should acknowledge the use of "PlatEMO" and reference "Ye
% Tian, Ran Cheng, Xingyi Zhang, and Yaochu Jin, PlatEMO: A MATLAB platform
% for evolutionary multi-objective optimization [educational forum], IEEE
% Computational Intelligence Magazine, 2017, 12(4): 73-87".
%--------------------------------------------------------------------------
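    % Select the individuals assigned to the first task (last decision variable equal to 1) and report their best objective value.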
PopDec = Population.decs;
score = Population(PopDec(:,end)==1).best.objs;
if isempty(score)
score = nan;
end
end
|
{"author": "BIMK", "repo": "PlatEMO", "sha": "c5b5b7c37a9bb42689a5ac2a0d638d9c4f5693d5", "save_path": "github-repos/MATLAB/BIMK-PlatEMO", "path": "github-repos/MATLAB/BIMK-PlatEMO/PlatEMO-c5b5b7c37a9bb42689a5ac2a0d638d9c4f5693d5/PlatEMO/Metrics/Task1_Min_value.m"}
|
import pytest
import numpy as np
from discopy import Cup, Word
from discopy.quantum.circuit import Id
from lambeq import AtomicType, IQPAnsatz, SPSAOptimizer
N = AtomicType.NOUN
S = AtomicType.SENTENCE
ansatz = IQPAnsatz({N: 1, S: 1}, n_layers=1, n_single_qubit_params=1)
diagrams = [
ansatz((Word("Alice", N) @ Word("runs", N >> S) >> Cup(N, N.r) @ Id(S))),
ansatz((Word("Alice", N) @ Word("walks", N >> S) >> Cup(N, N.r) @ Id(S)))
]
from lambeq.training.model import Model
class ModelDummy(Model):
def __init__(self) -> None:
super().__init__()
self.initialise_weights()
def from_checkpoint():
pass
def _make_lambda(self, diagram):
return diagram.lambdify(*self.symbols)
def initialise_weights(self):
self.weights = np.array([1.,2.,3.])
def get_diagram_output(self):
pass
def forward(self, x):
return self.weights.sum()
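# Toy loss: squared sum of absolute errors; shared by all optimizer tests below.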
loss = lambda yhat, y: np.abs(yhat-y).sum()**2
def test_init():
model = ModelDummy.from_diagrams(diagrams)
model.initialise_weights()
optim = SPSAOptimizer(model,
hyperparams={'a': 0.01, 'c': 0.1, 'A':0.001},
loss_fn= loss,
bounds=[[0, 10]]*len(model.weights))
assert optim.alpha
assert optim.gamma
assert optim.current_sweep
assert optim.A
assert optim.a
assert optim.c
assert optim.ak
assert optim.ck
assert optim.project
def test_backward():
np.random.seed(3)
model = ModelDummy.from_diagrams(diagrams)
model.initialise_weights()
optim = SPSAOptimizer(model,
hyperparams={'a': 0.01, 'c': 0.1, 'A':0.001},
loss_fn= loss,
bounds=[[0, 10]]*len(model.weights))
optim.backward(([diagrams[0]], np.array([0])))
assert np.array_equal(optim.gradient.round(5), np.array([12, 12, 0]))
assert np.array_equal(model.weights, np.array([1.,2.,3.]))
def test_step():
np.random.seed(3)
model = ModelDummy.from_diagrams(diagrams)
model.initialise_weights()
optim = SPSAOptimizer(model,
hyperparams={'a': 0.01, 'c': 0.1, 'A':0.001},
loss_fn= loss,
bounds=[[0, 10]]*len(model.weights))
step_counter = optim.current_sweep
optim.backward(([diagrams[0]], np.array([0])))
optim.step()
assert np.array_equal(model.weights.round(4), np.array([0.8801,1.8801,3.]))
assert optim.current_sweep == step_counter+1
assert round(optim.ak,5) == 0.00659
assert round(optim.ck,5) == 0.09324
def test_project():
np.random.seed(4)
model = ModelDummy.from_diagrams(diagrams)
model.weights = np.array([0, 10, 0])
optim = SPSAOptimizer(model,
hyperparams={'a': 0.01, 'c': 0.1, 'A':0.001},
loss_fn= loss,
bounds=[[0, 10]]*len(model.weights))
optim.backward((diagrams, np.array([0, 0])))
assert np.array_equal(
optim.gradient.round(1), np.array([80.4, 80.4, -80.4]))
def test_missing_field():
model = ModelDummy
with pytest.raises(KeyError):
_ = SPSAOptimizer(model=model,
hyperparams={},
loss_fn=loss)
def test_bound_error():
model = ModelDummy()
model.initialise_weights()
with pytest.raises(ValueError):
_ = SPSAOptimizer(model=model,
hyperparams={'a': 0.01, 'c': 0.1, 'A':0.001},
loss_fn=loss,
bounds=[[0, 10]]*(len(model.weights)-1))
def test_load_state_dict():
state_dict = {'A': 0.1,
'a': 0.2,
'c': 0.3,
'ak': 0.01,
'ck': 0.02,
'current_sweep': 10}
model = ModelDummy()
model.from_diagrams(diagrams)
model.initialise_weights()
optim = SPSAOptimizer(model,
hyperparams={'a': 0.01, 'c': 0.1, 'A':0.001},
loss_fn= loss)
optim.load_state_dict(state_dict)
assert optim.A == state_dict['A']
assert optim.a == state_dict['a']
assert optim.c == state_dict['c']
assert optim.ak == state_dict['ak']
assert optim.ck == state_dict['ck']
assert optim.current_sweep == state_dict['current_sweep']
|
{"hexsha": "6bb55653f7d851fe9f8d08dd0ca1b3b8d6269c22", "size": 4427, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/training/test_spsa_optimizer.py", "max_stars_repo_name": "CQCL/lambeq", "max_stars_repo_head_hexsha": "04e4f736552c1ed51087dc9913f33464fad3783e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 131, "max_stars_repo_stars_event_min_datetime": "2021-10-11T02:45:04.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T22:28:05.000Z", "max_issues_repo_path": "tests/training/test_spsa_optimizer.py", "max_issues_repo_name": "CQCL/lambeq", "max_issues_repo_head_hexsha": "04e4f736552c1ed51087dc9913f33464fad3783e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2021-11-04T10:56:26.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-04T11:20:20.000Z", "max_forks_repo_path": "tests/training/test_spsa_optimizer.py", "max_forks_repo_name": "CQCL/lambeq", "max_forks_repo_head_hexsha": "04e4f736552c1ed51087dc9913f33464fad3783e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 25, "max_forks_repo_forks_event_min_datetime": "2021-10-13T03:34:32.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T11:48:21.000Z", "avg_line_length": 33.0373134328, "max_line_length": 79, "alphanum_fraction": 0.5611023266, "include": true, "reason": "import numpy", "num_tokens": 1185}
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2020 by ShabaniPy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the MIT license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Routines used to study Shapiro steps.
"""
from typing import Tuple, Optional, Union
import numpy as np
from scipy.signal import find_peaks
from shabanipy.jj.utils import compute_voltage_offset
from . import shapiro_step
from .binning import bin_power_shapiro_steps
def correct_voltage_offset_per_power(
power: np.ndarray,
current: np.ndarray,
voltage: np.ndarray,
frequency: Union[float, np.ndarray],
n_peak_width: int,
n_std_as_bin: int,
bound: Optional[float] = None,
debug: bool = False,
):
"""Correct the voltage offset in a Shapiro map (power, current bias) at each power.
The correction assumes that the offset is smaller than half a Shapiro step and
    works by realigning peaks to quantized values. It may fail in the presence of
strong fractional peaks.
Arrays are assumed to be in such a shape that the bias varies on the last dimension
and the power on the penultimate dimension.
Parameters
----------
power : np.ndarray
        N+2D array containing the microwave power used in the Shapiro experiment.
current : np.ndarray
N+2D array containing the bias current.
voltage : np.ndarray
N+2D array containing the measured voltage.
frequency : Union[float, np.ndarray]
float or ND array containing the frequency at which the experiment was carried
out.
    n_peak_width : int
        Number of peak widths to remove when determining the superconducting region in a
        VI curve; see shabanipy.jj.utils.compute_voltage_offset.
    n_std_as_bin : int
        Number of standard deviations (as determined from the superconducting plateau of
        the lowest power measurement) to use as the bin width when histogramming.
bound : Optional[float]
Bounds around midpoint to look for peaks (in uA)
    debug : bool, optional
        Whether to produce debugging output, by default False.
Returns
-------
np.ndarray
N+2D array containing the corrected voltage.
"""
# Copy the data to preserve the original
new_voltage = np.copy(voltage)
# Iterate on the extra dimensions if any
it = np.nditer(power[..., 0, 0], ["multi_index"])
for b in it:
index = it.multi_index
# Compute the value of the Shapiro step
step = shapiro_step(
frequency if isinstance(frequency, float) else frequency[index]
)
# Those arrays are guaranteed to be 2D
p = power[index]
c = current[index]
v = new_voltage[index]
# Determine the noise on the data by looking at the zero resistance state
# of the lowest measurement power
lpower_index = np.argmin(p[:, 0])
_, std = compute_voltage_offset(
c[lpower_index, :], v[lpower_index, :], n_peak_width, bound
)
# Compute the step fraction to use when binning to get a high resolution
# histogram
step_fraction = n_std_as_bin * std / step
        # Compute the histogram of the steps and get the voltage in units of Shapiro
        # steps; as a consequence, steps sit at integer values
volt_1d, histo = bin_power_shapiro_steps(p, c, v, frequency, step_fraction)
        # Iterate over the lines of the histogram and find the peaks (ie Shapiro steps)
for j, h in enumerate(histo):
            # Require peaks to be at least half the maximum height (ignores fractional steps)
            # In some cases this height criterion may cause an issue (not large enough or too large)
peaks, _ = find_peaks(h, height=max(h)/2)
# Calculate deviation of each peak and average
dev = np.average([volt_1d[i] - round(volt_1d[i]) for i in peaks])
            # Subtract the offset of each line
v[j] -= dev * step
return new_voltage
|
{"hexsha": "bb6e76407e327dcda06383623444eb90d9e85622", "size": 4128, "ext": "py", "lang": "Python", "max_stars_repo_path": "shabanipy/jj/shapiro/utils.py", "max_stars_repo_name": "ShabaniLab/DataAnalysis", "max_stars_repo_head_hexsha": "e234b7d0e4ff8ecc11e58134e6309a095abcd2c0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-06-25T20:01:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T23:15:57.000Z", "max_issues_repo_path": "shabanipy/jj/shapiro/utils.py", "max_issues_repo_name": "ShabaniLab/DataAnalysis", "max_issues_repo_head_hexsha": "e234b7d0e4ff8ecc11e58134e6309a095abcd2c0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "shabanipy/jj/shapiro/utils.py", "max_forks_repo_name": "ShabaniLab/DataAnalysis", "max_forks_repo_head_hexsha": "e234b7d0e4ff8ecc11e58134e6309a095abcd2c0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-06-11T17:21:54.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-24T14:45:08.000Z", "avg_line_length": 35.5862068966, "max_line_length": 91, "alphanum_fraction": 0.6412306202, "include": true, "reason": "import numpy,from scipy", "num_tokens": 909}
|
from pywrap.testing import cython_extension_from
import os
import numpy as np
from numpy.testing import assert_array_equal
from nose.plugins.skip import SkipTest
from pywrap.type_conversion import AbstractTypeConverter
from pywrap.defaultconfig import Config
from pywrap.utils import lines
def test_convert_vector():
eigen3_incdir = "/usr/include/eigen3"
if not os.path.exists(eigen3_incdir):
raise SkipTest("Eigen 3 include directory '%s' not found"
% eigen3_incdir)
eigen_vector_decl = """
cdef extern from "Eigen/Dense" namespace "Eigen":
cdef cppclass VectorXd:
VectorXd()
VectorXd(int rows)
VectorXd(VectorXd&)
double* data()
int rows()
double& get "operator()"(int rows)
"""
class EigenConverter(AbstractTypeConverter):
def __init__(self, tname, python_argname, type_info, context):
super(EigenConverter, self).__init__(
tname, python_argname, type_info, context)
if self.python_argname is not None:
self.cpp_argname = "cpp_" + python_argname
else:
self.cpp_argname = None
def matches(self):
return self.tname == "VectorXd"
def n_cpp_args(self):
return 1
def add_includes(self, includes):
includes.numpy = True
def python_to_cpp(self):
return lines(
"cdef int %(python_argname)s_length = %(python_argname)s.shape[0]",
"cdef cpp.VectorXd %(cpp_argname)s = cpp.VectorXd(%(python_argname)s_length)",
"cdef int %(python_argname)s_idx",
"for %(python_argname)s_idx in range(%(python_argname)s_length):",
" %(cpp_argname)s.data()[%(python_argname)s_idx] = %(python_argname)s[%(python_argname)s_idx]"
) % {"python_argname": self.python_argname,
"cpp_argname": self.cpp_argname}
def cpp_call_args(self):
return [self.cpp_argname]
def return_output(self, copy=True):
return lines(
"cdef int size = result.rows()",
"cdef int res_idx",
"cdef np.ndarray[double, ndim=1] res = np.ndarray(shape=(size,))",
"for res_idx in range(size):",
" res[res_idx] = result.get(res_idx)",
"return res"
)
def python_type_decl(self):
return "np.ndarray[double, ndim=1] " + self.python_argname
def cpp_type_decl(self):
return "cdef cpp.VectorXd"
config = Config()
config.registered_converters.append(EigenConverter)
config.add_decleration(eigen_vector_decl)
with cython_extension_from("eigen.hpp", config=config,
incdirs=eigen3_incdir):
from eigen import make
a = np.ones(5)
assert_array_equal(make(a), a * 2.0)
|
{"hexsha": "18bce3f97ddbe73d1e615c93d6b9637098b51741", "size": 2922, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/test_custom_conversions.py", "max_stars_repo_name": "steffanschlein/cythonwrapper", "max_stars_repo_head_hexsha": "ef30a3bc1a24024b9845dad4aa8a42e05219bd91", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 33, "max_stars_repo_stars_event_min_datetime": "2016-04-17T21:26:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-31T03:29:46.000Z", "max_issues_repo_path": "test/test_custom_conversions.py", "max_issues_repo_name": "steffanschlein/cythonwrapper", "max_issues_repo_head_hexsha": "ef30a3bc1a24024b9845dad4aa8a42e05219bd91", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 19, "max_issues_repo_issues_event_min_datetime": "2016-04-12T22:28:11.000Z", "max_issues_repo_issues_event_max_datetime": "2017-08-19T16:34:32.000Z", "max_forks_repo_path": "test/test_custom_conversions.py", "max_forks_repo_name": "steffanschlein/cythonwrapper", "max_forks_repo_head_hexsha": "ef30a3bc1a24024b9845dad4aa8a42e05219bd91", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2018-04-29T18:46:59.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-25T09:35:14.000Z", "avg_line_length": 34.3764705882, "max_line_length": 113, "alphanum_fraction": 0.6078028747, "include": true, "reason": "import numpy,from numpy", "num_tokens": 666}
|
module Dave.Structures.Definitions where
open import Dave.Equality public
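    -- Unary and binary operations on a carrier set, plus the standard algebraic laws stated pointwise.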
op₁ : Set → Set
op₁ A = A → A
op₂ : Set → Set
op₂ A = A → A → A
associative : {A : Set} → op₂ A → Set
associative _·_ = ∀ m n p → (m · n) · p ≡ m · (n · p)
commutative : {A : Set} → op₂ A → Set
commutative _·_ = ∀ m n → m · n ≡ n · m
left-identity : {A : Set} → op₂ A → (e : A) → Set
left-identity _·_ e = ∀ m → e · m ≡ m
right-identity : {A : Set} → op₂ A → (e : A) → Set
right-identity _·_ e = ∀ m → m · e ≡ m
|
{"hexsha": "af9e804f95385034508198bd150ed74f0496bccc", "size": 547, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "Dave/Structures/Definitions.agda", "max_stars_repo_name": "DavidStahl97/formal-proofs", "max_stars_repo_head_hexsha": "05213fb6ab1f51f770f9858b61526ba950e06232", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Dave/Structures/Definitions.agda", "max_issues_repo_name": "DavidStahl97/formal-proofs", "max_issues_repo_head_hexsha": "05213fb6ab1f51f770f9858b61526ba950e06232", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Dave/Structures/Definitions.agda", "max_forks_repo_name": "DavidStahl97/formal-proofs", "max_forks_repo_head_hexsha": "05213fb6ab1f51f770f9858b61526ba950e06232", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.35, "max_line_length": 57, "alphanum_fraction": 0.4936014625, "num_tokens": 217}
|
import tensorflow as tf
import cv2
import numpy as np
WIDTH = 100
HEIGHT = 100
INPUT_CHANNELS = 1
OUTPUT_CHANNELS = 3
def img_to_gray(img):
return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def rescale_img(img):
return cv2.resize(img,(WIDTH, HEIGHT), interpolation = cv2.INTER_CUBIC)
import subprocess
def sendmessage(message):
subprocess.Popen(['notify-send', message])
return
class DataLoader(object):
def __init__(self):
data_dir = 'images/'
self._idx = 0
self.colored_images = []
self.grey_images = []
for name in ['images/i1.jpg']:
color_img = rescale_img(cv2.imread(name))
gray_img = img_to_gray(color_img)#.reshape(WIDTH, HEIGHT, 1)
self.colored_images.append(color_img)
self.grey_images.append(gray_img)
self.num = len(self.colored_images)
def next_batch(self, batch_size):
images_batch = np.zeros((batch_size, HEIGHT, WIDTH, INPUT_CHANNELS))
labels_batch = np.zeros((batch_size, HEIGHT, WIDTH, OUTPUT_CHANNELS))
for i in range(batch_size):
# when your dataset is huge, you might need to load images on the fly
# you might also want data augmentation
# images_batch[i, ...] = self.grey_images[self._idx].reshape((HEIGHT*WIDTH*INPUT_CHANNELS))
# img = tf.reshape(self.grey_images[self._idx], (HEIGHT, WIDTH, INPUT_CHANNELS))
# print 'src',self.grey_images[self._idx]
img = np.expand_dims(self.grey_images[self._idx], axis=2)
# print 'img',img
images_batch[i, ...] = img
labels_batch[i, ...] = self.colored_images[self._idx]
self._idx += 1
if self._idx == self.num:
self._idx = 0
# images_batch = tf.reshape(images_batch, (None, HEIGHT, WIDTH, INPUT_CHANNELS))
return images_batch, labels_batch
def load_test(self):
return self.dataset.test.images.reshape((-1, self.h, self.w, self.c)), self.dataset.test.labels
def setup_tensorflow_graph():
x = tf.placeholder(tf.float32, shape=[None,WIDTH,HEIGHT,1])#WIDTH*HEIGHT])
y_ = tf.placeholder(tf.float32, shape=[None,WIDTH,HEIGHT,OUTPUT_CHANNELS]) #WIDTH*HEIGHT*OUTPUT_CHANNELS])
# x_image = tf.reshape(x, [-1,WIDTH,HEIGHT,1])
x_image = x
W_conv1 = weight_variable([5, 5, 1, 64])
b_conv1 = bias_variable([64])
h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
W_conv2 = weight_variable([5, 5, 64, 64])
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
W_conv3 = weight_variable([5, 5, 64, 32])
b_conv3 = bias_variable([32])
h_conv3 = tf.nn.relu(conv2d(h_pool2, W_conv3) + b_conv3)
h_pool3 = max_pool_2x2(h_conv3)
W_conv4 = weight_variable([5, 5, 32, 3])
b_conv4 = bias_variable([3])
h_conv4 = tf.nn.relu(conv2d(h_pool3, W_conv4) + b_conv4)
h_pool4 = max_pool_2x2(h_conv4)
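    # Note: max_pool_2x2 below uses stride 1, so every layer keeps the WIDTH x HEIGHT
    # spatial size and the network emits a full-resolution 3-channel image.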
# y_output = tf.reshape(h_pool3, [-1,WIDTH*HEIGHT*OUTPUT_CHANNELS])
y_output = h_pool4
    print(x)
    print(y_)
    print(x_image)
    print(h_conv1)
    print(h_conv2)
    print(h_conv3)
    print(y_output)
return x, y_, y_output
# W_fc1 = weight_variable([WIDTH*HEIGHT * 64, 1024])
# b_fc1 = bias_variable([1024])
# h_pool2_flat = tf.reshape(h_pool2, [-1, WIDTH*HEIGHT*64])
# h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
# keep_prob = tf.placeholder(tf.float32)
# h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
# W_fc2 = weight_variable([1024, 10])
# b_fc2 = bias_variable([10])
# y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
def run_training():
print "Construct Session"
with tf.Session() as sess:
x, y_, y_output = setup_tensorflow_graph()
cross_entropy = tf.reduce_sum(tf.squared_difference(y_, y_output))#tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_output, y_))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_sum(tf.squared_difference(y_, y_output))#tf.cast(correct_prediction, tf.float32))
sess.run(tf.initialize_all_variables())
saver = tf.train.Saver()
dataset = DataLoader()
print "Begin training"
for i in range(5000):
batch = dataset.next_batch(1)
if i%1 == 0:
train_accuracy = accuracy.eval(feed_dict={
x:batch[0], y_: batch[1]})#, keep_prob: 1.0})
print("step %d, training accuracy %g"%(i, train_accuracy))
train_step.run(feed_dict={x: batch[0], y_: batch[1]})#, keep_prob: 0.5})
save_path = saver.save(sess, "model.ckpt")
print("Model saved in file: %s" % save_path)
sendmessage('Done training')
def colorize_img():
with tf.Session() as sess:
x, y_, y_output = setup_tensorflow_graph()
saver = tf.train.Saver()
dataset = DataLoader()
saver.restore(sess, "model.ckpt")
# x = img_to_gray(rescale_img(cv2.imread(name)))
for i in range(1):
batch = dataset.next_batch(1)
img = sess.run(y_output, feed_dict={x: batch[0]})
img = img.reshape((100, 100, 3))
            print(img.shape)
cv2.imshow('y',img)
cv2.waitKey(0)
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
strides=[1, 1, 1, 1], padding='SAME') #was [1, 2, 2, 1]
run_training()
# colorize_img()
|
{"hexsha": "c499920388445a7d282ed0d08f9c242fa5f6ae6f", "size": 5756, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiments/other_model/colorize.py", "max_stars_repo_name": "MIT-6819-team/TF_colorization", "max_stars_repo_head_hexsha": "30bee77244e5595b855821a1e0ada9e69159b1c1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-11-03T20:06:16.000Z", "max_stars_repo_stars_event_max_datetime": "2017-11-03T20:06:16.000Z", "max_issues_repo_path": "experiments/other_model/colorize.py", "max_issues_repo_name": "MIT-6819-team/TF_colorization", "max_issues_repo_head_hexsha": "30bee77244e5595b855821a1e0ada9e69159b1c1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 29, "max_issues_repo_issues_event_min_datetime": "2016-11-28T22:41:05.000Z", "max_issues_repo_issues_event_max_datetime": "2016-12-11T21:49:40.000Z", "max_forks_repo_path": "experiments/other_model/colorize.py", "max_forks_repo_name": "MIT-6819-team/TF_colorization", "max_forks_repo_head_hexsha": "30bee77244e5595b855821a1e0ada9e69159b1c1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2826086957, "max_line_length": 140, "alphanum_fraction": 0.6546212648, "include": true, "reason": "import numpy", "num_tokens": 1654}
|
\documentclass{rsaaReport}
% To be compiled with pdfLaTeX
\Project{GMTAO}
\DocVersion{0.1}
\DocNumber{ANU-AO-}
% This is the master file of this template, the one to be actually
% compiled with pdfLaTeX
% Absolutely necessary packages
\usepackage{graphicx}
\usepackage[pdftex,bookmarks,colorlinks]{hyperref}
\usepackage{amsfonts}
\usepackage{bm}
\usepackage{makeidx}
% get nested equation numbering up to subsection level
\usepackage{amsmath}
\numberwithin{equation}{subsection}
% Setup of the .pdf file to be generated
\hypersetup{ pdftitle={GMT AO simulation Documentation},
pdfauthor={Piotr Piatrou},
bookmarksnumbered = true,
bookmarksopen = true,
pdfstartview = {FitH},
linkcolor = blue,
anchorcolor = black,
citecolor = blue,
filecolor = magenta,
menucolor = red,
urlcolor = red }
% Margins setup (normally LaTeX generates too narrow pages inconvenient for
% table and graphics inclusion, this setup is a fix for it)
\textwidth6in
\textheight9in
\topmargin-0.5in
\makeindex % make word index
\begin{document}
\title{Giant Magellan Telescope \\
Laser Tomography Adaptive Optics \\
Simulation Documentation \\ }
\maketitle
\begin{revision}
0.1 & P. Piatrou \& R. Conan, \today & & Draft \\
\end{revision}
\newpage
\tableofcontents
\listoftables
\listoffigures
\newpage
% \section{Purpose}
% \label{sec:purpose}
% \section{Applicable Documents}
% \label{sec:doc}
% \begin{documents}
% Doc. \# & & Doc. Title & Doc. Version & Doc. Date \\
% \end{documents}
% The .tex files contributed by each group member are added here through
% \input command by a person responsible for documentation keeping.
% All the contents are included as separate .tex files.
% If a file is not in the current folder, a full path to it should be
% given in the \input command, e.g. /home/TeXpapers/Reports/file
% Note that paths should be with / slash independently of platform.
\input{Atmosphere}
\input{LGS}
\input{SHWFS}
\input{DM}
\input{Control}
\input{Derivatives}
\input{DF}
\input{Biblio}
\newpage
\printindex % print word index in the end
\end{document}
|
{"hexsha": "2212d256ccd13271129daddde1f52cf43cf52b49", "size": 2083, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Doc/Sim.tex", "max_stars_repo_name": "rconan/OOMAO", "max_stars_repo_head_hexsha": "be6b64e55ddfd55d4925190d2f34f5e3e80a8008", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 31, "max_stars_repo_stars_event_min_datetime": "2015-01-10T15:50:50.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-09T06:06:37.000Z", "max_issues_repo_path": "Doc/Sim.tex", "max_issues_repo_name": "ZhangHeng333/OOMAO", "max_issues_repo_head_hexsha": "be6b64e55ddfd55d4925190d2f34f5e3e80a8008", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2016-07-01T03:39:18.000Z", "max_issues_repo_issues_event_max_datetime": "2019-08-19T01:49:29.000Z", "max_forks_repo_path": "Doc/Sim.tex", "max_forks_repo_name": "ZhangHeng333/OOMAO", "max_forks_repo_head_hexsha": "be6b64e55ddfd55d4925190d2f34f5e3e80a8008", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 18, "max_forks_repo_forks_event_min_datetime": "2015-04-07T12:25:34.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-27T02:03:39.000Z", "avg_line_length": 23.404494382, "max_line_length": 75, "alphanum_fraction": 0.7450792127, "num_tokens": 600}
|
#! /usr/bin/env python3
import numpy as np
from scipy.stats import loguniform, truncnorm, multivariate_normal
import argparse
from sklearn.neighbors import KernelDensity
from sklearn.model_selection import GridSearchCV
parser = argparse.ArgumentParser(description="Fit a Gaussian Mixture Model to posterior samples and sample from the model to generate the next grid")
parser.add_argument("--posterior-file", help="Location of the posterior sample file")
parser.add_argument("--output-file", help="Filename to save grid to")
parser.add_argument("--npts", type=int, default=10000, help="Number of points to use in the grid")
parser.add_argument("--set-limit", nargs=3, action="append", help="Set a parameter's limits to something other than the default, e.g. `--set-limit mej_dyn 0.01, 0.05`")
parser.add_argument("--tempering-exponent", type=float, default=1., help="Exponent (between 0 and 1) to be applied to the likelihoods for the fit. Helps with the first few iterations where very few points have nonzero likelihoods")
parser.add_argument("--n-procs-kde", type=int, default=4, help="Number of parallel processes to use for fitting KDEs when testing for the best bandwidth")
parser.add_argument("--fixed-parameters", nargs="+", help="Parameters that stay fixed to their grid values")
parser.add_argument("--gaussian-prior", action="append", nargs=3, help="Give a parameter a Gaussian prior, specifying the mean and standard deviation; for example, `--gaussian-prior theta 20.0 5.0`")
args = parser.parse_args()
#
# Prior functions
#
def uniform(llim, rlim, x):
return 1. / (rlim - llim)
def log_uniform(llim, rlim, x):
return loguniform.pdf(x, llim, rlim)
def gaussian(llim, rlim, mu, sigma, x):
return truncnorm.pdf(x, llim, rlim, loc=mu, scale=sigma)
ordered_parameters = ["mej_dyn", "vej_dyn", "mej_wind", "vej_wind", "theta"]
fixed_parameters = args.fixed_parameters if args.fixed_parameters is not None else []
# Parameter limits
limits = {
"mej_dyn":[0.001, 0.1],
"vej_dyn":[0.05, 0.3],
"mej_wind":[0.001, 0.1],
"vej_wind":[0.05, 0.3],
"theta":[0., 90.]
}
# If the user specified different limits, change them accordingly
if args.set_limit is not None:
for [_parameter, _llim, _rlim] in args.set_limit:
limits[_parameter] = [float(_llim), float(_rlim)]
# Specify each parameter's prior
prior_functions = {
"mej_dyn":lambda n: log_uniform(*limits["mej_dyn"], n),
"vej_dyn":lambda n: uniform(*limits["vej_dyn"], n),
"mej_wind":lambda n: log_uniform(*limits["mej_wind"], n),
"vej_wind":lambda n: uniform(*limits["vej_wind"], n),
"theta":lambda n: uniform(*limits["theta"], n)
}
# Gaussian priors
if args.gaussian_prior is not None:
for [_parameter, _mu, _sigma] in args.gaussian_prior:
_mu, _sigma = float(_mu), float(_sigma)
a, b = (limits[_parameter][0] - _mu) / _sigma, (limits[_parameter][1] - _mu) / _sigma
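        # Bind the loop variables via default arguments; a plain closure would capture
        # only the values from the final iteration (Python's late binding).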
        prior_functions[_parameter] = lambda x, a=a, b=b, mu=_mu, sigma=_sigma: gaussian(a, b, mu, sigma, x)
# Convert masses to log10(mass), then scale parameters to be in the interval [0, 1]
# This function also removes fixed parameters from the returned array
def transform(parameters):
transformed_parameters = np.empty((parameters.shape[0], len(ordered_parameters) - len(fixed_parameters)))
i = 0
for _parameter in ordered_parameters:
if _parameter in fixed_parameters:
continue
llim, rlim = limits[_parameter]
if _parameter[:3] == "mej":
llim, rlim = np.log10(llim), np.log10(rlim)
transformed_parameters[:,i] = (np.log10(parameters[:,i]) - llim) / (rlim - llim)
else:
transformed_parameters[:,i] = (parameters[:,i] - llim) / (rlim - llim)
i += 1
return transformed_parameters
# Inverts the above transformation
def inverse_transform(parameters):
transformed_parameters = np.empty((parameters.shape[0], len(ordered_parameters) - len(fixed_parameters)))
i = 0
for _parameter in ordered_parameters:
if _parameter in fixed_parameters:
continue
llim, rlim = limits[_parameter]
if _parameter[:3] == "mej":
llim, rlim = np.log10(llim), np.log10(rlim)
transformed_parameters[:,i] = 10.**(parameters[:,i] * (rlim - llim) + llim)
else:
transformed_parameters[:,i] = parameters[:,i] * (rlim - llim) + llim
i += 1
return transformed_parameters
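# Sanity check (hypothetical usage): for in-bounds samples p, inverse_transform(transform(p))
# should reproduce p up to floating point error.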
# Load the posterior samples
samples = np.loadtxt(args.posterior_file)
# Compute sample weights
ln_L = samples[:,0]
ln_p = samples[:,1]
ln_ps = samples[:,2]
ln_L = (ln_L - np.max(ln_L)) * args.tempering_exponent
log_weights = ln_L + ln_p - ln_ps
log_weights -= np.max(log_weights)
weights = np.exp(log_weights)
weights /= np.sum(weights) # normalize the weights
original_parameters = samples[:,3:]
parameters = transform(samples[:,3:])
# Strip out samples with weight NaN
parameters = parameters[np.isfinite(weights) & (weights > 0)]
weights = weights[np.isfinite(weights) & (weights > 0)]
# How many do we have left? Useful for diagnosing what's gone wrong
print("{0} samples with finite, positive weights".format(weights.size))
# We want to fit a KDE to the posterior samples so we can sample from it.
# The fit is quite sensitive to a hyperparameter called bandwidth, which specifies the width of the (in this case, Gaussian) kernel.
# Fortunately, scikit-learn has a built-in way to optimize this sort of hyperparameter, GridSearch.
hyperparameter_grid = GridSearchCV(KernelDensity(kernel="gaussian"), {"bandwidth":np.logspace(-4., 0., 20)}, cv=5, n_jobs=args.n_procs_kde)
hyperparameter_grid.fit(parameters, sample_weight=weights)
bandwidth = hyperparameter_grid.best_estimator_.bandwidth
print("Using bandwidth = {0}".format(bandwidth))
# Since GridSearch splits the data into train and test sets, we don't want to use the trained KDEs from this process.
# Instead, take the optimal bandwidth and retrain with it on the full data set
kde = KernelDensity(kernel="gaussian", bandwidth=bandwidth)
kde.fit(parameters, sample_weight=weights)
# Make a function for sampling new points
sampler = kde.sample
# Make a function for the sampling prior
sampling_prior = lambda x: kde.score_samples(x)
# Now we want to generate new samples, being sure to throw out points that lie outside the bounds
new_samples = np.empty((0, len(ordered_parameters) - len(fixed_parameters)))
llim_array = np.array([limits[_parameter][0] for _parameter in ordered_parameters if _parameter not in fixed_parameters])
rlim_array = np.array([limits[_parameter][1] for _parameter in ordered_parameters if _parameter not in fixed_parameters])
while new_samples.shape[0] < args.npts:
new_samples_here = inverse_transform(sampler(args.npts))
new_samples_here = new_samples_here[np.where(np.all(new_samples_here > llim_array, axis=1) & np.all(new_samples_here < rlim_array, axis=1))]
new_samples = np.append(new_samples, new_samples_here, axis=0)
# Keep only the number of samples requested by the user
new_samples = new_samples[:args.npts]
#
# Generate the new grid
#
grid = np.empty((args.npts, len(ordered_parameters) + 3))
# Put the newly sampled points in the grid, carrying fixed parameters over from their original values
j = 0
for i, _parameter in enumerate(ordered_parameters):
if _parameter in fixed_parameters:
grid[:,i + 3] = original_parameters[:,i][0] # Take the first value from the corresponding column in the original samples rather than copying the whole column,
# since in principle the new grid could have a different number of rows than the input.
else:
grid[:,i + 3] = new_samples[:,j]
j += 1
# The first column, for ln_L, gets filled in later (by generate_posterior_samples.py), so for now make it 0
grid[:,0] = 0.
# The second and third columns are the log prior and log sampling prior, respectively.
# The joint log-prior is the sum of the per-parameter log-priors, so set the column to 0 now and add each parameter's log-prior in the loop.
grid[:,1] = 0.
# Accumulate each parameter's log-prior
for i, _parameter in enumerate(ordered_parameters):
grid[:,1] += np.log(prior_functions[_parameter](grid[:,i + 3]))
# Compute the sampling prior from the Gaussian or KDE (note that both return log probability)
# NOTE regarding normalization: technically, these sampling priors are not normalized since neither the Gaussian fit nor the KDE cares about the bounds of our parameter space.
# However, it doesn't actually matter - these probabilities are used to calculate sample weights that are then normalized, meaning any constant factor is inconsequential.
grid[:,2] = sampling_prior(transform(new_samples)) # Side note for future readers: this line cost me several days of effort searching for a bug.
# I forgot to transform the samples before calculating the sampling prior (since the Gaussian/KDE was fit to transformed data),
# meaning I was getting horrible garbage for a reason that took me way too long to figure out. *sigh*...
# Save the grid
np.savetxt(args.output_file, grid, header=("ln(L) ln(p) ln(ps) " + " ".join(ordered_parameters)))
|
{"hexsha": "b151185a46a9effbbe3c74867bef5dc1f58cfc91", "size": 9334, "ext": "py", "lang": "Python", "max_stars_repo_path": "bin/generate_next_grid.py", "max_stars_repo_name": "liz-champion/lc_fit", "max_stars_repo_head_hexsha": "f86d28781252783240a33a4b8854e9ecefeab27c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bin/generate_next_grid.py", "max_issues_repo_name": "liz-champion/lc_fit", "max_issues_repo_head_hexsha": "f86d28781252783240a33a4b8854e9ecefeab27c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bin/generate_next_grid.py", "max_forks_repo_name": "liz-champion/lc_fit", "max_forks_repo_head_hexsha": "f86d28781252783240a33a4b8854e9ecefeab27c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.1263157895, "max_line_length": 231, "alphanum_fraction": 0.7110563531, "include": true, "reason": "import numpy,from scipy", "num_tokens": 2299}
|
Function KTF2TC(J,M,K,L,R) ! Tit for Two Tats, Col rule
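      ! Note: jold must keep its value between calls; classic Fortran compilers
      ! allocated locals statically, but an explicit SAVE would make this robust.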
if(m .eq. 1) jold = 0
ktf2tc = 0
if ((jold .EQ. 1) .and. (j .eq. 1)) ktf2tc = 1
jold = j
Return
End ! TF2T Col Rule
|
{"hexsha": "a6b0f4284079fd81f4ceb9a9e64c296b2dddf7ab", "size": 229, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/strategies/KTF2TC.f", "max_stars_repo_name": "Axelrod-Python/TourExec", "max_stars_repo_head_hexsha": "498b07394d215ce7d7df5bb7fd3aaa35eeda8317", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/strategies/KTF2TC.f", "max_issues_repo_name": "Axelrod-Python/TourExec", "max_issues_repo_head_hexsha": "498b07394d215ce7d7df5bb7fd3aaa35eeda8317", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2017-07-20T19:57:26.000Z", "max_issues_repo_issues_event_max_datetime": "2020-03-12T17:30:18.000Z", "max_forks_repo_path": "src/strategies/KTF2TC.f", "max_forks_repo_name": "Axelrod-Python/TourExec", "max_forks_repo_head_hexsha": "498b07394d215ce7d7df5bb7fd3aaa35eeda8317", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.625, "max_line_length": 71, "alphanum_fraction": 0.4585152838, "num_tokens": 98}
|
import numpy as np
from sklearn.base import BaseEstimator, clone
from sklearn.metrics import r2_score
from .utils import my_fit
class EraBoostXgbRegressor(BaseEstimator):
def __init__(self, base_estimator=None, num_iterations=3, proportion=0.5, n_estimators=None):
self.base_estimator = base_estimator
self.num_iterations = num_iterations
self.proportion = proportion
self.n_estimators = n_estimators
def fit(self, X, y, sample_weight=None, fit_context=None):
self.n_features_in_ = X.shape[1]
self.base_estimator_ = clone(self.base_estimator)
my_fit(
self.base_estimator_,
X,
y,
sample_weight=sample_weight,
fit_context=fit_context,
)
        for _ in range(self.num_iterations - 1):
y_pred = self.base_estimator_.predict(X)
era_scores = []
indicies = []
n = y_pred.shape[0]
m = 10
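            # Split the predictions into m contiguous "eras" and score each era separately.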
for i in range(m):
idx = np.arange(i * n // m, (i + 1) * n // m)
indicies.append(idx)
y_pred2 = indexing(y_pred, idx)
y2 = indexing(y, idx)
era_scores.append(r2_score(y2, y_pred2))
score_threshold = np.quantile(era_scores, self.proportion)
idx = []
for i in range(m):
if era_scores[i] <= score_threshold:
idx.append(indicies[i])
idx = np.concatenate(idx)
self.base_estimator_.n_estimators += self.n_estimators
booster = self.base_estimator_.get_booster()
self.base_estimator_.fit(indexing(X, idx), indexing(y, idx), xgb_model=booster)
return self
def predict(self, X):
return self.base_estimator_.predict(X)
def indexing(x, idx):
if hasattr(x, 'iloc'):
return x.iloc[idx]
else:
return x[idx]
|
{"hexsha": "0e4cfe48f6a435ed070e2422651d733c3ef665f9", "size": 1936, "ext": "py", "lang": "Python", "max_stars_repo_path": "era_boost_xgb_estimators.py", "max_stars_repo_name": "richmanbtc/bot_snippets", "max_stars_repo_head_hexsha": "a498cdb97f8568c1e05c117462a85b877d7dcf7d", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-12-02T10:02:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T06:34:21.000Z", "max_issues_repo_path": "era_boost_xgb_estimators.py", "max_issues_repo_name": "richmanbtc/bot_snippets", "max_issues_repo_head_hexsha": "a498cdb97f8568c1e05c117462a85b877d7dcf7d", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "era_boost_xgb_estimators.py", "max_forks_repo_name": "richmanbtc/bot_snippets", "max_forks_repo_head_hexsha": "a498cdb97f8568c1e05c117462a85b877d7dcf7d", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2021-12-02T13:25:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T15:36:19.000Z", "avg_line_length": 31.737704918, "max_line_length": 97, "alphanum_fraction": 0.5841942149, "include": true, "reason": "import numpy", "num_tokens": 431}
|
from copy import copy
import numpy as np
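# Expected payoff per bandit: posterior-weighted mean threshold, rescaled by 100 to a win probability.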
def get_evs(posteriors, thresholds):
return np.sum(thresholds * (posteriors / posteriors.sum(axis=1, keepdims=True)),
axis=1) / 100.
#def uncertaintify(reward, ev):
# uncertainty_const = 0.5
# assert 0. <= uncertainty_const <= 1.
# return np.average([reward, ev], weights=[1.-uncertainty_const, uncertainty_const])
class Agent:
def __init__(self, configuration):
self.n_bandits = int(configuration.banditCount)
self.decay_rate = configuration.decayRate
self.sample_res = configuration.sampleResolution
self.total_reward = 0
self.my_posteriors = np.ones((self.n_bandits, self.sample_res + 1)) / (self.sample_res + 1)
self.thresholds = np.repeat(
np.arange(self.sample_res + 1, dtype=np.float64)[None, :],
self.n_bandits,
axis=0
)
self.my_belief_opp_posteriors = np.ones((self.n_bandits, self.sample_res + 1)) / (self.sample_res + 1)
self.opp_belief_my_posteriors = np.ones((self.n_bandits, self.sample_res + 1)) / (self.sample_res + 1)
self.my_act_history = []
self.opp_act_history = []
self.last_thresholds = None
def get_action(self, observation):
if observation.step == 0:
            return int(np.random.randint(self.n_bandits))
else:
r = observation.reward - self.total_reward
self.total_reward = observation.reward
self.my_act_history.append(observation.lastActions[observation.agentIndex])
self.opp_act_history.append(observation.lastActions[1 - observation.agentIndex])
if observation.step >= 2:
# Update self.my_belief_opp_posteriors and self.opp_belief_my_posteriors
self.update_my_belief()
self.update_opp_belief()
self.last_thresholds = copy(self.thresholds)
self.my_posteriors, self.thresholds = self.get_updated_posteriors_thresholds(
self.my_posteriors,
self.thresholds,
[self.my_act_history[-1], self.opp_act_history[-1]],
[r, None]
)
assert False, "This EV calculation is broken and doesn't account for beliefs"
evs = get_evs(self.my_posteriors, self.thresholds)
return np.random.choice(np.arange(self.n_bandits)[evs == np.max(evs)]).item()
def get_updated_belief(self, player_idx):
assert player_idx in (0, 1)
if player_idx == 0:
opp_act_history = self.opp_act_history
opp_posteriors = self.my_belief_opp_posteriors
else:
opp_act_history = self.my_act_history
opp_posteriors = self.opp_belief_my_posteriors
# Make counterfactual assumptions about the reward
loss_posteriors, _ = self.get_updated_posteriors_thresholds(
opp_posteriors, self.last_thresholds, opp_act_history[-2], 0.
)
loss_evs = get_evs(loss_posteriors, self.last_thresholds)
win_posteriors, _ = self.get_updated_posteriors_thresholds(
opp_posteriors, self.last_thresholds, opp_act_history[-2], 1.
)
win_evs = get_evs(win_posteriors, self.last_thresholds)
loss = loss_evs[opp_act_history[-1]] == np.max(loss_evs)
win = win_evs[opp_act_history[-1]] == np.max(win_evs)
        if (loss and win) or (not loss and not win):
return opp_posteriors
elif loss:
return loss_posteriors
#return self.get_updated_posteriors_thresholds(
# opp_posteriors, self.last_thresholds, opp_act_history[-2],
# uncertaintify(0., get_evs(opp_posteriors, self.last_thresholds)[opp_act_history[-1]]).item()
#)[0]
else:
return win_posteriors
#return self.get_updated_posteriors_thresholds(
# opp_posteriors, self.last_thresholds, opp_act_history[-2],
# uncertaintify(1., get_evs(opp_posteriors, self.last_thresholds)[opp_act_history[-1]]).item()
#)[0]
def update_my_belief(self):
self.my_belief_opp_posteriors = self.get_updated_belief(0)
def update_opp_belief(self):
self.opp_belief_my_posteriors = self.get_updated_belief(1)
def get_updated_posteriors_thresholds(self, posteriors, thresholds, actions, rewards):
if type(actions) == int and (rewards is None or type(rewards) == float):
actions = (actions,)
rewards = (rewards,)
else:
assert len(actions) == len(rewards)
posteriors = copy(posteriors)
thresholds = copy(thresholds)
for act, r in zip(actions, rewards):
if r is not None:
likelihood = ((np.ceil(thresholds[act]) / self.sample_res) ** r) * (
(1 - np.ceil(thresholds[act]) / self.sample_res) ** (1 - r))
posteriors[act] = posteriors[act] * likelihood
posteriors[act] = posteriors[act] / posteriors[act].sum()
thresholds[act] = thresholds[act] * self.decay_rate
return posteriors, thresholds
curr_agent = None
def agent(observation, configuration):
global curr_agent
if curr_agent is None:
curr_agent = Agent(configuration)
return curr_agent.get_action(observation)
|
{"hexsha": "a91087a6b967d516ffc41d2dfcc8136e1b8e949b", "size": 5368, "ext": "py", "lang": "Python", "max_stars_repo_path": "handcrafted_agents/discrete_bayesian_greedy.py", "max_stars_repo_name": "IsaiahPressman/Kaggle_Santa_2020", "max_stars_repo_head_hexsha": "ff5c6aa78dbe234cef338f4c721cc30d7dbc3df8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "handcrafted_agents/discrete_bayesian_greedy.py", "max_issues_repo_name": "IsaiahPressman/Kaggle_Santa_2020", "max_issues_repo_head_hexsha": "ff5c6aa78dbe234cef338f4c721cc30d7dbc3df8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "handcrafted_agents/discrete_bayesian_greedy.py", "max_forks_repo_name": "IsaiahPressman/Kaggle_Santa_2020", "max_forks_repo_head_hexsha": "ff5c6aa78dbe234cef338f4c721cc30d7dbc3df8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.9770992366, "max_line_length": 110, "alphanum_fraction": 0.6369225037, "include": true, "reason": "import numpy", "num_tokens": 1301}
|
import numpy as np
from skimage import transform
import gym
from gym.spaces import Box
from gym.wrappers import FrameStack, GrayScaleObservation, TransformObservation
from nes_py.wrappers import JoypadSpace
from gym_super_mario_bros.actions import SIMPLE_MOVEMENT
class ResizeObservation(gym.ObservationWrapper):
def __init__(self, env, shape):
super().__init__(env)
if isinstance(shape, int):
self.shape = (shape, shape)
else:
self.shape = tuple(shape)
obs_shape = self.shape + self.observation_space.shape[2:]
self.observation_space = Box(low=0, high=255, shape=obs_shape, dtype=np.uint8)
def observation(self, observation):
resize_obs = transform.resize(observation, self.shape)
# cast float back to uint8
resize_obs *= 255
resize_obs = resize_obs.astype(np.uint8)
return resize_obs
class SkipFrame(gym.Wrapper):
def __init__(self, env, skip):
"""Return only every `skip`-th frame"""
super().__init__(env)
self._skip = skip
def step(self, action):
"""Repeat action, and sum reward"""
total_reward = 0.0
done = False
for i in range(self._skip):
# Accumulate reward and repeat the same action
obs, reward, done, info = self.env.step(action)
total_reward += reward
if done:
break
return obs, total_reward, done, info
class CustomReward(gym.Wrapper):
def __init__(self, env):
        super(CustomReward, self).__init__(env)
self.observation_space = Box(low=0, high=255, shape=(1, 84, 84))
self.curr_score = 0
self.curr_x_pos = 0
self.farthest_x_pos = 0
self.not_forward_count_ = 0
def stay_at_the_origin(self, info):
return (info['x_pos'] < 30) and (info['time'] < 390)
def did_not_go_forward(self, info):
if (info['x_pos'] - self.curr_x_pos < 0):
self.not_forward_count_ += 1
return True
return False
def loginfo(self, reward, info):
if info['life'] == 255:
life = 'Game Over'
else:
life = f'Alive <{info["life"]}>'
print(f"* Log@ {info['time']}, Status: {life}")
print(f"* Reward: {reward/10.}\n* Coin: {info['coins']} \n* Score: {info['score']}\n* (X, Y):({info['x_pos']}, {info['y_pos']})")
def step(self, action):
state, reward, done, info = self.env.step(action)
reward += (info["score"] - self.curr_score) / 10.
self.curr_score = info["score"]
if info['x_pos'] > self.farthest_x_pos:
reward += 30
self.farthest_x_pos = info['x_pos']
if self.stay_at_the_origin(info):
reward -= 20
if self.did_not_go_forward(info):
if self.not_forward_count_ == 4:
reward -= 50
self.not_forward_count_ = 0
self.curr_x_pos = info['x_pos']
if done:
if info['time'] == 0:
reward -= 50
if info['x_pos'] < 200:
reward -= 100
else:
reward -= 50
if info["flag_get"]:
reward += 100
else:
reward -= 30
self.farthest_x_pos = 0
reward += info['x_pos'] / (300 - info['time'])
self.loginfo(reward/10, info)
return state, reward / 10., done, info
def reset(self):
self.curr_score = 0
return self.env.reset()
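# Wrapper pipeline: joypad action set -> 4-frame skip -> grayscale -> 84x84 resize
# -> [0, 1] scaling -> 4-frame stack -> shaped reward.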
def apply_wrapper_env(env):
env = JoypadSpace(env, SIMPLE_MOVEMENT)
env = SkipFrame(env, skip=4)
env = GrayScaleObservation(env, keep_dim=False)
env = ResizeObservation(env, shape=84)
env = TransformObservation(env, f=lambda x: x / 255.)
env = FrameStack(env, num_stack=4)
    env = CustomReward(env)
return env
|
{"hexsha": "290c85e37f72f42fa4809e56533fe108f405c3af", "size": 3950, "ext": "py", "lang": "Python", "max_stars_repo_path": "a2c/wrappers.py", "max_stars_repo_name": "plusoneee/rl-a2c-supermario", "max_stars_repo_head_hexsha": "c2d4ab6c1f0a162b2c66f66835300f1f91de9f8b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "a2c/wrappers.py", "max_issues_repo_name": "plusoneee/rl-a2c-supermario", "max_issues_repo_head_hexsha": "c2d4ab6c1f0a162b2c66f66835300f1f91de9f8b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "a2c/wrappers.py", "max_forks_repo_name": "plusoneee/rl-a2c-supermario", "max_forks_repo_head_hexsha": "c2d4ab6c1f0a162b2c66f66835300f1f91de9f8b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.6, "max_line_length": 137, "alphanum_fraction": 0.5703797468, "include": true, "reason": "import numpy", "num_tokens": 1005}
|
import numpy as np
from pathlib import Path
import pandas as pd
from grid_simulations import MacroGen
if __name__ == "__main__":
np.random.seed(65432)
macgen = MacroGen()
macgen._base_geometry_cmd = "/control/execute setup_normal_run.mac"
macgen.run_macs = [#"run696keV.mac",
# "run1779keV.mac", "run2838keV.mac",
"runEx4617keV.mac",
"run4440keV.mac"
]
# print(macgen.run())
# nominal thicknesses in cm
dnominal = {'front': 0.2, 'radial': 0.1, 'reflector': 0.1,
'lidhalf': 0.1, 'det': 16}
dthick = {'front': 0.2, 'radial': 0.1, 'reflector': 0.1,
'lidhalf': 0.2, 'det': 16}
dsaintgb = {'front': 0.08, 'radial': 0.08, 'reflector': 0.22,
'lidhalf': 0.1, 'det': 16}
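    # Three detector geometry variants (thicknesses in cm): nominal, a thicker lid
    # half, and values presumably matching a Saint-Gobain style crystal.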
grid = [dnominal, dthick, dsaintgb]
print("Items to calculate: ", len(grid))
fnbase = Path("inbeam_grid_macs")
fnbase.mkdir(exist_ok=True)
for i, pars in enumerate(grid):
# print(f"Simulating gridpoint {i}")
dtmp = pars
macgen.outname_base = f"inbeam_grid_{i}_"
macgen.dgeometry = dtmp
macro = macgen.save(fnbase / f"grid_{i}.mac")
# create summary file with commands to run
# due to concerns on calculation time we may not calculate all values,
# but start with a random selection
indices = np.arange(len(grid))
# np.random.shuffle(indices)
cmds = [f"./OCL inbeam_grid_macs/grid_{i}.mac" for i in indices]
cmd_string = "\n".join(*[cmds])
fn_sum = Path("inbeam_grid_cmd_all.txt")
fn_sum.write_text(cmd_string)
# grid_out = np.column_stack((np.arange(len(grid)), grid))
# grid_out = pd.DataFrame(grid_out, columns=["grid_point", *dnominal.keys()])
# grid_out = grid_out.astype({"grid_point": 'int'}, copy=False)
# grid_out.to_pickle("inbeam_grid_inbeam.pickle")
|
{"hexsha": "b9563140e7e3b0b436a65de8de9501d81be4f5e0", "size": 1918, "ext": "py", "lang": "Python", "max_stars_repo_path": "HPCscripts/grid_inbeam.py", "max_stars_repo_name": "vetlewi/AFRODITE", "max_stars_repo_head_hexsha": "4aa42184c0f94613e7e2b219bc8aca371094143e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "HPCscripts/grid_inbeam.py", "max_issues_repo_name": "vetlewi/AFRODITE", "max_issues_repo_head_hexsha": "4aa42184c0f94613e7e2b219bc8aca371094143e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-05-04T10:52:08.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-04T10:52:08.000Z", "max_forks_repo_path": "HPCscripts/grid_inbeam.py", "max_forks_repo_name": "vetlewi/AFRODITE", "max_forks_repo_head_hexsha": "4aa42184c0f94613e7e2b219bc8aca371094143e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.5185185185, "max_line_length": 81, "alphanum_fraction": 0.6063607925, "include": true, "reason": "import numpy", "num_tokens": 564}
|
import dash_resumable_upload
import dash
import dash_html_components as html
from dash.dependencies import Input, Output
import base64
from os import listdir,system,path,remove
import dash_table_experiments as dt
import dash_core_components as dcc
from os.path import isfile, join
import shutil
import time
import core
import io
import plotly.graph_objs as go
import pandas as pd
import numpy as np
#try:
# system("rm -r uploads")
#except:
# pass
directory = './uploads'
if path.exists(directory):
    shutil.rmtree(directory)
app = dash.Dash('')
#external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css', 'https://codepen.io/rmarren1/pen/eMQKBW.css']
#app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
colors = {
'background': '#ECF0F1',
'text': '#800000'
}
image_filename = 'Logo.png' # replace with your own image
encoded_image = base64.b64encode(open(image_filename, 'rb').read()).decode('ascii')
dash_resumable_upload.decorate_server(app.server, "uploads")
app.scripts.config.serve_locally = True # Uploaded to npm, this can work online now too.
app.css.append_css({
"external_url": "https://codepen.io/rmarren1/pen/eMQKBW.css"
})
app.layout = html.Div(style={'backgroundColor': colors['background']}, children=[
html.H1(
children='VoltCycle',
style={
'textAlign': 'center',
'color': colors['text']
}
),
html.Div([
html.Img(draggable=True, style={
'height': '20%',
'width': '20%'
}, src='data:image/png;base64,{}'.format(encoded_image))
], style={'textAlign': 'center'}),
html.H2(children='A Tool for Accelerating the Analysis of Cyclic Voltammetry Data', style={
'textAlign': 'center',
'color': colors['text']
}),
html.Br(),
html.Div([
html.Link(rel='stylesheet', href='https://codepen.io/rmarren1/pen/eMQKBW.css'),
dash_resumable_upload.Upload(
id='upload',
maxFiles=1,
            maxFileSize=1024*1024*1000,  # 1000 MB
service="/upload_resumable",
textLabel="Upload Files",
startButton=False)
]),
html.Div(id='output_uploaded_file'),
html.Br(),
html.H2(
children='Select File to Analyze',
style={
'textAlign': 'center',
'color': colors['text']
}
),
html.Div([
dcc.Dropdown(id='files_dropdown')
],style={'width': '70%', 'height': '40', 'display': 'inline-block', 'textAlign': 'center'}
),
html.Div([
html.Br(),
dcc.Graph(id='CV_graph'),
],style={
'columnCount': 1,
'width':'70%',
'height': '80%',
}
),
html.Div([
html.Br(),
html.H2(
children='Redox Properties',
style={
'color': colors['text']
}
),
dt.DataTable(
rows=[{}],
row_selectable=True,
filterable=True,
selected_row_indices=[],
id='datatable_initial'
),
html.Div(id='selected-indexes'),
],
style={
'width': '98%',
#'height': '60px',
#'lineHeight': '60px',
'margin': '10px'
},
)
])
def parse_contents(value):
if path.exists(directory):
        lines2 = open("uploads/%s" % (value), 'rb').read().decode('utf-8').split('\n')
dict_1, n_cycle = core.read_file_dash(lines2)
#print(n_cycle)
df = core.data_frame(dict_1, 1)
return df
def data_analysis(df):
results_dict = {}
# df = main.data_frame(dict_1,1)
x = df['Potential']
y = df['Current']
# Peaks are here [list]
peak_index = core.peak_detection_fxn(y)
# Split x,y to get baselines
x1,x2 = core.split(x)
y1,y2 = core.split(y)
y_base1 = core.linear_background(x1,y1)
y_base2 = core.linear_background(x2,y2)
# Calculations based on baseline and peak
values = core.peak_values(x,y)
Et = values[0]
Eb = values[2]
dE = core.del_potential(x,y)
half_E = min(Et,Eb) + core.half_wave_potential(x,y)
ia = core.peak_heights(x,y)[0]
ic = core.peak_heights(x,y)[1]
ratio_i = core.peak_ratio(x,y)
results_dict['Peak Current Ratio'] = ratio_i
results_dict['Ipc (A)'] = ic
results_dict['Ipa (A)'] = ia
results_dict['Epc (V)'] = Eb
results_dict['Epa (V)'] = Et
results_dict['∆E (V)'] = dE
results_dict['Redox Potential (V)'] = half_E
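    # heuristic threshold: an ideally reversible one-electron couple shows a
    # peak separation near 0.059 V, so separations above 0.3 V are treated
    # as irreversible here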
if dE>0.3:
results_dict['Reversible'] = 'No'
else:
results_dict['Reversible'] = 'Yes'
if half_E>0 and 'Yes' in results_dict.values():
results_dict['Type'] = 'Catholyte'
elif 'Yes' in results_dict.values():
results_dict['Type'] = 'Anolyte'
return results_dict, x1, x2, y1, y2, y_base1, y_base2, peak_index
#return results_dict
@app.callback(Output('output_uploaded_file', 'children'),
[Input('upload', 'fileNames')])
def display_files(fileNames):
if fileNames is not None:
return html.Ul([html.Li(html.A(x), style={'textAlign': 'center'}) for x in fileNames])
return html.Ul(html.Li("No Files Uploaded Yet!"), style={'textAlign': 'center'})
@app.callback(Output('files_dropdown', 'options'),
[Input('upload','fileNames')])
def dropdown_files(fileNames):
mypath='./uploads/'
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
return [{'label': i, 'value': i} for i in onlyfiles]
@app.callback( #update charge datatable
Output('datatable_initial', 'rows'),
[Input('files_dropdown', 'value')])
def update_table1(value):
df = parse_contents(value)
#print(df.head())
#final_dict = data_analysis(df)
final_dict, x_1, x_2, y_1, y_2, ybase_1, ybase_2, peak_i = data_analysis(df)
df1=pd.DataFrame.from_records([final_dict])
return df1.to_dict('records')
@app.callback(
Output('CV_graph', 'figure'),
[Input('files_dropdown', 'value')])
def update_figure(value):
df = parse_contents(value)
final_dict, x_1, x_2, y_1, y_2, ybase_1, ybase_2, peak_i = data_analysis(df)
trace1 = go.Scatter(
x = df['Potential'],
y = df['Current'],
marker={
'size': 15,
'opacity': 0.5,
'color' : '#F00000'
})
trace2 = go.Scatter(
x = x_1,
y = ybase_1,
mode = 'lines',
line = dict(
            color = ('rgb(0, 0, 255)'),
width = 3,
dash = 'dash')
)
trace3 = go.Scatter(
x = x_2,
y = ybase_2,
mode = 'lines',
line = dict(
            color = ('rgb(0, 0, 255)'),
width = 3,
dash = 'dash')
)
trace4 = go.Scatter(
x = np.array(x_1[peak_i[1]]),
y = np.array(y_1[peak_i[1]]),
mode = 'markers',
marker={
'size': 35,
'opacity': 0.5,
'color' : '#000080'
})
trace5 = go.Scatter(
x = np.array(x_2[peak_i[0]]),
y = np.array(y_2[peak_i[0]]),
mode = 'markers',
marker={
'size': 35,
'opacity': 0.5,
'color' : '#000080'
})
data = [trace1, trace2, trace3, trace4, trace5]
return {
'data': data,
#'layout' : {'Dash'}
'layout': go.Layout(
xaxis={'title': 'Voltage (V)'},
yaxis={'title': 'Current (A)'},
margin={'l': 40, 'b': 40, 't': 10, 'r': 10},
# #legend={'x': 0, 'y': 1},
showlegend = False,
hovermode='closest',
)
}
# return {
# 'data': [
# {'x': [x1[peak_index[1]]], 'y': [x1[peak_index[1]]], 'type': 'point'},
# #{'x': [1, 2, 3], 'y': [2, 4, 5], 'type': 'bar', 'name': u'Montréal'},
# ],
# }
if __name__ == '__main__':
app.run_server(debug=True)
|
{"hexsha": "3fa60b77eda35ff0038e41a24311506fa34c5593", "size": 8580, "ext": "py", "lang": "Python", "max_stars_repo_path": "app/app.py", "max_stars_repo_name": "sabiharustam/TBD5", "max_stars_repo_head_hexsha": "2dafad06e866dabc7f16c51d8961e905991a1287", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-07-01T17:42:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-04T19:59:45.000Z", "max_issues_repo_path": "app/app.py", "max_issues_repo_name": "sabiharustam/TBD5", "max_issues_repo_head_hexsha": "2dafad06e866dabc7f16c51d8961e905991a1287", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2019-03-18T03:20:04.000Z", "max_issues_repo_issues_event_max_datetime": "2019-03-22T16:28:13.000Z", "max_forks_repo_path": "app/app.py", "max_forks_repo_name": "sabiharustam/TBD5", "max_forks_repo_head_hexsha": "2dafad06e866dabc7f16c51d8961e905991a1287", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-03-22T15:38:24.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-21T01:57:40.000Z", "avg_line_length": 28.7919463087, "max_line_length": 117, "alphanum_fraction": 0.5221445221, "include": true, "reason": "import numpy", "num_tokens": 2287}
|
[STATEMENT]
lemma lift_pref_profile_permute_agents:
assumes "\<pi> permutes agents" "agents \<subseteq> agents'"
shows "lift_pref_profile agents alts agents' alts' (R \<circ> \<pi>) =
lift_pref_profile agents alts agents' alts' R \<circ> \<pi>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. lift_pref_profile agents alts agents' alts' (R \<circ> \<pi>) = lift_pref_profile agents alts agents' alts' R \<circ> \<pi>
[PROOF STEP]
using assms permutes_subset[OF assms]
[PROOF STATE]
proof (prove)
using this:
\<pi> permutes agents
agents \<subseteq> agents'
\<pi> permutes agents'
goal (1 subgoal):
1. lift_pref_profile agents alts agents' alts' (R \<circ> \<pi>) = lift_pref_profile agents alts agents' alts' R \<circ> \<pi>
[PROOF STEP]
by (auto simp add: lift_pref_profile_def o_def permutes_in_image)
|
{"llama_tokens": 308, "file": "Fishburn_Impossibility_Social_Choice_Functions", "length": 2}
|
import wave
import numpy as np
import pygame
##### Parameters #####
SAMPLERATE = 48000 # Hz
AMPLITUDE = 10000
NCHANNELS = 1 # mono: one channel, played identically through both speakers
SOUNDLEN = .4
SOUNDFREQ = 800
##### Constructing tone #####
# calculate the total amount of cycles in the SOUNDLEN
ncycles = SOUNDLEN * SOUNDFREQ
# calculate the total amount of samples per SOUNDLEN
nsamples = SOUNDLEN * SAMPLERATE
# calculate samples per cycle
spc = nsamples / ncycles
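# worked example with the parameters above: ncycles = 0.4 s * 800 Hz = 320,
# nsamples = 0.4 s * 48000 Hz = 19200, so spc = 19200 / 320 = 60 samples/cycle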
# stepsize: distance between samples within a cycle
stepsize = (2*np.pi) / spc
# create a range of numbers between 0 and 2*pi
x = np.arange(0, 2*np.pi, stepsize)
# make a sine wave out of the range
sine = np.sin(x)
# increase the amplitude
tone = sine * AMPLITUDE
# repeat the sine wave for the length of the tone
tone = np.tile(tone, int(ncycles))
##### MIXING IT ALL TOGETHER #####
# initialise mixer module (it requires the sampling rate and num of channels)
pygame.mixer.init(frequency=SAMPLERATE, channels=NCHANNELS)
# create a pygame Sound object from the tone vector
tone = pygame.mixer.Sound(tone.astype('int16'))
# open new wave file objects
tonefile = wave.open('test_tone.wav', 'w')
# set parameters for pure tone
tonefile.setframerate(SAMPLERATE)
tonefile.setnchannels(NCHANNELS)
tonefile.setsampwidth(2) # sample width in bytes: 2 bytes per sample = 16-bit
# get buffers
tonebuffer = tone.get_raw()
# write raw buffer to the wave file
tonefile.writeframesraw(tonebuffer)
# close the wave file
tonefile.close()
|
{"hexsha": "1793f2e6fb6bd0e44f4811ff88d5ba116e476db3", "size": 1550, "ext": "py", "lang": "Python", "max_stars_repo_path": "plain_tone.py", "max_stars_repo_name": "Stiltstiltstilts/Music-Language-Tapping", "max_stars_repo_head_hexsha": "13cf607affdb1025295b0153085c7c4d12e84a3b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "plain_tone.py", "max_issues_repo_name": "Stiltstiltstilts/Music-Language-Tapping", "max_issues_repo_head_hexsha": "13cf607affdb1025295b0153085c7c4d12e84a3b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "plain_tone.py", "max_forks_repo_name": "Stiltstiltstilts/Music-Language-Tapping", "max_forks_repo_head_hexsha": "13cf607affdb1025295b0153085c7c4d12e84a3b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.4098360656, "max_line_length": 78, "alphanum_fraction": 0.7116129032, "include": true, "reason": "import numpy", "num_tokens": 411}
|
#ifndef DART_CPP14_SHIM_H
#define DART_CPP14_SHIM_H
// Figure out what compiler we have.
#if defined(__clang__)
#define DART_USING_CLANG 1
#elif defined(__GNUC__) || defined(__GNUG__)
#define DART_USING_GCC 1
#elif defined(_MSC_VER)
#define DART_USING_MSVC 1
#endif
#ifdef DART_USING_MSVC
#define _CRT_SECURE_NO_WARNINGS 1
#endif
#include <ctime>
#include <cstring>
#if DART_USING_CLANG && __clang_major__ >= 5 && __clang_major__ <= 7
// Side-step a disagreement between clang (5-7) and GNU std::variant.
#define DART_USE_MPARK_VARIANT 1
#elif defined(__APPLE__)
// Side-step AppleClang misreporting compiler capabilities on macos 10.13 and below.
#include <availability.h>
#ifndef __MAC_10_14
#define DART_USE_MPARK_VARIANT 1
#endif
#endif
// MSVC doesn't have a signed size type, obviously
#if DART_USING_MSVC
#include <BaseTsd.h>
using ssize_t = SSIZE_T;
#endif
// Make sure we have a fallback for compilers that don't support attributes at all.
#ifndef __has_cpp_attribute
#define __has_cpp_attribute(name) 0
#endif
// Figure out how to declare things [[nodiscard]]
#if __has_cpp_attribute(gnu::warn_unused_result)
#define DART_NODISCARD [[gnu::warn_unused_result]]
#elif __has_cpp_attribute(nodiscard)
#define DART_NODISCARD [[nodiscard]]
#else
#define DART_NODISCARD
#endif
#define DART_STRINGIFY_IMPL(x) #x
#define DART_STRINGIFY(x) DART_STRINGIFY_IMPL(x)
#if DART_USING_MSVC
#define DART_UNLIKELY(x) !!(x)
#else
#define DART_UNLIKELY(x) __builtin_expect(!!(x), 0)
#endif
#ifndef NDEBUG
#if DART_USING_MSVC
#include <io.h>
#define DART_WRITE(fd, ptr, bytes) _write(fd, ptr, static_cast<unsigned int>(bytes))
#define DART_STDERR_FILENO _fileno(stderr)
#else
#include <unistd.h>
#define DART_WRITE(fd, ptr, bytes) write(fd, ptr, bytes)
#define DART_STDERR_FILENO STDERR_FILENO
#endif
/**
* @brief
* Macro customizes functionality usually provided by assert().
*
* @details
* Not strictly necessary, but tries to provide a bit more context and
* information as to why I just murdered the user's program (in production, no doubt).
*
* @remarks
* Don't actually know if Doxygen lets you document macros, guess we'll see.
*/
#define DART_ASSERT(cond) \
if (DART_UNLIKELY(!(cond))) { \
auto& msg = "dart::packet has detected fatal memory corruption and cannot continue execution.\n" \
"\"" DART_STRINGIFY(cond) "\" violated.\nSee " __FILE__ ":" DART_STRINGIFY(__LINE__) "\n"; \
int errfd = DART_STDERR_FILENO; \
ssize_t spins {0}, written {0}, total {sizeof(msg)}; \
do { \
ssize_t ret = DART_WRITE(errfd, msg + written, total - written); \
if (ret >= 0) written += ret; \
} while (written != total && spins++ < 16); \
std::abort(); \
}
#else
#define DART_ASSERT(cond)
#endif
// Conditionally include different implementations of different data structures
// depending on what standard we have access to.
#if __cplusplus >= 201703L && !DART_USE_MPARK_VARIANT
#define DART_HAS_CPP17 1
#include <variant>
#include <optional>
#include <string_view>
#else
#define DART_HAS_CPP14 1
#include "support/variant.h"
#include "support/optional.h"
#include "support/string_view.h"
#endif
// We support two versions of GSL, but unfortunately they don't agree about
// the template signature of gsl::span (why would we expect two production-ready
// implementations of the same exact specification to agree about something like
// that), so we have to declare some of our partial specializations differently
// depending on what we included.
#include <gsl/gsl>
#ifndef gsl_lite_VERSION
#define DART_USING_GSL
#else
#define DART_USING_GSL_LITE
#endif
// Conditionally pull each of those types into our namespace.
namespace dart {
namespace shim {
#if DART_USING_MSVC
inline int aligned_alloc(void** memptr, size_t alignment, size_t size) {
void* ret = _aligned_malloc(size, alignment);
if (ret) {
*memptr = ret;
return 0;
} else {
return -1;
}
}
inline void aligned_free(void* ptr) {
_aligned_free(ptr);
}
inline void gmtime(time_t const* src, std::tm* out) {
gmtime_s(out, src);
}
inline time_t timegm(std::tm* src) {
return _mkgmtime(src);
}
#else
inline int aligned_alloc(void** memptr, size_t alignment, size_t size) {
return posix_memalign(memptr, alignment, size);
}
inline void aligned_free(void* ptr) {
free(ptr);
}
inline void gmtime(time_t const* src, std::tm* out) {
gmtime_r(src, out);
}
inline time_t timegm(std::tm* src) {
return ::timegm(src);
}
#endif
#ifdef DART_HAS_CPP17
// Pull in names of types.
using std::optional;
using std::nullopt_t;
using std::variant;
using std::monostate;
using std::string_view;
using std::basic_string_view;
// Pull in constants.
static inline constexpr auto nullopt = std::nullopt;
// Pull in non-member helpers.
using std::get;
using std::visit;
using std::get_if;
using std::launder;
using std::holds_alternative;
using std::variant_alternative;
using std::variant_alternative_t;
// Define a way to compose lambdas.
template <class... Ls>
struct compose : Ls... {
using Ls::operator ()...;
};
template <class... Ls>
compose(Ls...) -> compose<Ls...>;
template <class... Ls>
auto compose_together(Ls&&... lambdas) {
return compose {std::forward<Ls>(lambdas)...};
}
#else
// Pull in names of types.
using dart::optional;
using dart::nullopt_t;
using dart::string_view;
using dart::basic_string_view;
using mpark::variant;
using mpark::monostate;
// Pull in non-member helpers.
using mpark::get;
using mpark::visit;
using mpark::get_if;
using mpark::holds_alternative;
using mpark::variant_alternative;
using mpark::variant_alternative_t;
// Pull in constants.
static constexpr auto nullopt = dart::nullopt;
// Define a way to compose lambdas.
template <class... Ls>
struct compose;
template <class L, class... Ls>
struct compose<L, Ls...> : L, compose<Ls...> {
compose(L l, Ls... the_rest) : L(std::move(l)), compose<Ls...>(std::move(the_rest)...) {}
using L::operator ();
using compose<Ls...>::operator ();
};
template <class L>
struct compose<L> : L {
compose(L l) : L(std::move(l)) {}
using L::operator ();
};
template <class... Ls>
auto compose_together(Ls&&... lambdas) {
return compose<std::decay_t<Ls>...> {std::forward<Ls>(lambdas)...};
}
#endif
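    // Minimal usage sketch for compose_together (illustrative only; the
    // variant and handler names are hypothetical):
    //   auto visitor = shim::compose_together(
    //       [](int i) { /* handle ints */ },
    //       [](std::string const& s) { /* handle strings */ });
    //   shim::visit(visitor, some_variant);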
}
}
#endif
|
{"hexsha": "74236f85a8436bd02b1ec963bf21586dfe81907b", "size": 7298, "ext": "h", "lang": "C", "max_stars_repo_path": "include/dart/shim.h", "max_stars_repo_name": "Cfretz244/libdart", "max_stars_repo_head_hexsha": "987b01aa1f11455ac6aaf89f8e60825e92e6ec25", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 85.0, "max_stars_repo_stars_event_min_datetime": "2019-05-09T19:12:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-07T16:31:55.000Z", "max_issues_repo_path": "include/dart/shim.h", "max_issues_repo_name": "Cfretz244/libdart", "max_issues_repo_head_hexsha": "987b01aa1f11455ac6aaf89f8e60825e92e6ec25", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 10.0, "max_issues_repo_issues_event_min_datetime": "2019-05-09T22:37:27.000Z", "max_issues_repo_issues_event_max_datetime": "2020-03-29T03:25:16.000Z", "max_forks_repo_path": "include/dart/shim.h", "max_forks_repo_name": "Cfretz244/libdart", "max_forks_repo_head_hexsha": "987b01aa1f11455ac6aaf89f8e60825e92e6ec25", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 10.0, "max_forks_repo_forks_event_min_datetime": "2019-05-11T08:05:10.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-11T11:05:17.000Z", "avg_line_length": 29.3092369478, "max_line_length": 111, "alphanum_fraction": 0.6208550288, "num_tokens": 1792}
|
import torch
import numpy as np
from queue import Queue
from utils import load_obj, export
import copy
from pathlib import Path
import pickle
from pytorch3d.ops.knn import knn_gather, knn_points
class Mesh:
def __init__(self, file, hold_history=False, vs=None, faces=None, device='cpu', gfmm=True):
if file is None:
return
self.filename = Path(file)
self.vs = self.v_mask = self.edge_areas = None
self.edges = self.gemm_edges = self.sides = None
self.device = device
if vs is not None and faces is not None:
self.vs, self.faces = vs.cpu().numpy(), faces.cpu().numpy()
self.scale, self.translations = 1.0, np.zeros(3,)
else:
self.vs, self.faces = load_obj(file)
self.normalize_unit_bb()
self.vs_in = copy.deepcopy(self.vs)
self.v_mask = np.ones(len(self.vs), dtype=bool)
self.build_gemm()
self.history_data = None
if hold_history:
self.init_history()
if gfmm:
self.gfmm = self.build_gfmm() #TODO get rid of this DS
else:
self.gfmm = None
if type(self.vs) is np.ndarray:
self.vs = torch.from_numpy(self.vs)
if type(self.faces) is np.ndarray:
self.faces = torch.from_numpy(self.faces)
self.vs = self.vs.to(self.device)
self.faces = self.faces.to(self.device).long()
self.area, self.normals = self.face_areas_normals(self.vs, self.faces)
def build_gemm(self):
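        # Builds the edge-based connectivity (MeshCNN-style "gemm" structure):
        # self.edges holds the two vertex ids of each edge, self.gemm_edges the
        # four one-ring neighbor edges of each edge (two per incident face),
        # and self.sides records where this edge sits in each neighbor's list.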
self.ve = [[] for _ in self.vs]
self.vei = [[] for _ in self.vs]
edge_nb = []
sides = []
edge2key = dict()
edges = []
edges_count = 0
nb_count = []
for face_id, face in enumerate(self.faces):
faces_edges = []
for i in range(3):
cur_edge = (face[i], face[(i + 1) % 3])
faces_edges.append(cur_edge)
for idx, edge in enumerate(faces_edges):
edge = tuple(sorted(list(edge)))
faces_edges[idx] = edge
if edge not in edge2key:
edge2key[edge] = edges_count
edges.append(list(edge))
edge_nb.append([-1, -1, -1, -1])
sides.append([-1, -1, -1, -1])
self.ve[edge[0]].append(edges_count)
self.ve[edge[1]].append(edges_count)
self.vei[edge[0]].append(0)
self.vei[edge[1]].append(1)
nb_count.append(0)
edges_count += 1
for idx, edge in enumerate(faces_edges):
edge_key = edge2key[edge]
edge_nb[edge_key][nb_count[edge_key]] = edge2key[faces_edges[(idx + 1) % 3]]
edge_nb[edge_key][nb_count[edge_key] + 1] = edge2key[faces_edges[(idx + 2) % 3]]
nb_count[edge_key] += 2
for idx, edge in enumerate(faces_edges):
edge_key = edge2key[edge]
sides[edge_key][nb_count[edge_key] - 2] = nb_count[edge2key[faces_edges[(idx + 1) % 3]]] - 1
sides[edge_key][nb_count[edge_key] - 1] = nb_count[edge2key[faces_edges[(idx + 2) % 3]]] - 2
self.edges = np.array(edges, dtype=np.int32)
self.gemm_edges = np.array(edge_nb, dtype=np.int64)
self.sides = np.array(sides, dtype=np.int64)
self.edges_count = edges_count
# lots of DS for loss
self.nvs, self.nvsi, self.nvsin = [], [], []
for i, e in enumerate(self.ve):
self.nvs.append(len(e))
self.nvsi.append(len(e) * [i])
self.nvsin.append(list(range(len(e))))
self.vei = torch.from_numpy(np.concatenate(np.array(self.vei)).ravel()).to(self.device).long()
self.nvsi = torch.Tensor(np.concatenate(np.array(self.nvsi)).ravel()).to(self.device).long()
self.nvsin = torch.from_numpy(np.concatenate(np.array(self.nvsin)).ravel()).to(self.device).long()
ve_in = copy.deepcopy(self.ve)
self.ve_in = torch.from_numpy(np.concatenate(np.array(ve_in)).ravel()).to(self.device).long()
self.max_nvs = max(self.nvs)
self.nvs = torch.Tensor(self.nvs).to(self.device).float()
self.edge2key = edge2key
def build_ef(self):
edge_faces = dict()
if type(self.faces) == torch.Tensor:
faces = self.faces.cpu().numpy()
else:
faces = self.faces
for face_id, face in enumerate(faces):
for i in range(3):
edge = tuple(sorted([face[i], face[(i + 1) % 3]]))
if edge not in edge_faces:
edge_faces[edge] = []
edge_faces[edge].append(face_id)
for k in edge_faces.keys():
if len(edge_faces[k]) < 2:
edge_faces[k].append(edge_faces[k][0])
return edge_faces
def build_gfmm(self):
edge_faces = self.build_ef()
gfmm = []
if type(self.faces) == torch.Tensor:
faces = self.faces.cpu().numpy()
else:
faces = self.faces
for face_id, face in enumerate(faces):
neighbors = [face_id]
for i in range(3):
edge = tuple(sorted([face[i], face[(i + 1) % 3]]))
neighbors.extend(list(set(edge_faces[edge]) - set([face_id])))
gfmm.append(neighbors)
return torch.Tensor(gfmm).long().to(self.device)
def normalize_unit_bb(self):
"""
normalizes to unit bounding box and translates to center
if no
:param verts: new verts
"""
cache_norm_file = self.filename.with_suffix('.npz')
if not cache_norm_file.exists():
scale = max([self.vs[:, i].max() - self.vs[:, i].min() for i in range(3)])
scaled_vs = self.vs / scale
target_mins = [(scaled_vs[:, i].max() - scaled_vs[:, i].min()) / -2.0 for i in range(3)]
translations = [(target_mins[i] - scaled_vs[:, i].min()) for i in range(3)]
np.savez_compressed(cache_norm_file, scale=scale, translations=translations)
# load from the cache
cached_data = np.load(cache_norm_file, encoding='latin1', allow_pickle=True)
self.scale, self.translations = cached_data['scale'], cached_data['translations']
self.vs /= self.scale
self.vs += self.translations[None, :]
def discrete_project(self, pc: torch.Tensor, thres=0.9, cpu=False):
with torch.no_grad():
device = torch.device('cpu') if cpu else self.device
pc = pc.double()
if isinstance(self, Mesh):
mid_points = self.vs[self.faces].mean(dim=1)
normals = self.normals
else:
mid_points = self[:, :3]
normals = self[:, 3:]
pk12 = knn_points(mid_points[:, :3].unsqueeze(0), pc[:, :, :3], K=3).idx[0]
pk21 = knn_points(pc[:, :, :3], mid_points[:, :3].unsqueeze(0), K=3).idx[0]
loop = pk21[pk12].view(pk12.shape[0], -1)
knn_mask = (loop == torch.arange(0, pk12.shape[0], device=self.device)[:, None]).sum(dim=1) > 0
mid_points = mid_points.to(device)
pc = pc[0].to(device)
normals = normals.to(device)[~ knn_mask, :]
masked_mid_points = mid_points[~ knn_mask, :]
displacement = masked_mid_points[:, None, :] - pc[:, :3]
torch.cuda.empty_cache()
distance = displacement.norm(dim=-1)
mask = (torch.abs(torch.sum((displacement / distance[:, :, None]) *
normals[:, None, :], dim=-1)) > thres)
if pc.shape[-1] == 6:
pc_normals = pc[:, 3:]
normals_correlation = torch.sum(normals[:, None, :] * pc_normals, dim=-1)
mask = mask * (normals_correlation > 0)
torch.cuda.empty_cache()
distance[~ mask] += float('inf')
min, argmin = distance.min(dim=-1)
pc_per_face_masked = pc[argmin, :].clone()
pc_per_face_masked[min == float('inf'), :] = float('nan')
pc_per_face = torch.zeros(mid_points.shape[0], 6).\
type(pc_per_face_masked.dtype).to(pc_per_face_masked.device)
pc_per_face[~ knn_mask, :pc.shape[-1]] = pc_per_face_masked
pc_per_face[knn_mask, :] = float('nan')
# clean up
del knn_mask
return pc_per_face.to(self.device), (pc_per_face[:, 0] == pc_per_face[:, 0]).to(device)
@staticmethod
def face_areas_normals(vs, faces):
if type(vs) is not torch.Tensor:
vs = torch.from_numpy(vs)
if type(faces) is not torch.Tensor:
faces = torch.from_numpy(faces)
face_normals = torch.cross(vs[faces[:, 1]] - vs[faces[:, 0]],
vs[faces[:, 2]] - vs[faces[:, 1]])
face_areas = torch.norm(face_normals, dim=1)
face_normals = face_normals / face_areas[:, None]
        face_areas = 0.5 * face_areas
return face_areas, face_normals
def update_verts(self, verts):
"""
update verts positions only, same connectivity
:param verts: new verts
"""
self.vs = verts
def deep_copy(self): #TODO see if can do this better
new_mesh = Mesh(file=None)
types = [np.ndarray, torch.Tensor, dict, list, str, int, bool, float]
for attr in self.__dir__():
if attr == '__dict__':
continue
val = getattr(self, attr)
if type(val) == types[0]:
new_mesh.__setattr__(attr, val.copy())
elif type(val) == types[1]:
new_mesh.__setattr__(attr, val.clone())
elif type(val) in types[2:4]:
new_mesh.__setattr__(attr, pickle.loads(pickle.dumps(val, -1)))
elif type(val) in types[4:]:
new_mesh.__setattr__(attr, val)
return new_mesh
def merge_vertices(self, edge_id):
self.remove_edge(edge_id)
edge = self.edges[edge_id]
v_a = self.vs[edge[0]]
v_b = self.vs[edge[1]]
# update pA
v_a.__iadd__(v_b)
v_a.__itruediv__(2)
self.v_mask[edge[1]] = False
mask = self.edges == edge[1]
self.ve[edge[0]].extend(self.ve[edge[1]])
self.edges[mask] = edge[0]
def remove_vertex(self, v):
self.v_mask[v] = False
def remove_edge(self, edge_id):
vs = self.edges[edge_id]
for v in vs:
if edge_id not in self.ve[v]:
print(self.ve[v])
print(self.filename)
self.ve[v].remove(edge_id)
def clean(self, edges_mask, groups):
edges_mask = edges_mask.astype(bool)
torch_mask = torch.from_numpy(edges_mask.copy())
self.gemm_edges = self.gemm_edges[edges_mask]
self.edges = self.edges[edges_mask]
self.sides = self.sides[edges_mask]
new_ve = []
edges_mask = np.concatenate([edges_mask, [False]])
new_indices = np.zeros(edges_mask.shape[0], dtype=np.int32)
new_indices[-1] = -1
new_indices[edges_mask] = np.arange(0, np.ma.where(edges_mask)[0].shape[0])
self.gemm_edges[:, :] = new_indices[self.gemm_edges[:, :]]
for v_index, ve in enumerate(self.ve):
update_ve = []
# if self.v_mask[v_index]:
for e in ve:
update_ve.append(new_indices[e])
new_ve.append(update_ve)
self.ve = new_ve
self.__clean_history(groups, torch_mask)
def export(self, file):
vs = self.vs.cpu().clone()
vs -= self.translations[None, :]
vs *= self.scale
export(file, vs, self.faces)
def init_history(self):
self.history_data = {
'groups': [],
'gemm_edges': [self.gemm_edges.copy()],
'occurrences': [],
'edges_count': [self.edges_count],
}
def get_groups(self):
return self.history_data['groups'].pop()
def get_occurrences(self):
return self.history_data['occurrences'].pop()
def __clean_history(self, groups, pool_mask):
if self.history_data is not None:
self.history_data['occurrences'].append(groups.get_occurrences())
self.history_data['groups'].append(groups.get_groups(pool_mask))
self.history_data['gemm_edges'].append(self.gemm_edges.copy())
self.history_data['edges_count'].append(self.edges_count)
def unroll_gemm(self):
self.history_data['gemm_edges'].pop()
self.gemm_edges = self.history_data['gemm_edges'][-1]
self.history_data['edges_count'].pop()
self.edges_count = self.history_data['edges_count'][-1]
@staticmethod
def from_tensor(mesh, vs, faces, gfmm=True):
mesh = Mesh(file=mesh.filename, vs=vs, faces=faces, device=mesh.device, hold_history=True, gfmm=gfmm)
return mesh
def submesh(self, vs_index):
return PartMesh.create_submesh(vs_index, self)
class PartMesh:
"""
Divides a mesh into submeshes
"""
def __init__(self, main_mesh: Mesh, vs_groups=None, num_parts=1, bfs_depth=0, n=-1):
"""
Part Mesh constructor
:param main_mesh: main mesh to pick the submeshes from
:param vs_groups: tensor the size of vs that contains the submesh index from 0 upto number_of_sub_meshes - 1
        :param num_parts: number of parts to separate the main_mesh into
"""
self.main_mesh = main_mesh
        if vs_groups is not None:  # TODO: is this necessary?
self.vs_groups = vs_groups
else:
if n != -1:
self.vs_groups = PartMesh.grid_segment(self.main_mesh.vs, n=n)
else:
self.vs_groups = PartMesh.segment_shape(self.main_mesh.vs, seg_num=num_parts)
self.n_submeshes = torch.max(self.vs_groups).item() + 1
self.sub_mesh_index = []
self.sub_mesh = []
self.init_verts = []
tmp_vs_groups = self.vs_groups.clone()
delta = 0
for i in range(self.n_submeshes):
vs_index = (self.vs_groups == i).nonzero().squeeze(1)
if vs_index.size()[0] == 0:
tmp_vs_groups[self.vs_groups > i - delta] -= 1
continue
vs_index = torch.sort(vs_index, dim=0)[0]
vs_index = torch.tensor(self.vs_bfs(vs_index.tolist(), self.main_mesh.faces.tolist(), bfs_depth),
dtype=vs_index.dtype).to(vs_index.device)
m, vs_index = self.main_mesh.submesh(vs_index)
self.sub_mesh.append(m)
self.sub_mesh_index.append(vs_index)
self.init_verts.append(m.vs.clone().detach())
self.vs_groups = tmp_vs_groups
self.n_submeshes = torch.max(self.vs_groups).item() + 1
vse = self.vs_e_dict(self.main_mesh.edges)
self.sub_mesh_edge_index = []
for i in range(self.n_submeshes):
mask = torch.zeros(self.main_mesh.edges.shape[0]).long()
for face in self.sub_mesh[i].faces:
face = self.sub_mesh_index[i][face].to(face.device).long()
for j in range(3):
e = tuple(sorted([face[j].item(), face[(j + 1) % 3].item()]))
mask[vse[e]] = 1
self.sub_mesh_edge_index.append(self.mask_to_index(mask))
def update_verts(self, new_vs: torch.Tensor, index: int):
m = self.sub_mesh[index]
m.update_verts(new_vs)
self.main_mesh.vs[self.sub_mesh_index[index], :] = new_vs
def build_main_mesh(self):
"""
build self.main_mesh out of submesh's vs
"""
new_vs = torch.zeros_like(self.main_mesh.vs)
new_vs_n = torch.zeros(self.main_mesh.vs.shape[0], dtype=new_vs.dtype).to(new_vs.device)
for i, m in enumerate(self.sub_mesh):
new_vs[self.sub_mesh_index[i], :] += m.vs
new_vs_n[self.sub_mesh_index[i]] += 1
new_vs = new_vs / new_vs_n[:, None]
new_vs[new_vs_n == 0, :] = self.main_mesh.vs[new_vs_n == 0, :]
self.main_mesh.update_verts(new_vs)
def export(self, file, build_main=True):
"""
export the entire mesh (self.main_mesh)
:param file: file to output to
        :param build_main: build main mesh before exporting, Default: True
"""
with torch.no_grad():
if build_main:
self.build_main_mesh()
self.main_mesh.export(file)
def __getitem__(self, i: int) -> Mesh:
"""
get submesh at index i
:param i: index of submesh
:return: submesh at index i
"""
if type(i) != int:
raise TypeError('number submesh must be int')
if i >= self.n_submeshes:
raise OverflowError(f'index {i} for submesh is out of bounds, max index is {self.n_submeshes - 1}')
return self.sub_mesh[i]
def __iter__(self):
return iter(self.sub_mesh)
@staticmethod
def create_submesh(vs_index: torch.Tensor, mesh: Mesh) -> (Mesh, torch.Tensor):
"""
create a submesh out on a mesh object
:param vs_index: indices of the submesh
:param mesh: the mesh to sub
:return: the new submesh
"""
vs_mask = torch.zeros(mesh.vs.shape[0])
vs_mask[vs_index] = 1
faces_mask = vs_mask[mesh.faces].sum(dim=-1) > 0
new_faces = mesh.faces[faces_mask].clone()
all_verts = new_faces.view(-1)
new_vs_mask = torch.zeros(mesh.vs.shape[0]).long().to(all_verts.device)
new_vs_mask[all_verts] = 1
new_vs_index = PartMesh.mask_to_index(new_vs_mask)
new_vs = mesh.vs[new_vs_index, :].clone()
vs_mask = torch.zeros(mesh.vs.shape[0])
vs_mask[new_vs_index] = 1
        cumsum = torch.cumsum(1 - vs_mask, dim=0)
        new_faces -= cumsum[new_faces].to(new_faces.device).long()
m = Mesh.from_tensor(mesh, new_vs.detach(), new_faces.detach(), gfmm=False)
return m, new_vs_index
@staticmethod
def index_to_mask(index: torch.Tensor, len:int):
mask = torch.zeros(len)
for i in index:
mask[i] = 1
return mask
@staticmethod
def mask_to_index(mask: torch.Tensor):
lst = []
mask = mask.long()
for i, val in enumerate(mask):
if val == 1:
lst.append(i)
return torch.tensor(lst).type(torch.long)
@staticmethod
def segment_shape(vs: torch.Tensor, seg_num: int):
"""
        segment shape into up to 8 octant classes depending on position
        relative to the center of mass
        :param vs: tensor NX3
        :param seg_num: number of segments (2, 4, or 8)
        :return: tensor of size N with values being the class 0-7 (including 7)
"""
center = vs.mean(dim=0)
diff = vs - center[None, :]
eighth = torch.zeros(vs.shape[0]).float().to(diff.device)
if seg_num >= 2:
eighth += 1 *(diff[:, 0] > 0).float()
if seg_num >= 4:
eighth += 2 * (diff[:, 1] > 0).float()
if seg_num >= 8:
eighth += 4 * (diff[:, 2] > 0).float()
return eighth.long()
@staticmethod
def grid_segment(vs: torch.Tensor, n):
maxx, _ = vs.max(dim=0)
minn, _ = vs.min(dim=0)
unit = (maxx - minn) / n
vs_new = vs - minn[None, :]
        vs_coords = (vs_new / unit).int()
        vs_coords[vs_coords == n] -= 1
        return vs_coords[:, 0] + vs_coords[:, 1] * n + vs_coords[:, 2] * (n ** 2)
@staticmethod
def vs_e_dict(edges):
d = dict()
for i, e in enumerate(edges):
k = tuple(sorted(e))
d[k] = i
return d
@staticmethod
def vs_bfs(start_vs, faces, max_depth):
if max_depth <= 0:
return start_vs
q = Queue()
[q.put((c, 0)) for c in start_vs]
visited = start_vs
while not q.empty():
i, depth = q.get()
for f in faces:
if i in f:
for j in f:
if j not in visited:
if depth + 1 <= max_depth:
q.put((j, depth + 1))
visited.append(j)
return sorted(visited)
|
{"hexsha": "367adafceccdf978a003bfccc85bf09e79f57d17", "size": 20729, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/layers/mesh.py", "max_stars_repo_name": "Sanjay-Ganeshan/point2mesh", "max_stars_repo_head_hexsha": "0b5f8eade103d4408529d94ec5ca55cf64a9a2c4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "models/layers/mesh.py", "max_issues_repo_name": "Sanjay-Ganeshan/point2mesh", "max_issues_repo_head_hexsha": "0b5f8eade103d4408529d94ec5ca55cf64a9a2c4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/layers/mesh.py", "max_forks_repo_name": "Sanjay-Ganeshan/point2mesh", "max_forks_repo_head_hexsha": "0b5f8eade103d4408529d94ec5ca55cf64a9a2c4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.5655577299, "max_line_length": 116, "alphanum_fraction": 0.5571904096, "include": true, "reason": "import numpy", "num_tokens": 5080}
|
from __future__ import print_function
import unittest
import numpy as np
from scipy.sparse.linalg import eigsh
from discretize import TensorMesh
from SimPEG import simulation, data_misfit
from SimPEG.maps import IdentityMap
from SimPEG.regularization import Tikhonov
from SimPEG.utils.mat_utils import eigenvalue_by_power_iteration
class TestEigenvalues(unittest.TestCase):
def setUp(self):
# Mesh
N = 100
mesh = TensorMesh([N])
# Survey design parameters
nk = 30
jk = np.linspace(1.0, 59.0, nk)
p = -0.25
q = 0.25
# Physics
def g(k):
return np.exp(p * jk[k] * mesh.vectorCCx) * np.cos(
np.pi * q * jk[k] * mesh.vectorCCx
)
G = np.empty((nk, mesh.nC))
for i in range(nk):
G[i, :] = g(i)
self.G = G
# Creating the true model
true_model = np.zeros(mesh.nC)
true_model[mesh.vectorCCx > 0.3] = 1.0
true_model[mesh.vectorCCx > 0.45] = -0.5
true_model[mesh.vectorCCx > 0.6] = 0
self.true_model = true_model
# Create a SimPEG simulation
model_map = IdentityMap(mesh)
sim = simulation.LinearSimulation(mesh, G=G, model_map=model_map)
# Create a SimPEG data object
relative_error = 0.1
noise_floor = 1e-4
data_obj = sim.make_synthetic_data(
true_model, relative_error=relative_error, noise_floor=noise_floor, add_noise=True
)
dmis = data_misfit.L2DataMisfit(simulation=sim, data=data_obj)
self.dmis = dmis
# Test for joint misfits
n_misfits = 5
multipliers = np.random.randn(n_misfits)**2
multipliers /= np.sum(multipliers)
self.multipliers = multipliers
dmiscombo = dmis
for i, mult in enumerate(multipliers):
dmiscombo += mult * dmis
self.dmiscombo = dmiscombo
# Test for a regularization term
reg = Tikhonov(mesh=mesh)
self.reg = reg
# Test a mix combo
self.beta = 10.
self.mixcombo = self.dmis + self.beta * self.reg
def test_dm_eigenvalue_by_power_iteration(self):
# Test for a single data misfit
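        # Power iteration repeatedly applies the (implicit) Hessian
        # G^T W^T W G to a random vector to estimate its largest eigenvalue;
        # eigsh on the dense matrix provides the reference value.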
dmis_matrix = self.G.T.dot((self.dmis.W**2).dot(self.G))
field = self.dmis.simulation.fields(self.true_model)
max_eigenvalue_numpy, _ = eigsh(dmis_matrix,k=1)
max_eigenvalue_directive = eigenvalue_by_power_iteration(self.dmis,self.true_model, fields_list=field, n_pw_iter=30)
passed = np.isclose(max_eigenvalue_numpy, max_eigenvalue_directive, rtol=1e-2)
        self.assertTrue(passed)
print("Eigenvalue Utils for one data misfit term is validated.")
# Test for multiple data misfit
WtW = 0.
for i, (mult, dm) in enumerate(zip(self.dmiscombo.multipliers, self.dmiscombo.objfcts)):
WtW += mult * dm.W**2
dmiscombo_matrix = self.G.T.dot(WtW.dot(self.G))
max_eigenvalue_numpy, _ = eigsh(dmiscombo_matrix,k=1)
max_eigenvalue_directive = eigenvalue_by_power_iteration(self.dmiscombo,self.true_model, n_pw_iter=30)
passed = np.isclose(max_eigenvalue_numpy, max_eigenvalue_directive, rtol=1e-2)
        self.assertTrue(passed)
print("Eigenvalue Utils for multiple data misfit terms is validated.")
def test_reg_eigenvalue_by_power_iteration(self):
        reg_matrix = self.reg.deriv2(self.true_model)
        max_eigenvalue_numpy, _ = eigsh(reg_matrix, k=1)
max_eigenvalue_directive = eigenvalue_by_power_iteration(self.reg, self.true_model, n_pw_iter=100)
passed = np.isclose(max_eigenvalue_numpy, max_eigenvalue_directive, rtol=1e-2)
        self.assertTrue(passed)
print("Eigenvalue Utils for regularization is validated.")
def test_combo_eigenvalue_by_power_iteration(self):
        reg_matrix = self.reg.deriv2(self.true_model)
        dmis_matrix = self.G.T.dot((self.dmis.W**2).dot(self.G))
        combo_matrix = dmis_matrix + self.beta * reg_matrix
max_eigenvalue_numpy, _ = eigsh(combo_matrix,k=1)
max_eigenvalue_directive = eigenvalue_by_power_iteration(self.mixcombo, self.true_model, n_pw_iter=100)
passed = np.isclose(max_eigenvalue_numpy, max_eigenvalue_directive, rtol=1e-2)
        self.assertTrue(passed)
print("Eigenvalue Utils for a mixed ComboObjectiveFunction is validated.")
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "0bc6c7fa7bd002b1bee5aa9f61611a956e7a1ffc", "size": 3977, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/utils/test_mat_utils.py", "max_stars_repo_name": "JKutt/simpeg", "max_stars_repo_head_hexsha": "a0d9cf88e4551bfbfda3792521f4c85724686103", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/utils/test_mat_utils.py", "max_issues_repo_name": "JKutt/simpeg", "max_issues_repo_head_hexsha": "a0d9cf88e4551bfbfda3792521f4c85724686103", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/utils/test_mat_utils.py", "max_forks_repo_name": "JKutt/simpeg", "max_forks_repo_head_hexsha": "a0d9cf88e4551bfbfda3792521f4c85724686103", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2844827586, "max_line_length": 118, "alphanum_fraction": 0.7490570782, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1215}
|
/* Copyright (C) 2014 InfiniDB, Inc.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; version 2 of
the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA. */
/*****************************************************************************
* $Id$
*
****************************************************************************/
#include <exception>
#include <boost/scoped_ptr.hpp>
#include "configcpp.h"
#include "IDBDataFile.h"
#include "IDBPolicy.h"
#define BRMTBLLOCKSVR_DLLEXPORT
#include "tablelockserver.h"
#undef BRMTBLLOCKSVR_DLLEXPORT
using namespace std;
using namespace boost;
using namespace idbdatafile;
namespace BRM
{
TableLockServer::TableLockServer(SessionManagerServer* sm) : sms(sm)
{
boost::mutex::scoped_lock lk(mutex);
config::Config* config = config::Config::makeConfig();
filename = config->getConfig("SystemConfig", "TableLockSaveFile");
if (filename == "")
throw invalid_argument("TableLockServer: Need to define SystemConfig/TableLockSaveFile in config file"); // todo, replace this
load();
}
TableLockServer::~TableLockServer()
{
}
// call with lock held
void TableLockServer::save()
{
lit_t it;
uint32_t count = locks.size();
const char* filename_p = filename.c_str();
scoped_ptr<IDBDataFile> out(IDBDataFile::open(
IDBPolicy::getType(filename_p, IDBPolicy::WRITEENG),
filename_p, "wb", 0));
if (!out)
throw runtime_error("TableLockServer::save(): could not open save file");
out->write((char*) &count, 4);
for (it = locks.begin(); it != locks.end(); ++it)
{
if (!out)
throw runtime_error("TableLockServer::save(): could not write save file");
it->second.serialize(out.get());
}
}
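// On-disk format produced by save(): a 4-byte record count followed by
// 'count' serialized TableLockInfo entries.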
// call with lock held
void TableLockServer::load()
{
uint32_t size;
uint32_t i = 0;
TableLockInfo tli;
/* Need to standardize the file error handling */
const char* filename_p = filename.c_str();
scoped_ptr<IDBDataFile> in(IDBDataFile::open(
IDBPolicy::getType(filename_p, IDBPolicy::WRITEENG),
filename_p, "rb", 0));
if (!in)
{
ostringstream os;
os << "TableLockServer::load(): could not open the save file"
<< filename;
log(os.str(), logging::LOG_TYPE_DEBUG);
return;
}
try
{
in->read((char*) &size, 4);
for (i = 0; i < size; i++)
{
tli.deserialize(in.get());
tli.id = sms->getUnique64(); // Need new #s...
if (tli.id == 0) // 0 is an error code
tli.id = sms->getUnique64();
locks[tli.id] = tli;
}
}
catch (std::exception& e)
{
ostringstream os;
os << "TableLockServer::load(): could not load save file " << filename <<
" loaded " << i << "/" << size << " entries\n";
log(os.str(), logging::LOG_TYPE_DEBUG);
throw;
}
}
// throws on a failed save()
uint64_t TableLockServer::lock(TableLockInfo* tli)
{
set<uint32_t> dbroots;
lit_t it;
uint32_t i;
boost::mutex::scoped_lock lk(mutex);
for (i = 0; i < tli->dbrootList.size(); i++)
dbroots.insert(tli->dbrootList[i]);
for (it = locks.begin(); it != locks.end(); ++it)
{
if (it->second.overlaps(*tli, dbroots))
{
tli->ownerName = it->second.ownerName;
tli->ownerPID = it->second.ownerPID;
tli->ownerSessionID = it->second.ownerSessionID;
tli->ownerTxnID = it->second.ownerTxnID;
            return 0; // 0 is the error code; valid lock ids are nonzero
}
}
tli->id = sms->getUnique64();
if (tli->id == 0) // 0 is an error code
tli->id = sms->getUnique64();
locks[tli->id] = *tli;
try
{
save();
}
catch (...)
{
locks.erase(tli->id);
throw;
}
return tli->id;
}
bool TableLockServer::unlock(uint64_t id)
{
std::map<uint64_t, TableLockInfo>::iterator it;
TableLockInfo tli;
boost::mutex::scoped_lock lk(mutex);
it = locks.find(id);
if (it != locks.end())
{
tli = it->second;
locks.erase(it);
try
{
save();
}
catch (...)
{
locks[tli.id] = tli;
throw;
}
return true;
}
return false;
}
bool TableLockServer::changeState(uint64_t id, LockState state)
{
lit_t it;
boost::mutex::scoped_lock lk(mutex);
LockState old;
it = locks.find(id);
if (it == locks.end())
return false;
old = it->second.state;
it->second.state = state;
try
{
save();
}
catch (...)
{
it->second.state = old;
throw;
}
return true;
}
bool TableLockServer::changeOwner(uint64_t id, const string& ownerName, uint32_t pid, int32_t session,
int32_t txnID)
{
lit_t it;
boost::mutex::scoped_lock lk(mutex);
string oldName;
uint32_t oldPID;
int32_t oldSession;
int32_t oldTxnID;
it = locks.find(id);
if (it == locks.end())
return false;
oldName = it->second.ownerName;
oldPID = it->second.ownerPID;
oldSession = it->second.ownerSessionID;
oldTxnID = it->second.ownerTxnID;
it->second.ownerName = ownerName;
it->second.ownerPID = pid;
it->second.ownerSessionID = session;
it->second.ownerTxnID = txnID;
try
{
save();
}
catch (...)
{
it->second.ownerName = oldName;
it->second.ownerPID = oldPID;
it->second.ownerSessionID = oldSession;
it->second.ownerTxnID = oldTxnID;
throw;
}
return true;
}
vector<TableLockInfo> TableLockServer::getAllLocks() const
{
vector<TableLockInfo> ret;
boost::mutex::scoped_lock lk(mutex);
constlit_t it;
for (it = locks.begin(); it != locks.end(); ++it)
ret.push_back(it->second);
return ret;
}
void TableLockServer::releaseAllLocks()
{
std::map<uint64_t, TableLockInfo> tmp;
boost::mutex::scoped_lock lk(mutex);
tmp.swap(locks);
try
{
save();
}
catch (...)
{
tmp.swap(locks);
throw;
}
}
bool TableLockServer::getLockInfo(uint64_t id, TableLockInfo* out) const
{
constlit_t it;
boost::mutex::scoped_lock lk(mutex);
it = locks.find(id);
if (out == NULL)
return (it != locks.end());
if (it != locks.end())
{
*out = it->second;
return true;
}
return false;
}
}
|
{"hexsha": "5772e7135cc4a5a22a9752fb48eec01d55fad7a6", "size": 7198, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/vendor/mariadb-10.6.7/storage/columnstore/columnstore/versioning/BRM/tablelockserver.cpp", "max_stars_repo_name": "zettadb/zettalib", "max_stars_repo_head_hexsha": "3d5f96dc9e3e4aa255f4e6105489758944d37cc4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/vendor/mariadb-10.6.7/storage/columnstore/columnstore/versioning/BRM/tablelockserver.cpp", "max_issues_repo_name": "zettadb/zettalib", "max_issues_repo_head_hexsha": "3d5f96dc9e3e4aa255f4e6105489758944d37cc4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/vendor/mariadb-10.6.7/storage/columnstore/columnstore/versioning/BRM/tablelockserver.cpp", "max_forks_repo_name": "zettadb/zettalib", "max_forks_repo_head_hexsha": "3d5f96dc9e3e4aa255f4e6105489758944d37cc4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2022-02-27T14:00:01.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T06:24:22.000Z", "avg_line_length": 22.3540372671, "max_line_length": 135, "alphanum_fraction": 0.5640455682, "num_tokens": 1837}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
# https://www.institutoptique.fr/content/download/3234/22015/file/Optique%20Statistique%20cours%20ecrit.pdf
class Laser:
def __init__(self, fs, n, D_phi):
# Sampling frequency (Hz)
self.fs = fs
# Number of points in time
self.n = n
self.D_phi = D_phi
self.n_ = 2 * self.n
# Time grid (s)
self.t = np.arange(self.n)/fs
self.t_ = np.arange(self.n_)/fs
# Time steps (s)
self.dt = 1/fs
# Frequency grid (Hz)
self.f_fft = np.fft.fftfreq(self.n)/self.dt
self.f_fft_ = np.fft.fftfreq(self.n_)/self.dt
self.phase = np.zeros(self.n_) + 2* np.pi * np.random.rand()
self.update_phase(self.n)
self.interferometer_phase = np.pi/4
def update_phase(self, n_update):
phase_steps = np.sqrt(2*self.D_phi*self.dt) * np.random.randn(n_update)
self.phase = np.roll(self.phase, -n_update)
self.phase[-n_update-1:-1] = np.cumsum(phase_steps) + self.phase[-n_update-2]
def interference_signal(self, delay):
signal = 0.5*(1 + np.real(np.exp(1j * self.interferometer_phase) * \
np.exp(1j * (self.phase - self.shift_phase(delay, correct_slope=True)))))
return signal
def shift_phase(self, delay, correct_slope=False):
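        # Fractional delay via the Fourier shift theorem: multiplying the
        # spectrum by exp(2j*pi*delay*f) time-shifts the signal by the
        # requested delay. Removing the linear trend first (correct_slope)
        # keeps the circular shift from wrapping a large phase ramp around.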
phase = self.phase
wedge = np.exp(2*1j*np.pi*delay*self.f_fft_)
if correct_slope:
data_inter = phase[-1] + (phase[1]-phase[0]+phase[-1]-phase[-2])/2
slope = (data_inter - phase[0]) / (self.n_ * self.dt)
phase = phase - slope * self.t_
fft_phase = np.fft.fft(phase)
phase_shifted = np.real(np.fft.ifft(fft_phase * wedge))
if correct_slope:
phase_shifted += slope * self.t_
return phase_shifted
class Window(QtGui.QMainWindow):
def __init__(self, app, laser):
super(Window, self).__init__()
self.app = app
self.laser = laser
self.max_dphi = 10e6
self.dphi_step = 10e3
self.delay = 0.1e-6
self.max_delay = 0.1e-6
self.delay_step = 0.1e-9
self.setWindowTitle("Koheron Simulation of laser phase noise") # Title
self.setWindowIcon(QtGui.QIcon('icon_koheron.png'))
self.resize(800, 600) # Size
# Layout
self.centralWid = QtGui.QWidget()
self.setCentralWidget(self.centralWid)
self.lay = QtGui.QVBoxLayout()
self.button_sublayout = QtGui.QHBoxLayout()
self.hlay1 = QtGui.QHBoxLayout()
self.value_layout = QtGui.QVBoxLayout()
self.slider_layout = QtGui.QVBoxLayout()
self.centralWid.setLayout(self.lay)
# Widgets : Buttons, Sliders, PlotWidgets
# D_phi
self.dphi_label = QtGui.QLabel()
        self.dphi_label.setText('Linewidth (kHz): '+"{:.2f}".format(1e-3 * self.laser.D_phi/(2*np.pi)))
self.dphi_slider = QtGui.QSlider()
self.dphi_slider.setMinimum(0)
self.dphi_slider.setMaximum(self.max_dphi/self.dphi_step)
self.dphi_slider.setOrientation(QtCore.Qt.Horizontal)
        # Delay
        self.delay_label = QtGui.QLabel()
        self.delay_label.setText('Delay (ns): '+"{:.2f}".format(1e9 * self.delay))
self.delay_slider = QtGui.QSlider()
self.delay_slider.setMinimum(0)
self.delay_slider.setMaximum(self.max_delay/self.delay_step)
self.delay_slider.setOrientation(QtCore.Qt.Horizontal)
# Plot Widget
self.plotWid = pg.PlotWidget(name="data")
self.dataItem = pg.PlotDataItem(1e-6 * np.fft.fftshift(self.laser.f_fft),0*self.laser.t, pen=(0,4))
self.plotWid.addItem(self.dataItem)
self.plotItem = self.plotWid.getPlotItem()
self.plotItem.setMouseEnabled(x=False, y = True)
#specItem.setYRange(-8192, 8192)
# Axis
self.plotAxis = self.plotItem.getAxis("bottom")
self.plotAxis.setLabel("Frequency (MHz)")
# Add Widgets to layout
self.value_layout.addWidget(self.dphi_label,0)
self.slider_layout.addWidget(self.dphi_slider,0)
self.value_layout.addWidget(self.delay_label,0)
self.slider_layout.addWidget(self.delay_slider,0)
self.hlay1.addLayout(self.value_layout)
self.hlay1.addLayout(self.slider_layout)
self.lay.addLayout(self.hlay1)
self.lay.addWidget(self.plotWid)
self.dphi_slider.valueChanged.connect(self.change_dphi)
self.delay_slider.valueChanged.connect(self.change_delay)
self.show()
# Define events
def update(self):
self.laser.update_phase(16)
signal = self.laser.interference_signal(self.delay)
        psd = np.abs(np.square(np.fft.fft(signal[-self.laser.n-1:-1])))
self.dataItem.setData(1e-6 * np.fft.fftshift(self.laser.f_fft),np.fft.fftshift(10*np.log10(psd)))
def change_dphi(self):
self.laser.D_phi = self.dphi_slider.value()*self.dphi_step
self.dphi_label.setText('Linewidth (kHz) : '+"{:.2f}".format(1e-3 * self.laser.D_phi / (2*np.pi)))
def change_delay(self):
self.delay = self.delay_slider.value()*self.delay_step
self.delay_label.setText('Delay (ns) : '+"{:.2f}".format(1e9 * self.delay))
def main():
fs = 125e6
n = 1024
    linewidth = 100e3  # Laser linewidth (Hz)
D_phi = 2*np.pi*linewidth
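    # For a random-walk (Wiener) phase, the optical spectrum is Lorentzian
    # with a width set by the phase diffusion coefficient D_phi; the GUI
    # labels report D_phi / (2*pi) as the linewidth.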
las = Laser(fs, n, D_phi)
app = QtGui.QApplication.instance()
    if app is None:
app = QtGui.QApplication([])
app.quitOnLastWindowClosed()
win = Window(app, las)
while True:
win.update()
QtGui.QApplication.processEvents()
if __name__ == '__main__':
import sys
main()
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
|
{"hexsha": "7e96632de172009ba358887b98c3579c23dbd212", "size": 6388, "ext": "py", "lang": "Python", "max_stars_repo_path": "laser.py", "max_stars_repo_name": "Koheron/phase-noise", "max_stars_repo_head_hexsha": "e87ad9bdd3ff594fc3b62c2436745c7db4655675", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-02-19T07:34:09.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-04T07:27:35.000Z", "max_issues_repo_path": "laser.py", "max_issues_repo_name": "Koheron/phase-noise", "max_issues_repo_head_hexsha": "e87ad9bdd3ff594fc3b62c2436745c7db4655675", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "laser.py", "max_forks_repo_name": "Koheron/phase-noise", "max_forks_repo_head_hexsha": "e87ad9bdd3ff594fc3b62c2436745c7db4655675", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2016-04-05T09:22:59.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-27T12:10:42.000Z", "avg_line_length": 34.9071038251, "max_line_length": 107, "alphanum_fraction": 0.5832811522, "include": true, "reason": "import numpy", "num_tokens": 1582}
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import seaborn as sns
import numpy as np
import tensorflow as tf
tf.enable_eager_execution()
import tensorflow_probability as tfp
from tensorflow_probability import edward2 as ed
tfd = tfp.distributions
# ===========================================================================
# Constant
# ===========================================================================
a = 8
b = 0.5
mu = 0
n_samples = 100000
# ===========================================================================
# Following the generative procedure
# ===========================================================================
# Step 1: generate the precision Beta
beta_dist = tfd.Gamma(concentration=a, rate=b)
beta = beta_dist.sample(n_samples)
# the prior probability
p_beta_given_a_and_b = beta_dist.prob(beta)
# Step 2: generate the data point
# scale is standard deviation
x_dist = tfd.Normal(loc=mu, scale=tf.sqrt(1 / beta))
x = x_dist.sample()
# the likelihood
p_x_given_mu_and_beta = x_dist.prob(x)
# ====== plotting the prior ====== #
plt.figure()
sns.distplot(beta.numpy(), bins=120, kde=True)
plt.title(r"Prior distribution: $p(\beta|a=%g, b=%g)$" % (a, b))
# ====== plotting the likelihood ====== #
plt.figure()
sns.distplot(x.numpy(), bins=120, kde=True)
plt.title(r"Likelihood distribution: $p(X|\mu=%g, \sigma=\sqrt{\beta^{-1}})$" % mu)
# ====== plotting the posterior ====== #
# the posterior probability, this is only
# proportionally, not exactly because we omit
# the evidence p(X)
# If we want to calculate p(X), we need to marginalize out
# beta using sum rule:
# p(X) = p(X, beta_1) + p(X, beta_2) + ... + p(X, beta_∞)
# This is not easy
p_beta_given_x = p_x_given_mu_and_beta * p_beta_given_a_and_b
p_beta_given_x = p_beta_given_x / tf.reduce_sum(p_beta_given_x)
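# Normalizing by the sum makes this a self-normalized importance-sampling
# approximation: resampling the beta values with these weights (below) draws
# approximately from the posterior p(beta|X).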
posterior_dist = tfd.Categorical(probs=p_beta_given_x)
beta = beta.numpy()
posterior = []
for i in range(n_samples // 2000):
idx = posterior_dist.sample(2000).numpy()
posterior.append(beta[idx])
posterior = np.concatenate(posterior)
plt.figure()
sns.distplot(posterior, bins=120, kde=True)
plt.title(r"Sampled posterior distribution: $p(\beta|X)$")
# ====== plotting the closed-form solution ====== #
a0 = a + n_samples / 2
b0 = b + n_samples / 2 * np.var(x.numpy())
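# The Gamma prior is conjugate for the precision of a Normal with known mean:
# a0 = a + n/2 and b0 = b + (1/2) * sum((x - mu)^2); with mu = 0 the second
# term is approximated here by n/2 * var(x).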
posterior_dist = tfd.Gamma(concentration=a0, rate=b0)
posterior = posterior_dist.sample(n_samples)
plt.figure()
sns.distplot(posterior, bins=120, kde=True)
plt.title(
r"Closed form solution: $p(\beta|X) \sim Gamma(a=%g, b=%g)$"
% (a0, b0))
from odin import visual as V
V.plot_save('/tmp/tmp.pdf', dpi=200)
|
{"hexsha": "33cd7255bbba76e2a841f4c023d7958020291128", "size": 2730, "ext": "py", "lang": "Python", "max_stars_repo_path": "ex2_graph/tut2_infer_sigma.py", "max_stars_repo_name": "trungnt13/uef_bay1_2018", "max_stars_repo_head_hexsha": "48a0f684eb4d18777d9f03998233774baa0524a8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ex2_graph/tut2_infer_sigma.py", "max_issues_repo_name": "trungnt13/uef_bay1_2018", "max_issues_repo_head_hexsha": "48a0f684eb4d18777d9f03998233774baa0524a8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-11-30T16:36:40.000Z", "max_issues_repo_issues_event_max_datetime": "2018-11-30T16:36:40.000Z", "max_forks_repo_path": "ex2_graph/tut2_infer_sigma.py", "max_forks_repo_name": "trungnt13/uef_bay1_2018", "max_forks_repo_head_hexsha": "48a0f684eb4d18777d9f03998233774baa0524a8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6741573034, "max_line_length": 83, "alphanum_fraction": 0.6315018315, "include": true, "reason": "import numpy", "num_tokens": 712}
|
PROGRAM FILTERFIX
C-------------------------
C Fix up HST filter descriptions
C Read in wavelength and throughput
C Interpolate between the bins to give even bin sizes
C--------------------------
C
IMPLICIT NONE
C
INTEGER NFILT,IFILT
PARAMETER (NFILT=8)
c
CHARACTER*20 FNAME(NFILT),FNAMEI(NFILT)
DATA FNAME /
& 'F110W_NIC2.dat',
& 'F160W_NIC2.dat',
& 'F606W_ACS.dat',
& 'F675W_WFPC2.dat',
& 'F775W_ACS.dat',
& 'F814W_WFPC2.dat',
& 'F850LP_ACS.dat',
& 'F850lp_WFPC2.dat'/
INTEGER NLAM,ILAM,JLAM,KLAM,NLAMI
PARAMETER (NLAM=15000)
REAL LAM(NLAM),THROUG(NLAM)
REAL LAMX,LAMN,LAMS
C
REAL LAMI(NLAM),THROUGI(NLAM)
C
DO IFILT = 1,NFILT
OPEN (UNIT=30,FILE=FNAME(IFILT),FORM='FORMATTED')
ILAM = 0
100 CONTINUE
ILAM = ILAM + 1
READ (30,'(11X,F9.3,9X,F12.10)',END=101) LAM(ILAM),
& THROUG(ILAM)
GOTO 100
101 CONTINUE
CLOSE(30)
C OK fix up
JLAM = ILAM -1
LAMN = LAM(1)
LAMX = LAM(JLAM)
LAMS = 5.0
NLAMI = NINT((LAMX-LAMN)/LAMS) + 1
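C For each target wavelength, find the source bin
C [LAM(KLAM),LAM(KLAM+1)] that contains it and interpolate the
C throughput linearly between its endpoints; wavelengths outside
C every source bin keep a throughput of zero.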
DO ILAM = 1,NLAMI
LAMI(ILAM) = LAMN + FLOAT(ILAM-1)*LAMS
DO KLAM = 1,JLAM-1
THROUGI(ILAM) = 0.0
IF (LAMI(ILAM).GE.LAM(KLAM).AND.
& LAMI(ILAM).LT.LAM(KLAM+1)) THEN
THROUGI(ILAM) = THROUG(KLAM) +
& ((LAMI(ILAM)-LAM(KLAM))/
& (LAM(KLAM+1)-LAM(KLAM)))*
& (THROUG(KLAM+1) - THROUG(KLAM))
GOTO 200
ENDIF
ENDDO
200 CONTINUE
ENDDO
C Write out the fixed-up filter curve
FNAMEI(IFILT) = 'new'//FNAME(IFILT)
OPEN (UNIT=30,FILE=FNAMEI(IFILT),FORM='FORMATTED')
DO ILAM = 1,NLAMI
WRITE(30,'(5X,F8.2,5X,F8.6)') LAMI(ILAM),THROUGI(ILAM)
ENDDO
CLOSE(30)
ENDDO
END
|
{"hexsha": "bd46bf99553ed2db7af680797455647b74703e4f", "size": 2009, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/piscola/filters/HST_GOODS/filterfix.f", "max_stars_repo_name": "temuller/PISCoLA", "max_stars_repo_head_hexsha": "e380603155991c267c26c4c93dfd650b9777b6b9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-09-18T11:30:46.000Z", "max_stars_repo_stars_event_max_datetime": "2019-09-18T11:30:46.000Z", "max_issues_repo_path": "src/piscola/filters/HST_GOODS/filterfix.f", "max_issues_repo_name": "temuller/PISCoLA", "max_issues_repo_head_hexsha": "e380603155991c267c26c4c93dfd650b9777b6b9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-12-05T21:04:40.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-25T11:10:57.000Z", "max_forks_repo_path": "src/piscola/filters/HST_GOODS/filterfix.f", "max_forks_repo_name": "temuller/PISCoLA", "max_forks_repo_head_hexsha": "e380603155991c267c26c4c93dfd650b9777b6b9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-11-21T20:21:02.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-22T16:10:18.000Z", "avg_line_length": 25.7564102564, "max_line_length": 64, "alphanum_fraction": 0.4917869587, "num_tokens": 726}
|
# Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import numpy as np
import tensorflow as tf
from sklearn.svm import OneClassSVM as SklearnOneClassSVM
MODEL_DIR = "model_save"
MODEL_PATH = MODEL_DIR + "/one_class_svm_model"
ENABLE_EAGER_EXECUTION = False
try:
tf.enable_eager_execution()
ENABLE_EAGER_EXECUTION = True
except Exception:
try:
tf.compat.v1.enable_eager_execution()
ENABLE_EAGER_EXECUTION = True
except Exception:
ENABLE_EAGER_EXECUTION = False
def dataset_reader(dataset):
if ENABLE_EAGER_EXECUTION:
for features in dataset:
yield features
else:
data_iter = dataset.make_one_shot_iterator()
one_element = data_iter.get_next()
with tf.Session() as sess:
try:
while True:
yield sess.run(one_element)
except tf.errors.OutOfRangeError:
pass
class OneClassSVM(tf.keras.Model):
def __init__(self,
feature_columns=None,
kernel='rbf',
degree=3,
gamma='scale',
coef0=0.0,
tol=0.001,
nu=0.5,
shrinking=True,
cache_size=200,
verbose=False,
max_iter=-1):
# tf.keras.Model subclasses must initialize the base class before
# assigning attributes such as self.svm below.
super().__init__()
if os.path.exists(MODEL_PATH):
with open(MODEL_PATH, "rb") as f:
self.svm = pickle.load(f)
else:
self.svm = SklearnOneClassSVM(kernel=kernel,
degree=degree,
gamma=gamma,
coef0=coef0,
tol=tol,
nu=nu,
shrinking=shrinking,
cache_size=cache_size,
verbose=verbose,
max_iter=max_iter)
def concat_features(self, features):
assert isinstance(features, dict)
each_feature = []
for k, v in features.items():
if ENABLE_EAGER_EXECUTION:
v = v.numpy()
each_feature.append(v)
return np.concatenate(each_feature, axis=1)
def sqlflow_train_loop(self, dataset):
X = []
for features in dataset_reader(dataset):
X.append(self.concat_features(features))
X = np.concatenate(X)
self.svm.fit(X)
if not os.path.exists(MODEL_DIR):
os.mkdir(MODEL_DIR)
with open(MODEL_PATH, "wb") as f:
pickle.dump(self.svm, f, protocol=2)
def sqlflow_predict_one(self, features):
features = self.concat_features(features)
pred = self.svm.predict(features)
score = self.svm.decision_function(features)
return pred, score
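if __name__ == "__main__":
    # Minimal smoke test, not part of the SQLFlow API: the feature names
    # "x1"/"x2" and the shapes below are illustrative assumptions. Each
    # column has shape [rows, 1] so concat_features can concatenate the
    # columns along axis=1 before fitting the sklearn estimator.
    dataset = tf.data.Dataset.from_tensor_slices({
        "x1": np.random.randn(64, 1).astype(np.float32),
        "x2": np.random.randn(64, 1).astype(np.float32),
    }).batch(16)
    model = OneClassSVM(nu=0.1)
    model.sqlflow_train_loop(dataset)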
|
{"hexsha": "d58739d637ebcdcb6b12f52617ec3456b060a674", "size": 3506, "ext": "py", "lang": "Python", "max_stars_repo_path": "sqlflow_models/one_class_svm.py", "max_stars_repo_name": "hebafer/models", "max_stars_repo_head_hexsha": "5dc6421f562ea447e501fa355a48a6ee89856a1d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 29, "max_stars_repo_stars_event_min_datetime": "2019-05-05T09:08:10.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-25T02:35:56.000Z", "max_issues_repo_path": "sqlflow_models/one_class_svm.py", "max_issues_repo_name": "xieliaing/models", "max_issues_repo_head_hexsha": "5dc6421f562ea447e501fa355a48a6ee89856a1d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 33, "max_issues_repo_issues_event_min_datetime": "2019-04-29T08:38:26.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T00:17:32.000Z", "max_forks_repo_path": "sqlflow_models/one_class_svm.py", "max_forks_repo_name": "xieliaing/models", "max_forks_repo_head_hexsha": "5dc6421f562ea447e501fa355a48a6ee89856a1d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 23, "max_forks_repo_forks_event_min_datetime": "2019-04-29T05:38:35.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-30T15:28:35.000Z", "avg_line_length": 32.462962963, "max_line_length": 74, "alphanum_fraction": 0.5598973189, "include": true, "reason": "import numpy", "num_tokens": 700}
|
[STATEMENT]
lemma trms\<^sub>s\<^sub>s\<^sub>t_append[simp]: "trms\<^sub>s\<^sub>s\<^sub>t (A@B) = trms\<^sub>s\<^sub>s\<^sub>t A \<union> trms\<^sub>s\<^sub>s\<^sub>t B"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. trms\<^sub>s\<^sub>s\<^sub>t (A @ B) = trms\<^sub>s\<^sub>s\<^sub>t A \<union> trms\<^sub>s\<^sub>s\<^sub>t B
[PROOF STEP]
unfolding trms\<^sub>s\<^sub>s\<^sub>t_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Union> (trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p ` set (A @ B)) = \<Union> (trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p ` set A) \<union> \<Union> (trms\<^sub>s\<^sub>s\<^sub>t\<^sub>p ` set B)
[PROOF STEP]
by force
|
{"llama_tokens": 305, "file": "Stateful_Protocol_Composition_and_Typing_Stateful_Strands", "length": 2}
|
module Mod_plcd_Elmope
use Mod_plcd_BaseElmope
use Mod_plcd_HangingNodes
use Mod_plcd_UPFormulation
use Mod_plcd_LargeStrainsOperations
use Mod_plcd_TransientProblem
use Mod_plcd_RotatingFrame
contains
subroutine SetPointers
call ResetProcedureComposition
call SetPointersAndHooksToNULLSUB
!Initialize setpointers
call SetPointersHangingNodes%Initialize
call SetPointersUPFormulation%Initialize
call SetPointersLargeStrains%Initialize
call SetPointersTransientProblem%Initialize
call SetPointersRotatingFrame%Initialize
!SetPointers
call SetPointersHangingNodes%Set
call SetPointersUPFormulation%Set
call SetPointersLargeStrains%Set
call SetPointersTransientProblem%Set
call SetPointersRotatingFrame%Set
!Finalize SetPointers
call SetPointersHangingNodes%Finalize
call SetPointersUPFormulation%Finalize
call SetPointersLargeStrains%Finalize
call SetPointersTransientProblem%Finalize
call SetPointersRotatingFrame%Finalize
end subroutine
end module
subroutine plcd_Elmope(b)
use Mod_plcd_BaseElmope
use Mod_PLCD
use Mod_plcd_BMatrixFactory
use Mod_plcd_Stages
use Mod_plcd_elmdir
use Mod_php_AssemblyVectorToSystem
use Mod_plcd_Elmope
use Mod_Debugging
implicit none
class(PLCDProblem), target :: b
integer(ip) :: idofn, inode,jnode,ipoin,idime,jdime, ielem2
!deb_PostprocessMatrix = 1
a => b
!SetPointers
call SetPointers
!This cannot be here
!NodalForces => a%cs%NodalForces
call a%Mesh%ElementAlloc(e,a%Memor,'DefaultRule','plcd_Elmope')
call a%Memor%alloc(a%ndofn,e%mnode,a%ndofn,e%mnode,elmat,'elmat','plcd_elmope')
call a%Memor%alloc(a%ndofn,e%mnode,elrhs,'elrhs','plcd_elmope')
call a%Memor%alloc(a%ndofn,e%mnode,a%ndofn,e%mnode,GaussElmat,'GaussElmat','plcd_elmope')
call a%Memor%alloc(e%ndime,e%mnode,3,eldisp,'eldisp','plcd_elmope')
call a%Memor%alloc(e%ndime,e%mnode,elNodalForces,'elNodalForces','plcd_elmope')
!Hook
call ProcHook%Initializations
call CreateBMatrix(a,a%Memor,BMat)
call BMat%Alloc(e,a%Memor)
call a%Mesh%GetNelem(nelem)
do ielem = 1,nelem
ielem2 = ielem
!Load Element
call a%Mesh%ElementLoad(ielem,e)
!Hook
call ProcHook%PreGauss
elmat = 0.0_rp
elrhs = 0.0_rp
!Compute linear derivatives
call e%elmdel
ElementMatData => a%ElementMaterialsData(ielem)%p
!Gathers
!Displacements at the previous iteration
call e%gather(e%ndime,eldisp(:,:,1),a%Displacement(:,:,1))
!Gausspoint loop
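!At each Gauss point the stiffness contribution is the standard
!B^T * C * B integrand weighted by dvol = weigp*detjm; the hooks let
!the extension modules (UP formulation, large strains, transient
!terms, rotating frame) modify GaussElmat in place.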
GaussPointLoop : do igaus = 1,e%pgaus
e%igaus = igaus
call e%elmder
dvol = e%weigp(e%igaus)*e%detjm
GaussElmat(:,1:e%pnode,:,1:e%pnode) = 0.0_rp
call ElementMatData%GetConstitutiveTensorPointer(e%igaus,C)
!Hook
call ProcHook%InGauss
call BMat%Setup(e)
call BMat%Bt_Times_Matrix_Times_B(C,GaussElmat(1:e%ndime,1:e%pnode,1:e%ndime,1:e%pnode))
!call BMat%Bt_Times_Matrix_Times_B(C,GaussElmat)
GaussElmat = GaussElmat*dvol
!Hook
call ProcHook%InGaussElmats
!Elmat
elmat(:,1:e%pnode,:,1:e%pnode) = elmat(:,1:e%pnode,:,1:e%pnode) + GaussElmat(:,1:e%pnode,:,1:e%pnode)
enddo GaussPointLoop
!Hook
call ProcHook%PostGaussElmats
!Hook
call ProcHook%PreDirichlet
!Dirichlet Boundary Conditions
call plcd_elmdir(a,e,elmat,elrhs)
!Assembly
call a%LinearSystem%Assembly(e,elmat,elrhs)
enddo
!Hook
call ProcHook%Finalizations
call BMat%DeAlloc(e,a%Memor)
call DestroyBMatrix(a,a%Memor,BMat)
call a%Memor%dealloc(e%ndime,e%mnode,3,eldisp,'eldisp','plcd_elmope')
call a%Memor%dealloc(e%ndime,e%mnode,elNodalForces,'elNodalForces','plcd_elmope')
call a%Memor%dealloc(a%ndofn,e%mnode,a%ndofn,e%mnode,elmat,'elmat','plcd_elmope')
call a%Memor%dealloc(a%ndofn,e%mnode,elrhs,'elrhs','plcd_elmope')
call a%Memor%dealloc(a%ndofn,e%mnode,a%ndofn,e%mnode,GaussElmat,'GaussElmat','plcd_elmope')
call a%Mesh%ElementDeAlloc(e,a%Memor,'DefaultRule','plcd_Elmope')
end subroutine plcd_Elmope
|
{"hexsha": "c6483e684f1c1291985de70d9f9864fe856ef5c2", "size": 4261, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "Sources/modules/plcd/Elmopes/plcd_Elmope.f90", "max_stars_repo_name": "ciaid-colombia/InsFEM", "max_stars_repo_head_hexsha": "be7eb35baa75c31e3b175e95286549ccd84f8d40", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-24T08:19:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-24T08:19:54.000Z", "max_issues_repo_path": "Sources/modules/plcd/Elmopes/plcd_Elmope.f90", "max_issues_repo_name": "ciaid-colombia/InsFEM", "max_issues_repo_head_hexsha": "be7eb35baa75c31e3b175e95286549ccd84f8d40", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Sources/modules/plcd/Elmopes/plcd_Elmope.f90", "max_forks_repo_name": "ciaid-colombia/InsFEM", "max_forks_repo_head_hexsha": "be7eb35baa75c31e3b175e95286549ccd84f8d40", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.1401273885, "max_line_length": 110, "alphanum_fraction": 0.7052335133, "num_tokens": 1407}
|
import os
import sys
import urllib.request
import zipfile
import tensorflow as tf
from download_datasets import ensure_dataset_exists
import numpy as np
# Loads a morphological dataset in a vertical format.
# - The data consists of three Datasets
# - train
# - dev
# - test
# - Each dataset is composed of factors (FORMS, LEMMAS, TAGS), each an
# object containing the following fields:
# - word_strings: Strings of the original words.
# - word_ids: Word ids of the original words (uses <unk> and <pad>).
# - words_map: String -> word_id map.
# - words: Word_id -> string list.
# - alphabet_map: Character -> char_id map.
# - alphabet: Char_id -> character list.
# - charseqs: Sequences of characters of the original words.
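# A minimal usage sketch (commented out because it loads the PDT treebank
# via DATASETS_PATH; the names follow the fields documented above):
# morpho = MorphoDataset(add_bow_eow=True, max_sentences=1000)
# for batch in morpho.train.batches(size=32):
# forms = batch[morpho.train.FORMS] # .word_ids: [batch, max_sentence_len]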
class MorphoDataset:
class Factor:
PAD = 0
UNK = 1
BOW = 2
EOW = 3
def __init__(self, characters, train=None):
self.words_map = train.words_map if train else {"<pad>": self.PAD, "<unk>": self.UNK}
self.words = train.words if train else ["<pad>", "<unk>"]
self.word_ids = []
self.word_strings = []
self.characters = characters
if characters:
self.alphabet_map = train.alphabet_map if train else {
"<pad>": self.PAD, "<unk>": self.UNK, "<bow>": self.BOW, "<eow>": self.EOW}
self.alphabet = train.alphabet if train else ["<pad>", "<unk>", "<bow>", "<eow>"]
self.charseqs = []
class FactorBatch:
def __init__(self, word_ids, charseqs=None):
self.word_ids = word_ids
self.charseqs = charseqs
class Dataset:
FORMS = 0
LEMMAS = 1
TAGS = 2
FACTORS = 3
def __init__(self, data_file, train=None, shuffle_batches=True, add_bow_eow=False, max_sentences=None, seed=42):
# Create factors
self._data = []
for f in range(self.FACTORS):
self._data.append(MorphoDataset.Factor(f in [self.FORMS, self.LEMMAS], train._data[f] if train else None))
in_sentence = False
for line in data_file:
line = line.decode("utf-8").rstrip("\r\n")
if line:
columns = line.split("\t")
for f in range(self.FACTORS):
factor = self._data[f]
if not in_sentence:
if len(factor.word_ids):
factor.word_ids[-1] = np.array(factor.word_ids[-1], np.int32)
factor.word_ids.append([])
factor.word_strings.append([])
if factor.characters:
factor.charseqs.append([])
word = columns[f]
factor.word_strings[-1].append(word)
# Character-level information
if factor.characters:
factor.charseqs[-1].append([])
if add_bow_eow:
factor.charseqs[-1][-1].append(MorphoDataset.Factor.BOW)
for c in word:
if c not in factor.alphabet_map:
if train:
c = "<unk>"
else:
factor.alphabet_map[c] = len(factor.alphabet)
factor.alphabet.append(c)
factor.charseqs[-1][-1].append(factor.alphabet_map[c])
if add_bow_eow:
factor.charseqs[-1][-1].append(MorphoDataset.Factor.EOW)
# Word-level information
if word not in factor.words_map:
if train:
word = "<unk>"
else:
factor.words_map[word] = len(factor.words)
factor.words.append(word)
factor.word_ids[-1].append(factor.words_map[word])
in_sentence = True
else:
in_sentence = False
if max_sentences is not None and len(self._data[self.FORMS].word_ids) >= max_sentences:
break
self._size = len(self._data[self.FORMS].word_ids)
self._shuffler = np.random.RandomState(seed) if shuffle_batches else None
@property
def data(self):
return self._data
def size(self):
return self._size
def batches(self, size=None):
permutation = self._shuffler.permutation(self._size) if self._shuffler else np.arange(self._size)
while len(permutation):
batch_size = min(size or np.inf, len(permutation))
batch_perm = permutation[:batch_size]
permutation = permutation[batch_size:]
batch = []
max_sentence_len = max(len(self._data[self.FORMS].word_ids[i]) for i in batch_perm)
# Word-level data
for factor in self._data:
batch.append(MorphoDataset.FactorBatch(np.zeros([batch_size, max_sentence_len], np.int32)))
for i in range(batch_size):
batch[-1].word_ids[i, :len(factor.word_ids[batch_perm[i]])] = factor.word_ids[batch_perm[i]]
# Character-level data
for f, factor in enumerate(self._data):
if not factor.characters:
continue
max_charseq_len = max(len(charseq) for i in batch_perm for charseq in factor.charseqs[i])
batch[f].charseqs = np.zeros([batch_size, max_sentence_len, max_charseq_len], np.int32)
for i in range(batch_size):
for j, charseq in enumerate(factor.charseqs[batch_perm[i]]):
batch[f].charseqs[i, j, :len(charseq)] = charseq
yield batch
def __init__(self, add_bow_eow=False, max_sentences=None):
data_folder = os.environ['DATASETS_PATH'] if 'DATASETS_PATH' in os.environ else os.path.expanduser('~/datasets')
ensure_dataset_exists(data_folder)
dataset_path = os.path.join(data_folder, 'ud-treebanks-v2.2/UD_Czech-PDT')
for dataset in ["train", "dev", "test"]:
with open(os.path.join(dataset_path, f'cs_pdt-ud-{dataset}.lemmatag'), 'rb') as dataset_file:
setattr(self, dataset, self.Dataset(dataset_file,
train=self.train if dataset != "train" else None,
shuffle_batches=dataset == "train",
add_bow_eow=add_bow_eow,
max_sentences=max_sentences))
|
{"hexsha": "ef16c2d94b46dac1b81fe7db5f02d4c0e82eb5b4", "size": 7133, "ext": "py", "lang": "Python", "max_stars_repo_path": "morpho_dataset.py", "max_stars_repo_name": "jkulhanek/lemmatag-tf2", "max_stars_repo_head_hexsha": "816c376d8e6f894e34af67bc9076aed68f540bf8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "morpho_dataset.py", "max_issues_repo_name": "jkulhanek/lemmatag-tf2", "max_issues_repo_head_hexsha": "816c376d8e6f894e34af67bc9076aed68f540bf8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-08-25T16:12:15.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T02:46:17.000Z", "max_forks_repo_path": "morpho_dataset.py", "max_forks_repo_name": "jkulhanek/lemmatag", "max_forks_repo_head_hexsha": "816c376d8e6f894e34af67bc9076aed68f540bf8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.0308641975, "max_line_length": 122, "alphanum_fraction": 0.5087620917, "include": true, "reason": "import numpy", "num_tokens": 1464}
|
import pytest
from smt_solver.sat_solver.tests.test_sat_solver import TestSATSolver
from smt_solver.uf_solver.tests.test_uf_solver import TestUFSolver
from smt_solver.tq_solver.tests.test_tq_solver import TestTQSolver
from smt_solver.smt_solver import SMTSolver
from random import randint
import numpy as np
class TestSMTSolver:
@staticmethod
@pytest.mark.parametrize("variable_num, equation_num, function_num, coefficient_limits, operator_num",
np.full((10, 5), (5, 10, 2, (-5, 5), 10))
)
def test_random_formula(variable_num: int, equation_num: int, function_num: int, coefficient_limits: (int, int),
operator_num: int):
formula_z3, formula_our, formula_type = None, None, randint(1, 3)
if formula_type == 1:
formula_z3, formula_our = TestUFSolver.generate_random_boolean_formula(variable_num, operator_num)
elif formula_type == 2:
formula_z3, formula_our = TestUFSolver.generate_random_uf_formula(variable_num, equation_num, function_num,
operator_num)
elif formula_type == 3:
formula_z3, formula_our = TestTQSolver.generate_random_tq_formula(variable_num, equation_num,
coefficient_limits, operator_num)
if (formula_z3 is None) or (formula_our is None):
return
print("\n\n", "Z3 formula: ", formula_z3, "\n", "Our formula: ", formula_our)
assert TestSATSolver.compare_to_z3(formula_z3, SMTSolver(formula_our))
|
{"hexsha": "5e253fec5a58255e360583091f82fb52835a68f9", "size": 1674, "ext": "py", "lang": "Python", "max_stars_repo_path": "smt_solver/tests/test_smt_solver.py", "max_stars_repo_name": "AvivYaish/SMTsolver", "max_stars_repo_head_hexsha": "773041311ed8195ab48f669310df26ead3061912", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-07T14:35:31.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-07T14:35:31.000Z", "max_issues_repo_path": "smt_solver/tests/test_smt_solver.py", "max_issues_repo_name": "AvivYaish/SMTsolver", "max_issues_repo_head_hexsha": "773041311ed8195ab48f669310df26ead3061912", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "smt_solver/tests/test_smt_solver.py", "max_forks_repo_name": "AvivYaish/SMTsolver", "max_forks_repo_head_hexsha": "773041311ed8195ab48f669310df26ead3061912", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-12T04:48:57.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-12T04:48:57.000Z", "avg_line_length": 55.8, "max_line_length": 119, "alphanum_fraction": 0.6403823178, "include": true, "reason": "import numpy", "num_tokens": 373}
|
module Torch
# package code goes here
end # module
|
{"hexsha": "5ba90093ec44c8ca4a0a72c178a030254396446d", "size": 54, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Torch.jl", "max_stars_repo_name": "Faldict/Torch.jl", "max_stars_repo_head_hexsha": "5f7b90647ef1dd1a9b5a8c87df8e1d50853bc1e2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-05-27T04:08:46.000Z", "max_stars_repo_stars_event_max_datetime": "2017-05-27T04:08:46.000Z", "max_issues_repo_path": "src/Torch.jl", "max_issues_repo_name": "Faldict/Torch.jl", "max_issues_repo_head_hexsha": "5f7b90647ef1dd1a9b5a8c87df8e1d50853bc1e2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Torch.jl", "max_forks_repo_name": "Faldict/Torch.jl", "max_forks_repo_head_hexsha": "5f7b90647ef1dd1a9b5a8c87df8e1d50853bc1e2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 7.7142857143, "max_line_length": 24, "alphanum_fraction": 0.7222222222, "num_tokens": 13}
|
from torchvision.datasets import MNIST, CIFAR10
import albumentations as A
from albumentations.pytorch import ToTensorV2
import torch
import cv2
from torch.utils.data import DataLoader
import torchvision
import numpy as np
DATA_MEAN = (0.4914, 0.4822, 0.4465)
DATA_STD = (0.247, 0.2435, 0.2616)
class Transforms:
def __init__(self, transforms: A.Compose):
self.transforms = transforms
def __call__(self, img, *args, **kwargs):
# albumentations returns a dict; torchvision expects the transformed image
return self.transforms(image=np.array(img))["image"]
class Loader:
def __init__(self, batch_size):
self.text = 'This class loads the data for the model'
self.batch_size=batch_size
def transform(self):
trainTransform = A.Compose([
A.HorizontalFlip(p=0.3),
A.ShiftScaleRotate(shift_limit=0.1, scale_limit=0.1, rotate_limit=30, interpolation=cv2.INTER_LINEAR, border_mode=cv2.BORDER_REFLECT_101, always_apply=False, p=0.5),
A.CoarseDropout(max_holes = 1, max_height=16, max_width=16, min_holes = 1, min_height=16, min_width=16, fill_value=DATA_MEAN, mask_fill_value = None, p=0.3),
A.Normalize(DATA_MEAN, DATA_STD),
ToTensorV2(),
])
simpleTransform = A.Compose([
A.Normalize(DATA_MEAN, DATA_STD),
ToTensorV2(),
])
return Transforms(trainTransform), Transforms(simpleTransform)
def Loader(self,trainTransform, simpleTransform, cuda: bool=True):
seed = 42
if cuda:
torch.cuda.manual_seed(seed)
kwargs = {'batch_size': self.batch_size, 'pin_memory': True, 'num_workers': 4}
else:
torch.manual_seed(seed)
kwargs = {'batch_size': self.batch_size}
train = CIFAR10(root='./data', train=True,
download=True, transform=trainTransform)
test = CIFAR10(root='./data', train=False, download=True, transform=simpleTransform)
train_loader = DataLoader(train, shuffle=True, **kwargs)
test_loader = DataLoader(test, shuffle=True, **kwargs)
return train_loader, test_loader
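# Minimal usage sketch (commented out; CIFAR-10 is downloaded to ./data on
# first use, and Loader.Loader is the method defined above, not the class):
# loader = Loader(batch_size=128)
# trainTransform, simpleTransform = loader.transform()
# train_loader, test_loader = loader.Loader(trainTransform, simpleTransform,
# cuda=torch.cuda.is_available())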
|
{"hexsha": "fa4a2f7717df15d55888a44b2f0d1272ad0eab90", "size": 2083, "ext": "py", "lang": "Python", "max_stars_repo_path": "assignment_7/src/dataLoader.py", "max_stars_repo_name": "amitbcp/tsai-vision", "max_stars_repo_head_hexsha": "14a66d4c3295714fdcc97db13804ffba9d6f06cc", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "assignment_7/src/dataLoader.py", "max_issues_repo_name": "amitbcp/tsai-vision", "max_issues_repo_head_hexsha": "14a66d4c3295714fdcc97db13804ffba9d6f06cc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "assignment_7/src/dataLoader.py", "max_forks_repo_name": "amitbcp/tsai-vision", "max_forks_repo_head_hexsha": "14a66d4c3295714fdcc97db13804ffba9d6f06cc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-07-25T10:24:11.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-13T09:23:30.000Z", "avg_line_length": 34.7166666667, "max_line_length": 177, "alphanum_fraction": 0.6586653865, "include": true, "reason": "import numpy", "num_tokens": 513}
|
function constructnetwork!(m::JuMP.AbstractModel, branch_models::Array{NamedTuple{(:device, :formulation), Tuple{DataType,DataType}}}, netinjection::BalanceNamedTuple, system_formulation::Type{S}, sys::PowerSystems.PowerSystem; args...) where {S <: CopperPlatePowerModel}
copperplatebalance(m, netinjection, sys.time_periods)
end
function constructnetwork!(m::JuMP.AbstractModel, branch_models::Array{NamedTuple{(:device, :formulation), Tuple{DataType,DataType}}}, netinjection::BalanceNamedTuple, system_formulation::Type{S}, sys::PowerSystems.PowerSystem; args...) where {S <: AbstractFlowForm}
for category in branch_models
constructdevice!(m, netinjection, category.device, category.formulation, system_formulation, sys; args...)
end
nodalflowbalance(m, netinjection, system_formulation, sys)
end
function constructnetwork!(m::JuMP.AbstractModel, branch_models::Array{NamedTuple{(:device, :formulation), Tuple{DataType,DataType}}}, netinjection::BalanceNamedTuple, system_formulation::Type{StandardPTDF}, sys::PowerSystems.PowerSystem; args...)
if :PTDF in keys(args)
PTDF = args[:PTDF]
else
PTDF = nothing
end
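# Fall back to building the PTDF matrix from the branch/bus data when the
# caller did not pass a valid one through the keyword arguments; for large
# systems it is cheaper to compute it once and pass it in on every call.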
if !isa(PTDF,PTDFArray)
@warn "no PTDF supplied"
PTDF, A = PowerSystems.buildptdf(sys.branches, sys.buses)
end
for category in branch_models
constructdevice!(m, netinjection, category.device, category.formulation, system_formulation, sys; args..., PTDF=PTDF)
end
nodalflowbalance(m, netinjection, system_formulation, sys)
end
function constructnetwork!(m::JuMP.AbstractModel, branch_models::Array{NamedTuple{(:device, :formulation), Tuple{DataType,DataType}}}, netinjection::BalanceNamedTuple, system_formulation::Type{S}, sys::PowerSystems.PowerSystem; args...) where {S <: AbstractDCPowerModel}
#= TODO: Needs to be generalized later for other branch models not covered by PM.
for category in branch_models
constructdevice!(m, netinjection, category.device, category.formulation, system_formulation, sys; args...)
end
=#
nodalflowbalance(m, netinjection, system_formulation, sys)
PM_F = (data::Dict{String,Any}; kwargs...) -> PM.GenericPowerModel(data, system_formulation; kwargs...)
PM_object = PS.build_nip_expr_model(m.ext[:PM_object], PM_F, jump_model=m);
m = PM_object.model
end
function constructnetwork!(m::JuMP.AbstractModel, branch_models::Array{NamedTuple{(:device, :formulation), Tuple{DataType,DataType}}}, netinjection::BalanceNamedTuple, system_formulation::Type{S}, sys::PowerSystems.PowerSystem; args...) where {S <: AbstractACPowerModel}
#= TODO: Needs to be generalized later for other branch models not covered by PM.
for category in branch_models
constructdevice!(m, netinjection, category.device, category.formulation, system_formulation, sys; args...)
end
=#
nodalflowbalance(m, netinjection, system_formulation, sys)
PM_F = (data::Dict{String,Any}; kwargs...) -> PM.GenericPowerModel(data, system_formulation; kwargs...)
PM_object = PS.build_nip_expr_model(m.ext[:PM_object], PM_F, jump_model=m);
m.ext[:PM_object] = PM_object
end
|
{"hexsha": "86b0d26dc73cff5046f888459c7bcbff9255486f", "size": 3173, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/component_constructors/network_constructor.jl", "max_stars_repo_name": "gitter-badger/PowerSimulations.jl", "max_stars_repo_head_hexsha": "608671297c4b813505aef4073932eae3d8875af6", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/component_constructors/network_constructor.jl", "max_issues_repo_name": "gitter-badger/PowerSimulations.jl", "max_issues_repo_head_hexsha": "608671297c4b813505aef4073932eae3d8875af6", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/component_constructors/network_constructor.jl", "max_forks_repo_name": "gitter-badger/PowerSimulations.jl", "max_forks_repo_head_hexsha": "608671297c4b813505aef4073932eae3d8875af6", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.0694444444, "max_line_length": 271, "alphanum_fraction": 0.7406240151, "num_tokens": 803}
|
[STATEMENT]
lemma primfun_dominates:
"f < g \<Longrightarrow> dominates at_top (eval_primfun' f) (eval_primfun' g)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. f < g \<Longrightarrow> dominates at_top (eval_primfun' f) (eval_primfun' g)
[PROOF STEP]
by (elim less_primfun.elims; hypsubst) (simp_all add: ln_chain_dominates)
|
{"llama_tokens": 133, "file": "Landau_Symbols_Landau_Real_Products", "length": 1}
|
import os
import sys
import schemasim.schemas.l0_schema_templates as st
import schemasim.schemas.l1_geometric_primitives as gp
import schemasim.schemas.l2_geometric_primitive_relations as gpr
import schemasim.schemas.l3_primitive_movement as pm
import schemasim.schemas.l3_location as location
import numpy as np
class Path(st.RoleDefiningSchema):
def __init__(self, source=None, destination=None):
super().__init__()
self._type = "Path"
self._meta_type.append("Path")
self._roles = {"source": source, "destination": destination}
def _interpretPoint(self, role, frame, sim):
if (role not in self._roles) or (not isinstance(self._roles[role], st.Schema)):
return None, None
s = self._roles[role]
if "ParameterizedSchema" in s._meta_type:
name = s.getId()
return sim.translationVector(frame[name]), name
elif "GeometricPrimitive" in s._meta_type:
return s.getPoint(sim, frameData=frame), None
# TODO: add branch for generating points from Location schemas
return None, None
def _pathAtFrame(self, frameNum, frame, sim, parameterizedSchemas, disabledObjects):
space = sim.space()
source, sourceName = self._interpretPoint("source", frame, sim)
destination, destinationName = self._interpretPoint("destination", frame, sim)
ignoreList = [sourceName, destinationName] + disabledObjects
collisionManager = space.makeCollisionManager()
maximumRadius = 0.0
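# Build a collision scene from every other object in the frame, skipping
# the source/destination objects and any disabled ones; maximumRadius
# tracks a bounding sphere around the scene so that default endpoints and
# the planner's sampling region stay outside all geometry.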
for name, data in frame.items():
findSep = name.rfind(":")
nameK = name
if -1 != findSep:
nameK = name[:findSep] # strip the ":<instance>" suffix
if (nameK in parameterizedSchemas) and (name not in ignoreList):
mesh = space.loadVolume(parameterizedSchemas[nameK].getMeshPath(modifier=space.volumePathModifier()))
t = sim.translationVector(data)
pose = space.poseFromTR(t, sim.rotationRepresentation(data))
collisionManager.add_object(name, mesh, np.array(pose,dtype=np.double))
newRadius = 0.5*space.boundaryBoxDiameter(space.volumeBounds(mesh)) + space.vectorNorm(t)
if maximumRadius < newRadius:
maximumRadius = newRadius
if not source:
source = space.vectorAboveOrigin(maximumRadius)
if not destination:
destination = space.vectorAboveOrigin(maximumRadius)
path = space.planPath(source, destination, collisionManager, maximumRadius*1.01)
return path
def _pathCost(self, path, space):
return 0.0
def evaluateTimeline(self, frameData, simulator, parameterizedSchemas={}, disabledObjects=[]):
space = simulator.space()
cost = 0.0
frameCount = len(frameData)
for frameNum, frame in enumerate(frameData):
crCost = self._pathCost(self._pathAtFrame(frameNum, frame, simulator, parameterizedSchemas, disabledObjects), space)
if not crCost:
cost = frameCount*1.0
break
else:
cost = cost + crCost
judgement = True
if 0.1*frameCount < cost:
judgement = False
return judgement, cost
class PathAbsence(Path):
def __init__(self, source=None, destination=None):
super().__init__(source=source, destination=destination)
self._type = "PathAbsence"
self._meta_type.append("PathAbsence")
def _pathCost(self, path, space):
if path:
return None
return 0.0
class PathExistence(Path):
def __init__(self, source=None, destination=None):
super().__init__(source=source, destination=destination)
self._type = "PathExistence"
self._meta_type.append("PathExistence")
def _pathCost(self, path, space):
if not path:
return None
return 0.0
|
{"hexsha": "89bdcc4eb4860d59d8455ae6bc4756a614ebba27", "size": 3953, "ext": "py", "lang": "Python", "max_stars_repo_path": "schemasim/schemas/l4_path.py", "max_stars_repo_name": "mpomarlan/schemasim", "max_stars_repo_head_hexsha": "daf4a8273f743b4f5ac24549aeb1e60ea7402d2c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "schemasim/schemas/l4_path.py", "max_issues_repo_name": "mpomarlan/schemasim", "max_issues_repo_head_hexsha": "daf4a8273f743b4f5ac24549aeb1e60ea7402d2c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 20, "max_issues_repo_issues_event_min_datetime": "2020-01-07T10:38:01.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-09T17:00:13.000Z", "max_forks_repo_path": "schemasim/schemas/l4_path.py", "max_forks_repo_name": "mpomarlan/schemasim", "max_forks_repo_head_hexsha": "daf4a8273f743b4f5ac24549aeb1e60ea7402d2c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-01-10T16:44:24.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-10T16:44:24.000Z", "avg_line_length": 42.5053763441, "max_line_length": 128, "alphanum_fraction": 0.6468504933, "include": true, "reason": "import numpy", "num_tokens": 894}
|
"""Check if Stieltjes method, both analytical and discretized works as expected."""
import numpy
import numpoly
import chaospy
def test_analytical_stieltjes(analytical_distribution):
"""Assert that Analytical Stieltjes produces orthogonality."""
coeffs, [orth], norms = chaospy.analytical_stieltjes(
order=4, dist=analytical_distribution)
assert orth[0] == 1
assert numpy.allclose(chaospy.E(orth[1:], analytical_distribution), 0)
covariance = chaospy.E(
numpoly.outer(orth[1:], orth[1:]), analytical_distribution)
assert numpy.allclose(numpy.diag(numpy.diag(covariance)), covariance)
assert numpy.allclose(numpoly.lead_coefficient(orth), 1)
def test_stieltjes_compared(analytical_distribution):
"""Assert that discretized and analytical approach are equivalent."""
(alpha0, beta0), [orth0], norms0 = chaospy.analytical_stieltjes(
order=3, dist=analytical_distribution)
(alpha1, beta1), [orth1], norms1 = chaospy.discretized_stieltjes(
order=3, dist=analytical_distribution)
assert numpy.allclose(alpha0, alpha1)
assert numpy.allclose(beta0, beta1)
assert numpy.allclose(orth0.coefficients, orth1.coefficients)
assert numpy.allclose(norms0, norms1)
|
{"hexsha": "fe9a66f94a5a10c082f4cb71d0eb40114534088c", "size": 1240, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/recurrence/test_stieltjes_method.py", "max_stars_repo_name": "utsekaj42/chaospy", "max_stars_repo_head_hexsha": "0fb23cbb58eb987c3ca912e2a20b83ebab0514d0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 333, "max_stars_repo_stars_event_min_datetime": "2016-10-25T12:00:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T07:50:33.000Z", "max_issues_repo_path": "tests/recurrence/test_stieltjes_method.py", "max_issues_repo_name": "utsekaj42/chaospy", "max_issues_repo_head_hexsha": "0fb23cbb58eb987c3ca912e2a20b83ebab0514d0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 327, "max_issues_repo_issues_event_min_datetime": "2016-09-25T16:29:41.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T03:26:27.000Z", "max_forks_repo_path": "tests/recurrence/test_stieltjes_method.py", "max_forks_repo_name": "utsekaj42/chaospy", "max_forks_repo_head_hexsha": "0fb23cbb58eb987c3ca912e2a20b83ebab0514d0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 74, "max_forks_repo_forks_event_min_datetime": "2016-10-17T11:14:13.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-09T10:55:59.000Z", "avg_line_length": 42.7586206897, "max_line_length": 83, "alphanum_fraction": 0.7467741935, "include": true, "reason": "import numpy", "num_tokens": 325}
|
import numpy as np
from numpy import genfromtxt
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
from starchive import identifiers
def linear(x, m, b):
model = m*x + b
return model
root_dir = '../data/'
a = np.genfromtxt(root_dir+'final_abundances_w_ncapture.csv', delimiter=',', dtype=None, names=True, encoding=None)
par = np.genfromtxt(root_dir+"final_parameters.csv", delimiter=',', dtype=None, names=True, encoding=None)
nissen_oxygen = np.genfromtxt("nissen_oxygen.txt", dtype=None, names=True, encoding=None)
nissen = np.genfromtxt("nissen.txt", delimiter='\t', dtype=None, names=True, encoding=None)
nissen_mask = []
mask = []
conv = identifiers.Converter()
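# Cross-match Nissen's HD identifiers against our HIP-keyed abundance table;
# nissen_mask and mask collect the row indices of the stars common to both
# catalogues so the abundance arrays below stay aligned star by star.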
for i,hdname in enumerate(nissen['Star']):
hdnumber = float(hdname[3:])
hipnumber = conv.hdtohip(hdnumber)
hipname = 'HIP' + str(hipnumber)
if hipname in a['id']:
print('{0} == {1}'.format(hdname, hipname))
nissen_mask = np.append(nissen_mask, i)
mask = np.append(mask, np.where(a['id'] == hipname)[0][0])
nissen_mask = [int(i) for i in nissen_mask] # np.append promotes to float64, so cast back to int for indexing
mask = [int(i) for i in mask]
c2 = '#003399' # blue
c3 = '#CC0033' # red
c4 = '#339900' # green
plt.rcParams["font.sans-serif"] = "Helvetica"
fig = plt.figure()
xs = np.arange(-0.25,0.3,0.1)
elements = ['C', 'O', 'Si', 'Ni']
nissen_errors = [0.013, 0.0, 0.007, 0.006]
for i,el in enumerate(elements):
ax = fig.add_subplot(2,2,i+1)
abund = a["{0}I_1".format(el)][mask]
err = a["err_{0}I".format(el)][mask]
if el == 'C':
abunds = [a['CI_1'][mask], a['CH_1'][mask]]
errs = [a['err_CI'][mask], a['err_CH'][mask]]
(abund, err) = np.average(abunds, weights=errs, returned=True, axis=0)
abund_nissen = nissen['{0}Fe'.format(el)][nissen_mask] + nissen['FeH'][nissen_mask]
if el == 'O':
err_nissen = nissen_oxygen['sigma'][nissen_mask]
else:
err_nissen = np.zeros_like(abund_nissen) + nissen_errors[i]
ax.errorbar(abund_nissen, abund, xerr=err_nissen, yerr=err, fmt='o', c='black', ecolor='black', mec='black', ms=7)
ax.plot(xs, xs, color=c2, lw=2, ls='--')
diff = abund - abund_nissen
ax.text(-0.19, 0.12, '{0}: $\mu$ = {1:.3f} dex\n $\sigma$ = {2:.3f} dex'.format(el, np.mean(diff), np.std(diff)), fontsize=18)
ax.set_ylim([-0.22,0.22])
ax.set_xlim([-0.22,0.22])
ax.set_yticks(np.arange(-0.2,0.22,0.1))
ax.set_yticks(np.arange(-0.2,0.22,0.05), minor=True)
ax.set_xticks(np.arange(-0.2,0.22,0.1))
ax.set_xticks(np.arange(-0.2,0.22,0.05), minor=True)
ax.tick_params(axis='both', which='major', labelsize=16)
if (i % 2) != 0:
ax.set_yticklabels('',visible=False)
if el not in elements[-2:]:
ax.set_xticklabels('',visible=False)
fig.subplots_adjust(hspace=.05, wspace=.05)
fig.text(0.5, 0.015, '[X/H] (Nissen)', size=20, ha='center')
fig.text(0.015, 0.5, '[X/H] (this work)', rotation=90, size=20, va='center')
fig.savefig('nissen.pdf')
|
{"hexsha": "b4442afa4a2cd41dadd42b38457779561ab8a9cf", "size": 3056, "ext": "py", "lang": "Python", "max_stars_repo_path": "figures/mkplot_nissen.py", "max_stars_repo_name": "megbedell/solartwin-abundances", "max_stars_repo_head_hexsha": "200f3da3863edb39ee6a7a40271c294b8f36b16e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "figures/mkplot_nissen.py", "max_issues_repo_name": "megbedell/solartwin-abundances", "max_issues_repo_head_hexsha": "200f3da3863edb39ee6a7a40271c294b8f36b16e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "figures/mkplot_nissen.py", "max_forks_repo_name": "megbedell/solartwin-abundances", "max_forks_repo_head_hexsha": "200f3da3863edb39ee6a7a40271c294b8f36b16e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.5824175824, "max_line_length": 131, "alphanum_fraction": 0.627617801, "include": true, "reason": "import numpy,from numpy", "num_tokens": 1011}
|
/-
Copyright (c) 2022 Michael Stoll. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Michael Stoll
! This file was ported from Lean 3 source module number_theory.legendre_symbol.gauss_sum
! leanprover-community/mathlib commit d11893b411025250c8e61ff2f12ccbd7ee35ab15
! Please do not edit these lines, except to modify the commit id
! if you have ported upstream changes.
-/
import Mathbin.NumberTheory.LegendreSymbol.AddCharacter
import Mathbin.NumberTheory.LegendreSymbol.ZmodChar
import Mathbin.Algebra.CharP.CharAndCard
/-!
# Gauss sums
We define the Gauss sum associated to a multiplicative and an additive
character of a finite field and prove some results about them.
## Main definition
Let `R` be a finite commutative ring and let `R'` be another commutative ring.
If `χ` is a multiplicative character `R → R'` (type `mul_char R R'`) and `ψ`
is an additive character `R → R'` (type `add_char R R'`, which abbreviates
`(multiplicative R) →* R'`), then the *Gauss sum* of `χ` and `ψ` is `∑ a, χ a * ψ a`.
## Main results
Some important results are as follows.
* `gauss_sum_mul_gauss_sum_eq_card`: The product of the Gauss
sums of `χ` and `ψ` and that of `χ⁻¹` and `ψ⁻¹` is the cardinality
of the source ring `R` (if `χ` is nontrivial, `ψ` is primitive and `R` is a field).
* `gauss_sum_sq`: The square of the Gauss sum is `χ(-1)` times
the cardinality of `R` if in addition `χ` is a quadratic character.
* `quad_gauss_sum_frob`: For a quadratic character `χ`, raising
the Gauss sum to the `p`th power (where `p` is the characteristic of
the target ring `R'`) multiplies it by `χ p`.
* `char.card_pow_card`: When `F` and `F'` are finite fields and `χ : F → F'`
is a nontrivial quadratic character, then `(χ (-1) * #F)^(#F'/2) = χ (#F')`.
* `finite_field.two_pow_card`: For every finite field `F` of odd characteristic,
we have `2^(#F/2) = χ₈(#F)` in `F`.
This machinery can be used to derive (a generalization of) the Law of
Quadratic Reciprocity.
## Tags
additive character, multiplicative character, Gauss sum
-/
universe u v
open BigOperators
open AddChar MulChar
section GaussSumDef
-- `R` is the domain of the characters
variable {R : Type u} [CommRing R] [Fintype R]
-- `R'` is the target of the characters
variable {R' : Type v} [CommRing R']
/-!
### Definition and first properties
-/
/-- Definition of the Gauss sum associated to a multiplicative and an additive character. -/
def gaussSum (χ : MulChar R R') (ψ : AddChar R R') : R' :=
∑ a, χ a * ψ a
#align gauss_sum gaussSum
/-- Replacing `ψ` by `mul_shift ψ a` and multiplying the Gauss sum by `χ a` does not change it. -/
theorem gaussSum_mulShift (χ : MulChar R R') (ψ : AddChar R R') (a : Rˣ) :
χ a * gaussSum χ (mulShift ψ a) = gaussSum χ ψ :=
by
simp only [gaussSum, mul_shift_apply, Finset.mul_sum]
simp_rw [← mul_assoc, ← map_mul]
exact Fintype.sum_bijective _ a.mul_left_bijective _ _ fun x => rfl
#align gauss_sum_mul_shift gaussSum_mulShift
end GaussSumDef
/-!
### The product of two Gauss sums
-/
section GaussSumProd
-- In the following, we need `R` to be a finite field and `R'` to be a domain.
variable {R : Type u} [Field R] [Fintype R] {R' : Type v} [CommRing R'] [IsDomain R']
-- A helper lemma for `gauss_sum_mul_gauss_sum_eq_card` below
-- Is this useful enough in other contexts to be public?
private theorem gauss_sum_mul_aux {χ : MulChar R R'} (hχ : IsNontrivial χ) (ψ : AddChar R R')
(b : R) : (∑ a, χ (a * b⁻¹) * ψ (a - b)) = ∑ c, χ c * ψ (b * (c - 1)) :=
by
cases' eq_or_ne b 0 with hb hb
· -- case `b = 0`
simp only [hb, inv_zero, MulZeroClass.mul_zero, MulChar.map_zero, MulZeroClass.zero_mul,
Finset.sum_const_zero, map_zero_one, mul_one]
exact hχ.sum_eq_zero.symm
· -- case `b ≠ 0`
refine' (Fintype.sum_bijective _ (Equiv.mulLeft_bijective₀ b hb) _ _ fun x => _).symm
rw [mul_assoc, mul_comm x, ← mul_assoc, mul_inv_cancel hb, one_mul, mul_sub, mul_one]
#align gauss_sum_mul_aux gauss_sum_mul_aux
/-- We have `gauss_sum χ ψ * gauss_sum χ⁻¹ ψ⁻¹ = fintype.card R`
when `χ` is nontrivial and `ψ` is primitive (and `R` is a field). -/
theorem gaussSum_mul_gaussSum_eq_card {χ : MulChar R R'} (hχ : IsNontrivial χ) {ψ : AddChar R R'}
(hψ : IsPrimitive ψ) : gaussSum χ ψ * gaussSum χ⁻¹ ψ⁻¹ = Fintype.card R :=
by
simp only [gaussSum, AddChar.inv_apply, Finset.sum_mul, Finset.mul_sum, MulChar.inv_apply']
conv in _ * _ * (_ * _) => rw [mul_mul_mul_comm, ← map_mul, ← map_add_mul, ← sub_eq_add_neg]
simp_rw [gauss_sum_mul_aux hχ ψ]
rw [Finset.sum_comm]
classical
-- to get `[decidable_eq R]` for `sum_mul_shift`
simp_rw [← Finset.mul_sum, sum_mul_shift _ hψ, sub_eq_zero, mul_ite, MulZeroClass.mul_zero]
rw [Finset.sum_ite_eq' Finset.univ (1 : R)]
simp only [Finset.mem_univ, map_one, one_mul, if_true]
#align gauss_sum_mul_gauss_sum_eq_card gaussSum_mul_gaussSum_eq_card
/-- When `χ` is a nontrivial quadratic character, then the square of `gauss_sum χ ψ`
is `χ(-1)` times the cardinality of `R`. -/
theorem gaussSum_sq {χ : MulChar R R'} (hχ₁ : IsNontrivial χ) (hχ₂ : IsQuadratic χ)
{ψ : AddChar R R'} (hψ : IsPrimitive ψ) : gaussSum χ ψ ^ 2 = χ (-1) * Fintype.card R :=
by
rw [pow_two, ← gaussSum_mul_gaussSum_eq_card hχ₁ hψ, hχ₂.inv, mul_rotate']
congr
rw [mul_comm, ← gaussSum_mulShift _ _ (-1 : Rˣ), inv_mul_shift]
rfl
#align gauss_sum_sq gaussSum_sq
end GaussSumProd
/-!
### Gauss sums and Frobenius
-/
section gaussSum_frob
variable {R : Type u} [CommRing R] [Fintype R] {R' : Type v} [CommRing R']
-- We assume that the target ring `R'` has prime characteristic `p`.
variable (p : ℕ) [fp : Fact p.Prime] [hch : CharP R' p]
include fp hch
/-- When `R'` has prime characteristic `p`, then the `p`th power of the Gauss sum
of `χ` and `ψ` is the Gauss sum of `χ^p` and `ψ^p`. -/
theorem gaussSum_frob (χ : MulChar R R') (ψ : AddChar R R') :
gaussSum χ ψ ^ p = gaussSum (χ ^ p) (ψ ^ p) :=
by
rw [← frobenius_def, gaussSum, gaussSum, map_sum]
simp_rw [pow_apply' χ fp.1.Pos, map_mul, frobenius_def]
rfl
#align gauss_sum_frob gaussSum_frob
/-- For a quadratic character `χ` and when the characteristic `p` of the target ring
is a unit in the source ring, the `p`th power of the Gauss sum of`χ` and `ψ` is
`χ p` times the original Gauss sum. -/
theorem MulChar.IsQuadratic.gaussSum_frob (hp : IsUnit (p : R)) {χ : MulChar R R'}
(hχ : IsQuadratic χ) (ψ : AddChar R R') : gaussSum χ ψ ^ p = χ p * gaussSum χ ψ := by
rw [gaussSum_frob, pow_mul_shift, hχ.pow_char p, ← gaussSum_mulShift χ ψ hp.unit, ← mul_assoc,
hp.unit_spec, ← pow_two, ← pow_apply' _ (by norm_num : 0 < 2), hχ.sq_eq_one, ← hp.unit_spec,
one_apply_coe, one_mul]
#align mul_char.is_quadratic.gauss_sum_frob MulChar.IsQuadratic.gaussSum_frob
/-- For a quadratic character `χ` and when the characteristic `p` of the target ring
is a unit in the source ring and `n` is a natural number, the `p^n`th power of the Gauss
sum of`χ` and `ψ` is `χ (p^n)` times the original Gauss sum. -/
theorem MulChar.IsQuadratic.gaussSum_frob_iter (n : ℕ) (hp : IsUnit (p : R)) {χ : MulChar R R'}
(hχ : IsQuadratic χ) (ψ : AddChar R R') : gaussSum χ ψ ^ p ^ n = χ (p ^ n) * gaussSum χ ψ :=
by
induction' n with n ih
· rw [pow_zero, pow_one, pow_zero, MulChar.map_one, one_mul]
·
rw [pow_succ, mul_comm p, pow_mul, ih, mul_pow, hχ.gauss_sum_frob _ hp, ← mul_assoc, pow_succ,
mul_comm (p : R), map_mul, ← pow_apply' χ fp.1.Pos (p ^ n), hχ.pow_char p]
#align mul_char.is_quadratic.gauss_sum_frob_iter MulChar.IsQuadratic.gaussSum_frob_iter
end gaussSum_frob
/-!
### Values of quadratic characters
-/
section GaussSumValues
variable {R : Type u} [CommRing R] [Fintype R] {R' : Type v} [CommRing R'] [IsDomain R']
/-- If the square of the Gauss sum of a quadratic character is `χ(-1) * #R`,
then we get, for all `n : ℕ`, the relation `(χ(-1) * #R) ^ (p^n/2) = χ(p^n)`,
where `p` is the (odd) characteristic of the target ring `R'`.
This version can be used when `R` is not a field, e.g., `ℤ/8ℤ`. -/
theorem Char.card_pow_char_pow {χ : MulChar R R'} (hχ : IsQuadratic χ) (ψ : AddChar R R') (p n : ℕ)
[fp : Fact p.Prime] [hch : CharP R' p] (hp : IsUnit (p : R)) (hp' : p ≠ 2)
(hg : gaussSum χ ψ ^ 2 = χ (-1) * Fintype.card R) :
(χ (-1) * Fintype.card R) ^ (p ^ n / 2) = χ (p ^ n) :=
by
have : gaussSum χ ψ ≠ 0 := by
intro hf
rw [hf, zero_pow (by norm_num : 0 < 2), eq_comm, mul_eq_zero] at hg
exact
not_isUnit_prime_of_dvd_card p
((CharP.cast_eq_zero_iff R' p _).mp <| hg.resolve_left (is_unit_one.neg.map χ).NeZero) hp
rw [← hg]
apply mul_right_cancel₀ this
rw [← hχ.gauss_sum_frob_iter p n hp ψ, ← pow_mul, mul_comm, ← pow_succ,
Nat.two_mul_div_two_add_one_of_odd (fp.1.eq_two_or_odd'.resolve_left hp').pow]
#align char.card_pow_char_pow Char.card_pow_char_pow
/-- When `F` and `F'` are finite fields and `χ : F → F'` is a nontrivial quadratic character,
then `(χ(-1) * #F)^(#F'/2) = χ(#F')`. -/
theorem Char.card_pow_card {F : Type _} [Field F] [Fintype F] {F' : Type _} [Field F'] [Fintype F']
{χ : MulChar F F'} (hχ₁ : IsNontrivial χ) (hχ₂ : IsQuadratic χ)
(hch₁ : ringChar F' ≠ ringChar F) (hch₂ : ringChar F' ≠ 2) :
(χ (-1) * Fintype.card F) ^ (Fintype.card F' / 2) = χ (Fintype.card F') :=
by
obtain ⟨n, hp, hc⟩ := FiniteField.card F (ringChar F)
obtain ⟨n', hp', hc'⟩ := FiniteField.card F' (ringChar F')
let ψ := primitive_char_finite_field F F' hch₁
let FF' := CyclotomicField ψ.n F'
have hchar := Algebra.ringChar_eq F' FF'
apply (algebraMap F' FF').Injective
rw [map_pow, map_mul, map_natCast, hc', hchar, Nat.cast_pow]
simp only [← MulChar.ringHomComp_apply]
haveI := Fact.mk hp'
haveI := Fact.mk (hchar.subst hp')
rw [Ne, ← Nat.prime_dvd_prime_iff_eq hp' hp, ← isUnit_iff_not_dvd_char, hchar] at hch₁
exact
Char.card_pow_char_pow (hχ₂.comp _) ψ.char (ringChar FF') n' hch₁ (hchar ▸ hch₂)
(gaussSum_sq (hχ₁.comp <| RingHom.injective _) (hχ₂.comp _) ψ.prim)
#align char.card_pow_card Char.card_pow_card
end GaussSumValues
section GaussSumTwo
/-!
### The quadratic character of 2
This section proves the following result.
For every finite field `F` of odd characteristic, we have `2^(#F/2) = χ₈(#F)` in `F`.
This can be used to show that the quadratic character of `F` takes the value
`χ₈(#F)` at `2`.
The proof uses the Gauss sum of `χ₈` and a primitive additive character on `ℤ/8ℤ`;
in this way, the result is reduced to `card_pow_char_pow`.
-/
open ZMod
/-- For every finite field `F` of odd characteristic, we have `2^(#F/2) = χ₈(#F)` in `F`. -/
theorem FiniteField.two_pow_card {F : Type _} [Fintype F] [Field F] (hF : ringChar F ≠ 2) :
(2 : F) ^ (Fintype.card F / 2) = χ₈ (Fintype.card F) :=
by
have hp2 : ∀ n : ℕ, (2 ^ n : F) ≠ 0 := fun n => pow_ne_zero n (Ring.two_ne_zero hF)
obtain ⟨n, hp, hc⟩ := FiniteField.card F (ringChar F)
-- we work in `FF`, the eighth cyclotomic field extension of `F`
let FF := (Polynomial.cyclotomic 8 F).SplittingField
haveI : FiniteDimensional F FF :=
Polynomial.IsSplittingField.finiteDimensional FF (Polynomial.cyclotomic 8 F)
haveI : Fintype FF := FiniteDimensional.fintypeOfFintype F FF
have hchar := Algebra.ringChar_eq F FF
have FFp := hchar.subst hp
haveI := Fact.mk FFp
have hFF := ne_of_eq_of_ne hchar.symm hF
-- `ring_char FF ≠ 2`
have hu : IsUnit (ringChar FF : ZMod 8) :=
by
rw [isUnit_iff_not_dvd_char, ring_char_zmod_n]
rw [Ne, ← Nat.prime_dvd_prime_iff_eq FFp Nat.prime_two] at hFF
change ¬_ ∣ 2 ^ 3
exact mt FFp.dvd_of_dvd_pow hFF
-- there is a primitive additive character `ℤ/8ℤ → FF`, sending `a + 8ℤ ↦ τ^a`
-- with a primitive eighth root of unity `τ`
let ψ₈ := primitive_zmod_char 8 F (by convert hp2 3 <;> norm_num)
let τ : FF := ψ₈.char 1
have τ_spec : τ ^ 4 = -1 := by
refine' (sq_eq_one_iff.1 _).resolve_left _ <;>
· simp only [τ, ← map_nsmul_pow]
erw [AddChar.IsPrimitive.zMod_char_eq_one_iff 8 ψ₈.prim]
decide
-- we consider `χ₈` as a multiplicative character `ℤ/8ℤ → FF`
let χ := χ₈.ring_hom_comp (Int.castRingHom FF)
have hχ : χ (-1) = 1 := NormNum.int_cast_one
have hq : is_quadratic χ := is_quadratic_χ₈.comp _
-- we now show that the Gauss sum of `χ` and `ψ₈` has the relevant property
have hg : gaussSum χ ψ₈.char ^ 2 = χ (-1) * Fintype.card (ZMod 8) :=
by
rw [hχ, one_mul, card, gaussSum]
convert← congr_arg (· ^ 2) (Fin.sum_univ_eight fun x => (χ₈ x : FF) * τ ^ x.val)
· ext
congr
apply pow_one
convert_to(0 + 1 * τ ^ 1 + 0 + -1 * τ ^ 3 + 0 + -1 * τ ^ 5 + 0 + 1 * τ ^ 7) ^ 2 = _
· simp only [χ₈_apply, Matrix.cons_val_zero, Matrix.cons_val_one, Matrix.head_cons,
Matrix.cons_vec_bit0_eq_alt0, Matrix.cons_vec_bit1_eq_alt1, Matrix.cons_vecAppend,
Matrix.cons_vecAlt0, Matrix.cons_vecAlt1, Int.cast_zero, Int.cast_one, Int.cast_neg,
MulZeroClass.zero_mul]
rfl
convert_to 8 + (τ ^ 4 + 1) * (τ ^ 10 - 2 * τ ^ 8 - 2 * τ ^ 6 + 6 * τ ^ 4 + τ ^ 2 - 8) = _
· ring
· rw [τ_spec]
norm_num
-- this allows us to apply `card_pow_char_pow` to our situation
have h := Char.card_pow_char_pow hq ψ₈.char (ringChar FF) n hu hFF hg
rw [card, ← hchar, hχ, one_mul, ← hc, ← Nat.cast_pow (ringChar F), ← hc] at h
-- finally, we change `2` to `8` on the left hand side
convert_to(8 : F) ^ (Fintype.card F / 2) = _
·
rw [(by norm_num : (8 : F) = 2 ^ 2 * 2), mul_pow,
(FiniteField.isSquare_iff hF <| hp2 2).mp ⟨2, pow_two 2⟩, one_mul]
apply (algebraMap F FF).Injective
simp only [map_pow, map_bit0, map_one, map_intCast]
convert h
norm_num
#align finite_field.two_pow_card FiniteField.two_pow_card
end GaussSumTwo
|
{"author": "leanprover-community", "repo": "mathlib3port", "sha": "62505aa236c58c8559783b16d33e30df3daa54f4", "save_path": "github-repos/lean/leanprover-community-mathlib3port", "path": "github-repos/lean/leanprover-community-mathlib3port/mathlib3port-62505aa236c58c8559783b16d33e30df3daa54f4/Mathbin/NumberTheory/LegendreSymbol/GaussSum.lean"}
|
#-*-coding:utf-8-*-
'''
Created on Nov 14, 2018
@author: pengzhiliang
'''
import time
import numpy as np
import os
import os.path as osp
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from tqdm import tqdm
from torch.utils.data import Dataset,DataLoader
from torch.optim import lr_scheduler,Adam,SGD
from torchvision import datasets, models, transforms
from torchsummary import summary
from model.unet import UNet
from utils.metrics import Score,averageMeter
from utils.loss import cross_entropy2d,BCEDiceLoss,bootstrapped_cross_entropy2d
from dataloader.MRBrain_loader import MRBrainSDataset
from dataloader.augmentation import *
from dataloader.coder import merge_classes
# Parameter settings
default_path = osp.join('/home/cv_xfwang/data/', 'MRBrainS')
learning_rate = 1e-6
batch_size = 32
num_workers = 4
resume_path = '/home/cv_xfwang/MRBrainS_seg/checkpoint/best_unet_model.pkl'
resume_flag = True
start_epoch = 0
end_epoch = 500
test_interval = 10
print_interval = 1
momentum=0.99
weight_decay = 0.005
best_iou = -100
# GPU or CPU
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Setup Dataloader
data_aug = Compose([
RandomHorizontallyFlip(0.5),
RandomRotate(10),
Scale(224),
])
train_loader = DataLoader(MRBrainSDataset(default_path, split='train', is_transform=True, \
img_norm=True, augmentations=data_aug), \
batch_size=batch_size,num_workers=num_workers,pin_memory=True,shuffle=True)
val_loader = DataLoader(MRBrainSDataset(default_path, split='val', is_transform=True, \
img_norm=True, augmentations=Compose([Scale(224)])), \
batch_size=1,num_workers=num_workers,pin_memory=True,shuffle=False)
# Setup Model and summary
model = UNet().to(device)
summary(model, (3,224,224), batch_size) # print a summary of the network parameters
# model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))
# parameters to be learned
# base_learning_list = list(filter(lambda p: p.requires_grad, model.base_net.parameters()))
# learning_list = model.parameters()
# optimizer and learning-rate settings
optimizer = torch.optim.SGD(model.parameters(),lr=learning_rate,momentum=momentum,weight_decay=weight_decay)
# learning-rate scheduler: multiplies the LR by gamma at 20%, 60% and 90% of the epochs
scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[int(0.2 * end_epoch), int(0.6 * end_epoch),int(0.9 * end_epoch)], gamma=0.01)
# scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',patience=10, verbose=True)
criterion = cross_entropy2d
# criterion = BCEDiceLoss()
# running_metrics = Score(n_classes=9)
running_metrics = Score(n_classes=4) # label_test=[0,2,2,3,3,1,1,0,0]
label_test = [0,2,2,3,3,1,1,0,0]
# resume
if (osp.isfile(resume_path) and resume_flag):
checkpoint = torch.load(resume_path)
model.load_state_dict(checkpoint["model_state"])
optimizer.load_state_dict(checkpoint["optimizer_state"])
best_iou = checkpoint['best_iou']
# scheduler.load_state_dict(checkpoint["scheduler_state"])
# start_epoch = checkpoint["epoch"]
print("=====>",
"Loaded checkpoint '{}' (iter {})".format(
resume_path, checkpoint["epoch"]
)
)
else:
print("=====>","No checkpoint found at '{}'".format(resume_path))
print("load unet weight and bias")
model_dict = model.state_dict()
pretrained_dict = torch.load("/home/cv_xfwang/Pytorch-UNet/MODEL.pth")
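    # keep only the pretrained entries whose keys also exist in the current
    # model's state_dict -- a common partial weight-loading pattern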
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
# Training
def train(epoch):
print("Epoch: ",epoch)
model.train()
total_loss = 0
# for index, (img, mask) in tqdm(enumerate(train_loader)):
for index, (img, mask) in tqdm(enumerate(train_loader), total=len(train_loader), desc="Epoch {}".format(epoch), ncols=0):
#img: torch.Size([32, 3, 256, 256]) mask:torch.Size([32, 256, 256])
img,mask= img.to(device),mask.to(device)
optimizer.zero_grad()
output = model(img) #[-1, 9, 256, 256]
# _, pred = torch.max(output, dim=1)
loss = criterion(output,mask)#,size_average=False
        total_loss += loss.item()  # .item() detaches the value so the autograd graph is not retained across iterations
loss.backward()
optimizer.step()
print("Average loss: %.4f"%(total_loss/(img.size(0)*(index+1))) )
# return mean IoU, mean dice
def test(epoch):
print(">>>Test: ")
global best_iou
model.eval()
running_metrics.reset()
with torch.no_grad():
for i, (img, mask) in tqdm(enumerate(val_loader)):
img = img.to(device)
output = model(img) #[-1, 9, 256, 256]
probs = F.softmax(output, dim=1)
_, pred = torch.max(probs, dim=1)
pred = pred.cpu().data[0].numpy()
label = mask.cpu().data[0].numpy()
            pred = np.asarray(pred, dtype=int)  # np.int was removed from NumPy; builtin int matches the old behavior
            label = np.asarray(label, dtype=int)
# print(pred.shape,label.shape)
running_metrics.update(merge_classes(label),merge_classes(pred))
score, class_iou = running_metrics.get_scores()
for k, v in score.items():
print(k,':',v)
print(i, class_iou)
if score["Mean IoU : \t"] > best_iou:
best_iou = score["Mean IoU : \t"]
state = {
"epoch": epoch + 1,
"model_state": model.state_dict(),
"optimizer_state": optimizer.state_dict(),
"scheduler_state": scheduler.state_dict(),
"best_iou": best_iou,
}
save_path = osp.join(osp.split(resume_path)[0],"best_unet_model.pkl")
print("saving......")
torch.save(state, save_path)
# return mIoU, mean_dice
for epoch in range(start_epoch, end_epoch):
train(epoch)
test(epoch)
scheduler.step()
# print(train_loss[-1],train_acc[-1],test_loss[-1],test_acc[-1]
|
{"hexsha": "ff6b3fc1e47aa6ed402abc44bbe9d8841a9a016a", "size": 5864, "ext": "py", "lang": "Python", "max_stars_repo_path": "train_unet.py", "max_stars_repo_name": "pengzhiliang/MRBrainS_seg", "max_stars_repo_head_hexsha": "52c392edb0b3d3988cdf526002f2e6df5c8401fe", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 46, "max_stars_repo_stars_event_min_datetime": "2018-12-12T06:39:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-14T09:23:23.000Z", "max_issues_repo_path": "train_unet.py", "max_issues_repo_name": "liumindbdx/MRBrainS_seg", "max_issues_repo_head_hexsha": "52c392edb0b3d3988cdf526002f2e6df5c8401fe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2019-10-30T09:20:20.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-11T11:51:17.000Z", "max_forks_repo_path": "train_unet.py", "max_forks_repo_name": "liumindbdx/MRBrainS_seg", "max_forks_repo_head_hexsha": "52c392edb0b3d3988cdf526002f2e6df5c8401fe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2019-04-02T10:47:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T00:54:47.000Z", "avg_line_length": 34.9047619048, "max_line_length": 137, "alphanum_fraction": 0.6741132333, "include": true, "reason": "import numpy", "num_tokens": 1556}
|
using Surrogates
using ForwardDiff
using LinearAlgebra
using Flux
using Flux: @epochs
using Flux.Tracker
using Zygote
#using Zygote: @nograd
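# Smoke tests for AD compatibility of the surrogate types. The block comments
# below keep the ForwardDiff and Tracker variants for reference; only the
# Zygote section near the bottom currently runs.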
#=
#FORWARD
###### 1D ######
lb = 0.0
ub = 10.0
n = 5
x = sample(n,lb,ub,SobolSample())
f = x -> x^2
y = f.(x)
#Radials
my_rad = RadialBasis(x,y,lb,ub,x->norm(x),2)
g = x -> ForwardDiff.derivative(my_rad,x)
g(5.0)
#Kriging
p = 1.5
my_krig = Kriging(x,y,p)
g = x -> ForwardDiff.derivative(my_krig,x)
g(5.0)
#Linear Surrogate
my_linear = LinearSurrogate(x,y,lb,ub)
g = x -> ForwardDiff.derivative(my_linear,x)
g(5.0)
#Inverse distance
p = 1.4
my_inverse = InverseDistanceSurrogate(x,y,p,lb,ub)
g = x -> ForwardDiff.derivative(my_inverse,x)
g(5.0)
#Lobachesky
n = 4
α = 2.4
my_loba = LobacheskySurrogate(x,y,α,n,lb,ub)
g = x -> ForwardDiff.derivative(my_loba,x)
g(5.0)
#Second order polynomial
my_second = SecondOrderPolynomialSurrogate(x,y,lb,ub)
g = x -> ForwardDiff.derivative(my_second,x)
g(5.0)
###### ND ######
lb = [0.0,0.0]
ub = [10.0,10.0]
n = 5
x = sample(n,lb,ub,SobolSample())
f = x -> x[1]*x[2]
y = f.(x)
#Radials
my_rad = RadialBasis(x,y,[lb,ub],z->norm(z),2)
g = x -> ForwardDiff.gradient(my_rad,x)
g([2.0,5.0])
#Kriging
theta = [2.0,2.0]
p = [1.9,1.9]
my_krig = Kriging(x,y,p,theta)
g = x -> ForwardDiff.gradient(my_krig,x)
g([2.0,5.0])
#Linear Surrogate
my_linear = LinearSurrogate(x,y,lb,ub)
g = x -> ForwardDiff.gradient(my_linear,x)
g([2.0,5.0])
#Inverse Distance
p = 1.4
my_inverse = InverseDistanceSurrogate(x,y,p,lb,ub)
g = x -> ForwardDiff.gradient(my_inverse,x)
g([2.0,5.0])
#Lobachesky
alpha = [1.4,1.4]
n = 4
my_loba_ND = LobacheskySurrogate(x,y,alpha,n,lb,ub)
g = x -> ForwardDiff.gradient(my_loba_ND,x)
g([2.0,5.0])
#Second order polynomial
my_second = SecondOrderPolynomialSurrogate(x,y,lb,ub)
g = x -> ForwardDiff.gradient(my_second,x)
g([2.0,5.0])
### Tracker ###
#=
#1D
lb = 0.0
ub = 10.0
n = 5
x = sample(n,lb,ub,SobolSample())
f = x -> x^2
y = f.(x)
#Radials
my_rad = RadialBasis(x,y,lb,ub,x->norm(x),2)
g = x -> Tracker.gradient(my_rad,x)
g(5.0)
#Kriging
p = 1.5
my_krig = Kriging(x,y,p)
g = x -> Tracker.gradient(my_krig,x)
g(5.0)
#Linear Surrogate
my_linear = LinearSurrogate(x,y,lb,ub)
g = x -> Tracker.gradient(my_linear,x)
g(5.0)
#Inverse distance
p = 1.4
my_inverse = InverseDistanceSurrogate(x,y,p,lb,ub)
g = x -> Tracker.gradient(my_inverse,x)
g(5.0)
#Lobachesky
n = 4
α = 2.4
my_loba = LobacheskySurrogate(x,y,α,n,lb,ub)
g = x -> Tracker.gradient(my_loba,x)
g(5.0)
#Second order polynomial
my_second = SecondOrderPolynomialSurrogate(x,y,lb,ub)
g = x -> Tracker.gradient(my_second,x)
g(5.0)
#ND
lb = [0.0,0.0]
ub = [10.0,10.0]
n = 100
x = sample(n,lb,ub,SobolSample())
f = x -> x[1]*x[2]^2
y = f.(x)
#Radials
my_rad = RadialBasis(x,y,[lb,ub],z->norm(z),2)
g = x -> Tracker.gradient(my_rad,x)
g([2.0,5.0])
#Kriging
theta = [2.0,2.0]
p = [1.9,1.9]
my_krig = Kriging(x,y,p,theta)
g = x -> Tracker.gradient(my_krig,x)
g([2.0,5.0])
#Linear Surrogate
my_linear = LinearSurrogate(x,y,lb,ub)
g = x -> Tracker.gradient(my_linear,x)
g([2.0,5.0])
#Inverse Distance
p = 1.4
my_inverse = InverseDistanceSurrogate(x,y,p,lb,ub)
g = x -> Tracker.gradient(my_inverse,x)
g([2.0,5.0])
#Lobachesky
alpha = [1.4,1.4]
n = 4
my_loba_ND = LobacheskySurrogate(x,y,alpha,n,lb,ub)
g = x -> Tracker.gradient(my_loba_ND,x)
g([2.0,5.0])
#Second order polynomial
my_second = SecondOrderPolynomialSurrogate(x,y,lb,ub)
g = x -> Tracker.gradient(my_second,x)
g([2.0,5.0])
=#
### ZYGOTE ###
=#
###### 1D ######
lb = 0.0
ub = 10.0
n = 5
x = sample(n,lb,ub,SobolSample())
f = x -> x^2
y = f.(x)
#Radials
my_rad = RadialBasis(x,y,lb,ub,x->norm(x),2)
g = x -> my_rad'(x)
g(5.0)
Zygote.refresh()
#Kriging
p = 1.5
my_krig = Kriging(x,y,p)
g = x -> my_krig'(x)
g(5.0)
Zygote.refresh()
#Linear Surrogate
my_linear = LinearSurrogate(x,y,lb,ub)
g = x -> my_linear'(x)
g(5.0)
Zygote.refresh()
#Inverse distance
p = 1.4
my_inverse = InverseDistanceSurrogate(x,y,p,lb,ub)
g = x -> my_inverse'(x)
g(5.0)
Zygote.refresh()
#Second order polynomial
my_second = SecondOrderPolynomialSurrogate(x,y,lb,ub)
g = x -> my_second'(x)
g(5.0)
Zygote.refresh()
#Lobachesky
n = 4
α = 2.4
my_loba = LobacheskySurrogate(x,y,α,n,lb,ub)
g = x -> my_loba'(x)
g(0.0)
#NN
Zygote.refresh()
model = Chain(Dense(1,1))
loss(x, y) = Flux.mse(model(x), y)
opt = Descent(0.01)
n_echos = 1
my_neural = NeuralSurrogate(x,y,lb,ub,model,loss,opt,n_echos)
g = x->my_neural'(x)
g(3.4)
###### ND ######
lb = [0.0,0.0]
ub = [10.0,10.0]
n = 5
x = sample(n,lb,ub,SobolSample())
f = x -> x[1]*x[2]
y = f.(x)
#Radials
my_rad = RadialBasis(x,y,[lb,ub],z->norm(z),2)
g = x -> Zygote.gradient(my_rad,x)
g((2.0,5.0))
#Kriging
Zygote.refresh()
theta = [2.0,2.0]
p = [1.9,1.9]
my_krig = Kriging(x,y,p,theta)
g = x -> Zygote.gradient(my_krig,x)
g((2.0,5.0))
#Linear Surrogate
Zygote.refresh()
my_linear = LinearSurrogate(x,y,lb,ub)
g = x -> Zygote.gradient(my_linear,x)
g((2.0,5.0))
#Inverse Distance
Zygote.refresh()
p = 1.4
my_inverse = InverseDistanceSurrogate(x,y,p,lb,ub)
g = x -> Zygote.gradient(my_inverse,x)
g((2.0,5.0))
#Lobachesky: not working yet, weird issue with Zygote @nograd
#=
Zygote.refresh()
alpha = [1.4,1.4]
n = 4
my_loba_ND = LobacheskySurrogate(x,y,alpha,n,lb,ub)
g = x -> Zygote.gradient(my_loba_ND,x)
g((2.0,5.0))
=#
#Second order polynomial mutating arrays
Zygote.refresh()
my_second = SecondOrderPolynomialSurrogate(x,y,lb,ub)
g = x -> Zygote.gradient(my_second,x)
g((2.0,5.0))
#NN
Zygote.refresh()
model = Chain(Dense(2,1))
loss(x, y) = Flux.mse(model(x), y)
opt = Descent(0.01)
n_echos = 1
my_neural = NeuralSurrogate(x,y,lb,ub,model,loss,opt,n_echos)
g = x -> Zygote.gradient(my_neural,x)
g((2.0,5.0))
|
{"hexsha": "4fc941928ae49424cf73b709e623531bc9cd73b0", "size": 5660, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/AD_compatibility.jl", "max_stars_repo_name": "UnofficialJuliaMirror/Surrogates.jl-6fc51010-71bc-11e9-0e15-a3fcc6593c49", "max_stars_repo_head_hexsha": "9680039453db69ccc9bad8721287e340381912f1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/AD_compatibility.jl", "max_issues_repo_name": "UnofficialJuliaMirror/Surrogates.jl-6fc51010-71bc-11e9-0e15-a3fcc6593c49", "max_issues_repo_head_hexsha": "9680039453db69ccc9bad8721287e340381912f1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/AD_compatibility.jl", "max_forks_repo_name": "UnofficialJuliaMirror/Surrogates.jl-6fc51010-71bc-11e9-0e15-a3fcc6593c49", "max_forks_repo_head_hexsha": "9680039453db69ccc9bad8721287e340381912f1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.6184210526, "max_line_length": 61, "alphanum_fraction": 0.6590106007, "num_tokens": 2318}
|
[STATEMENT]
lemma splits_iff: "(l, a, r) \<in> set (splits ll) = (ll = l @ a # r)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ((l, a, r) \<in> set (splits ll)) = (ll = l @ a # r)
[PROOF STEP]
by(induction ll arbitrary: l a r)(auto simp add: Cons_eq_append_conv)
|
{"llama_tokens": 119, "file": "ADS_Functor_Inclusion_Proof_Construction", "length": 1}
|
macro inline_widget(ex)
gname, name = ex.args
quote
function $(esc(name))(args...; props...)
widget = $(esc(gname))(args...)
length(props) > 0 && set!(widget; props...)
return widget
end
end
end
macro container_widget(ex)
gname, name = ex.args
quote
function $(esc(name))(children::Union{Function, GtkWidget, Tuple, Vector}, args...; props...)
widget = $(esc(gname))(args...)
length(props) > 0 && set!(widget; props...)
add!(widget, children)
return widget
end
end
end
function SpinButton(range::AbstractRange; init::Real = middle(range), props...)
spin = GtkSpinButton(range)
length(props) > 0 && set!(spin; props...)
set_gtk_property!(GAccessor.adjustment(spin), :value, init)
return spin
end
@inline_widget GtkLabel --> Label
macro label_str(text)
:( Label($(esc(text))) )
end
@inline_widget GtkEntry --> Entry
macro entry_str(text)
:( Entry(text = $(esc(text))) )
end
@inline_widget GtkScale --> Scale
@inline_widget GtkButton --> Button
@inline_widget GtkSpinner --> Spinner
@inline_widget GtkTextView --> TextView
@inline_widget GtkStatusbar --> StatusBar
@inline_widget Gtk.GtkSwitch --> Switch
@inline_widget GtkLinkButton --> LinkButton
@inline_widget GtkFontButton --> FontButton
@inline_widget GtkAppChooser --> AppChooser
@inline_widget GtkColorButton --> ColorButton
@inline_widget GtkProgressBar --> ProgressBar
@inline_widget GtkFileChooser --> FileChooser
@inline_widget GtkCheckButton --> CheckBox
@inline_widget GtkToggleButton --> ToggleButton
@inline_widget GtkComboBoxText --> TextList
@inline_widget GtkVolumeButton --> VolumeButton
@inline_widget GtkCanvas --> Canvas
function ColorButton(color::Colorant; props...)
ColorButton(convert(Gtk.GdkRGBA, color); props...)
end
function ColorButton(color::String; props...)
ColorButton(parse(Colorant, color); props...)
end
macro colorbtn_str(color)
:( ColorButton($(esc(color))) )
end
function Slider(range::AbstractRange; init::Real = middle(range), props...)
slider = Scale(false, range; props...)
set_gtk_property!(GAccessor.adjustment(slider), :value, init)
return slider
end
macro button_str(text)
:( Button($(esc(text))) )
end
@container_widget GtkBox --> Box
@container_widget GtkFrame --> Frame
@container_widget GtkPaned --> Paned
@container_widget GtkWindow --> Window
@container_widget GtkMenuBar --> MenuBar
@container_widget GtkToolbar --> Toolbar
@container_widget GtkTreeView --> TreeView
@container_widget GtkNotebook --> Notebook
@container_widget GtkScrolledWindow --> ScrolledWindow
@container_widget GtkExpander --> Expander
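# A usage sketch (argument order as defined by the macros above): container
# wrappers take their children first, then the Gtk constructor arguments,
# e.g. something like `Window(Box(:v), "Title", 400, 300)`, which builds the
# GtkWindow, applies keyword props via `set!`, and `add!`s the Box as a child.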
|
{"hexsha": "a19325011eed9bc8d394f220830b03717f37beb9", "size": 2797, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/widgets.jl", "max_stars_repo_name": "jorge-brito/Alexya.jl", "max_stars_repo_head_hexsha": "731f9357bedaefd1a015302623194f9108674003", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/widgets.jl", "max_issues_repo_name": "jorge-brito/Alexya.jl", "max_issues_repo_head_hexsha": "731f9357bedaefd1a015302623194f9108674003", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-02-11T06:59:41.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-29T10:29:40.000Z", "max_forks_repo_path": "src/widgets.jl", "max_forks_repo_name": "jorge-brito/Alexya.jl", "max_forks_repo_head_hexsha": "731f9357bedaefd1a015302623194f9108674003", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.402173913, "max_line_length": 101, "alphanum_fraction": 0.6785841974, "num_tokens": 665}
|
""" Unit test for the Problem class. """
import unittest
import numpy as np
from six import text_type, PY3
from six.moves import cStringIO
import warnings
from openmdao.components.linear_system import LinearSystem
from openmdao.core.component import Component
from openmdao.core.problem import Problem
from openmdao.core.checks import ConnectError
from openmdao.core.group import Group
from openmdao.components.param_comp import ParamComp
from openmdao.components.exec_comp import ExecComp
from openmdao.test.example_groups import ExampleGroup, ExampleGroupWithPromotes, ExampleByObjGroup
from openmdao.test.simple_comps import SimpleComp, SimpleImplicitComp, RosenSuzuki, FanIn
if PY3:
def py3fix(s):
return s.replace('<type', '<class')
else:
def py3fix(s):
return s
class TestProblem(unittest.TestCase):
def test_conflicting_connections(self):
# verify we get an error if we have conflicting implicit and explicit connections
root = Group()
# promoting G1.x will create an implicit connection to G3.x
# this is a conflict because G3.x (aka G3.C4.x) is already connected
# to G3.C3.x
G2 = root.add('G2', Group(), promotes=['x']) # BAD PROMOTE
G2.add('C1', ParamComp('x', 5.), promotes=['x'])
G1 = G2.add('G1', Group(), promotes=['x'])
G1.add('C2', ExecComp('y=x*2.0'), promotes=['x'])
G3 = root.add('G3', Group(), promotes=['x'])
G3.add('C3', ExecComp('y=x*2.0'))
G3.add('C4', ExecComp('y=x*2.0'), promotes=['x'])
root.connect('G2.G1.C2.y', 'G3.C3.x')
G3.connect('C3.y', 'x')
prob = Problem(root)
try:
prob.setup(check=False)
except Exception as error:
msg = "Target 'G3.C4.x' is connected to multiple unknowns: ['G2.C1.x', 'G3.C3.y']"
self.assertEqual(text_type(error), msg)
else:
self.fail("Error expected")
def test_check_promotes(self):
# verify we get an error at setup time if we have promoted a var that doesn't exist
# valid case, no error
prob = Problem(Group())
G = prob.root.add('G', Group())
C = G.add('C', SimpleComp(), promotes=['x*', 'y'])
# ignore warning about the unconnected param
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("ignore")
prob.setup(check=False)
# promoting a non-existent variable should throw an error
prob = Problem(Group())
G = prob.root.add('G', Group())
C = G.add('C', SimpleComp(), promotes=['spoon']) # there is no spoon
try:
prob.setup(check=False)
except Exception as error:
msg = "'C' promotes 'spoon' but has no variables matching that specification"
self.assertEqual(text_type(error), msg)
else:
self.fail("Error expected")
# promoting a pattern with no matches should throw an error
prob = Problem(Group())
G = prob.root.add('G', Group())
P = G.add('P', ParamComp('x', 5.), promotes=['a*']) # there is no match
try:
prob.setup(check=False)
except Exception as error:
msg = "'P' promotes 'a*' but has no variables matching that specification"
self.assertEqual(text_type(error), msg)
else:
self.fail("Error expected")
def test_conflicting_promotions(self):
# verify we get an error if we have conflicting promotions
root = Group()
# promoting G1.x will create an implicit connection to G3.x
# this is a conflict because G3.x (aka G3.C4.x) is already connected
# to G3.C3.x
G2 = root.add('G2', Group())
G2.add('C1', ParamComp('x', 5.), promotes=['x'])
G1 = G2.add('G1', Group(), promotes=['x'])
G1.add('C2', ExecComp('y=x*2.0'), promotes=['x'])
G3 = root.add('G3', Group(), promotes=['x'])
G3.add('C3', ExecComp('y=x*2.0'), promotes=['y']) # promoting y
G3.add('C4', ExecComp('y=x*2.0'), promotes=['x', 'y']) # promoting y again.. BAD
prob = Problem(root)
try:
prob.setup(check=False)
except Exception as error:
msg = "Promoted name 'G3.y' matches multiple unknowns: ['G3.C3.y', 'G3.C4.y']"
self.assertEqual(text_type(error), msg)
else:
self.fail("Error expected")
def test_conflicting_promoted_state_vars(self):
# verify we get an error if we have conflicting promoted state variables
root = Group()
comp1 = SimpleImplicitComp()
comp2 = SimpleImplicitComp()
root.add('c1', comp1, promotes=['z']) # promote the state, z
root.add('c2', comp2, promotes=['z']) # promote the state, z, again.. BAD
prob = Problem(root)
with self.assertRaises(RuntimeError) as err:
prob.setup(check=False)
expected_msg = "Promoted name 'z' matches multiple unknowns: ['c1.z', 'c2.z']"
self.assertEqual(str(err.exception), expected_msg)
def test_unconnected_param_access(self):
prob = Problem(root=Group())
G1 = prob.root.add('G1', Group())
G2 = G1.add('G2', Group())
C1 = G2.add('C1', ExecComp(['y=2.0*x',
'z=x*x-2.0']))
C2 = G2.add('C2', ExecComp(['y=2.0*x',
'z=x*x-2.0']))
G2.connect('C1.y', 'C2.x')
# ignore warning about the unconnected param
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("ignore")
prob.setup(check=False)
prob.run()
C1.params['x'] = 2.
self.assertEqual(prob['G1.G2.C1.x'], 2.0)
prob['G1.G2.C1.x'] = 99.
self.assertEqual(C1.params['x'], 99.)
def test_unconnected_param_access_with_promotes(self):
prob = Problem(root=Group())
G1 = prob.root.add('G1', Group())
G2 = G1.add('G2', Group(), promotes=['x'])
C1 = G2.add('C1', ExecComp(['y=2.0*x',
'z=x*x-2.0']), promotes=['x'])
C2 = G2.add('C2', ExecComp(['y=2.0*x',
'z=x*x-2.0']))
G2.connect('C1.y', 'C2.x')
# ignore warning about the unconnected param
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("ignore")
prob.setup(check=False)
prob.run()
# still must use absolute naming to find params even if they're
# promoted. Promoted names for params can refer to more than one param.
C1.params['x'] = 2.
self.assertEqual(prob['G1.x'], 2.0)
self.assertEqual(prob.root.G1.G2.C1.params['x'], 2.0)
prob['G1.x'] = 99.
self.assertEqual(C1.params['x'], 99.)
prob['G1.x'] = 12.
self.assertEqual(C1.params['x'], 12.)
prob['G1.x'] = 17.
self.assertEqual(prob.root.G1.G2.C1.params['x'], 17.0)
prob.run()
def test_input_input_explicit_conns_no_conn(self):
prob = Problem(root=Group())
root = prob.root
root.add('p1', ParamComp('x', 1.0))
root.add('c1', ExecComp('y = x*2.0'))
root.add('c2', ExecComp('y = x*3.0'))
root.connect('c1.x', 'c2.x')
# ignore warning about the unconnected params
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("ignore")
prob.setup(check=False)
prob.run()
self.assertEqual(root.connections, {})
def test_input_input_explicit_conns_w_conn(self):
prob = Problem(root=Group())
root = prob.root
root.add('p1', ParamComp('x', 1.0))
root.add('c1', ExecComp('y = x*2.0'))
root.add('c2', ExecComp('y = x*3.0'))
root.connect('c1.x', 'c2.x')
root.connect('p1.x', 'c2.x')
prob.setup(check=False)
prob.run()
self.assertEqual(root.connections['c1.x'], 'p1.x')
self.assertEqual(root.connections['c2.x'], 'p1.x')
self.assertEqual(len(root.connections), 2)
def test_calc_gradient_interface_errors(self):
root = Group()
prob = Problem(root=root)
root.add('comp', ExecComp('y=x*2.0'))
try:
prob.calc_gradient(['comp.x'], ['comp.y'], mode='junk')
except Exception as error:
msg = "mode must be 'auto', 'fwd', 'rev', or 'fd'"
self.assertEqual(text_type(error), msg)
else:
self.fail("Error expected")
try:
prob.calc_gradient(['comp.x'], ['comp.y'], return_format='junk')
except Exception as error:
msg = "return_format must be 'array' or 'dict'"
self.assertEqual(text_type(error), msg)
else:
self.fail("Error expected")
def test_calc_gradient(self):
root = Group()
parm = root.add('parm', ParamComp('x', np.array([1., 1., 1., 1.])))
comp = root.add('comp', RosenSuzuki())
root.connect('parm.x', 'comp.x')
prob = Problem(root)
prob.setup(check=False)
prob.run()
param_list = ['parm.x']
unknown_list = ['comp.f', 'comp.g']
# check that calc_gradient returns proper dict value when mode is 'fwd'
J = prob.calc_gradient(param_list, unknown_list, mode='fwd', return_format='dict')
np.testing.assert_almost_equal(J['comp.f']['parm.x'], np.array([
[ -3., -3., -17., 9.],
]))
np.testing.assert_almost_equal(J['comp.g']['parm.x'], np.array([
[ 3., 1., 3., 1.],
[ 1., 4., 2., 3.],
[ 6., 1., 2., -1.],
]))
# check that calc_gradient returns proper array value when mode is 'fwd'
J = prob.calc_gradient(param_list, unknown_list, mode='fwd', return_format='array')
np.testing.assert_almost_equal(J, np.array([
[-3., -3., -17., 9.],
[ 3., 1., 3., 1.],
[ 1., 4., 2., 3.],
[ 6., 1., 2., -1.],
]))
# check that calc_gradient returns proper dict value when mode is 'rev'
J = prob.calc_gradient(param_list, unknown_list, mode='rev', return_format='dict')
np.testing.assert_almost_equal(J['comp.f']['parm.x'], np.array([
[ -3., -3., -17., 9.],
]))
np.testing.assert_almost_equal(J['comp.g']['parm.x'], np.array([
[ 3., 1., 3., 1.],
[ 1., 4., 2., 3.],
[ 6., 1., 2., -1.],
]))
# check that calc_gradient returns proper array value when mode is 'rev'
J = prob.calc_gradient(param_list, unknown_list, mode='rev', return_format='array')
np.testing.assert_almost_equal(J, np.array([
[-3., -3., -17., 9.],
[ 3., 1., 3., 1.],
[ 1., 4., 2., 3.],
[ 6., 1., 2., -1.],
]))
# check that calc_gradient returns proper dict value when mode is 'fd'
J = prob.calc_gradient(param_list, unknown_list, mode='fd', return_format='dict')
np.testing.assert_almost_equal(J['comp.f']['parm.x'], np.array([
[ -3., -3., -17., 9.],
]), decimal=5)
np.testing.assert_almost_equal(J['comp.g']['parm.x'], np.array([
[ 3., 1., 3., 1.],
[ 1., 4., 2., 3.],
[ 6., 1., 2., -1.],
]), decimal=5)
# check that calc_gradient returns proper array value when mode is 'fd'
J = prob.calc_gradient(param_list, unknown_list, mode='fd', return_format='array')
np.testing.assert_almost_equal(J, np.array([
[-3., -3., -17., 9.],
[ 3., 1., 3., 1.],
[ 1., 4., 2., 3.],
[ 6., 1., 2., -1.],
]), decimal=5)
def test_calc_gradient_multiple_params(self):
prob = Problem()
prob.root = FanIn()
prob.setup(check=False)
prob.run()
param_list = ['p1.x1', 'p2.x2']
unknown_list = ['comp3.y']
# check that calc_gradient returns proper dict value when mode is 'fwd'
J = prob.calc_gradient(param_list, unknown_list, mode='fwd', return_format='dict')
np.testing.assert_almost_equal(J['comp3.y']['p2.x2'], np.array([[ 35.]]))
np.testing.assert_almost_equal(J['comp3.y']['p1.x1'], np.array([[ -6.]]))
# check that calc_gradient returns proper array value when mode is 'fwd'
J = prob.calc_gradient(param_list, unknown_list, mode='fwd', return_format='array')
np.testing.assert_almost_equal(J, np.array([[-6., 35.]]))
# check that calc_gradient returns proper dict value when mode is 'rev'
J = prob.calc_gradient(param_list, unknown_list, mode='rev', return_format='dict')
np.testing.assert_almost_equal(J['comp3.y']['p2.x2'], np.array([[ 35.]]))
np.testing.assert_almost_equal(J['comp3.y']['p1.x1'], np.array([[ -6.]]))
# check that calc_gradient returns proper array value when mode is 'rev'
J = prob.calc_gradient(param_list, unknown_list, mode='rev', return_format='array')
np.testing.assert_almost_equal(J, np.array([[-6., 35.]]))
# check that calc_gradient returns proper dict value when mode is 'fd'
J = prob.calc_gradient(param_list, unknown_list, mode='fd', return_format='dict')
np.testing.assert_almost_equal(J['comp3.y']['p2.x2'], np.array([[ 35.]]))
np.testing.assert_almost_equal(J['comp3.y']['p1.x1'], np.array([[ -6.]]))
# check that calc_gradient returns proper array value when mode is 'fd'
J = prob.calc_gradient(param_list, unknown_list, mode='fd', return_format='array')
np.testing.assert_almost_equal(J, np.array([[-6., 35.]]))
def test_explicit_connection_errors(self):
class A(Component):
def __init__(self):
super(A, self).__init__()
self.add_state('x', 0)
class B(Component):
def __init__(self):
super(B, self).__init__()
self.add_param('x', 0)
prob = Problem()
prob.root = Group()
prob.root.add('A', A())
prob.root.add('B', B())
prob.root.connect('A.x', 'B.x')
prob.setup(check=False)
expected_error_message = ("Source 'A.y' cannot be connected to target 'B.x': "
"'A.y' does not exist.")
prob = Problem()
prob.root = Group()
prob.root.add('A', A())
prob.root.add('B', B())
prob.root.connect('A.y', 'B.x')
with self.assertRaises(ConnectError) as cm:
prob.setup(check=False)
self.assertEqual(str(cm.exception), expected_error_message)
expected_error_message = ("Source 'A.x' cannot be connected to target 'B.y': "
"'B.y' does not exist.")
prob = Problem()
prob.root = Group()
prob.root.add('A', A())
prob.root.add('B', B())
prob.root.connect('A.x', 'B.y')
with self.assertRaises(ConnectError) as cm:
prob.setup(check=False)
self.assertEqual(str(cm.exception), expected_error_message)
expected_error_message = ("Source 'A.x' cannot be connected to target 'A.x': "
"Target must be a parameter but 'A.x' is an unknown.")
prob = Problem()
prob.root = Group()
prob.root.add('A', A())
prob.root.add('B', B())
prob.root.connect('A.x', 'A.x')
with self.assertRaises(ConnectError) as cm:
prob.setup(check=False)
self.assertEqual(str(cm.exception), expected_error_message)
def test_check_connections(self):
class A(Component):
def __init__(self):
super(A, self).__init__()
self.add_state('y', np.zeros((2,)), shape=(2,))
class B(Component):
def __init__(self):
super(B, self).__init__()
self.add_param('y', np.zeros((3,)), shape=(3,))
class C(Component):
def __init__(self):
super(C, self).__init__()
self.add_state('y', np.zeros((2,)))
class D(Component):
def __init__(self):
super(D, self).__init__()
self.add_param('y', np.zeros((2,)))
class E(Component):
def __init__(self):
super(E, self).__init__()
self.add_param('y', 1.0)
#Explicit
expected_error_message = py3fix("Type '<type 'numpy.ndarray'>' of source "
"'A.y' must be the same as type "
"'<type 'float'>' of target "
"'E.y'")
prob = Problem()
prob.root = Group()
prob.root.add('A', A())
prob.root.add('E', E())
prob.root.connect('A.y', 'E.y')
with self.assertRaises(ConnectError) as cm:
prob.setup(check=False)
self.assertEqual(str(cm.exception), expected_error_message)
#Implicit
expected_error_message = py3fix("Type '<type 'numpy.ndarray'>' of source "
"'y' must be the same as type "
"'<type 'float'>' of target "
"'y'")
prob = Problem()
prob.root = Group()
prob.root.add('A', A(), promotes=['y'])
prob.root.add('E', E(), promotes=['y'])
with self.assertRaises(ConnectError) as cm:
prob.setup(check=False)
self.assertEqual(str(cm.exception), expected_error_message)
# Explicit
expected_error_message = ("Shape '(2,)' of the source 'A.y' "
"must match the shape '(3,)' "
"of the target 'B.y'")
prob = Problem()
prob.root = Group()
prob.root.add('A', A())
prob.root.add('B', B())
prob.root.connect('A.y', 'B.y')
with self.assertRaises(ConnectError) as cm:
prob.setup(check=False)
raised_error = str(cm.exception)
raised_error = raised_error.replace('(2L,', '(2,')
raised_error = raised_error.replace('(3L,', '(3,')
self.assertEqual(raised_error, expected_error_message)
# Implicit
expected_error_message = ("Shape '(2,)' of the source 'y' "
"must match the shape '(3,)' "
"of the target 'y'")
prob = Problem()
prob.root = Group()
prob.root.add('A', A(), promotes=['y'])
prob.root.add('B', B(), promotes=['y'])
with self.assertRaises(ConnectError) as cm:
prob.setup(check=False)
raised_error = str(cm.exception)
raised_error = raised_error.replace('(2L,', '(2,')
raised_error = raised_error.replace('(3L,', '(3,')
self.assertEqual(raised_error, expected_error_message)
# Explicit
expected_error_message = ("Shape '(2,)' of the source 'C.y' must match the shape '(3,)' "
"of the target 'B.y'")
prob = Problem()
prob.root = Group()
prob.root.add('B', B())
prob.root.add('C', C())
prob.root.connect('C.y', 'B.y')
with self.assertRaises(ConnectError) as cm:
prob.setup(check=False)
raised_error = str(cm.exception)
raised_error = raised_error.replace('(2L,', '(2,')
raised_error = raised_error.replace('(3L,', '(3,')
self.assertEqual(raised_error, expected_error_message)
# Implicit
expected_error_message = ("Shape '(2,)' of the source 'y' must match the shape"
" '(3,)' of the target 'y'")
prob = Problem()
prob.root = Group()
prob.root.add('B', B(), promotes=['y'])
prob.root.add('C', C(), promotes=['y'])
with self.assertRaises(ConnectError) as cm:
prob.setup(check=False)
raised_error = str(cm.exception)
raised_error = raised_error.replace('(2L,', '(2,')
raised_error = raised_error.replace('(3L,', '(3,')
self.assertEqual(raised_error, expected_error_message)
# Explicit
prob = Problem()
prob.root = Group()
prob.root.add('A', A())
prob.root.add('D', D())
prob.root.connect('A.y', 'D.y')
stream = cStringIO()
checks = prob.setup(out_stream=stream)
self.assertEqual(checks['no_unknown_comps'], ['D'])
self.assertEqual(checks['recorders'], [])
content = stream.getvalue()
self.assertTrue("The following components have no unknowns:\nD\n" in content)
self.assertTrue("No recorders have been specified, so no data will be saved." in content)
# Implicit
prob = Problem()
prob.root = Group()
prob.root.add('A', A(), promotes=['y'])
prob.root.add('D', D(), promotes=['y'])
stream = cStringIO()
checks = prob.setup(out_stream=stream)
self.assertEqual(checks['no_unknown_comps'], ['D'])
self.assertEqual(checks['recorders'], [])
content = stream.getvalue()
self.assertTrue("The following components have no unknowns:\nD\n" in content)
self.assertTrue("No recorders have been specified, so no data will be saved." in content)
# Explicit
prob = Problem()
prob.root = Group()
prob.root.add('C', C())
prob.root.add('D', D())
prob.root.connect('C.y', 'D.y')
stream = cStringIO()
checks = prob.setup(out_stream=stream)
self.assertEqual(checks['no_unknown_comps'], ['D'])
self.assertEqual(checks['recorders'], [])
content = stream.getvalue()
self.assertTrue("The following components have no unknowns:\nD\n" in content)
self.assertTrue("No recorders have been specified, so no data will be saved." in content)
# Implicit
prob = Problem()
prob.root = Group()
prob.root.add('C', C(), promotes=['y'])
prob.root.add('D', D(), promotes=['y'])
stream = cStringIO()
checks = prob.setup(out_stream=stream)
self.assertEqual(checks['no_unknown_comps'], ['D'])
self.assertEqual(checks['recorders'], [])
content = stream.getvalue()
self.assertTrue("The following components have no unknowns:\nD\n" in content)
self.assertTrue("No recorders have been specified, so no data will be saved." in content)
def test_simplest_run(self):
prob = Problem(root=Group())
root = prob.root
root.add('x_param', ParamComp('x', 7.0))
root.add('mycomp', ExecComp('y=x*2.0'))
root.connect('x_param.x', 'mycomp.x')
prob.setup(check=False)
prob.run()
result = root.unknowns['mycomp.y']
self.assertAlmostEqual(14.0, result, 3)
def test_simplest_run_w_promote(self):
prob = Problem(root=Group())
root = prob.root
# ? Didn't we say that ParamComp by default promoted its variable?
root.add('x_param', ParamComp('x', 7.0), promotes=['x'])
root.add('mycomp', ExecComp('y=x*2.0'), promotes=['x'])
prob.setup(check=False)
prob.run()
result = root.unknowns['mycomp.y']
self.assertAlmostEqual(14.0, result, 3)
def test_variable_access(self):
prob = Problem(root=ExampleGroup())
# set with a different shaped array
try:
prob['G2.C1.x']
except Exception as err:
msg = "'unknowns' has not been initialized, setup() must be called before 'G2.C1.x' can be accessed"
self.assertEqual(text_type(err), msg)
else:
self.fail('Exception expected')
prob.setup(check=False)
self.assertEqual(prob['G2.C1.x'], 5.) # default output from ParamComp
self.assertEqual(prob['G2.G1.C2.y'], 5.5) # output from ExecComp
self.assertEqual(prob.root.G3.C3.params['x'], 0.) # initial value for a parameter
self.assertEqual(prob.root.G2.G1.C2.params['x'], 0.) # initial value for a parameter
prob = Problem(root=ExampleGroupWithPromotes())
prob.setup(check=False)
self.assertEqual(prob.root.G2.G1.C2.params['x'], 0.) # initial value for a parameter
# __setitem__
prob['G2.G1.C2.y'] = 99.
self.assertEqual(prob['G2.G1.C2.y'], 99.)
def test_variable_access_before_setup(self):
prob = Problem(root=ExampleGroup())
try:
prob['G2.C1.x'] = 5.
except AttributeError as err:
msg = "'unknowns' has not been initialized, setup() must be called before 'G2.C1.x' can be accessed"
self.assertEqual(text_type(err), msg)
else:
self.fail('Exception expected')
try:
prob.run()
except AttributeError as err:
msg = "'unknowns' has not been initialized, setup() must be called before 'x' can be accessed"
self.assertEqual(text_type(err), msg)
else:
self.fail('Exception expected')
def test_basic_run(self):
prob = Problem(root=ExampleGroup())
prob.setup(check=False)
prob.run()
self.assertAlmostEqual(prob['G3.C4.y'], 40.)
def test_byobj_run(self):
prob = Problem(root=ExampleByObjGroup())
prob.setup(check=False)
prob.run()
self.assertEqual(prob['G3.C4.y'], 'fooC2C3C4')
def test_scalar_sizes(self):
class A(Component):
def __init__(self):
super(A, self).__init__()
self.add_param('x', shape=1)
self.add_output('y', shape=1)
class B(Component):
def __init__(self):
super(B, self).__init__()
self.add_param('x', shape=2)
self.add_output('y', shape=2)
class C(Component):
def __init__(self):
super(C, self).__init__()
self.add_param('x', shape=3)
self.add_output('y', shape=3)
# Scalar Values
prob = Problem()
root = prob.root = Group()
root.add('X', ParamComp('x', 0., shape=1), promotes=['x'])
root.add('A1', A(), promotes=['x'])
root.add('A2', A())
root.connect('A1.y', 'A2.x')
prob.setup(check=False)
# Array Values
prob = Problem()
root = prob.root = Group()
root.add('X', ParamComp('x', np.zeros(2), shape=2), promotes=['x'])
root.add('B1', B(), promotes=['x'])
root.add('B2', B())
root.connect('B1.y', 'B2.x')
prob.setup(check=False)
# Mismatched Array Values
prob = Problem()
root = prob.root = Group()
root.add('X', ParamComp('x', np.zeros(2), shape=2), promotes=['x'])
root.add('B1', B(), promotes=['x'])
root.add('C1', C())
root.connect('B1.y', 'C1.x')
with self.assertRaises(ConnectError) as cm:
prob.setup(check=False)
expected_error_message = "Shape '(2,)' of the source "\
"'B1.y' must match the shape '(3,)' "\
"of the target 'C1.x'"
self.assertEqual(expected_error_message, str(cm.exception))
# Mismatched Scalar to Array Value
prob = Problem()
root = prob.root = Group()
root.add('X', ParamComp('x', 0., shape=1), promotes=['x'])
root.add('B1', B(), promotes=['x'])
with self.assertRaises(ConnectError) as cm:
prob.setup(check=False)
expected_error_message = py3fix("Type '<type 'float'>' of source "
"'x' must be the same as type "
"'<type 'numpy.ndarray'>' of target "
"'x'")
self.assertEqual(expected_error_message, str(cm.exception))
def test_mode_auto(self):
# Make sure mode=auto chooses correctly for all prob sizes as well
# as for abs/rel/etc paths
prob = Problem()
root = prob.root = Group()
root.add('p1', ParamComp('a', 1.0), promotes=['*'])
root.add('p2', ParamComp('b', 1.0), promotes=['*'])
root.add('comp', ExecComp(['x = 2.0*a + 3.0*b', 'y=4.0*a - 1.0*b']), promotes=['*'])
root.ln_solver.options['mode'] = 'auto'
prob.setup(check=False)
prob.run()
mode = prob._mode('auto', ['a'], ['x'])
self.assertEqual(mode, 'fwd')
mode = prob._mode('auto', ['a', 'b'], ['x'])
self.assertEqual(mode, 'rev')
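        # 'fwd' needs one linear solve per param and 'rev' one per unknown, so
        # auto picks fwd for 1 param / 1 unknown and rev for 2 params / 1 unknown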
# make sure _check function does it too
#try:
#mode = prob._check_for_matrix_matrix(['a'], ['x'])
#except Exception as err:
#msg = "Group '' must have the same mode as root to use Matrix Matrix."
#self.assertEqual(text_type(err), msg)
#else:
#self.fail('Exception expected')
root.ln_solver.options['mode'] = 'fwd'
mode = prob._check_for_matrix_matrix(['a', 'b'], ['x'])
self.assertEqual(mode, 'fwd')
def test_check_matrix_matrix(self):
prob = Problem()
root = prob.root = Group()
root.add('p1', ParamComp('a', 1.0), promotes=['*'])
root.add('p2', ParamComp('b', 1.0), promotes=['*'])
sub1 = root.add('sub1', Group(), promotes=['*'])
sub2 = sub1.add('sub2', Group(), promotes=['*'])
sub2.add('comp', ExecComp(['x = 2.0*a + 3.0*b', 'y=4.0*a - 1.0*b']), promotes=['*'])
prob.setup(check=False)
prob.run()
# NOTE: this call won't actually calculate mode because default ln_solver
# is ScipyGMRES and its default mode is 'fwd', not 'auto'.
mode = prob._check_for_matrix_matrix(['a'], ['x'])
root.ln_solver.options['mode'] = 'rev'
sub1.ln_solver.options['mode'] = 'rev'
try:
mode = prob._check_for_matrix_matrix(['a'], ['x'])
except Exception as err:
msg = "Group 'sub2' has mode 'fwd' but the root group has mode 'rev'. Modes must match to use Matrix Matrix."
self.assertEqual(text_type(err), msg)
else:
self.fail('Exception expected')
sub1.ln_solver.options['mode'] = 'fwd'
sub2.ln_solver.options['mode'] = 'rev'
try:
mode = prob._check_for_matrix_matrix(['a'], ['x'])
except Exception as err:
msg = "Group 'sub1' has mode 'fwd' but the root group has mode 'rev'. Modes must match to use Matrix Matrix."
self.assertEqual(text_type(err), msg)
else:
self.fail('Exception expected')
sub1.ln_solver.options['mode'] = 'rev'
mode = prob._check_for_matrix_matrix(['a'], ['x'])
def test_fd_skip_keys(self):
prob = Problem()
root = prob.root = Group()
comp = Component()
comp.add_param('x', 0.)
comp.add_param('y', 0.)
comp.add_output('z', 0.)
comp.solve_nonlinear = lambda p, u, r: u.__setitem__('z', 1.)
comp._get_fd_params = lambda: ['x']
comp.jacobian = lambda a,b,c: {('z', 'x'): 0.}
root.add('comp', comp, promotes=['x', 'y'])
root.add('px', ParamComp('x', 0.), promotes=['*'])
root.add('py', ParamComp('y', 0.), promotes=['*'])
prob.setup(check=False)
prob.run()
try:
prob.check_partial_derivatives()
except KeyError as err:
self.fail('KeyError raised: {0}'.format(str(err)))
class TestCheckSetup(unittest.TestCase):
def test_out_of_order(self):
prob = Problem(root=Group())
root = prob.root
G1 = root.add("G1", Group())
G2 = G1.add("G2", Group())
C1 = G2.add("C1", ExecComp('y=x*2.0'))
C2 = G2.add("C2", ExecComp('y=x*2.0'))
C3 = G2.add("C3", ExecComp('y=x*2.0'))
G2.connect("C1.y", "C3.x")
G2.connect("C3.y", "C2.x")
# force wrong order
G2.set_order(['C1', 'C2', 'C3'])
stream = cStringIO()
checks = prob.setup(out_stream=stream)
self.assertEqual(checks['out_of_order'], [('G1.G2',[('C2',['C3'])])])
def test_cycle(self):
prob = Problem(root=Group())
root = prob.root
G1 = root.add("G1", Group())
G2 = G1.add("G2", Group())
C1 = G2.add("C1", ExecComp('y=x*2.0'))
C2 = G2.add("C2", ExecComp('y=x*2.0'))
C3 = G2.add("C3", ExecComp('y=x*2.0'))
G2.connect("C1.y", "C3.x")
G2.connect("C3.y", "C2.x")
G2.connect("C2.y", "C1.x")
# force wrong order
G2.set_order(['C1', 'C2', 'C3'])
stream = cStringIO()
checks = prob.setup(out_stream=stream)
auto = G2.list_auto_order()
self.assertTrue(auto==['C1', 'C3', 'C2'] or
auto==['C3', 'C2', 'C1'] or
auto==['C2', 'C1', 'C3'])
self.assertTrue("Group 'G1.G2' has the following cycles: [['C1', 'C2', 'C3']]" in
stream.getvalue())
oo = checks['out_of_order']
self.assertEqual(oo[0][0], 'G1.G2')
expected = {
('C2','C3'): 'C1',
('C3',): 'C2',
('C2',): 'C1',
}
for node, afters in oo[0][1]:
self.assertEqual(node, expected[tuple(afters)])
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "726d0053669b7be144b422a5b74f47fc052d58f1", "size": 33755, "ext": "py", "lang": "Python", "max_stars_repo_path": "openmdao/core/test/test_problem.py", "max_stars_repo_name": "jcchin/project_clippy", "max_stars_repo_head_hexsha": "ed38e11a96848a81c024c5a0e5821bc5db04fdc7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "openmdao/core/test/test_problem.py", "max_issues_repo_name": "jcchin/project_clippy", "max_issues_repo_head_hexsha": "ed38e11a96848a81c024c5a0e5821bc5db04fdc7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "openmdao/core/test/test_problem.py", "max_forks_repo_name": "jcchin/project_clippy", "max_forks_repo_head_hexsha": "ed38e11a96848a81c024c5a0e5821bc5db04fdc7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.570964247, "max_line_length": 122, "alphanum_fraction": 0.5424677826, "include": true, "reason": "import numpy", "num_tokens": 8490}
|
# Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FCMA preprocessing."""
# Authors: Yida Wang
# (Intel Labs), 2017
import math
import time
import numpy as np
import logging
from scipy.stats.mstats import zscore
from mpi4py import MPI
from enum import Enum
from ..image import mask_images, multimask_images
logger = logging.getLogger(__name__)
__all__ = [
"generate_epochs_info",
"prepare_fcma_data",
"prepare_mvpa_data",
"prepare_searchlight_mvpa_data",
"RandomType",
]
def _separate_epochs(activity_data, epoch_list):
""" create data epoch by epoch
Separate data into epochs of interest specified in epoch_list
and z-score them for computing correlation
Parameters
----------
activity_data: list of 2D array in shape [nVoxels, nTRs]
the masked activity data organized in voxel*TR formats of all subjects
epoch_list: list of 3D array in shape [condition, nEpochs, nTRs]
specification of epochs and conditions
assuming all subjects have the same number of epochs
len(epoch_list) equals the number of subjects
Returns
-------
raw_data: list of 2D array in shape [epoch length, nVoxels]
the data organized in epochs
and z-scored in preparation of correlation computation
len(raw_data) equals the number of epochs
labels: list of 1D array
the condition labels of the epochs
len(labels) labels equals the number of epochs
"""
time1 = time.time()
raw_data = []
labels = []
for sid in range(len(epoch_list)):
epoch = epoch_list[sid]
for cond in range(epoch.shape[0]):
sub_epoch = epoch[cond, :, :]
for eid in range(epoch.shape[1]):
r = np.sum(sub_epoch[eid, :])
if r > 0: # there is an epoch in this condition
# mat is row-major
                    # regardless of the order of activity_data[sid]
mat = activity_data[sid][:, sub_epoch[eid, :] == 1]
mat = np.ascontiguousarray(mat.T)
mat = zscore(mat, axis=0, ddof=0)
# if zscore fails (standard deviation is zero),
# set all values to be zero
mat = np.nan_to_num(mat)
mat = mat / math.sqrt(r)
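                    # dividing the z-scored epoch by sqrt(its length) makes
                    # the inner product of two such epochs equal their
                    # Pearson correlation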
raw_data.append(mat)
labels.append(cond)
time2 = time.time()
logger.debug(
'epoch separation done, takes %.2f s' %
(time2 - time1)
)
return raw_data, labels
def _randomize_single_subject(data, seed=None):
"""Randomly permute the voxels of the subject.
The subject is organized as Voxel x TR,
this method shuffles the voxel dimension in place.
Parameters
----------
data: 2D array in shape [nVoxels, nTRs]
Activity image data to be shuffled.
seed: Optional[int]
Seed for random state used implicitly for shuffling.
Returns
-------
None.
"""
if seed is not None:
np.random.seed(seed)
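    # np.random.shuffle permutes along the first axis only, i.e. the voxel
    # dimension of the [nVoxels, nTRs] array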
np.random.shuffle(data)
def _randomize_subject_list(data_list, random):
"""Randomly permute the voxels of a subject list.
The method shuffles the subject one by one in place according to
the random type. If RandomType.NORANDOM, return the original list.
Parameters
----------
    data_list: list of 2D array in shape [nVoxels, nTRs]
Activity image data list to be shuffled.
random: RandomType
Randomization type.
Returns
-------
None.
"""
if random == RandomType.REPRODUCIBLE:
for i in range(len(data_list)):
_randomize_single_subject(data_list[i], seed=i)
elif random == RandomType.UNREPRODUCIBLE:
for data in data_list:
_randomize_single_subject(data)
class RandomType(Enum):
"""Define the random types as enumeration
NORANDOM means do not randomize the image data;
REPRODUCIBLE means randomize the image data with a fixed seed so that the
permutation holds between different runs;
UNREPRODUCIBLE means truly randomize the image data which returns
different results in different runs.
"""
NORANDOM = 0
REPRODUCIBLE = 1
UNREPRODUCIBLE = 2
def prepare_fcma_data(images, conditions, mask1, mask2=None,
random=RandomType.NORANDOM, comm=MPI.COMM_WORLD):
"""Prepare data for correlation-based computation and analysis.
Generate epochs of interests, then broadcast to all workers.
Parameters
----------
images: Iterable[SpatialImage]
Data.
conditions: List[UniqueLabelConditionSpec]
Condition specification.
mask1: np.ndarray
Mask to apply to each image.
mask2: Optional[np.ndarray]
Mask to apply to each image.
If it is not specified, the method will assign None to the returning
variable raw_data2 and the self-correlation on raw_data1 will be
computed
random: Optional[RandomType]
Randomize the image data within subject or not.
comm: MPI.Comm
MPI communicator to use for MPI operations.
Returns
-------
raw_data1: list of 2D array in shape [epoch length, nVoxels]
the data organized in epochs, specified by the first mask.
len(raw_data) equals the number of epochs
raw_data2: Optional, list of 2D array in shape [epoch length, nVoxels]
the data organized in epochs, specified by the second mask if any.
len(raw_data2) equals the number of epochs
labels: list of 1D array
the condition labels of the epochs
len(labels) labels equals the number of epochs
"""
rank = comm.Get_rank()
labels = []
raw_data1 = []
raw_data2 = []
if rank == 0:
logger.info('start to apply masks and separate epochs')
if mask2 is not None:
masks = (mask1, mask2)
activity_data1, activity_data2 = zip(*multimask_images(images,
masks,
np.float32))
_randomize_subject_list(activity_data2, random)
raw_data2, _ = _separate_epochs(activity_data2, conditions)
else:
activity_data1 = list(mask_images(images, mask1, np.float32))
_randomize_subject_list(activity_data1, random)
raw_data1, labels = _separate_epochs(activity_data1, conditions)
time1 = time.time()
raw_data_length = len(raw_data1)
raw_data_length = comm.bcast(raw_data_length)
# broadcast the data subject by subject to prevent size overflow
for i in range(raw_data_length):
if rank != 0:
raw_data1.append(None)
if mask2 is not None:
raw_data2.append(None)
raw_data1[i] = comm.bcast(raw_data1[i], root=0)
if mask2 is not None:
raw_data2[i] = comm.bcast(raw_data2[i], root=0)
if comm.Get_size() > 1:
labels = comm.bcast(labels, root=0)
if rank == 0:
time2 = time.time()
logger.info(
'data broadcasting done, takes %.2f s' %
(time2 - time1)
)
if mask2 is None:
raw_data2 = None
return raw_data1, raw_data2, labels
def generate_epochs_info(epoch_list):
""" use epoch_list to generate epoch_info defined below
Parameters
----------
epoch_list: list of 3D (binary) array in shape [condition, nEpochs, nTRs]
Contains specification of epochs and conditions, assuming
1. all subjects have the same number of epochs;
2. len(epoch_list) equals the number of subjects;
3. an epoch is always a continuous time course.
Returns
-------
epoch_info: list of tuple (label, sid, start, end).
label is the condition labels of the epochs;
sid is the subject id, corresponding to the index of raw_data;
start is the start TR of an epoch (inclusive);
        end is the end TR of an epoch (exclusive).
Assuming len(labels) labels equals the number of epochs and
the epochs of the same sid are adjacent in epoch_info
"""
time1 = time.time()
epoch_info = []
for sid, epoch in enumerate(epoch_list):
for cond in range(epoch.shape[0]):
sub_epoch = epoch[cond, :, :]
for eid in range(epoch.shape[1]):
r = np.sum(sub_epoch[eid, :])
if r > 0: # there is an epoch in this condition
start = np.nonzero(sub_epoch[eid, :])[0][0]
epoch_info.append((cond, sid, start, start + r))
time2 = time.time()
logger.debug(
'epoch separation done, takes %.2f s' %
(time2 - time1)
)
return epoch_info
def prepare_mvpa_data(images, conditions, mask):
"""Prepare data for activity-based model training and prediction.
Average the activity within epochs and z-scoring within subject.
Parameters
----------
images: Iterable[SpatialImage]
Data.
conditions: List[UniqueLabelConditionSpec]
Condition specification.
mask: np.ndarray
Mask to apply to each image.
Returns
-------
processed_data: 2D array in shape [num_voxels, num_epochs]
averaged epoch by epoch processed data
labels: 1D array
contains labels of the data
"""
activity_data = list(mask_images(images, mask, np.float32))
epoch_info = generate_epochs_info(conditions)
num_epochs = len(epoch_info)
(d1, _) = activity_data[0].shape
processed_data = np.empty([d1, num_epochs])
labels = np.empty(num_epochs)
subject_count = [0] # counting the epochs per subject for z-scoring
cur_sid = -1
# averaging
for idx, epoch in enumerate(epoch_info):
labels[idx] = epoch[0]
if cur_sid != epoch[1]:
subject_count.append(0)
cur_sid = epoch[1]
subject_count[-1] += 1
processed_data[:, idx] = \
np.mean(activity_data[cur_sid][:, epoch[2]:epoch[3]],
axis=1)
# z-scoring
cur_epoch = 0
for i in subject_count:
if i > 1:
processed_data[:, cur_epoch:cur_epoch + i] = \
zscore(processed_data[:, cur_epoch:cur_epoch + i],
axis=1, ddof=0)
cur_epoch += i
# if zscore fails (standard deviation is zero),
# set all values to be zero
processed_data = np.nan_to_num(processed_data)
return processed_data, labels
def prepare_searchlight_mvpa_data(images, conditions, data_type=np.float32,
random=RandomType.NORANDOM):
""" obtain the data for activity-based voxel selection using Searchlight
Average the activity within epochs and z-scoring within subject,
while maintaining the 3D brain structure. In order to save memory,
the data is processed subject by subject instead of reading all in before
processing. Assuming all subjects live in the identical cube.
Parameters
----------
images: Iterable[SpatialImage]
Data.
conditions: List[UniqueLabelConditionSpec]
Condition specification.
data_type
Type to cast image to.
random: Optional[RandomType]
Randomize the image data within subject or not.
Returns
-------
processed_data: 4D array in shape [brain 3D + epoch]
averaged epoch by epoch processed data
labels: 1D array
contains labels of the data
"""
time1 = time.time()
epoch_info = generate_epochs_info(conditions)
num_epochs = len(epoch_info)
processed_data = None
logger.info(
'there are %d subjects, and in total %d epochs' %
(len(conditions), num_epochs)
)
labels = np.empty(num_epochs)
# assign labels
for idx, epoch in enumerate(epoch_info):
labels[idx] = epoch[0]
# counting the epochs per subject for z-scoring
subject_count = np.zeros(len(conditions), dtype=np.int32)
logger.info('start to apply masks and separate epochs')
for sid, f in enumerate(images):
data = f.get_data().astype(data_type)
[d1, d2, d3, d4] = data.shape
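        # the reshape below flattens the volume to [nVoxels, nTRs] so the
        # permutation acts on voxels, then restores the original 4D shape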
if random == RandomType.REPRODUCIBLE:
data = data.reshape((d1 * d2 * d3, d4))
_randomize_single_subject(data, seed=sid)
data = data.reshape((d1, d2, d3, d4))
elif random == RandomType.UNREPRODUCIBLE:
data = data.reshape((d1 * d2 * d3, d4))
_randomize_single_subject(data)
data = data.reshape((d1, d2, d3, d4))
if processed_data is None:
processed_data = np.empty([d1, d2, d3, num_epochs],
dtype=data_type)
# averaging
for idx, epoch in enumerate(epoch_info):
if sid == epoch[1]:
subject_count[sid] += 1
processed_data[:, :, :, idx] = \
np.mean(data[:, :, :, epoch[2]:epoch[3]], axis=3)
logger.debug(
'file %s is loaded and processed, with data shape %s',
f.get_filename(), data.shape
)
# z-scoring
cur_epoch = 0
for i in subject_count:
if i > 1:
processed_data[:, :, :, cur_epoch:cur_epoch + i] = \
zscore(processed_data[:, :, :, cur_epoch:cur_epoch + i],
axis=3, ddof=0)
cur_epoch += i
# if zscore fails (standard deviation is zero),
# set all values to be zero
processed_data = np.nan_to_num(processed_data)
time2 = time.time()
logger.info(
'data processed for activity-based voxel selection, takes %.2f s' %
(time2 - time1)
)
return processed_data, labels
|
{"hexsha": "dc80e4f76f7cc015b13db24494952074260908bc", "size": 14358, "ext": "py", "lang": "Python", "max_stars_repo_path": "brainiak/fcma/preprocessing.py", "max_stars_repo_name": "osaaso3/brainiak", "max_stars_repo_head_hexsha": "153552c9b65e8354fa45985454f96978e0a92579", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 235, "max_stars_repo_stars_event_min_datetime": "2017-10-31T22:58:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T06:16:57.000Z", "max_issues_repo_path": "brainiak/fcma/preprocessing.py", "max_issues_repo_name": "osaaso3/brainiak", "max_issues_repo_head_hexsha": "153552c9b65e8354fa45985454f96978e0a92579", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 205, "max_issues_repo_issues_event_min_datetime": "2017-10-24T16:55:16.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T22:56:14.000Z", "max_forks_repo_path": "brainiak/fcma/preprocessing.py", "max_forks_repo_name": "osaaso3/brainiak", "max_forks_repo_head_hexsha": "153552c9b65e8354fa45985454f96978e0a92579", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 104, "max_forks_repo_forks_event_min_datetime": "2017-11-01T20:04:07.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T18:55:34.000Z", "avg_line_length": 34.5975903614, "max_line_length": 79, "alphanum_fraction": 0.6203510238, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3366}
|
import numpy as np
from pyrr import Quaternion, matrix44, Matrix44, Vector3
from ..base.utils import *
class Transform:
"""
This class will manage the basic transformation that can be
performed to a geometry.
This class uses pyrr module that it's a packadge with many
operations that can be used directly with OpenGL. In this class
the selected approach will be Object Oriented because its features.
Documentation can be founf in the following link:
https://github.com/adamlwgriffiths/Pyrr
Parameters:
default position, rotation and scale can be set intially.
To-Do:
Pivot implementation. So it's rotate based on a point.
Advanced transformations such as shear, bend, twist, et..
"""
def __init__(self, position=None, rotation=None, scale=None):
# Create private members for the setters (properties)
self.__position = self._get_Vector3(position)
self.__rotation = self._get_Vector3(rotation)
self.__scale = self._get_Vector3(scale)
        # Initialize default transform values
self._initialize()
def _get_Vector3(self, value):
if empty(value):
return None
# Check if it's already a Vector3 instance
if isinstance(value,(Vector3)):
return value
else:
return Vector3(value)
def _initialize(self):
# Create default transformations: position, rotation and scale
if self.position is None:
self.position = Vector3([0.0,0.0,0.0])
if self.rotation is None:
self.rotation = Vector3([0.0,0.0,0.0])
if self.scale is None:
self.scale = Vector3([1.0,1.0,1.0])
@property
def position(self):
return self.__position
@position.setter
def position(self, value):
self.__position = self._get_Vector3(value)
@property
def rotation(self):
return self.__rotation
@rotation.setter
def rotation(self, value):
self.__rotation = self._get_Vector3(value)
@property
def scale(self):
return self.__scale
@scale.setter
def scale(self, value):
self.__scale = self._get_Vector3(value)
@property
def model(self):
"""
This property will perform the current transformation and
return a 4x4 matrix with the transformation matrix. This
matrix could be send to the shader so it can perform the
model-view transformation for any geometry
"""
# Create scale matrix transformation
scale = Matrix44.from_scale(self.scale)
        # Convert the current degrees vector into radians
        rotation = np.radians(self.rotation)
        # Build the per-axis rotation quaternions
        rotationX = Quaternion.from_x_rotation(rotation.x)
        rotationY = Quaternion.from_y_rotation(rotation.y)
        rotationZ = Quaternion.from_z_rotation(rotation.z)
        # Compose all rotations
        rotation = rotationX * rotationY * rotationZ
# Create translation matrix transformation
translation = Matrix44.from_translation(self.position)
# Compute transformation matrix. convert to float32
return np.array(scale * rotation * translation,dtype=np.float32)
def transform(self, point):
"""
This function will apply the current transformation to
the following point.
"""
# Get the current tranformation matrix
matrix = self.model
# transform our point by the matrix to model-view
return matrix * self._get_Vector3(point)
def __enter__(self):
        # __enter__ returns the object itself, for use in with-statements
return self
def __exit__(self, exc_type, exc_value, traceback):
        # Clean up any stored memory
self._dispose()
def _dispose(self):
pass
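# --- Illustrative usage (a sketch, not part of the original module) ---
# Assumes pyrr is installed; the printed values depend on pyrr's
# matrix and quaternion conventions.
if __name__ == "__main__":
    t = Transform(position=[1.0, 2.0, 3.0], rotation=[0.0, 90.0, 0.0])
    print(t.model)  # 4x4 float32 model matrix (scale * rotation * translation)
    with Transform(scale=[2.0, 2.0, 2.0]) as s:
        print(s.scale)  # Vector3([2., 2., 2.])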
|
{"hexsha": "aeed010058c390f860ed8dc810d39c946486ad5a", "size": 3934, "ext": "py", "lang": "Python", "max_stars_repo_path": "zero/core/geometry/transform.py", "max_stars_repo_name": "jsa4000/OpenGL-Python", "max_stars_repo_head_hexsha": "62055ba0c16f54507b7ba709d6691b2e9c7bc152", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "zero/core/geometry/transform.py", "max_issues_repo_name": "jsa4000/OpenGL-Python", "max_issues_repo_head_hexsha": "62055ba0c16f54507b7ba709d6691b2e9c7bc152", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "zero/core/geometry/transform.py", "max_forks_repo_name": "jsa4000/OpenGL-Python", "max_forks_repo_head_hexsha": "62055ba0c16f54507b7ba709d6691b2e9c7bc152", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-10-08T06:12:11.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-08T06:12:11.000Z", "avg_line_length": 33.0588235294, "max_line_length": 78, "alphanum_fraction": 0.6395526182, "include": true, "reason": "import numpy", "num_tokens": 834}
|
import numpy as np
import cv2
def getNormalMask(coco, imageObj, filterClasses):
"""
iscrowd is set to None, therefore it only works for single
mask : (height, width)
Parameters
------------------------------------
"""
# Load categorical ids for filterclasses
catIds = coco.getCatIds(catNms=filterClasses)
input_image_size = (imageObj['height'],imageObj['width'])
# Load annotations for image object
annIds = coco.getAnnIds(imageObj['id'], catIds=catIds, iscrowd=None)
anns = coco.loadAnns(annIds)
# Create mask for image object
mask = np.zeros(input_image_size)
    for a in range(len(anns)):
        pixel_value = anns[a]['category_id']
        # cv2.resize expects dsize as (width, height)
        new_mask = cv2.resize(coco.annToMask(anns[a]) * pixel_value,
                              (imageObj['width'], imageObj['height']))
        # Keep the larger category id where annotations overlap
        mask = np.maximum(new_mask, mask)
return mask
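# --- Illustrative usage (a sketch; the annotation path is hypothetical) ---
# from pycocotools.coco import COCO
# coco = COCO('annotations/instances_val2017.json')
# img_obj = coco.loadImgs(coco.getImgIds()[0])[0]
# mask = getNormalMask(coco, img_obj, ['person'])  # (height, width) array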
|
{"hexsha": "95bb1e245cfd0e2c7901f309512df73fc5c07678", "size": 845, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/cocoFunctions.py", "max_stars_repo_name": "qualiphal/parallel-phal", "max_stars_repo_head_hexsha": "a6bbfdb104d13c4c45914e02d53f32e1b134ca3c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils/cocoFunctions.py", "max_issues_repo_name": "qualiphal/parallel-phal", "max_issues_repo_head_hexsha": "a6bbfdb104d13c4c45914e02d53f32e1b134ca3c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/cocoFunctions.py", "max_forks_repo_name": "qualiphal/parallel-phal", "max_forks_repo_head_hexsha": "a6bbfdb104d13c4c45914e02d53f32e1b134ca3c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.2962962963, "max_line_length": 84, "alphanum_fraction": 0.6485207101, "include": true, "reason": "import numpy", "num_tokens": 217}
|
//-------------------------------------------------------------------
// MetaInfo Framework (MIF)
// https://github.com/tdv/mif
// Created: 03.2017
// Copyright (C) 2016-2017 tdv
//-------------------------------------------------------------------
// STD
#include <cstdint>
#include <sstream>
#include <stdexcept>
#include <string>
// BOOST
#include <boost/algorithm/string.hpp>
#include <boost/filesystem.hpp>
#include <boost/scope_exit.hpp>
// MIF
#include <mif/application/application.h>
#include <mif/common/uuid_generator.h>
#include <mif/common/log.h>
#include <mif/db/transaction.h>
#include <mif/db/id/service.h>
#include <mif/service/create.h>
class Application
: public Mif::Application::Application
{
public:
Application(int argc, char const **argv)
: Mif::Application::Application{argc, argv}
{
boost::program_options::options_description commonOptions{"Common database options"};
commonOptions.add_options()
("type", boost::program_options::value<std::string>(&m_type)->default_value("sqlite"),
"Database type (postgres or sqlite)")
("db-prefix", boost::program_options::value<std::string>(&m_dbPrefix)->default_value("mif_db_test"),
"Database name prefix")
("clean", boost::program_options::value<bool>(&m_cleanResult)->default_value(true),
"Remove all generated data")
;
AddCustomOptions(commonOptions);
boost::program_options::options_description pgOptions{"PostgreSQL options"};
pgOptions.add_options()
("pg-host", boost::program_options::value<std::string>(&m_pgHost)->default_value("localhost"),
"PostgreSQL database host")
("pg-port", boost::program_options::value<std::uint16_t>(&m_pgPort)->default_value(5432),
"PostgreSQL database port")
("pg-user", boost::program_options::value<std::string>(&m_pgUser)->default_value("postgres"),
"PostgreSQL database user")
("pg-pwd", boost::program_options::value<std::string>(&m_pgPassword)->default_value(""),
"PostgreSQL database user password")
("pg-timeout", boost::program_options::value<std::uint32_t>(&m_pgConnectionTimeout)->default_value(10),
"PostgreSQL database connection timeout")
;
AddCustomOptions(pgOptions);
boost::program_options::options_description sqliteOptions{"SQLite options"};
sqliteOptions.add_options()
("sqlite-in-memory", boost::program_options::value<bool>(&m_sqliteInMemory)->default_value(true),
"SQLite in-memory database")
("sqlite-dir", boost::program_options::value<std::string>(&m_sqliteDir)->default_value("."),
"SQLite database dir")
;
AddCustomOptions(sqliteOptions);
}
private:
std::string m_type;
std::string m_dbPrefix;
bool m_cleanResult;
std::string m_pgHost;
std::uint16_t m_pgPort;
std::string m_pgUser;
std::string m_pgPassword;
std::uint32_t m_pgConnectionTimeout;
bool m_sqliteInMemory;
std::string m_sqliteDir;
std::string GenerateDbName() const
{
auto name = Mif::Common::UuidGenerator{}.Generate();
boost::algorithm::erase_all(name, "-");
if (!m_dbPrefix.empty())
name = m_dbPrefix + "_" + name;
return name;
}
// Mif.Application.Application
virtual void OnStart() override final
{
if (m_type == "postgres")
DemoPostgreSQL();
else if (m_type == "sqlite")
DemoSQLite();
else
throw std::invalid_argument{"Type \"" + m_type + "\" not supported."};
}
void ShowData(Mif::Db::IConnectionPtr connection)
{
// Run a parametrized query
auto statement = connection->CreateStatement(
"select * from test "
"where id >= $1 and id <= $2 "
"order by id;"
);
auto recordset = statement->Execute({"5", "7"});
auto const count = recordset->GetFieldsCount();
MIF_LOG(Info) << "Fields count: " << count;
for (std::size_t i = 0 ; i < count ; ++i)
MIF_LOG(Info) << "\"" << recordset->GetFieldName(i) << "\" is the name of the field " << i << ".";
while (recordset->Read())
{
for (std::size_t i = 0 ; i < count ; ++i)
{
MIF_LOG(Info) << recordset->GetFieldName(i) << ": "
<< (recordset->IsNull(i) ? std::string{"null"} : recordset->GetAsString(i));
}
}
}
void DemoPostgreSQL()
{
auto dbName = GenerateDbName();
// Create database
{
MIF_LOG(Info) << "Create database \"" << dbName << "\"";
auto connection = Mif::Service::Create<Mif::Db::Id::Service::PostgreSQL, Mif::Db::IConnection>(
m_pgHost, m_pgPort, m_pgUser, m_pgPassword, std::string{}, m_pgConnectionTimeout);
connection->ExecuteDirect(
"CREATE DATABASE " + dbName + " WITH OWNER " + m_pgUser + ";"
);
}
// Drop database on exit from method
BOOST_SCOPE_EXIT(&m_pgHost, &m_pgPort, &m_pgUser, &m_pgPassword, &m_pgConnectionTimeout,
&m_cleanResult, &dbName)
{
if (m_cleanResult)
{
try
{
MIF_LOG(Info) << "Drop database \"" << dbName << "\"";
auto connection = Mif::Service::Create<Mif::Db::Id::Service::PostgreSQL, Mif::Db::IConnection>(
m_pgHost, m_pgPort, m_pgUser, m_pgPassword, std::string{}, m_pgConnectionTimeout);
connection->ExecuteDirect("DROP DATABASE " + dbName + ";");
}
catch (std::exception const &e)
{
MIF_LOG(Warning) << "Failed to drop database \"" << dbName << "\" Error: " << e.what();
}
}
}
BOOST_SCOPE_EXIT_END
// Connect to database
MIF_LOG(Info) << "Connect ot database \"" << dbName << "\"";
auto connection = Mif::Service::Create<Mif::Db::Id::Service::PostgreSQL, Mif::Db::IConnection>(
m_pgHost, m_pgPort, m_pgUser, m_pgPassword, dbName, m_pgConnectionTimeout);
Mif::Db::Transaction transaction{connection};
// Create table 'test'
MIF_LOG(Info) << "Create table 'test'";
connection->ExecuteDirect(
"create table test"
"("
" id serial not null primary key,"
" key varchar not null,"
" value varchar"
");"
);
// Create index
MIF_LOG(Info) << "Create index";
connection->ExecuteDirect(
"create unique index test_unique_key_index on test (key);"
);
// Fill table
MIF_LOG(Info) << "Fill table";
connection->ExecuteDirect(
"insert into test (key, value) "
"select 'key_' || t.i::text, 'value_' || t.i::text "
"from generate_series(1, 10) as t(i);"
);
transaction.Commit();
// Show data
ShowData(connection);
}
void DemoSQLite()
{
std::string fileName;
BOOST_SCOPE_EXIT(&m_cleanResult, &fileName)
{
if (m_cleanResult && !fileName.empty())
{
// Remove database file
MIF_LOG(Info) << "Remove database file \"" << fileName << "\"";
boost::filesystem::remove(fileName);
}
}
BOOST_SCOPE_EXIT_END
// Connect to database
Mif::Db::IConnectionPtr connection;
if (m_sqliteInMemory)
{
MIF_LOG(Info) << "Create in-memory database";
connection = Mif::Service::Create<Mif::Db::Id::Service::SQLite, Mif::Db::IConnection>();
}
else
{
auto const path = boost::filesystem::absolute(m_sqliteDir).parent_path() / GenerateDbName();
fileName = path.c_str();
MIF_LOG(Info) << "Create or connect database from file \"" << fileName << "\"";
connection = Mif::Service::Create<Mif::Db::Id::Service::SQLite, Mif::Db::IConnection>(fileName);
}
Mif::Db::Transaction transaction{connection};
// Create table 'test'
MIF_LOG(Info) << "Create table 'test'";
connection->ExecuteDirect(
"create table test"
"("
" id integer primary key autoincrement,"
" key varchar not null,"
" value varchar"
");"
);
// Fill table
MIF_LOG(Info) << "Fill table";
for (int i = 1 ; i <= 10 ; ++i)
{
auto const index = std::to_string(i);
connection->ExecuteDirect(
"insert into test (key, value) values ('key_" + index + "', 'value_" + index + "')"
);
}
transaction.Commit();
// Show data
ShowData(connection);
}
};
int main(int argc, char const **argv)
{
return Mif::Application::Run<Application>(argc, argv);
}
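// Illustrative invocations (a sketch; the binary name depends on the build):
//   ./db_client --type sqlite --sqlite-in-memory true
//   ./db_client --type postgres --pg-host localhost --pg-user postgres --clean true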
|
{"hexsha": "b0f8636c70a08b63163d96f1e5ca9b5720dd17eb", "size": 9536, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "examples/db_client/src/main.cpp", "max_stars_repo_name": "paceholder/mif", "max_stars_repo_head_hexsha": "ff3c18f577048c94887220bb92477ce102f01599", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4.0, "max_stars_repo_stars_event_min_datetime": "2018-03-26T11:49:13.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-22T06:18:35.000Z", "max_issues_repo_path": "examples/db_client/src/main.cpp", "max_issues_repo_name": "paceholder/mif", "max_issues_repo_head_hexsha": "ff3c18f577048c94887220bb92477ce102f01599", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/db_client/src/main.cpp", "max_forks_repo_name": "paceholder/mif", "max_forks_repo_head_hexsha": "ff3c18f577048c94887220bb92477ce102f01599", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2018-10-01T09:16:29.000Z", "max_forks_repo_forks_event_max_datetime": "2018-10-01T09:16:29.000Z", "avg_line_length": 35.3185185185, "max_line_length": 119, "alphanum_fraction": 0.5311451342, "num_tokens": 2107}
|
Despite all her names, Penny's owners aren't really big fans of Dio, Warren Zevon or Prince.
Penny has many hobbies and interests, some of which include the following:
Chasing / wrestling with cats
Sleeping
Making sure nobody is sleeping
Coprophagia
Humping inanimate objects
Chewing (she loves Beechnut)
Vampire hunting
Playfully biting people in inappropriate places
Creating situations with electronics for Brad to troubleshoot / fix (to make sure he doesn't get rusty).
Hiding gifts for people to find later
Licking
Stealing panties
Theoretical physics, particularly string theory
Some of these she learned from her owners (no, not the coprophagia) and some she learned from her buddy Alina.
2008-02-19 11:16:10: Great, show everybody my underwear! (BradBenedict)
2008-09-04 22:09:29: Y'know, any dog that has a Zevon-inspired nickname gets my support. (JabberWokky)
|
{"hexsha": "3ed9e676b4cd39000866ae91ee64847c66d58247", "size": 949, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/Penny_the_Dog.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/Penny_the_Dog.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/Penny_the_Dog.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.8928571429, "max_line_length": 127, "alphanum_fraction": 0.7934668072, "num_tokens": 240}
|
import numpy as np
from scipy.spatial.distance import pdist
from sklearn.metrics import pairwise_kernels
def kernel_matrix(x):
    """Doubly-centred RBF kernel matrix for the samples in the rows of x."""
    n_samples, _ = x.shape
    # Centering matrix H = I - (1/n) 11^T
    h = np.identity(n_samples) - np.full((n_samples, n_samples), 1 / n_samples)
    # RBF kernel; the bandwidth parameter is set from the median pairwise distance
    kx = pairwise_kernels(x, metric='rbf', gamma=np.median(pdist(x)))
    return h @ kx @ h
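if __name__ == "__main__":
    # Illustrative check (a sketch, not part of the original module):
    # double centering makes the row and column sums of the kernel vanish.
    x = np.random.randn(100, 3)
    k = kernel_matrix(x)
    print(np.allclose(k.sum(axis=0), 0.0))  # expected: True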
|
{"hexsha": "c298e06da1b7f8c76d3e27b067f1cefd465f108e", "size": 328, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/causality/pc/independence/utils.py", "max_stars_repo_name": "AnverK/VK_Graduation", "max_stars_repo_head_hexsha": "a8d457d1fcb677d417a5ea82011393160762c0b1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-09-11T12:14:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-18T17:54:43.000Z", "max_issues_repo_path": "src/causality/pc/independence/utils.py", "max_issues_repo_name": "AnverK/VK_Graduation", "max_issues_repo_head_hexsha": "a8d457d1fcb677d417a5ea82011393160762c0b1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/causality/pc/independence/utils.py", "max_forks_repo_name": "AnverK/VK_Graduation", "max_forks_repo_head_hexsha": "a8d457d1fcb677d417a5ea82011393160762c0b1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-10T11:40:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-10T11:40:17.000Z", "avg_line_length": 29.8181818182, "max_line_length": 79, "alphanum_fraction": 0.7134146341, "include": true, "reason": "import numpy,from scipy", "num_tokens": 89}
|
import click
import platform
import cv2
import numpy as np
try:
    import urllib.request as urllib
except ImportError:  # Python 2 fallback
    import urllib
from ggb import GGB, ColorSpace
import ggb
def print_version(ctx: click.Context, param: click.Parameter, value: bool) -> None:
if not value or ctx.resilient_parsing:
return
click.echo(
"Running GGB %s with %s %s on %s"
% (
ggb.__version__,
platform.python_implementation(),
platform.python_version(),
platform.system(),
)
)
ctx.exit()
@click.command()
@click.argument("image")
@click.option(
"--output",
type=str,
default="ggb_image.png",
help="Output to the GGB Image result file.",
show_default=True,
)
@click.option(
"--version",
is_flag=True,
callback=print_version,
expose_value=False,
is_eager=True,
help="Display the GGB version and exit.",
)
def main(
image: str,
output: str,
) -> None:
    img = None
    try:
        # Try to fetch the image from a URL first
        req = urllib.urlopen(image)
        arr = np.asarray(bytearray(req.read()), dtype=np.uint8)
        img = cv2.imdecode(arr, -1)
    except Exception:
        # Fall back to reading a local file
        img = cv2.imread(image)
ggb_image = GGB(image=img, input_color=ColorSpace.BGR).process()
ggb_image.write(output)
print("The GGB image is written to {}".format(output))
if __name__ == "__main__":
main()
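# Illustrative invocations (a sketch):
#   python -m ggb path/to/image.png --output ggb_image.png
#   python -m ggb --version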
|
{"hexsha": "2024e784ba2466b2ddbfdf6500b1669c3c3194d6", "size": 1377, "ext": "py", "lang": "Python", "max_stars_repo_path": "ggb/__main__.py", "max_stars_repo_name": "reshalfahsi/GGB", "max_stars_repo_head_hexsha": "f56994ffcd6a83762d67705116e690c7a64c9093", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-06T05:45:07.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-06T05:45:07.000Z", "max_issues_repo_path": "ggb/__main__.py", "max_issues_repo_name": "reshalfahsi/GGBColorSpace", "max_issues_repo_head_hexsha": "f56994ffcd6a83762d67705116e690c7a64c9093", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-06-19T02:20:02.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-19T02:20:02.000Z", "max_forks_repo_path": "ggb/__main__.py", "max_forks_repo_name": "reshalfahsi/GGB", "max_forks_repo_head_hexsha": "f56994ffcd6a83762d67705116e690c7a64c9093", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-10T04:14:24.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-10T04:14:24.000Z", "avg_line_length": 20.8636363636, "max_line_length": 83, "alphanum_fraction": 0.6187363834, "include": true, "reason": "import numpy", "num_tokens": 339}
|
# -*- coding: utf-8 -*-
"""
Es 2
"""
import allMethods as fz
import math
import sympy as sym
import sympy.utilities.lambdify
import numpy as np
import matplotlib.pyplot as plt
x = sym.symbols("x")
fx = sym.tan(x) - x
dfx = sym.diff(fx, x, 1)
f = sym.lambdify(x, fx, np)
df = sym.lambdify(x, dfx, np)
x0, iterazioni, xks = fz.bisezione(f, (3/5)*math.pi, (37/25)*math.pi, 1e-8)
for i in range(4):
    xNew, itNew, xkNew = fz.newton(f, df, xks[i], 1e-8, 1e-8, 200)
    xSec, itSec, xkSec = fz.secanti(f, xks[i], (3/5)*math.pi, 1e-8, 1e-8, 200)
    print("value: {:e}, Newton xk={:e} in {:d} iterations".format(xks[i], xNew, itNew))
    print("value: {:e}, secant xk={:e} in {:d} iterations".format(xks[i], xSec, itSec))
|
{"hexsha": "45248cb473652829da86a12c6473f359b5e79624", "size": 748, "ext": "py", "lang": "Python", "max_stars_repo_path": "zeri_di_funzione/esercizi/es2.py", "max_stars_repo_name": "luigi-borriello00/Metodi_SIUMerici", "max_stars_repo_head_hexsha": "cf1407c0ad432a49a96dcd08303213e48723c57a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-06-23T14:47:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-07T08:39:27.000Z", "max_issues_repo_path": "zeri_di_funzione/esercizi/es2.py", "max_issues_repo_name": "luigi-borriello00/Metodi_SIUMerici", "max_issues_repo_head_hexsha": "cf1407c0ad432a49a96dcd08303213e48723c57a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "zeri_di_funzione/esercizi/es2.py", "max_forks_repo_name": "luigi-borriello00/Metodi_SIUMerici", "max_forks_repo_head_hexsha": "cf1407c0ad432a49a96dcd08303213e48723c57a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.92, "max_line_length": 91, "alphanum_fraction": 0.6042780749, "include": true, "reason": "import numpy,import sympy", "num_tokens": 299}
|
%!TEX root = ../thesis.tex
%*******************************************************************************
%*********************************** Seventh Chapter *****************************
%*******************************************************************************
\chapter{Centre Vortex Visualisations}\label{chapter:Visualisations}
\ifpdf
\graphicspath{{Chapter7/Figs/Raster/}{Chapter7/Figs/PDF/}{Chapter7/Figs/}}
\else
\graphicspath{{Chapter7/Figs/Vector/}{Chapter7/Figs/}}
\fi
\textit{A brief discussion of the work presented in this chapter will appear in the proceedings ``Visualizations of Centre Vortex Structure in Lattice Simulations'', }\citet{Biddle:2018a} \textit{and ``Publicizing Lattice Field Theory through Visualization'', }\citet{Biddle:2018b}.\\
\textit{The digital version of this chapter contains interactive 3D models embedded in the document. To interact with these models, it is necessary to open the document in Adobe Reader or Adobe Acrobat (requires version 9 or newer). Linux users should install Adobe acroread version 9.4.1, the last edition to have full 3D support. Note that 3D content must also be enabled for the interactive content to be available, and for proper rendering it is necessary to enable double-sided rendering in the preferences menu. To view the models, click on the figures marked as \textbf{Interactive} in the caption. To rotate the model, click and hold the left mouse button and move the mouse. Use the scroll wheel or shift-click to zoom. Some pre-set views of the model are also provided to highlight areas of interest. To reset the model back to its original orientation and zoom, press the ‘home’ icon in the toolbar or change the view to ‘Default view’.}\\
In previous chapters we have motivated the significance of centre vortices in QCD through the calculation of the gluon propagator. Although we can predict many of the properties of vortices through calculation, these properties can also be explored through visualisations of the lattice. To this end, in this chapter we present a novel visualisation technique that allows us to view thin centre vortices on the lattice through the use of 3D models~\footnote{All 3D models have been generated using Advanced Visual Systems (AVS) Express Visualisation Edition, version 8.4.1.}. These models allow for a highly interactive exploration of the vortex structure of the QCD vacuum.
\section{Spatially-Oriented Vortices}
As the lattice is a four-dimensional hypercube, we visualise the centre vortices on a set of 3D slices. The choice of dimension to take slices along is irrelevant in Euclidean space, so to maximise the volume of each slice we choose to take slices along the $x$-axis, resulting in $N_x$ slices each with dimensions $N_y\times N_z\times N_t$. However, the transition between slices is most intuitively thought of as `stepping through time', so we re-label our coordinates such that each slice is considered to be a snapshot at fixed $t$, with local coordinates $(x,y,z)$. Within each slice we can visualise all vortices associated with an $x-y$, $x-z$ or $y-z$ plaquette by calculating $P_{x\,y}(\bf{x})$, $P_{y\,z}(\bf{x})$ and $P_{z\,x}(\bf{x})$ for all $\bf{x}$ in the slice. These vortices will be referred to as the `spatially-oriented' vortices, as they are fixed in time. The plaquettes are evaluated on a centre projected configuration, so $P_{\mu\nu}\in \lbrace -1,\,0,\,+1\rbrace$. For a $+1$ vortex, a blue jet is plotted piercing the centre of the plaquette, and for a $-1$ vortex a red jet is plotted. The direction of the jet is set according to a right-hand rule, such that
\begin{itemize}[leftmargin=*,itemsep=0pt,labelsep=12pt]
\item $P_{x\,y}=\pm 1\implies \pm\hat{z}$ direction.
\item $P_{y\,z}=\pm 1\implies \pm\hat{x}$ direction.
\item $P_{x\,z}=\pm 1\implies \mp\hat{y}$ direction.
\end{itemize}
An example of this plotting convention is shown in Fig.~\ref{fig:SpacialVortices}.\\
%
\begin{figure}[ht]
\centering
\scalebox{1}{\input{Chapter7/Figs/SpaceVortices.tex}}
\caption[An example of the plotting convention for vortices located within a 3D time slice.]{\label{fig:SpacialVortices}An example of the plotting convention for vortices located within a 3D time slice. \textbf{Left:} A $+1$ vortex in the $+\hat{z}$ direction. \textbf{Right:} A $-1$ vortex in the $-\hat{z}$ direction.}
\end{figure}
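%
As a minimal sketch (not the visualisation code itself), this right-hand-rule convention can be encoded as a lookup table from the plaquette plane and centre charge to a jet direction:
\begin{verbatim}
# Hypothetical helper mapping (plane, charge) -> jet direction (x, y, z)
JET_DIRECTION = {
    ('xy', +1): (0, 0, +1), ('xy', -1): (0, 0, -1),
    ('yz', +1): (+1, 0, 0), ('yz', -1): (-1, 0, 0),
    ('xz', +1): (0, -1, 0), ('xz', -1): (0, +1, 0),
}
\end{verbatim}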
%
\clearpage
The 3D slices for $t=1,2$ with the spatially-oriented vortices plotted appear as in Figs.~\ref{fig:PlaqT01}, \ref{fig:PlaqT02}. At first glance the vortex structure appears highly complex, and it is difficult to identify the significant features. As such, we make use of the 3D models to hone in and isolate the important features present in these slices. We present some of these features in Fig.~\ref{fig:VortexFeatures}.\\
%
\begin{figure}[htb!]
\centering
\begin{subfigure}[b]{0.3\textwidth}
\includegraphics[width=\textwidth]{./plaqt1_line.png}
\end{subfigure}\hfill
\begin{subfigure}[b]{0.3\textwidth}
\includegraphics[width=\textwidth]{./plaqt1_loop.png}
\end{subfigure}\hfill
\begin{subfigure}[b]{0.3\textwidth}
\includegraphics[width=\textwidth]{./plaqt1_monopole.png}
\end{subfigure}
\caption[Examples of the properties of projected vortices.]{\label{fig:VortexFeatures} \textbf{Left:} Vortices form continuous lines, highlighted with orange arrows in this diagram. Note that because of the lattice periodicity, these lines may wrap around to the opposite edge of the lattice. \textbf{Middle:} Vortices must form closed loops to conserve the vortex flux. \textbf{Right:} $SU(3)$ vortices are capable of forming monopoles or branching points where three vortices emerge or converge at a single point.}
\end{figure}
%
\begin{figure}[!p]
\centering
\includemedia[
noplaybutton,
3Dtoolbar,
3Dmenu,
label=Plaq_CFG95_T01.u3d,
3Dviews=./Chapter7/Views/Views_Plaq_T01.vws,
3Dcoo = 10 10 20, %Centre of orbit, half lattice length in each direction
3Dc2c = 0 1 0, % Direction vector from camera to centre of orbit
3Droo = 50, % Radius of orbit, distance from camera to centre of orbit
3Droll = 270, % Roll about the camera vector in degrees
3Dlights=CAD,
width=\linewidth, % for 1330x970 from full screen capture
]{\includegraphics{Plaq_CFG95_T01.png}}{./Chapter7/U3D/Plaq_CFG95_T01.u3d}
%\includegraphics[width=\linewidth]{Plaq_CFG95_T01.png}
\caption{\label{fig:PlaqT01}The $t=1$ slice with all spatially-oriented vortices plotted. (\textbf{Interactive})}
\end{figure}
%
\begin{figure}
\includemedia[
noplaybutton,
3Dtoolbar,
3Dmenu,
label=Plaq_CFG95_T02.u3d,
3Dviews=./Chapter7/Views/Views_Plaq_T02.vws,
3Dcoo = 10 10 20, %Centre of orbit, half lattice length in each direction
3Dc2c = 0 1 0, % Direction vector from camera to centre of orbit
3Droo = 50, % Radius of orbit, distance from camera to centre of orbit
3Droll = 270, % Roll about the camera vector in degrees
3Dlights=CAD,
width=\linewidth, % for 1330x970 from full screen capture
]{\includegraphics{Plaq_CFG95_T02.png}}{./Chapter7/U3D/Plaq_CFG95_T02.u3d}
%\includegraphics[width=\linewidth]{Plaq_CFG95_T02.png}
\caption{\label{fig:PlaqT02}The $t=2$ slice with all spatially-oriented vortices plotted. (\textbf{Interactive})}
\end{figure}
%
It is an excellent sanity check to see that the vortices do indeed form closed lines, as they must to conserve the centre flux and satisfy the Bianchi identity~\cite{Engelhardt:2003wm,Spengler:2018dxt}. We also observe that the vortex loops tend to be large. This agrees with the observation made of $SU(2)$ vortices in Refs.~\cite{Engelhardt:1999fd,Bertle:1999tw} that below the critical deconfinement temperature, $T_C$, almost all vortices identified had the maximum possible extent. It will be the subject of future work to investigate whether as the temperature increases the $SU(3)$ vortex loops begin to shrink and cease to percolate, indicating a transition to the deconfining phase.\\
The presence of branching/monopole points is of particular interest, as previous studies have primarily focussed on $SU(2)$ theory which is free from these structures. This is because it is only in $SU(3)$ (or more generally, $SU(N)$ with $N>2$) that it is possible to conserve the centre flux at the intersection of 3 vortices, as shown in Fig.~\ref{fig:VortexBranching}. It is clear from our visualisations that these points occur frequently in the confining phase, in agreement with the findings of Ref.~\cite{Spengler:2018dxt}. The ambiguity between monopoles and branching points arises from the lack of definite orientation for the vortex line. With our plotting convention, each jet can be thought of as indicating directed flow of $+1$ centre charge. However, this is entirely equivalent to picturing $-1$ charge flowing in the direction opposite to the jet due to the fact that the $P_{\mu\nu} = P_{\nu\mu}^\dagger$, so calculating the plaquette with the opposite orientation will give the opposite charge. Furthermore, because $\exp(2\pi i/3) = \exp(-4\pi i/3)$, one unit of positive charge is equivalent to two units of negative charge (and vice-versa), and hence we could also interpret our models as representing the directed flow of two units of negative charge. This ambiguity is highlighted in Fig.~\ref{fig:VortexBranching}, where we see the equivalence between a branching point and a monopole. Although it may seem to be a drawback, the lack of definite orientation for vortices is also an important property of the vortex model, as it permits non-vanishing topological charge~\cite{Engelhardt:2010ft,Engelhardt:1999xw}. It is nevertheless important to keep in mind the presence of this ambiguity when visualising the vortex vacuum.
%
\begin{figure}[htb!]
\centering
\includegraphics[width=0.9\linewidth]{./VortexBranching.pdf}
\caption[Vortex branching and monopole points]{\label{fig:VortexBranching}\textbf{Left:} A vortex branching point. \textbf{Right:} A vortex monopole. The arrows indicate the direction of flow for the labelled charge. Note that for both figures, the vortex charge is conserved at the vertex. By considering the charge of the left-most vortex flowing in the opposite direction, these two diagrams can be interchanged.}
\end{figure}
%
\section{Spacetime-oriented Vortices}
For each link in a given 3D slice there are two additional plaquettes that lie in the $x_i - t$ plane, pointing in the positive and negative time directions. Vortices associated with spacetime-oriented plaquettes contain information about the way the line-like vortices evolve with time, or equivalently, how the vortex surfaces appear in four dimensions. In a given 3D slice we only have access to one link associated with a spacetime-oriented vortex, and as such we plot an arrow along this link to indicate its association with this vortex. We adopt the following plotting convention for these spacetime-oriented vortices:
\begin{itemize}[leftmargin=*,itemsep=0pt,labelsep=12pt]
\item \makebox[13em][l]{$+1$ vortex, forward in time} $\implies$ cyan arrow, positively oriented.
\item \makebox[13em][l]{$+1$ vortex, backward in time} $\implies$ cyan arrow, negatively oriented.
\item \makebox[13em][l]{$-1$ vortex, forward in time} $\implies$ orange arrow, positively oriented.
\item \makebox[13em][l]{$-1$ vortex, backward in time} $\implies$ orange arrow, negatively oriented.
\end{itemize}
An example of these conventions is shown in Fig.~\ref{fig:TimeVortices}. Utilising these conventions, we see that the first two time slices now appear as Figs.~\ref{fig:PlaqLinkT01}, \ref{fig:PlaqLinkT02}.\\
%
\begin{figure}[H]
\centering
\begin{subfigure}[t]{0.45\textwidth}
\centering
\input{Chapter7/Figs/p1TimeVortex.tex}
\end{subfigure}
\hfill
\begin{subfigure}[t]{0.45\textwidth}
\centering
\input{Chapter7/Figs/m1TimeVortex.tex}
\end{subfigure}
\caption[Example of the plotting convention used for spacetime-oriented vortices.]{\textbf{Left:} A $+1$ vortex in the forward $x-t$ plane (shaded blue) will be plotted as a cyan arrow in the $+\hat{x}$ direction. \textbf{Right:} A $-1$ vortex in the forward $x-t$ plane (shaded red) will be plotted as an orange arrow in the $+\hat{x}$ direction.}
\label{fig:TimeVortices}
\end{figure}
%
%\newpage
\begin{figure}[!p]
\centering
\includemedia[
noplaybutton,
3Dtoolbar,
3Dmenu,
label=PlaqLink_CFG95_T01.u3d,
3Dviews=./Chapter7/Views/Views_PlaqLink_T01.vws,
3Dcoo = 10 10 20, %Centre of orbit, half lattice length in each direction
3Dc2c = 0 1 0, % Direction vector from camera to centre of orbit
3Droo = 50, % Radius of orbit, distance from camera to centre of orbit
3Droll = 270, % Roll about the camera vector in degrees
3Dlights=CAD,
width=\linewidth, % for 1330x970 from full screen capture
]{\includegraphics{PlaqLink_CFG95_T01.png}}{./Chapter7/U3D/PlaqLink_CFG95_T01.u3d}
%\includegraphics[width=\linewidth]{PlaqLink_CFG95_T01.png}
\caption{\label{fig:PlaqLinkT01}The $t=1$ slice with all spatially-oriented and spacetime-oriented vortices plotted. (\textbf{Interactive})}
\end{figure}
\begin{figure}
\centering
\includemedia[
noplaybutton,
3Dtoolbar,
3Dmenu,
label=PlaqLink_CFG95_T02.u3d,
3Dviews=./Chapter7/Views/Views_PlaqLink_T02.vws,
3Dcoo = 10 10 20, %Centre of orbit, half lattice length in each direction
3Dc2c = 0 1 0, % Direction vector from camera to centre of orbit
3Droo = 50, % Radius of orbit, distance from camera to centre of orbit
3Droll = 270, % Roll about the camera vector in degrees
3Dlights=CAD,
width=\linewidth, % for 1330x970 from full screen capture
]{\includegraphics{PlaqLink_CFG95_T02.png}}{./Chapter7/U3D/PlaqLink_CFG95_T02.u3d}
%\includegraphics[width=\linewidth]{PlaqLink_CFG95_T02.png}
\caption{\label{fig:PlaqLinkT02}The $t=2$ slice with all spatially-oriented and spacetime-oriented vortices plotted. (\textbf{Interactive})}
\end{figure}
%
%\newpage
As we step through time, we expect to see the positively oriented vortices retain their colour but swap direction as they transition from being forwards in time to backwards in time, as shown in Fig.~\ref{fig:VortexArrows}. The spacetime-oriented vortices act as predictors of vortex motion between slices. To see this, consider Fig.~\ref{fig:VortexMotion}. In Fig.~\ref{fig:VortexMotion1}, we observe a line of four $-1$ (red) spatially-oriented vortices with no spacetime-oriented links associated with them, indicating that this line should remain fixed as we step through time. Alternatively, towards the top of the red line we observe a branching point with two associated $-1$ spacetime-oriented arrows, indicating that this branching point should move in the direction of the spacetime-oriented vortices. Observing the same region at $t=2$ in Fig.~\ref{fig:VortexMotion2}, we see that this is precisely what occurs. The vortex line has remained fixed, whereas the branching point has shifted one lattice spacing to the left, in accordance with the direction indicated by the spacetime-oriented vortex.\\
%
\begin{figure}[htb!]
\centering
\begin{subfigure}[b]{0.45\textwidth}
\centering
\includegraphics[height=0.2\textheight]{./plaqlinet1_forwardarrows.png}
\subcaption{\label{fig:VortexArrows1}$t=1$}
\end{subfigure}
\hfill
\begin{subfigure}[b]{0.45\textwidth}
\centering
\includegraphics[height=0.2\textheight]{./plaqlinet2_backwardarrows.png}
\caption{\label{fig:VortexArrows2}$t=2$}
\end{subfigure}
\caption[The change in spacetime-oriented vortices as we step through time.]{\label{fig:VortexArrows}The change in spacetime-oriented vortices as we step through time. We observe the spacetime-oriented arrows change direction, however the phase (colour) of the vortex remains the same.}
\end{figure}
%
\begin{figure}[H]
\centering
\begin{subfigure}[b]{0.45\textwidth}
\centering
\includegraphics[height=0.3\textheight]{./plaqlinet1_line&monopole.png}
\subcaption{\label{fig:VortexMotion1}$t=1$}
\end{subfigure}
\hspace{-1cm}
\begin{subfigure}[b]{0.45\textwidth}
\centering
\includegraphics[height=0.3\textheight]{./plaqlinet2_line&monopole.png}
\caption{\label{fig:VortexMotion2}$t=2$}
\end{subfigure}
\caption[An example of spacetime-oriented vortices predicting the motion of the spatially-oriented vortices.]{\label{fig:VortexMotion}An example of spacetime-oriented vortices predicting the motion of the spatially-oriented vortices. We observe the $-1$ (red) vortex line with no associated spacetime-oriented vortices remain stationary as we transition from $t=1$ to $t=2$.}
\end{figure}
%
Another example of spacetime-oriented vortices predicting the motion of vortices is shown in Fig.~\ref{fig:VortexLineMotion}. Here we see in Fig.~\ref{fig:VortexLineMotion1} a line of three $+1$ spatially-oriented vortices each with an associated $-1$ spacetime-oriented vortex below them. As we step to $t=2$ in Fig.~\ref{fig:VortexLineMotion2} we observe the spacetime-oriented arrows change direction as expected, and the spatially-oriented vortex line shifts one lattice spacing down such that the spacetime-oriented vortices are now above them.\\
%
\begin{figure}[H]
\centering
\begin{subfigure}[b]{0.45\textwidth}
\centering
\includegraphics[height=0.2\textheight]{./plaqlinet1_SW8_line.png}
\subcaption{\label{fig:VortexLineMotion1}$t=1$}
\end{subfigure}
\hspace{-0.5cm}
\begin{subfigure}[b]{0.45\textwidth}
\centering
\includegraphics[height=0.2\textheight]{./plaqlinet2_SW8_line.png}
\subcaption{\label{fig:VortexLineMotion2}$t=2$}
\end{subfigure}
\caption[A second example of spacetime-oriented vortices predicting the motion of the spatially-oriented vortices.]{\label{fig:VortexLineMotion}A second example of spacetime-oriented vortices predicting the motion of the spatially-oriented vortices. Here we see the $+1$ (blue) vortex line transition one lattice spacing down as we step from $t=1$ to $t=2$.}
\end{figure}
%
The cases presented in Fig.~\ref{fig:VortexMotion} and Fig.~\ref{fig:VortexLineMotion} are ideal, where the spatially-oriented vortex shifts only one lattice spacing between time slices. However, it is frequently the case where the spatially-oriented vortices shift multiple lattice spacings per time step. To see how this occurs diagrammatically, consider Fig.~\ref{fig:ComplexStructure}. The shaded red plaquettes indicate the location of a spatially-oriented vortex which would be plotted in the suppressed $\hat{x}$ direction. The red line demonstrates how the centre charge pierces between the two time slices. Within each slice we would observe the spacetime-oriented links shown, however the spatially-oriented vortex appears to move three plaquettes in one time step. These multiple transitions make it harder to track the motion of vortices between time slices; nevertheless, the spacetime-oriented vortices are a useful tool for understanding how centre vortices evolve with time. It is worth making clear that if a spatially-oriented vortex has no associated spacetime-oriented vortices then it is guaranteed to remain stationary. In this respect, the lack of spacetime-oriented vortices is a clear and valuable indicator of vortex behaviour.
%
\begin{figure}[H]
\centering
\scalebox{1}{
\input{Chapter7/Figs/VortexLineDiagram.tex}}
\caption{\label{fig:ComplexStructure}A demonstration of how spatially-oriented vortices can transition multiple lattice spacings in a single time step.}
\end{figure}
\section{Topological Charge}\label{sec:TopChargeVis}
We now wish to observe the relationship between vortices and topological charge. As stated in Sec.~\ref{sec:TopQ}, the topological charge density is given by
%
\begin{equation}
q ( x ) = \frac { 1 } { 32 \pi ^ { 2 } } \, \epsilon _ { \mu \nu \rho \sigma } \, \Tr\left( F _ { \mu \nu }( x ) \, F _ { \rho \sigma }(x) \right) \, .
\end{equation}
%
Given the presence of the antisymmetric tensor, it is clear that for there to be non-trivial topological charge present on the projected vortex configurations, we require that the tangent vectors of the vortex surface span all four dimensions. This condition is met at so-called \textit{singular points}. The contribution to the topological charge from these singular points is discussed in detail in Refs.~\cite{Bruckmann:2003yd,Engelhardt:2010ft,Engelhardt:2000wc,Engelhardt:1999xw}. In our visualisations, these singular points appear as a spatially-oriented vortex running parallel to a spacetime-oriented vortex, as shown in Fig.~\ref{fig:SingularPoint}.\\
%
\begin{figure}[H]
\centering
\scalebox{0.85}{\input{Chapter7/Figs/SingularPoint.tex}}
\caption[The signature of a singular point at the origin. Here the vortex surface spans all four directions.]{\label{fig:SingularPoint} The signature of a singular point, in which the vortex surface spans all four directions. The colour and orientation of the vortices is irrelevant, so long as they are parallel.}
\end{figure}
%
We calculate the topological charge via the method outlined in Sec.~\ref{sec:TopQ} on a lattice configuration after eight sweeps of cooling. This cooling is necessary to remove short-range fluctuations, but is a sufficiently low number of sweeps so as to minimally perturb the configuration. We plot regions of positive topological charge in red, and regions of negative topological charge in blue, with a colour gradient to indicate the magnitude. Only topological charge of sufficient magnitude is plotted to better emphasise regions of significant topological charge. Overlaying the topological charge visualisation onto Figs.~\ref{fig:PlaqLinkT01}, \ref{fig:PlaqLinkT02}, we obtain Figs.~\ref{fig:PlaqLinkQT01}, \ref{fig:PlaqLinkQT02}. By studying the regions of high topological charge, we note that we can indeed observe their relationship with singular points, as shown in Fig.~\ref{fig:SingularPointVis}.\\
%
\begin{figure}[H]
\centering
\begin{subfigure}[b]{0.45\textwidth}
\centering
\includegraphics[height=0.2\textheight]{./PlaqLinkQt2_SingularCollection.png}
\end{subfigure}
\hfill
\begin{subfigure}[b]{0.5\textwidth}
\centering
\includegraphics[height=0.2\textheight]{./PlaqLinkt2_SingularCollection.png}
\end{subfigure}
\caption[A collection of singular points shown with and without topological charge overlaid.]{\label{fig:SingularPointVis}A collection of singular points shown with (left) and without (right) topological charge overlaid.}
\end{figure}
%
\begin{figure}[!p]
\centering
\includemedia[
noplaybutton,
3Dtoolbar,
3Dmenu,
label=PlaqLinkQ_CFG95_T01.u3d,
3Dviews=./Chapter7/Views/Views_PlaqLinkQ_T01.vws,
3Dcoo = 10 10 20, %Centre of orbit, half lattice length in each direction
3Dc2c = 0 1 0, % Direction vector from camera to centre of orbit
3Droo = 50, % Radius of orbit, distance from camera to centre of orbit
3Droll = 270, % Roll about the camera vector in degrees
3Dlights=CAD,
width=\linewidth, % for 1330x970 from full screen capture
]{\includegraphics{PlaqLinkTopQ_CFG95_T01.png}}{./Chapter7/U3D/PlaqLinkTopQ_CFG95_T01.u3d}
%\includegraphics[width=\linewidth]{./PlaqLinkTopQ_CFG95_T01.png}
\caption{\label{fig:PlaqLinkQT01}Topological charge density overlaying the $t=1$ slice. (\textbf{Interactive})}
\end{figure}
%
\begin{figure}
\centering
\includemedia[
noplaybutton,
3Dtoolbar,
3Dmenu,
label=PlaqLinkQ_CFG95_T02.u3d,
3Dviews=./Chapter7/Views/Views_PlaqLinkQ_T02.vws,
3Dcoo = 10 10 20, %Centre of orbit, half lattice length in each direction
3Dc2c = 0 1 0, % Direction vector from camera to centre of orbit
3Droo = 50, % Radius of orbit, distance from camera to centre of orbit
3Droll = 270, % Roll about the camera vector in degrees
3Dlights=CAD,
width=\linewidth, % for 1330x970 from full screen capture
]{\includegraphics{PlaqLinkTopQ_CFG95_T02.png}}{./Chapter7/U3D/PlaqLinkTopQ_CFG95_T02.u3d}
%\includegraphics[width=\linewidth]{./PlaqLinkTopQ_CFG95_T02.png}
\caption{\label{fig:PlaqLinkQT02}Topological charge density overlaying the $t=2$ slice. (\textbf{Interactive})}
\end{figure}
To quantify the correlation between vortex locations and topological charge, we use the following measure
%
\begin{equation}
C = V\frac{\sum_x |q(x)|\,L(x)}{\sum_x |q(x)|\,\sum_x L(x)}\, ,
\label{eq:TopQCorrelation}
\end{equation}
%
where $V$ is the lattice volume, and
%
\begin{equation}
L(x) =
\begin{cases}
1\, , & \text{Vortex associated with any plaquette touching $x$}\\
0\, , & \text{No vortex associated with any plaquette touching $x$}\, .
\end{cases}
\end{equation}
%
In the case of no correlation, $C=1$. If there is correlation or anti-correlation, then $C>1$ or $C<1$ respectively. The value of $C$ for our configurations is shown in Fig.~\ref{fig:Correlation}, with an average over all 100 configurations of $\bar{C} = 1.46$. This indicates a correlation between vortex locations and regions of high topological charge. Further work to investigate the precise nature of this correlation and its relationship to singular points is planned.\\
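As an illustrative sketch (not the analysis code used here), Eq.~\eqref{eq:TopQCorrelation} can be evaluated directly once $|q(x)|$ and $L(x)$ are stored as flat NumPy arrays over the lattice sites:
\begin{verbatim}
import numpy as np

def correlation(q, L):
    # q: topological charge density per site; L: 0/1 vortex indicator
    q = np.abs(q)
    V = q.size
    return V * np.sum(q * L) / (np.sum(q) * np.sum(L))
\end{verbatim}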
%
\begin{figure}[H]
\centering
\includegraphics[width=0.8\linewidth]{./Correlation.pdf}
\caption[The correlation measure for each configuration.]{\label{fig:Correlation} The correlation measure for each configuration. The dashed line indicates the average value across all 100 configurations, with one standard deviation of the distribution shown in green.}
\end{figure}
%
Finally, it is also interesting to study the vortex structure of the lattice after cooling. After eight sweeps of cooling, we obtain Fig.~\ref{fig:PlaqLinkTopQ_SW8}. We clearly see that the complexity of the vortex structure has been greatly reduced, however the regions associated with topological charge have been less affected by the smoothing process. We can see this by recalculating the topological charge correlation (see Eq.~\eqref{eq:TopQCorrelation}), shown in Fig.~\ref{fig:Correlation_sw07}. The average correlation has increased to a new average of $\bar{C}=1.76$, indicating that the residual vortices show a stronger correlation to the regions of high topological charge than those on the un-cooled configurations. This further supports the previously mentioned notion that cooling serves to isolate `genuine' topological objects and filter out those arising from fluctuations during the Monte-Carlo lattice generation process.
%
\begin{figure}[H]
\centering
\includemedia[
noplaybutton,
3Dtoolbar,
3Dmenu,
label=PlaqLinkQ_CFG95_T01_8SW.u3d,
3Dviews=./Chapter7/Views/Views_PlaqLinkQ_T01_8SW.vws,
3Dcoo = 10 10 20, %Centre of orbit, half lattice length in each direction
3Dc2c = 0 1 0, % Direction vector from camera to centre of orbit
3Droo = 50, % Radius of orbit, distance from camera to centre of orbit
3Droll = 270, % Roll about the camera vector in degrees
3Dlights=CAD,
width=0.85\linewidth, % for 1330x970 from full screen capture
]{\includegraphics{PlaqLinkTopQ_CFG95_T01_8SW.png}}{./Chapter7/U3D/PlaqLinkTopQ_CFG95_T01_8SW.u3d}
%\includegraphics[width=\linewidth]{./PlaqLinkTopQ_CFG95_T01_8SW.png}
\caption{\label{fig:PlaqLinkTopQ_SW8}The vortex structure and topological charge after eight sweeps of cooling, for $t=1$. (\textbf{Interactive})}
\end{figure}
%
\begin{figure}[H]
\centering
\includegraphics[width=0.75\linewidth]{./Correlation_sw07.pdf}
\caption[The correlation measure for each configuration after eight sweeps of cooling.]{\label{fig:Correlation_sw07}The correlation measure for each configuration after eight sweeps of cooling. The dashed line indicates the average value across all 100 configurations, with one standard deviation of the distribution shown in green.}
\end{figure}
\section{Summary}
In this chapter we have presented a new way to visualise the four-dimensional structure of centre vortices on the lattice through the use of 3D visualisation techniques. These visualisations give new insight into the geometry and time-evolution of vortices, as well as revealing a direct connection to topological charge. The work presented here confirms the qualitative expectations of the centre vortex model. In future it will be valuable to explore the Gribov issue in vortex identification and the sensitivity of our visualisations to the Gribov copy problem. Quantitative studies of branching/monopole points and singular points are also of interest. Studying the change in size of centre vortex loops as the temperature tends towards the deconfining phase would be a worthwhile line of investigation as well. From this work, it is clear that visualisations of centre vortices provide valuable information about the structure of the QCD vacuum that is otherwise not apparent through purely numerical results, and that visualisations elegantly complement the exploration of vortex models.
|
{"hexsha": "9069de8d438a718350206f84e5bc61cde672a384", "size": 28705, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Chapter7/chapter7.tex", "max_stars_repo_name": "jamesbiddle/Masters_Thesis", "max_stars_repo_head_hexsha": "275177c3167b490d678575f0078cc6c87614b7bb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Chapter7/chapter7.tex", "max_issues_repo_name": "jamesbiddle/Masters_Thesis", "max_issues_repo_head_hexsha": "275177c3167b490d678575f0078cc6c87614b7bb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Chapter7/chapter7.tex", "max_forks_repo_name": "jamesbiddle/Masters_Thesis", "max_forks_repo_head_hexsha": "275177c3167b490d678575f0078cc6c87614b7bb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 79.2955801105, "max_line_length": 1752, "alphanum_fraction": 0.7707716426, "num_tokens": 7925}
|
# -*- coding: utf-8 -*-
"""
Implements evaluating the tight-binding model by checking the distance
between its orbitals and the atomic positions.
"""
import tbmodels
import numpy as np
from aiida import orm
from aiida.engine import calcfunction, run_get_node
from aiida.engine.processes import ExitCode
from ._base import ModelEvaluationBase
@calcfunction
def get_max_distance(tb_model, structure):
"""
Get the maximum cartesian distance between model orbitals and the
nearest atom.
"""
with tb_model.open(mode='rb') as input_file:
model = tbmodels.io.load(input_file)
reference_structure_pmg = structure.get_pymatgen()
if not np.allclose(model.uc, reference_structure_pmg.lattice.matrix):
return ExitCode(
300,
"The model and reference structure unit cells do not match.",
invalidates_cache=False
)
    # For each model orbital, the distance to the nearest atom; the lattice
    # object returns cartesian distances from fractional coordinates
    dist_per_orbital = np.min(
        reference_structure_pmg.lattice.get_all_distances(
            model.pos, reference_structure_pmg.frac_coords
        ),
        axis=-1
    )
max_dist = np.max(dist_per_orbital)
return orm.Float(max_dist)
class MaximumOrbitalDistanceEvaluation(ModelEvaluationBase):
"""
Evaluate the maximum distance between model orbitals and crystal
atoms.
"""
@classmethod
def define(cls, spec):
super().define(spec)
spec.outline(cls.run_evaluation)
def run_evaluation(self):
"""Run the calcfunction to get the maximum distance.
"""
res, node = run_get_node(
get_max_distance,
tb_model=self.inputs.tb_model,
structure=self.inputs.reference_structure
)
# Propagate exit code
if not node.is_finished_ok:
return ExitCode(node.exit_status, node.exit_message)
self.out('cost_value', res)
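# Illustrative usage of the calcfunction (a sketch; requires a configured
# AiiDA profile, and `model_node` / `structure_node` are hypothetical):
#   result, node = run_get_node(
#       get_max_distance, tb_model=model_node, structure=structure_node
#   )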
|
{"hexsha": "8459c9856b050ded7ad92849b089d04dad932187", "size": 1866, "ext": "py", "lang": "Python", "max_stars_repo_path": "aiida_tbextraction/model_evaluation/_pos_distance.py", "max_stars_repo_name": "greschd/aiida-tbextraction", "max_stars_repo_head_hexsha": "6b51cd6fce8feaea6c7a9235a49073a2500eead3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-06-26T03:02:13.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-30T09:26:12.000Z", "max_issues_repo_path": "aiida_tbextraction/model_evaluation/_pos_distance.py", "max_issues_repo_name": "greschd/aiida-tbextraction", "max_issues_repo_head_hexsha": "6b51cd6fce8feaea6c7a9235a49073a2500eead3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2019-09-26T20:17:44.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-04T18:53:54.000Z", "max_forks_repo_path": "aiida_tbextraction/model_evaluation/_pos_distance.py", "max_forks_repo_name": "greschd/aiida-tbextraction", "max_forks_repo_head_hexsha": "6b51cd6fce8feaea6c7a9235a49073a2500eead3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2018-07-19T09:13:48.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-13T19:57:21.000Z", "avg_line_length": 27.0434782609, "max_line_length": 73, "alphanum_fraction": 0.6752411576, "include": true, "reason": "import numpy", "num_tokens": 396}
|
module TestPkg
using FilePathsBase
import Base: ==
__init__() = FilePathsBase.register(TestPath)
# Warning: We only expect this test to work on posix systems.
struct TestPath <: AbstractPath
segments::Tuple{Vararg{String}}
root::String
drive::String
separator::String
end
TestPath() = TestPath(tuple(), "", "test:", ";")
function TestPath(str::AbstractString)
str = String(str)
@assert startswith(str, "test:")
drive = "test:"
root = ""
str = str[6:end]
if isempty(str)
        return TestPath(tuple("."), "", drive, ";")
end
tokenized = split(str, ";")
if isempty(tokenized[1])
root = ";"
end
return TestPath(tuple(map(String, filter!(!isempty, tokenized))...), root, drive, ";")
end
FilePathsBase.ispathtype(::Type{TestPath}, str::AbstractString) = startswith(str, "test:")
function test2posix(fp::TestPath)
return PosixPath(fp.segments, isempty(fp.root) ? "" : "/")
end
function posix2test(fp::PosixPath)
return TestPath(fp.segments, isempty(fp.root) ? "" : ";", "test:", ";")
end
# We're going to implement most of the posix API, but this won't make sense for many path types
FilePathsBase.exists(fp::TestPath) = exists(test2posix(fp))
Base.real(fp::TestPath) = posix2test(real(test2posix(fp)))
FilePathsBase.stat(fp::TestPath) = stat(test2posix(fp))
FilePathsBase.lstat(fp::TestPath) = lstat(test2posix(fp))
FilePathsBase.mode(fp::TestPath) = stat(fp).mode
Base.size(fp::TestPath) = stat(fp).size
FilePathsBase.created(fp::TestPath) = stat(fp).ctime
FilePathsBase.modified(fp::TestPath) = stat(fp).mtime
FilePathsBase.isdir(fp::TestPath) = isdir(mode(fp))
Base.isfile(fp::TestPath) = isfile(mode(fp))
Base.islink(fp::TestPath) = islink(lstat(fp).mode)
Base.issocket(fp::TestPath) = issocket(mode(fp))
Base.isfifo(fp::TestPath) = isfifo(mode(fp))
Base.ischardev(fp::TestPath) = ischardev(mode(fp))
Base.isblockdev(fp::TestPath) = isblockdev(mode(fp))
Base.ismount(fp::TestPath) = ismount(test2posix(fp))
FilePathsBase.isexecutable(fp::TestPath) = isexecutable(test2posix(fp))
Base.iswritable(fp::TestPath) = iswritable(test2posix(fp))
Base.isreadable(fp::TestPath) = isreadable(test2posix(fp))
Base.cd(fp::TestPath) = cd(test2posix(fp))
Base.cd(f::Function, fp::TestPath) = cd(f, test2posix(fp))
Base.mkdir(fp::TestPath; kwargs...) = mkdir(test2posix(fp); kwargs...)
Base.symlink(src::TestPath, dest::TestPath; kwargs...) = symlink(test2posix(src), test2posix(dest); kwargs...)
Base.rm(fp::TestPath; kwargs...) = rm(test2posix(fp); kwargs...)
Base.readdir(fp::TestPath) = readdir(test2posix(fp))
Base.read(fp::TestPath) = read(test2posix(fp))
Base.write(fp::TestPath, x) = write(test2posix(fp), x)
Base.chown(fp::TestPath, args...; kwargs...) = chown(test2posix(fp), args...; kwargs...)
Base.chmod(fp::TestPath, args...; kwargs...) = chmod(test2posix(fp), args...; kwargs...)
end
|
{"hexsha": "c10463ebf3a1ad26c6d3ab831843ead09cfb89e5", "size": 2872, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/testpkg.jl", "max_stars_repo_name": "UnofficialJuliaMirror/FilePathsBase.jl-48062228-2e41-5def-b9a4-89aafe57970f", "max_stars_repo_head_hexsha": "9ea8d7cb5e638a386cf41b042875569620302d32", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/testpkg.jl", "max_issues_repo_name": "UnofficialJuliaMirror/FilePathsBase.jl-48062228-2e41-5def-b9a4-89aafe57970f", "max_issues_repo_head_hexsha": "9ea8d7cb5e638a386cf41b042875569620302d32", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/testpkg.jl", "max_forks_repo_name": "UnofficialJuliaMirror/FilePathsBase.jl-48062228-2e41-5def-b9a4-89aafe57970f", "max_forks_repo_head_hexsha": "9ea8d7cb5e638a386cf41b042875569620302d32", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.4567901235, "max_line_length": 110, "alphanum_fraction": 0.6963788301, "num_tokens": 807}
|
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
N = 2   # Minibatch size
H = 3   # Dimension of the hidden state vector
T = 20  # Number of time steps
dh = np.ones((N, H))
np.random.seed(3)  # Fix the random seed for reproducibility
# Wh = np.random.randn(H, H)        # unscaled: gradients tend to explode
Wh = np.random.randn(H, H) * 0.5    # scaled down: gradients tend to vanish
norm_list = []
for t in range(T):
dh = np.dot(dh, Wh.T)
norm = np.sqrt(np.sum(dh**2)) / N
norm_list.append(norm)
print(norm_list)
# Plot the graph
plt.plot(np.arange(len(norm_list)), norm_list)
plt.xlabel('time step')
plt.ylabel('norm')
plt.savefig('graph.png')
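# --- Supplementary check (a sketch, not in the original script) ---
# Each step multiplies dh by Wh.T, so the per-step growth of the norm is
# bounded by the largest singular value of Wh: if it is below 1 the gradient
# norm must shrink, if above 1 it can grow.
s_max = np.linalg.svd(Wh, compute_uv=False)[0]
print('largest singular value of Wh:', s_max)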
|
{"hexsha": "1c8b83ebeabd5fbd4651eefd165761b54641f711", "size": 649, "ext": "py", "lang": "Python", "max_stars_repo_path": "ch06/rnn_gradient_graph.py", "max_stars_repo_name": "YaGiNA/DLfS2", "max_stars_repo_head_hexsha": "3dbaba7a62c198b50849de2e3b74d92897a4cae7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-05-15T09:17:23.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-15T09:17:23.000Z", "max_issues_repo_path": "ch06/rnn_gradient_graph.py", "max_issues_repo_name": "YaGiNA/DLfS2", "max_issues_repo_head_hexsha": "3dbaba7a62c198b50849de2e3b74d92897a4cae7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ch06/rnn_gradient_graph.py", "max_forks_repo_name": "YaGiNA/DLfS2", "max_forks_repo_head_hexsha": "3dbaba7a62c198b50849de2e3b74d92897a4cae7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.6333333333, "max_line_length": 71, "alphanum_fraction": 0.6363636364, "include": true, "reason": "import numpy", "num_tokens": 194}
|
from typing import Optional
import math
import numpy as np
from banditpylib.arms import PseudoArm
from banditpylib.data_pb2 import Context, Actions, Feedback
from .utils import MABLearner
class Softmax(MABLearner):
r"""Softmax policy
At time :math:`t`, sample arm :math:`i` to play with sampling weight
.. math::
\exp\left( \bar{\mu}_i(t) / \gamma \right)
where :math:`\gamma` is a parameter to control how much exploration we want.
:param int arm_num: number of arms
:param float gamma: gamma
:param Optional[str] name: alias name
  .. note::
    As :math:`\gamma` approaches 0, the learner becomes increasingly likely
    to select the arm with the maximum empirical mean reward. As
    :math:`\gamma` approaches infinity, the policy tends towards uniform
    sampling.
"""
def __init__(self,
arm_num: int,
gamma: float = 1.0,
name: Optional[str] = None):
super().__init__(arm_num=arm_num, name=name)
if gamma <= 0:
      raise ValueError('Gamma is expected to be greater than 0. Got %.2f.' % gamma)
self.__gamma = gamma
def _name(self) -> str:
return 'softmax'
def reset(self):
self.__pseudo_arms = [PseudoArm() for arm_id in range(self.arm_num)]
# Current time step
self.__time = 1
def actions(self, context: Context) -> Actions:
del context
actions = Actions()
arm_pull = actions.arm_pulls.add()
if self.__time <= self.arm_num:
arm_pull.arm.id = self.__time - 1
else:
weights = np.array([
math.exp(self.__pseudo_arms[arm_id].em_mean / self.__gamma)
for arm_id in range(self.arm_num)
])
arm_pull.arm.id = np.random.choice(
self.arm_num, 1, p=[weight / sum(weights) for weight in weights])[0]
arm_pull.times = 1
return actions
def update(self, feedback: Feedback):
arm_feedback = feedback.arm_feedbacks[0]
self.__pseudo_arms[arm_feedback.arm.id].update(
np.array(arm_feedback.rewards))
self.__time += 1
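# --- Illustrative sketch (not part of the original class) ---
# Shows how gamma shapes the sampling weights from the docstring: a small
# gamma concentrates mass on the empirically best arm, a large gamma
# flattens the distribution towards uniform. Plain numpy, with made-up
# empirical means.
def _softmax_weights_sketch(gamma: float) -> np.ndarray:
  em_means = np.array([0.1, 0.5, 0.9])
  weights = np.exp(em_means / gamma)
  return weights / weights.sum()
# e.g. gamma=0.05 gives ~[0, 0, 1]; gamma=100 gives ~[0.33, 0.33, 0.33]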
|
{"hexsha": "08abf3ff4ed3e871bb926c63f79397fcd80bb599", "size": 2055, "ext": "py", "lang": "Python", "max_stars_repo_path": "banditpylib/learners/mab_learner/softmax.py", "max_stars_repo_name": "Alanthink/banditpylib", "max_stars_repo_head_hexsha": "ba6dc84d87ae9e9aec48cd622ec9988dccdd18c6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 20, "max_stars_repo_stars_event_min_datetime": "2020-02-05T23:53:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-16T21:06:16.000Z", "max_issues_repo_path": "banditpylib/learners/mab_learner/softmax.py", "max_issues_repo_name": "sheelfshah/banditpylib", "max_issues_repo_head_hexsha": "d455424ed74be1850ee3969b7b31f08d49339005", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 18, "max_issues_repo_issues_event_min_datetime": "2020-02-06T00:23:26.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-06T16:37:10.000Z", "max_forks_repo_path": "banditpylib/learners/mab_learner/softmax.py", "max_forks_repo_name": "sheelfshah/banditpylib", "max_forks_repo_head_hexsha": "d455424ed74be1850ee3969b7b31f08d49339005", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2020-02-06T00:05:10.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-18T17:24:29.000Z", "avg_line_length": 28.1506849315, "max_line_length": 79, "alphanum_fraction": 0.6622871046, "include": true, "reason": "import numpy", "num_tokens": 545}
|
import numpy as np
import cv2
from pyzbar.pyzbar import decode
def checkQR(img):
qrList = []
for qrcode in decode(img):
data = qrcode.data.decode('utf-8')
qrList.append(data)
if len(qrList)>0:
return True,qrList
else:
return False,qrList
img = cv2.imread("re4.jpg")
# Apply a sharpening kernel to make the QR code easier to detect
kernel = np.array([[-1, -1, -1],[-1, 8, -1],[-1, -1, 0]], np.float32)
kernel = 1/2 * kernel
sharp = cv2.filter2D(img, -1, kernel)
print(checkQR(sharp))
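# --- Optional self-test (a sketch, not in the original; assumes the
# third-party 'qrcode' package is installed) ---
# Generates a known QR code on disk and runs it through checkQR, so the
# pipeline can be exercised without the re4.jpg sample.
try:
    import qrcode
    qrcode.make('hello world').save('selftest.png')
    print(checkQR(cv2.imread('selftest.png')))  # expected: (True, ['hello world'])
except ImportError:
    pass  # qrcode not installed; skip the self-test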
|
{"hexsha": "c7bd3591b247e9573f0dff2d784619fba82d2c97", "size": 581, "ext": "py", "lang": "Python", "max_stars_repo_path": "QR_code/save/QR _function.py", "max_stars_repo_name": "RobEn-AAST/AI-UAVC", "max_stars_repo_head_hexsha": "732683fd5821d492b772cc5f966e86aed164a68c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2022-02-05T15:51:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-05T17:38:54.000Z", "max_issues_repo_path": "QR_code/save/QR _function.py", "max_issues_repo_name": "RobEn-AAST/AI-UAVC", "max_issues_repo_head_hexsha": "732683fd5821d492b772cc5f966e86aed164a68c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "QR_code/save/QR _function.py", "max_forks_repo_name": "RobEn-AAST/AI-UAVC", "max_forks_repo_head_hexsha": "732683fd5821d492b772cc5f966e86aed164a68c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.6060606061, "max_line_length": 71, "alphanum_fraction": 0.578313253, "include": true, "reason": "import numpy", "num_tokens": 172}
|
## characters defined by actions/homomorphisms
function _action_class_fun(
conjugacy_cls::AbstractVector{CCl},
) where {CCl <: AbstractOrbit{<:PermutationGroups.AbstractPerm}}
vals = Int[PermutationGroups.nfixedpoints(first(cc)) for cc in conjugacy_cls]
# in general:
# vals = [tr(matrix_representative(first(cc))) for cc in conjugacy_cls]
return Characters.ClassFunction(vals, conjugacy_cls)
end
function _action_class_fun(
conjugacy_cls::AbstractVector{CCl},
) where {CCl <: AbstractOrbit{<:AbstractMatrix}}
vals = [tr(first(cc)) for cc in conjugacy_cls]
return Characters.ClassFunction(vals, conjugacy_cls)
end
function _action_class_fun(
hom::InducedActionHomomorphism{<:ByPermutations},
conjugacy_cls
)
vals = Int[PermutationGroups.nfixedpoints(induce(hom, first(cc))) for cc in conjugacy_cls]
# in general:
# vals = [tr(matrix_representative(first(cc))) for cc in conjugacy_cls]
return Characters.ClassFunction(vals, conjugacy_cls)
end
function _action_class_fun(
hom::InducedActionHomomorphism{<:ByLinearTransformation},
conjugacy_cls,
)
vals = [tr(induce(hom, first(cc))) for cc in conjugacy_cls]
return Characters.ClassFunction(vals, conjugacy_cls)
end
"""
action_character([::Type{T}, ]conjugacy_cls, tbl::CharacterTable)
Return the character of the representation given by the elements in the
conjugacy classes `conjugacy_cls`.
This corresponds to the classical definition of characters as traces of the
representation matrices.
"""
action_character(conjugacy_clss, tbl::CharacterTable) =
action_character(eltype(tbl), conjugacy_clss, tbl)
function action_character(::Type{T}, conjugacy_clss, tbl::CharacterTable) where T
ac_char = _action_class_fun(conjugacy_clss)
constituents = Int[dot(ac_char, χ) for χ in irreducible_characters(tbl)]
return Character{T}(tbl, constituents)
end
function action_character(::Type{T}, conjugacy_cls, tbl::CharacterTable) where T <: Union{AbstractFloat, ComplexF64}
ac_char = _action_class_fun(conjugacy_cls)
constituents = [dot(ac_char, χ) for χ in irreducible_characters(tbl)]
    # Sanity check: the computed constituents should be (numerically) integers.
    all_integral = all(constituents) do c
        ac = abs(c)
        abs(ac - round(ac)) < eps(real(T)) * length(conjugacy_cls)
    end
    @assert all_integral "action character constituents are not close to integers"
return Character{T}(tbl, round.(Int, abs.(constituents)))
end
"""
action_character([::Type{T}, ]hom::InducedActionHomomorphism, tbl::CharacterTable)
Return the character of the representation given by the images under `hom` of
elements in `conjugacy_classes(tbl)`.
This corresponds to the classical definition of characters as traces of the
representation matrices.
"""
action_character(hom::InducedActionHomomorphism, tbl::CharacterTable) =
action_character(eltype(tbl), hom, tbl)
function action_character(
::Type{T},
hom::InducedActionHomomorphism,
tbl::CharacterTable
) where T
ac_char = _action_class_fun(hom, conjugacy_classes(tbl))
constituents = Int[dot(ac_char, χ) for χ in irreducible_characters(T, tbl)]
return Character{T}(tbl, constituents)
end
|
{"hexsha": "545648b6387cba31de45a9596657802a7fc440c0", "size": 3054, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/action_characters.jl", "max_stars_repo_name": "thinh-le/SymbolicWedderburn.jl", "max_stars_repo_head_hexsha": "fe363d2e269602dd487d9f33665141e0cbdfc87b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/action_characters.jl", "max_issues_repo_name": "thinh-le/SymbolicWedderburn.jl", "max_issues_repo_head_hexsha": "fe363d2e269602dd487d9f33665141e0cbdfc87b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/action_characters.jl", "max_forks_repo_name": "thinh-le/SymbolicWedderburn.jl", "max_forks_repo_head_hexsha": "fe363d2e269602dd487d9f33665141e0cbdfc87b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.511627907, "max_line_length": 116, "alphanum_fraction": 0.7521283563, "num_tokens": 765}
|
import tensorflow as tf
import numpy as np
import scipy.misc
def normalization(data):
"""
    Normalize the input data
:param data: input
:return: normalized data
"""
_range = np.max(data) - np.min(data)
return (data - np.min(data)) / _range
def img255_normalization(img):
"""
Standard img normalization function in the project
:param img: input img
:return: normalized img
"""
n_img = (img / 255)
return n_img
def UnMasked_Img_layer(input):
"""
Keras Lambda layer for mask process
:param input: Raw output tensor from the Generator
:return: Unmasked output tensor
"""
Img = input[0]
Mask = input[1]
Mask = tf.concat([Mask, Mask, Mask], axis=-1)
return tf.multiply(Img, Mask)
def Mask_Img_layer(input):
"""
Keras Lambda layer for mask process
:param input: Raw output tensor from the Generator
:return: Masked output tensor
"""
Img = input[0]
Mask = input[1]
sMask = tf.subtract(tf.constant(1., shape=Mask.get_shape().as_list()), Mask)
sMask = tf.concat([sMask, sMask, sMask], axis=-1)
return tf.multiply(Img, sMask)
def Masked_Img(Imgs, Masks):
"""
Generate the masked img
    :param Imgs: input imgs
:param Masks: masks
:return: masked imgs
"""
# keep axis[-1]=3
mask=Masks[0,:,:,:]
Output_Imgs=[]
for img in Imgs:
Output_Imgs.append(img*(1-mask)+mask)
return np.array(Output_Imgs,dtype=float)
def Img2Img_with_mask(Img, Masks):
"""
    Convert img to img_with_mask
:param Img: input imgs
:param Masks: input masks
:return: imgs with masks
"""
# convert axis[-1]=3 to axis[-1]=4
return np.concatenate([Img, Masks], axis=-1)
def Pyramidal_Img_Resize(Batch_Img, pyramidal_img_size=[256, 128, 64, 32, 16, 8]):
"""
Resize the input Img to pyramidal imgs
:param Batch_Img: input imgs
:param pyramidal_img_size: list of different size for pyramidal imgs
:return: Pyramidal imgs
"""
batch_num = Batch_Img.shape[0]
i = 0
# decoder,batch,h,w,c
Batch_Pyramidal_Resized_Img = []
while i < len(pyramidal_img_size):
j = 0
Pyramidal_Resized_Img = []
while j < batch_num:
Pyramidal_Resized_Img.append(img255_normalization(
scipy.misc.imresize(Batch_Img[j], [pyramidal_img_size[i], pyramidal_img_size[i]])))
j = j + 1
i = i + 1
Batch_Pyramidal_Resized_Img.append(np.array(Pyramidal_Resized_Img))
return Batch_Pyramidal_Resized_Img
def img_brighten(img, factor):
"""
Increase the brightness of the input img
:param img: input img
:param factor: the degree of increasing the brightness
:return: output img
"""
img2 = img * factor
img2 = np.clip(img2, 0, 255)
return img2
def img_noise(img, factor=0.02):
"""
Add noise to the img
:param img: input img
:param factor: noise factor
:return: output img
"""
img_noisy = img + factor * np.random.normal(0, 255, img.shape)
img_noisy = np.clip(img_noisy, 0, 255)
return img_noisy
def img2rgb(img):
"""
Convert the image into rgb mode for displaying
:param img: input img
:return: output img
"""
img_rgb = (img / 255)[:, :, [2, 1, 0]]
return img_rgb
def img_reversed(img, mode):
"""
Reverse the img on different directions
:param img: input img
:param mode: reverse mode
:return: new img
"""
x_size, y_size, _ = img.shape
img_reverse = np.zeros([x_size, y_size, 3])
if mode == 1:
i = 0
while i < x_size:
img_reverse[i] = img[-i]
i = i + 1
elif mode == 2:
i = 0
while i < y_size:
img_reverse[:, i] = img[:, -i]
i = i + 1
elif mode == 0:
img_reverse = img
return img_reverse
def Random_Rectangle_Img(img, shape=[256, 256], Is_Centred=False, Is_Random_size=False, Is_square=True,
Maximum_smaller_scale=3):
"""
    Generate a mask at a random position
:param img: input img
:param shape: Mask shape
:param Is_Centred: whether the mask is centred
:param Is_Random_size: whether the mask used a random size
:param Is_square: whether the mask is a square
    :param Maximum_smaller_scale: config for generating random-size masks; defaults to 3
:return: Img in the mask
"""
x, y, _ = img.shape
position = []
if Is_Centred == True:
xc = x // 2
yc = y // 2
position.append(xc)
position.append(yc)
else:
xu_limit = x - shape[0] // 2
xl_limit = shape[0] // 2
yu_limit = y - shape[1] // 2
yl_limit = shape[1] // 2
position.append(np.random.randint(xl_limit, xu_limit))
position.append(np.random.randint(yl_limit, yu_limit))
# Generate masks
if Is_Random_size == True:
mask_shape = []
mask_shape.append(np.random.randint(shape[0] // Maximum_smaller_scale, shape[0]))
if Is_square == True:
mask_shape.append(mask_shape[0])
else:
mask_shape.append(np.random.randint(shape[1] // Maximum_smaller_scale, shape[1]))
else:
mask_shape = shape
extract_img = img[position[0] - mask_shape[0] // 2:position[0] + mask_shape[0] // 2,
position[1] - mask_shape[1] // 2:position[1] + mask_shape[1] // 2, :]
return np.array(extract_img, dtype=int)
def Resize_Img(img, ll_scale_factor=1.5):
"""
Resize the Img
:param img: input Img
    :param ll_scale_factor: lowest scale factor
:return: resized Img
"""
x, y, _ = img.shape
if x >= y:
ul_scale = y / 260
else:
ul_scale = x / 260
ll_scale = ul_scale / ll_scale_factor
scale = np.random.uniform(ll_scale, ul_scale)
new_img_size = [int(x / scale), int(y / scale), _]
return scipy.misc.imresize(img, new_img_size)
def GenerateValidInputImg(img,batch_size=1,lower_limit = 220):
"""
Used to generate valid input for the PEN-Net for testing
:param img: raw data
:param lower_limit: mask judgement factor
:return: valid input for PEN-Net
"""
mask_postions = []
img_size = [256, 256]
r = 0
while r < img_size[0]:
l = 0
while l < img_size[1]:
if (img[r][l][0] >= lower_limit and img[r][l][1] >= lower_limit and img[r][l][2] >= lower_limit):
mask_postions.append([r, l, 0])
l = l + 1
r = r + 1
mask = np.zeros(shape=[img_size[0], img_size[1], 1])
for mask_position in mask_postions:
mask[mask_position[0], mask_position[1], 0] = 1
input_img = np.zeros(shape=[batch_size, img_size[0], img_size[1], 3])
input_masks = np.zeros(shape=[batch_size, img_size[0], img_size[1], 1])
for i in range(batch_size):
input_img[i, :, :, :] = img
input_masks[i, :, :, :] = mask
input_img = np.concatenate([input_img, input_masks], axis=-1)
return input_img / 255, input_masks
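# --- Usage sketch (not part of the original module) ---
# Ties the mask helpers together on placeholder data: build a batch of
# random images and one-channel masks, blank out the masked region, then
# stack image + mask into the 4-channel generator input. Shapes follow the
# 256x256 convention used above.
if __name__ == '__main__':
    demo_imgs = np.random.rand(2, 256, 256, 3)
    demo_masks = np.zeros((2, 256, 256, 1))
    demo_masks[:, 96:160, 96:160, :] = 1  # square hole in the middle
    masked = Masked_Img(demo_imgs, demo_masks)  # hole filled with white
    net_input = Img2Img_with_mask(masked, demo_masks)  # (2, 256, 256, 4)
    print(masked.shape, net_input.shape)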
|
{"hexsha": "d626e7dba9a9cda24f0d9b285b45db4de5dea5e2", "size": 7069, "ext": "py", "lang": "Python", "max_stars_repo_path": "core/utils.py", "max_stars_repo_name": "qxdnfsy/PEN-Net-Keras-Img_Inpainting", "max_stars_repo_head_hexsha": "bc81f696689cb264104be94951af8405fe118450", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-02-22T02:46:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-01T08:44:36.000Z", "max_issues_repo_path": "core/utils.py", "max_issues_repo_name": "qxdnfsy/PEN-Net-Keras-Img_Inpainting", "max_issues_repo_head_hexsha": "bc81f696689cb264104be94951af8405fe118450", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-02-25T08:23:09.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-13T17:43:47.000Z", "max_forks_repo_path": "core/utils.py", "max_forks_repo_name": "quix12345/PEN-Net-Keras-Img_Inpainting", "max_forks_repo_head_hexsha": "bc81f696689cb264104be94951af8405fe118450", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-06-17T09:49:27.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-17T09:49:27.000Z", "avg_line_length": 28.7357723577, "max_line_length": 109, "alphanum_fraction": 0.6089970293, "include": true, "reason": "import numpy,import scipy", "num_tokens": 2035}
|
/* Copyright (c) 2010-2018, Delft University of Technology
 * All rights reserved
*
* This file is part of the Tudat. Redistribution and use in source and
* binary forms, with or without modification, are permitted exclusively
* under the terms of the Modified BSD license. You should have received
 * a copy of the license with this file. If not, please visit:
* http://tudat.tudelft.nl/LICENSE.
*/
#include <map>
#include <functional>
#include <Eigen/Core>
#include "Tudat/Astrodynamics/Propagators/variationalEquations.h"
#include "Tudat/Astrodynamics/Propagators/rotationalMotionQuaternionsStateDerivative.h"
#include "Tudat/Astrodynamics/OrbitDetermination/AccelerationPartials/accelerationPartial.h"
namespace tudat
{
namespace propagators
{
template< typename StateScalarType >
void VariationalEquations::getBodyInitialStatePartialMatrix(
const Eigen::Matrix< StateScalarType, Eigen::Dynamic, Eigen::Dynamic >& stateTransitionAndSensitivityMatrices,
Eigen::Block< Eigen::Matrix< StateScalarType, Eigen::Dynamic, Eigen::Dynamic > > currentMatrixDerivative )
{
setBodyStatePartialMatrix( );
// Add partials of body positions and velocities.
currentMatrixDerivative.block( 0, 0, totalDynamicalStateSize_, numberOfParameterValues_ ) =
( variationalMatrix_.template cast< StateScalarType >( ) * stateTransitionAndSensitivityMatrices );
}
//! Calculates matrix containing partial derivatives of state derivatives w.r.t. body state.
void VariationalEquations::setBodyStatePartialMatrix( )
{
// Initialize partial matrix
variationalMatrix_.setZero( );
if( dynamicalStatesToEstimate_.count( propagators::translational_state ) > 0 )
{
int startIndex = stateTypeStartIndices_.at( propagators::translational_state );
for( unsigned int i = 0; i < dynamicalStatesToEstimate_.at( propagators::translational_state ).size( ); i++ )
{
variationalMatrix_.block( startIndex + i * 6, startIndex + i * 6 + 3, 3, 3 ).setIdentity( );
}
}
if( dynamicalStatesToEstimate_.count( propagators::rotational_state ) > 0 )
{
Eigen::VectorXd rotationalStates = currentStatesPerTypeInConventionalRepresentation_.at(
propagators::rotational_state );
int startIndex = stateTypeStartIndices_.at( propagators::rotational_state );
for( unsigned int i = 0; i < dynamicalStatesToEstimate_.at( propagators::rotational_state ).size( ); i++ )
{
variationalMatrix_.block( startIndex + i * 7, startIndex + i * 7 , 4, 4 ) =
getQuaterionToQuaternionRateMatrix( rotationalStates.segment( 7 * i + 4, 3 ) );
variationalMatrix_.block( startIndex + i * 7, startIndex + i * 7 + 4, 4, 3 ) =
getAngularVelocityToQuaternionRateMatrix( rotationalStates.segment( 7 * i, 4 ) );
}
}
// Iterate over all bodies undergoing accelerations for which initial condition is to be estimated.
for( std::map< IntegratedStateType, std::vector< std::multimap< std::pair< int, int >,
std::function< void( Eigen::Block< Eigen::MatrixXd > ) > > > >::iterator
typeIterator = statePartialList_.begin( ); typeIterator != statePartialList_.end( ); typeIterator++ )
{
int startIndex = stateTypeStartIndices_.at( typeIterator->first );
int currentStateSize = getSingleIntegrationSize( typeIterator->first );
int entriesToSkipPerEntry = currentStateSize - getGeneralizedAccelerationSize( typeIterator->first );
for( unsigned int i = 0; i < typeIterator->second.size( ); i++ )
{
// Iterate over all bodies exerting an acceleration on this body.
for( statePartialIterator_ = typeIterator->second.at( i ).begin( );
statePartialIterator_ != typeIterator->second.at( i ).end( );
statePartialIterator_++ )
{
statePartialIterator_->second(
variationalMatrix_.block(
startIndex + entriesToSkipPerEntry + i* currentStateSize, statePartialIterator_->first.first,
currentStateSize - entriesToSkipPerEntry, statePartialIterator_->first.second ) );
}
}
}
for( unsigned int i = 0; i < statePartialAdditionIndices_.size( ); i++ )
{
variationalMatrix_.block( 0, statePartialAdditionIndices_.at( i ).second, totalDynamicalStateSize_, 3 ) +=
variationalMatrix_.block( 0, statePartialAdditionIndices_.at( i ).first, totalDynamicalStateSize_, 3 );
}
for( unsigned int i = 0; i < inertiaTensorsForMultiplication_.size( ); i++ )
{
variationalMatrix_.block( inertiaTensorsForMultiplication_.at( i ).first, 0, 3, totalDynamicalStateSize_ ) =
( inertiaTensorsForMultiplication_.at( i ).second( ).inverse( ) ) *
variationalMatrix_.block( inertiaTensorsForMultiplication_.at( i ).first, 0, 3, totalDynamicalStateSize_ ).eval( );
}
}
//! Function to clear reference/cached values of state derivative partials.
void VariationalEquations::clearPartials( )
{
for( stateDerivativeTypeIterator_ = stateDerivativePartialList_.begin( );
stateDerivativeTypeIterator_ != stateDerivativePartialList_.end( );
stateDerivativeTypeIterator_++ )
{
for( unsigned int i = 0; i < stateDerivativeTypeIterator_->second.size( ); i++ )
{
for( unsigned int j = 0; j < stateDerivativeTypeIterator_->second.at( i ).size( ); j++ )
{
stateDerivativeTypeIterator_->second.at( i ).at( j )->resetTime( TUDAT_NAN );
}
}
}
}
//! Function (called by constructor) to set up the statePartialList_ member from the state derivative partials
void VariationalEquations::setStatePartialFunctionList( )
{
std::pair< std::function< void( Eigen::Block< Eigen::MatrixXd > ) >, int > currentDerivativeFunction;
// Iterate over all state types
for( std::map< propagators::IntegratedStateType,
orbit_determination::StateDerivativePartialsMap >::iterator
stateDerivativeTypeIterator_ = stateDerivativePartialList_.begin( );
stateDerivativeTypeIterator_ != stateDerivativePartialList_.end( );
stateDerivativeTypeIterator_++ )
{
// Iterate over all bodies undergoing 'accelerations' for which initial state is to be estimated.
for( unsigned int i = 0; i < stateDerivativeTypeIterator_->second.size( ); i++ )
{
std::multimap< std::pair< int, int >, std::function< void( Eigen::Block< Eigen::MatrixXd > ) > >
currentBodyPartialList;
// Iterate over all 'accelerations' from single body on other single body
for( unsigned int j = 0; j < stateDerivativeTypeIterator_->second.at( i ).size( ); j++ )
{
for( std::map< propagators::IntegratedStateType,
std::vector< std::pair< std::string, std::string > > >::iterator
estimatedStateIterator = dynamicalStatesToEstimate_.begin( );
estimatedStateIterator != dynamicalStatesToEstimate_.end( );
estimatedStateIterator++ )
{
// Iterate over all bodies to see if body exerting acceleration is also to be estimated (cross-terms)
for( unsigned int k = 0; k < estimatedStateIterator->second.size( ); k++ )
{
currentDerivativeFunction = stateDerivativeTypeIterator_->second.at( i ).at( j )->
getDerivativeFunctionWrtStateOfIntegratedBody(
estimatedStateIterator->second.at( k ), estimatedStateIterator->first );
// If function is not-empty: add to list.
if( currentDerivativeFunction.second != 0 )
{
currentBodyPartialList.insert(
std::make_pair(
std::make_pair( k * getSingleIntegrationSize( estimatedStateIterator->first ) +
stateTypeStartIndices_.at( estimatedStateIterator->first ),
getSingleIntegrationSize( estimatedStateIterator->first ) ),
currentDerivativeFunction.first ) );
}
}
}
}
statePartialList_[ stateDerivativeTypeIterator_->first ].push_back( currentBodyPartialList );
}
}
}
template void VariationalEquations::getBodyInitialStatePartialMatrix< double >(
const Eigen::Matrix< double, Eigen::Dynamic, Eigen::Dynamic >& stateTransitionAndSensitivityMatrices,
Eigen::Block< Eigen::Matrix< double, Eigen::Dynamic, Eigen::Dynamic > > currentMatrixDerivative );
//#if( BUILD_EXTENDED_PRECISION_PROPAGATION_TOOLS )
template void VariationalEquations::getBodyInitialStatePartialMatrix< long double >(
const Eigen::Matrix< long double, Eigen::Dynamic, Eigen::Dynamic >& stateTransitionAndSensitivityMatrices,
Eigen::Block< Eigen::Matrix< long double, Eigen::Dynamic, Eigen::Dynamic > > currentMatrixDerivative );
//#endif
} // namespace propagators
} // namespace tudat
|
{"hexsha": "d549a2d3574ceb9dd9627d424edfccb462836f4e", "size": 9502, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "Tudat/Astrodynamics/Propagators/variationalEquations.cpp", "max_stars_repo_name": "J-Westin/tudat", "max_stars_repo_head_hexsha": "82ebe9e6e2dd51d0688b77960e62e980e6b8bcb8", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Tudat/Astrodynamics/Propagators/variationalEquations.cpp", "max_issues_repo_name": "J-Westin/tudat", "max_issues_repo_head_hexsha": "82ebe9e6e2dd51d0688b77960e62e980e6b8bcb8", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Tudat/Astrodynamics/Propagators/variationalEquations.cpp", "max_forks_repo_name": "J-Westin/tudat", "max_forks_repo_head_hexsha": "82ebe9e6e2dd51d0688b77960e62e980e6b8bcb8", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 48.4795918367, "max_line_length": 130, "alphanum_fraction": 0.6429172806, "num_tokens": 2134}
|
# taion.py
# A simple Python script to extract the body temperature
# shown on a thermometer's LCD.
# This code is based on code in https://github.com/yan9yu/sdr
#
# Copyright (c) 2020, Masami Yamakawa (MONOxIT)
# This software is released under the MIT License.
# http://opensource.org/licenses/mit-license.php
# Import the libraries we use
import cv2
import os
import numpy as np
from PIL import Image, ExifTags
import csv
# Define constants
IMAGE_FILE_PATH = 'data'
DIGIT_SIZE = 16
# Load the 7-segment font dataset
samples = np.loadtxt('dataset/generalsamples.data', np.float32)
responses = np.loadtxt('dataset/generalresponses.data', np.float32)
responses = responses.reshape((responses.size, 1))
# Lazy learning with k-nearest neighbors
model = cv2.ml.KNearest_create()
model.train(samples, cv2.ml.ROW_SAMPLE, responses)
# Build the list of input image files
image_paths = []
for path,dirs,files in os.walk(IMAGE_FILE_PATH):
if len(files) > 0:
for file in files:
image_paths.append(os.path.join(path,file))
print(image_paths)
# Prepare a list for the body-temperature CSV output
csv_list = []
print('Starting the main loop')
for (i, image_path) in enumerate(image_paths):
    # Use the folder name in the path as the person's name label
name = image_path.split(os.path.sep)[-2]
    print('Name: ', end='');print(name)
    print('Loading image: ', end='');print(image_path)
    # Get the capture date and time from the image's EXIF data
img_exif = Image.open(image_path)
exif = { ExifTags.TAGS[k]: v for k, v in img_exif._getexif().items()
if k in ExifTags.TAGS }
print(exif['DateTimeOriginal'])
date_time = exif['DateTimeOriginal'].split()
date_time[0]=date_time[0].replace(':','/')
    # Read the image file
image = cv2.imread(image_path)
    # Shrink the image to a width of 500 pixels, keeping the aspect ratio
ratio = 500 / image.shape[1]
resized_image = cv2.resize(image, dsize=None, fx=ratio, fy=ratio)
image_for_checking = resized_image.copy()
    # Convert to grayscale
gray = cv2.cvtColor(resized_image, cv2.COLOR_BGR2GRAY)
    # Equalize the histogram
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
gray = clahe.apply(gray)
    # Blur to smooth out noise
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    # Keep only the edges
edged = cv2.Canny(blurred, 50, 200, 255)
    # Detect contours
contours, hierarchy = cv2.findContours(edged, cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)
min_w = 500
    # Loop over the detected contours to find the LCD area
for cnt in contours:
        # Get the perimeter of the contour
arc_length = cv2.arcLength(cnt, True)
        # Approximate the contour with a 0.02 ratio
approx = cv2.approxPolyDP(cnt, 0.02*arc_length, True)
        # If the approximated shape has 4 vertices (a quadrilateral)
if len(approx) == 4:
            # Get the position (x, y), width and height of the contour's bounding box
#[lcd_x, lcd_y, lcd_w, lcd_h] = cv2.boundingRect(cnt)
box = cv2.boundingRect(cnt)
[lcd_x, lcd_y, lcd_w, lcd_h] = box
            # If the aspect ratio looks like the LCD (TODO: and the area exceeds a minimum size)
if 2 <= lcd_w / lcd_h <= 3:
                # Draw a red rectangle (an LCD candidate) on the check image
cv2.rectangle(image_for_checking, (lcd_x, lcd_y),
(lcd_x + lcd_w, lcd_y + lcd_h),
(0, 0, 255), 1)
                # Remember the smallest candidate
if lcd_w < min_w:
min_w = lcd_w
min_box = box
    # Get the coordinates and size of the LCD box
[lcd_x, lcd_y, lcd_w, lcd_h] = min_box
    # Draw a frame around the LCD (for visual checking)
cv2.rectangle(image_for_checking, (lcd_x, lcd_y),
(lcd_x + lcd_w, lcd_y + lcd_h), (0, 0, 255), 2)
    # Crop the image to the approximated box and store it in lcd
lcd = resized_image[lcd_y:lcd_y+lcd_h, lcd_x:lcd_x+lcd_w]
    # Get the size of lcd
height, width, channels = lcd.shape[:3]
    # Convert lcd to grayscale
gray = cv2.cvtColor(lcd, cv2.COLOR_BGR2GRAY)
    # Normalize the brightness to mean 64 and standard deviation 16
gray = (gray - np.mean(gray))/np.std(gray)*16+64
gray = np.clip(gray, 0, 255).astype(np.uint8)
    # Denoise to smooth out noise
blurred = cv2.fastNlMeansDenoising(gray,h=8)
print(np.mean(gray))
    # Binarize the image (black and white only) and invert it (the digits become white)
thresh = cv2.adaptiveThreshold(blurred, 255,
cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY_INV, 21, 2)
    # Crop the digit area inside the LCD and paste it onto a black frame to ease contour detection.
# ToDo: Make the margins dynamic.
framed_image = np.full((height,width),0,np.uint8)
framed_image[8:height-8, 9:width-9] = thresh[8:height-8, 9:width-9]
    # Dilate the white digits so the segments of each 7-segment digit join up
kernel = np.ones((3,3),np.uint8)
dilation = cv2.dilate(framed_image,kernel,iterations = 1)
    # Detect contours
contours, hierarchy = cv2.findContours(dilation, cv2.RETR_LIST,
cv2.CHAIN_APPROX_SIMPLE)
    # Prepare a dictionary for the temperature digits
ondo_dict = {}
    # Loop over the detected contours, finding and classifying 7-segment digits
for cnt in contours:
        # Get the position and size of the contour's bounding box
[x, y, w, h] = cv2.boundingRect(cnt)
cv2.rectangle(image_for_checking, (lcd_x+x, lcd_y+y),
(lcd_x+x+w, lcd_y+y+h), (0, 255, 0), 1)
        # If the width and height look like a digit shown on the LCD
if w > width / 15 and w < width /4:
if h > height / 2:
                # Draw a rectangle
cv2.rectangle(image_for_checking, (lcd_x+x, lcd_y+y),
(lcd_x+x+w, lcd_y+y+h), (0, 255, 0), 2)
                # Crop the digit area
roi = thresh[y:y + h, x:x + w]
                # Resize to match the training data and flatten to a 1-D array
roismall = cv2.resize(roi, (DIGIT_SIZE, DIGIT_SIZE))
roismall = roismall.reshape((1, DIGIT_SIZE*DIGIT_SIZE))
roismall = np.float32(roismall)
                # Classify with k-nearest neighbors, k=5
retval, results, neigh_resp, dists = model.findNearest(roismall, k=5)
                # Add the classified digit to the dictionary, keyed by its x position
ondo_dict[x] = results[0][0]
string = str(int(ondo_dict[x]))
                # Draw the classification result
cv2.putText(image_for_checking, string,
(lcd_x+x, lcd_y-2),
cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0),2)
    # Sort the dictionary by key (ascending x, i.e. most significant digit first)
sorted_ondo = sorted(ondo_dict.items())
print(sorted_ondo)
    # Multiply the digits by place-value weights to get the body temperature
factor = 10.0
ondo = 0.0
for key, value in sorted_ondo:
ondo += value * factor
factor = factor / 10.0
print(ondo)
    # Build a CSV-style row: name, date, time, temperature
line = [name]
line += date_time
line += [ondo]
    # Append the row to the table
csv_list.append(line)
print(csv_list)
    # Show the image for visual checking
cv2.imshow('detected', image_for_checking)
cv2.waitKey(0)
# Write to the CSV file
with open('out.csv','w') as f:
writer = csv.writer(f)
writer.writerows(csv_list)
print('Done')
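# --- Worked example (a sketch, not in the original script) ---
# The weighting loop above turns the x-sorted digits into a temperature by
# assigning place values 10, 1, 0.1, ... from left to right.
def digits_to_temperature(digits):
    """E.g. [3, 6, 5] -> 3*10 + 6*1 + 5*0.1 = 36.5"""
    factor = 10.0
    ondo = 0.0
    for digit in digits:
        ondo += digit * factor
        factor = factor / 10.0
    return ondo
assert digits_to_temperature([3, 6, 5]) == 36.5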
|
{"hexsha": "749434429096eba1aaaa47d8577e2fe37aad38d9", "size": 6746, "ext": "py", "lang": "Python", "max_stars_repo_path": "taion.py", "max_stars_repo_name": "monoxit/Thermometer-OCR", "max_stars_repo_head_hexsha": "b92e1b590c86bd66003447646fc03cff95eba6bc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-07T00:18:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-07T00:18:54.000Z", "max_issues_repo_path": "taion.py", "max_issues_repo_name": "monoxit/Thermometer-OCR", "max_issues_repo_head_hexsha": "b92e1b590c86bd66003447646fc03cff95eba6bc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "taion.py", "max_forks_repo_name": "monoxit/Thermometer-OCR", "max_forks_repo_head_hexsha": "b92e1b590c86bd66003447646fc03cff95eba6bc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8495575221, "max_line_length": 85, "alphanum_fraction": 0.5775274237, "include": true, "reason": "import numpy", "num_tokens": 2657}
|
import os
import numpy as np
import pandas as pd
import cv2
import matplotlib.pyplot as plt
from skimage.morphology import skeletonize
from skimage import morphology
from shapely.geometry import Polygon
from skimage import draw
import matplotlib as mpl
from matplotlib.colors import colorConverter
direc = '/Users/bendevlin/Desktop/images'
files = os.listdir(direc)
rads = [10, 20, 35, 50, 75, 100, 115, 130, 150, 175]
df = pd.DataFrame(index = rads)
## cleaning function for images that already have the brightness bumped (usually hit the auto button 3 times or so for 8-bit images)
for file in files:
img = cv2.imread(direc +'/' + file)
## https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_photo/py_non_local_means/py_non_local_means.html
#dst = cv2.fastNlMeansDenoisingColored(img,None,10,10,7,21)
## detect where most of the background is coming from so you can get that out
#num = np.histogram(dst.flatten(), bins = 50)
## gets the value where most background is, so that we can subtract it away
#bg_val = num[1][np.argmax(num[0]) + 6]
#new = np.where(dst < bg_val, 0, dst)
skeleton = skeletonize(img)
processed = morphology.remove_small_objects(skeleton.astype(bool), min_size=50, connectivity=25).astype(int)
processed = np.where(processed > 0, 255, 0)
print(np.shape(processed))
new_arr = np.zeros(shape = (750, 750))
for i in range(len(processed)):
for j in range(len(processed)):
new_arr[i][j] = processed[i][j][1]
# fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize = (15, 18))
# ax1.imshow(new)
# ax1.set_title('Smoothed Image')
# ax2.imshow(skeleton)
# ax2.set_title('Skeleton Image')
# ax3.imshow(new_arr)
# ax3.set_title('Cleaned Final Skeleton ' + file)
# plt.show()
plt.imshow(img)
center = plt.ginput(1)
plt.close()
circles = ['1'] * len(rads)
rr = []
cc = []
w = -1
for rad in rads:
w += 1
arr = np.zeros((400, 400))
rr, cc = draw.circle_perimeter(int(center[0][1]), int(center[0][0]), radius=rad, shape=arr.shape)
arr[rr, cc] = 255
circles[w] = arr
print(circles)
    def calc_intersection(arr):
        # Count pixels where the circle perimeter overlaps the skeleton.
        intersects = []
        for i in range(len(arr)):
            for j in range(len(arr[i])):
                if arr[i][j] == 255.0 and arr[i][j] == new_arr[i][j]:
                    intersects = np.append(intersects, [i, j])
        print(len(intersects) / 2)
        return len(intersects) / 2, intersects
z = ['l'] * len(rads)
intersections = []
it = -1
    for arr in circles:
        it += 1
        count, pts = calc_intersection(arr)
        z[it] = count
        intersections = np.append(intersections, pts)
x, y = intersections.reshape( int(len(intersections)/2) , 2).T
color1 = colorConverter.to_rgba('white')
color2 = colorConverter.to_rgba('black')
cmap2 = mpl.colors.LinearSegmentedColormap.from_list('my_cmap2',[color1,color2],256)
cmap2._init() # create the _lut array, with rgba values
alphas = np.linspace(0, 0.8, cmap2.N+3)
cmap2._lut[:,-1] = alphas
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize = (15, 18))
plt.figure(figsize = (10, 8))
table_vals = list(zip(rads, z))
col_labels = ['dist_from_soma', '# intersections']
row_labels = rads
plt.table(cellText=table_vals, colWidths = [0.1]*3, rowLabels=row_labels, colLabels=col_labels, cellLoc = 'center', loc = 14)
plt.imshow(new_arr, origin = 'lower')
    for circle in circles:
        plt.imshow(circle, interpolation='nearest', cmap=cmap2, origin='lower')
plt.scatter(center[0][0], center[0][1])
plt.scatter(y, x)
plt.title('Sholl Analysis ' + file)
plt.savefig('output_image_' + file + '.png')
plt.show()
df[file] = z
df.T.to_csv('output.csv')
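# --- Vectorized alternative (a sketch, not in the original script) ---
# calc_intersection walks every pixel pair in Python; the same count can be
# computed in one numpy expression by AND-ing the two binary masks over
# their common region.
def calc_intersection_fast(circle_arr, skeleton_arr):
    h = min(circle_arr.shape[0], skeleton_arr.shape[0])
    w = min(circle_arr.shape[1], skeleton_arr.shape[1])
    both = (circle_arr[:h, :w] == 255.0) & (skeleton_arr[:h, :w] == 255.0)
    return int(both.sum())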
|
{"hexsha": "3181f9411d811fdf565fbc3991deba99dd87bd89", "size": 5107, "ext": "py", "lang": "Python", "max_stars_repo_path": "archived_NOT_working/main_2 copy.py", "max_stars_repo_name": "bendevlin18/sholl-analysis-python", "max_stars_repo_head_hexsha": "edc69a649b9fb160fd081553f109146cd6da5bca", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "archived_NOT_working/main_2 copy.py", "max_issues_repo_name": "bendevlin18/sholl-analysis-python", "max_issues_repo_head_hexsha": "edc69a649b9fb160fd081553f109146cd6da5bca", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "archived_NOT_working/main_2 copy.py", "max_forks_repo_name": "bendevlin18/sholl-analysis-python", "max_forks_repo_head_hexsha": "edc69a649b9fb160fd081553f109146cd6da5bca", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.5986842105, "max_line_length": 134, "alphanum_fraction": 0.6156256119, "include": true, "reason": "import numpy", "num_tokens": 1432}
|
program test_qhashtbl
use qhashtbl_m
use iso_c_binding, only: c_ptr, c_loc, c_f_pointer
implicit none
type value_t
integer :: nv
integer, allocatable :: val(:)
end type
type(qhashtbl_t) :: qh
type(qhashtbl_obj_t) :: hobj
type(value_t), pointer :: pval, pback
integer :: siz, i, j
logical :: found
character(len=6) :: names = "abcdef"
type(c_ptr) :: cp
! Storage size in bytes
siz = storage_size(cp) / 8
! Initialize the container
    ! The 'range' value defines the size of the table used internally.
    ! A value between (total_number_of_keys / 3) and (total_number_of_keys * 2) is recommended.
call qh%new(range=10, size_data=siz)
! Put some values
do i = 1, 6
allocate(pval)
allocate(pval%val(i))
pval%nv = i
pval%val(:) = [(j,j=1,i)]
cp = c_loc(pval)
call qh%put(names(i:i), cp)
end do
! Take some values back
call qh%get(names(2:2), cp, found)
if (found) then
call c_f_pointer(cp, pback)
print *, "'", names(2:2), "' has values nv=", pback%nv, " val=", pback%val
end if
call qh%get(names(5:5), cp, found)
if (found) then
call c_f_pointer(cp, pback)
print *, "'", names(5:5), "' has values nv=", pback%nv, " val=", pback%val
end if
! We allocated memory for pointers.
! Now we have to free this memory.
call hobj%init()
do while(qh%getnext(hobj))
call hobj%getdata(cp)
call c_f_pointer(cp, pback)
print *, "Value will be deallocated: nv=", pback%nv, " val=", pback%val
deallocate(pback)
end do
! Now C pointers stored in the container point to the freed locations.
end program
|
{"hexsha": "e1e0d9604fbe144e1e425d412bf25c5811a053c4", "size": 1759, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "docs/tutorial_qhashtbl.f90", "max_stars_repo_name": "darmar-lt/qcontainers", "max_stars_repo_head_hexsha": "bb1423dded02588898530c3ac7aa709e3f4eb5c3", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 28, "max_stars_repo_stars_event_min_datetime": "2017-03-05T16:48:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-05T07:25:12.000Z", "max_issues_repo_path": "docs/tutorial_qhashtbl.f90", "max_issues_repo_name": "darmar-lt/qcontainers", "max_issues_repo_head_hexsha": "bb1423dded02588898530c3ac7aa709e3f4eb5c3", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2017-03-16T18:58:55.000Z", "max_issues_repo_issues_event_max_datetime": "2018-03-06T09:37:16.000Z", "max_forks_repo_path": "docs/tutorial_qhashtbl.f90", "max_forks_repo_name": "darmar-lt/qcontainers", "max_forks_repo_head_hexsha": "bb1423dded02588898530c3ac7aa709e3f4eb5c3", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2017-03-16T11:12:52.000Z", "max_forks_repo_forks_event_max_datetime": "2018-02-25T15:26:28.000Z", "avg_line_length": 25.8676470588, "max_line_length": 102, "alphanum_fraction": 0.5929505401, "num_tokens": 538}
|
# -*-coding:utf-8-*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
import numpy as np
class DataSetContext(object):
def __init__(self,
datas,
labels,
fake_data=False,
one_hot=True,
dtype=dtypes.float32,
reshape=False,
seed=None):
seed1, seed2 = random_seed.get_seed(seed)
np.random.seed(seed1 if seed is None else seed2)
dtype = dtypes.as_dtype(dtype).base_dtype
if dtype not in (dtypes.uint8, dtypes.float32):
raise TypeError(
'Invalid image dtype %r, expected uint8 or float32' % dtype)
if fake_data:
self._num_examples = 10000
self.one_hot = one_hot
else:
assert datas.shape[0] == labels.shape[0], (
'datas.shape: %s labels.shape: %s' % (datas.shape, labels.shape))
self._num_examples = datas.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert datas.shape[3] == 1
datas = datas.reshape(datas.shape[0],
datas.shape[1] * datas.shape[2])
if dtype == dtypes.float32:
datas = datas.astype(numpy.float32)
self._datas = datas
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
@property
def datas(self):
return self._datas
@property
def labels(self):
return self._labels.reshape([-1, 1])
@property
def num_examples(self):
return self._num_examples
@property
def epochs_completed(self):
return self._epochs_completed
def next_batch(self, batch_size, fake_data=False, shuffle=True):
start = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
perm0 = numpy.arange(self._num_examples)
numpy.random.shuffle(perm0)
self._datas = self.datas[perm0]
self._labels = self.labels[perm0]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
rest_num_examples = self._num_examples - start
datas_rest_part = self._datas[start:self._num_examples]
labels_rest_part = self._labels[start:self._num_examples]
# Shuffle the data
if shuffle:
perm = numpy.arange(self._num_examples)
numpy.random.shuffle(perm)
self._datas = self.datas[perm]
self._labels = self.labels[perm]
# Start next epoch
start = 0
self._index_in_epoch = batch_size - rest_num_examples
end = self._index_in_epoch
datas_new_part = self._datas[start:end]
labels_new_part = self._labels[start:end]
return numpy.concatenate(
(datas_rest_part, datas_new_part), axis=0), numpy.concatenate(
(labels_rest_part, labels_new_part), axis=0).reshape([-1, 1])
else:
self._index_in_epoch += batch_size
end = self._index_in_epoch
return self._datas[start:end], self._labels[start:end].reshape([-1, 1])
def read_data_sets(
train_dir,
test_factor=0.1,
fake_data=False,
one_hot=True,
dtype=dtypes.float32,
reshape=False,
validation_size=0,
seed=None):
if fake_data:
def fake():
return DataSetContext([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)
train = fake()
validation = fake()
test = fake()
return base.Datasets(train=train, validation=validation, test=test)
n_year = 52
n_class = 1
train_data = np.empty(shape=[0, n_year * 50], dtype=np.float64)
train_labels = np.empty(shape=[0, n_class], dtype=np.float64)
with open(train_dir, encoding="utf-8") as f:
cnt = 0
for line in f:
content = line.strip().split('\t')
cnt += 1
if cnt == 1:
tmp = np.zeros((1, n_class), dtype=np.float64)
tmp[0][0] = int(content[0])
train_labels = np.append(train_labels, tmp, axis=0)
elif cnt == 2:
vec = np.zeros((1, n_year * 50), dtype=np.float64)
for i in range(n_year * 50):
vec[0][i] = content[i]
train_data = np.append(train_data, vec, axis=0)
cnt = 0
options = dict(dtype=dtype, reshape=reshape, seed=seed)
train = DataSetContext(train_data, train_labels, **options)
print(train_data.shape)
print(train_labels.shape)
return base.Datasets(train=train, validation=None, test=None)
if __name__ == "__main__":
dataset = read_data_sets("../dataset/author_context_feature.txt")
a, b = dataset.train.next_batch(64)
print(a.shape)
print(b.shape)
print(dataset)
|
{"hexsha": "1d45112d70de6bca47d4f8f2f28c4d895f1dcbb0", "size": 5572, "ext": "py", "lang": "Python", "max_stars_repo_path": "TFDataset_context.py", "max_stars_repo_name": "XURIGHT/Advisor-Advisee_SAE", "max_stars_repo_head_hexsha": "2bb0a221bea05af0ddc4ebd87e5ec86a8d14d12f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "TFDataset_context.py", "max_issues_repo_name": "XURIGHT/Advisor-Advisee_SAE", "max_issues_repo_head_hexsha": "2bb0a221bea05af0ddc4ebd87e5ec86a8d14d12f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-09-15T12:15:32.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-15T12:15:32.000Z", "max_forks_repo_path": "TFDataset_context.py", "max_forks_repo_name": "XURIGHT/Advisor-Advisee_SAE", "max_forks_repo_head_hexsha": "2bb0a221bea05af0ddc4ebd87e5ec86a8d14d12f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.7696969697, "max_line_length": 98, "alphanum_fraction": 0.5804020101, "include": true, "reason": "import numpy", "num_tokens": 1256}
|
# Usage: gfa_parser.py assembly_graph_with_scaffolds.gfa graph_pack.grseq kmer_size outdir
import sys, os, subprocess
import pandas as pd
import networkx as nx
from Bio.Seq import reverse_complement
import graphs
def get_one_type_gfa(gfa, type, outdir):
one_type_gfa = os.path.join(outdir, '{}.gfa'.format(type))
os.system('grep \'^{}\' {} > {}'.format(type, gfa, one_type_gfa))
return one_type_gfa
# 'RecordType', 'PathName', 'SegmentNames', 'Overlaps'
def one_type_gfa_to_df(one_type_gfa):
p_df = pd.read_csv(one_type_gfa,
sep="\t", header=None, usecols=[1, 2],
names=['PathName', 'SegmentNames'])
return p_df
def line_to_node(line):
fields = line.strip().split()
name = fields[1]
attr = {'seq': fields[2]}
if 'KC:i:' in line:
kmer_count = int(fields[3][5:])
attr['KC'] = kmer_count
return name, attr
# L 934049 - 36137 + 49M
def line_to_edge(line, conj_dict):
fields = line.strip().split()
u = fields[1]
if fields[2] == '-':
u = conj_dict[u]
v = fields[3]
if fields[4] == '-':
v = conj_dict[v]
attr = {'cigar': fields[5]}
return u, v, attr
def gfa_to_G(gfa, conj_dict, kmer_size):
# G = nx.DiGraph(k=kmer_size, name='gfa')
G = nx.OrderedGraph(k=kmer_size, name='gfa')
with open(gfa, 'r') as fin:
for line in fin:
record_type = line[0]
if record_type in ['#', 'H', 'C', 'P']:
continue
elif record_type == 'S':
name, attr = line_to_node(line)
G.add_node(name,
seq=attr['seq'],
cov=attr['KC'] * 1.0 / len(attr['seq']),
len=len(attr['seq']),
A=attr['seq'].count('A') * 1.0 / len(attr['seq']),
C=attr['seq'].count('C') * 1.0 / len(attr['seq']),
G=attr['seq'].count('G') * 1.0 / len(attr['seq']),
T=attr['seq'].count('T') * 1.0 / len(attr['seq']))
G.add_node(conj_dict[name],
seq=reverse_complement(attr['seq']),
cov=attr['KC'] * 1.0 / len(attr['seq']),
len=len(attr['seq']),
A=attr['seq'].count('T') * 1.0 / len(attr['seq']),
C=attr['seq'].count('G') * 1.0 / len(attr['seq']),
G=attr['seq'].count('C') * 1.0 / len(attr['seq']),
T=attr['seq'].count('A') * 1.0 / len(attr['seq']))
elif record_type == 'L':
# cov = nx.get_node_attributes(G, 'cov')
u, v, attr = line_to_edge(line, conj_dict)
G.add_edge(u, v, **attr)
nx.set_edge_attributes(G, {(u, v): {'reads_and_db': 0.05 + 0.05}})
G.add_edge(conj_dict[v], conj_dict[u], **attr)
nx.set_edge_attributes(G, {(conj_dict[v], conj_dict[u]): {'reads_and_db': 0.05 + 0.05}})
# graphs.write_G_statistics(G)
return G
def main():
# SPAdes output
gfa = sys.argv[1]
# SPAligner output
grseq = sys.argv[2]
# kmer size for graph construction
k = int(sys.argv[3])
outdir = sys.argv[4]
# Get graph from gfa file
command = 'python show_saves.py {} > {}'.format(grseq[:-3] + 'p',
os.path.join(outdir, 'graph_pack.readable.grp'))
subprocess.run(command, shell=True)
conj_dict = graphs.get_conj_dict(os.path.join(outdir, 'graph_pack.readable.grp'))
G = gfa_to_G(gfa, conj_dict, k)
# Get Adjacency matrix
# A = graphs.get_A(G)
# Get feature matrix
# features_tsv = os.path.join(outdir, 'features.tsv')
# X = graphs.get_X(G.nodes, features_tsv)
if __name__ == '__main__':
main()
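# --- Parsing sketch (hypothetical GFA records, not from a real assembly) ---
# line_to_node / line_to_edge each consume a single GFA line; the helper
# below shows the expected shapes of their return values.
def _demo_parse():
    name, attr = line_to_node('S\t934049\tACGT\tKC:i:100\n')
    # -> ('934049', {'seq': 'ACGT', 'KC': 100})
    conj = {'934049': "934049'", '36137': "36137'"}
    u, v, eattr = line_to_edge('L\t934049\t-\t36137\t+\t49M\n', conj)
    # -> ("934049'", '36137', {'cigar': '49M'})
    return name, attr, u, v, eattr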
|
{"hexsha": "ef80d67144e5565829f051ed1cb09ca2d3735644", "size": 3912, "ext": "py", "lang": "Python", "max_stars_repo_path": "bin/gfa_parser.py", "max_stars_repo_name": "letovesnoi/clusterassembly", "max_stars_repo_head_hexsha": "9edcab8afe5601195a40e497d06200a38daf0325", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bin/gfa_parser.py", "max_issues_repo_name": "letovesnoi/clusterassembly", "max_issues_repo_head_hexsha": "9edcab8afe5601195a40e497d06200a38daf0325", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bin/gfa_parser.py", "max_forks_repo_name": "letovesnoi/clusterassembly", "max_forks_repo_head_hexsha": "9edcab8afe5601195a40e497d06200a38daf0325", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.6194690265, "max_line_length": 104, "alphanum_fraction": 0.5086912065, "include": true, "reason": "import networkx", "num_tokens": 1074}
|
open import Agda.Primitive using (_⊔_)
import Categories.Category as Category
import Categories.Category.Cartesian as Cartesian
open import MultiSorted.AlgebraicTheory
-- Finite products indexed by contexts
module MultiSorted.Product
{o ℓ e}
(𝒞 : Category.Category o ℓ e)
{𝓈 ℴ}
{Σ : Signature {𝓈} {ℴ}}
(interp-sort : Signature.sort Σ → Category.Category.Obj 𝒞) where
open Signature Σ
open Category.Category 𝒞
open HomReasoning
interp-sort-var : {Γ : Context} → var Γ → Obj
interp-sort-var {Γ} x = interp-sort (sort-of Γ x)
record Producted : Set (o ⊔ ℓ ⊔ e ⊔ 𝓈) where
field
prod : Context → Obj
π : ∀ {Γ} (x : var Γ) → prod Γ ⇒ interp-sort-var x
tuple : ∀ Γ {B} → ((x : var Γ) → B ⇒ interp-sort-var x) → B ⇒ prod Γ
project : ∀ {Γ} {B} {x : var Γ} {fs : (y : var Γ) → B ⇒ interp-sort-var y} → π x ∘ tuple Γ fs ≈ fs x
unique : ∀ {Γ} {B} {fs : (x : var Γ) → B ⇒ interp-sort-var x} {g : B ⇒ prod Γ} → (∀ i → π i ∘ g ≈ fs i) → tuple Γ fs ≈ g
η : ∀ {Γ} → tuple Γ π ≈ id
η = unique (λ i → identityʳ)
! : ∀ {B : Obj} → B ⇒ prod ctx-empty
! {B} = tuple ctx-empty ctx-empty-absurd
!-unique : ∀ {B : Obj} {f : B ⇒ prod ctx-empty} → ! ≈ f
!-unique {f = f} = unique ctx-empty-absurd
!-unique₂ : ∀ {B : Obj} {f g : B ⇒ prod ctx-empty} → f ≈ g
!-unique₂ = (⟺ !-unique) ○ !-unique
tuple-cong : ∀ {B : Obj} {Γ} {fs gs : (x : var Γ) → B ⇒ interp-sort-var x} → (∀ i → fs i ≈ gs i) →
tuple Γ fs ≈ tuple Γ gs
tuple-cong ξ = unique (λ i → project ○ ⟺ (ξ i))
∘-distribʳ-tuple : ∀ {B C} {Γ} {fs : (x : var Γ) → B ⇒ interp-sort-var x} {g : C ⇒ B} →
tuple Γ (λ x → fs x ∘ g) ≈ tuple Γ fs ∘ g
∘-distribʳ-tuple = unique (λ i → ⟺ assoc ○ ∘-resp-≈ˡ project)
-- A cartesian category carries a standard product structure (which we need not use)
module _ (cartesian-𝒞 : Cartesian.Cartesian 𝒞) where
open Cartesian.Cartesian cartesian-𝒞
standard-prod : Context → Obj
standard-prod ctx-empty = ⊤
standard-prod (ctx-slot A) = interp-sort A
standard-prod (ctx-concat Γ Δ) = standard-prod Γ × standard-prod Δ
standard-π : ∀ {Γ} (x : var Γ) → standard-prod Γ ⇒ interp-sort-var x
standard-π var-var = id
standard-π (var-inl i) = standard-π i ∘ π₁
standard-π (var-inr i) = standard-π i ∘ π₂
standard-tuple : ∀ Γ {B} → ((x : var Γ) → B ⇒ interp-sort-var x) → B ⇒ standard-prod Γ
standard-tuple ctx-empty fs = !
standard-tuple (ctx-slot _) fs = fs var-var
standard-tuple (ctx-concat Γ Δ) fs = ⟨ standard-tuple Γ (λ i → fs (var-inl i)) , standard-tuple Δ (λ j → fs (var-inr j)) ⟩
standard-project : ∀ {Γ} {B} {x : var Γ} {fs : (x : var Γ) → B ⇒ interp-sort-var x} →
standard-π x ∘ standard-tuple Γ fs ≈ fs x
standard-project {x = var-var} = identityˡ
standard-project {x = var-inl x} = assoc ○ ((∘-resp-≈ʳ project₁) ○ standard-project {x = x})
standard-project {x = var-inr x} = assoc ○ ((∘-resp-≈ʳ project₂) ○ standard-project {x = x})
standard-unique : ∀ {Γ} {B} {fs : (x : var Γ) → B ⇒ interp-sort-var x} {g : B ⇒ standard-prod Γ} → (∀ i → standard-π i ∘ g ≈ fs i) →
standard-tuple Γ fs ≈ g
standard-unique {ctx-empty} ξ = !-unique _
standard-unique {ctx-slot _} ξ = ⟺ (ξ var-var) ○ identityˡ
standard-unique {ctx-concat Γ Δ} {fs = fs} ξ =
unique
(⟺ (standard-unique (λ x → sym-assoc ○ (ξ (var-inl x)))))
(⟺ (standard-unique (λ y → sym-assoc ○ (ξ (var-inr y)))))
StandardProducted : Producted
StandardProducted =
record
{ prod = standard-prod
; π = standard-π
; tuple = standard-tuple
; project = λ {Γ} → standard-project {Γ}
; unique = standard-unique }
|
{"hexsha": "9bf57beabd160cb002d600973d7adf04a165cda7", "size": 3803, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "src/MultiSorted/Product.agda", "max_stars_repo_name": "cilinder/formaltt", "max_stars_repo_head_hexsha": "0a9d25e6e3965913d9b49a47c88cdfb94b55ffeb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 21, "max_stars_repo_stars_event_min_datetime": "2021-02-16T14:07:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-19T15:50:08.000Z", "max_issues_repo_path": "src/MultiSorted/Product.agda", "max_issues_repo_name": "andrejbauer/formaltt", "max_issues_repo_head_hexsha": "2aaf850bb1a262681c5a232cdefae312f921b9d4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-04-30T14:18:25.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-14T16:15:17.000Z", "max_forks_repo_path": "src/MultiSorted/Product.agda", "max_forks_repo_name": "andrejbauer/formaltt", "max_forks_repo_head_hexsha": "2aaf850bb1a262681c5a232cdefae312f921b9d4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2021-02-16T13:43:07.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-24T02:51:43.000Z", "avg_line_length": 40.4574468085, "max_line_length": 136, "alphanum_fraction": 0.5603470944, "num_tokens": 1360}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Function to test inference of the smoothing parameter of a hidden Potts-MRF.
Author: W.M.Kouw
Date: 18-09-2018
"""
import numpy as np
import numpy.random as rnd
import scipy.optimize as opt
import matplotlib.pyplot as plt
from tomopy.misc import phantom as ph
from hPottsMRF import hiddenPottsMarkovRandomField
# Generate checkerboard
L = ph.checkerboard(size=32, dtype='uint8')[0, :, :]
# Normalize board
L = np.round(L / 255.).astype('uint8')
# Map board values {0,1} to {1,3}; the zero padding below adds a third label (0)
L = 2 * L + 1
L = np.pad(L, [8, 8], mode='constant', constant_values=0)
# Generate observation matrix from label image
R = rnd.randn(*L.shape)*.2 + 1
# Observed image
Y = L + R
# Shape
H, W = Y.shape
# Patch locations: corners, edge midpoints and interior points
patch_indices = [(0, 0), (0, 10), (0, W-1),
(10, W-1), (H-1, W-1), (W-1, 10),
(H-1, 0), (10, 0), (10, 10),
                 (5, 5), (15, 15), (12, 12)]
# Call instance of hPottsMRF
model = hiddenPottsMarkovRandomField(neighbourhood_size=(3, 3), num_iter=5)
# Plot segmentation
fg, ax = plt.subplots(nrows=3, ncols=4, figsize=(12, 6))
# Extract neighbourhoods
cnt = 0
for row in range(3):
for col in range(4):
# Extract neighbourhood
b = model.neighbourhood(Y, patch_indices[cnt])
cnt += 1
# Show image
ax[row, col].imshow(b)
plt.show()
|
{"hexsha": "81b2f5a33d5491ea31e77d6489556becfe6dd0ee", "size": 1372, "ext": "py", "lang": "Python", "max_stars_repo_path": "experiments/tests/test_neighbourhoods.py", "max_stars_repo_name": "wmkouw/cc-infopriors", "max_stars_repo_head_hexsha": "653079f201c8bce570dacb3479f4270ebe0de953", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-07-11T01:32:55.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-11T01:32:55.000Z", "max_issues_repo_path": "experiments/tests/test_neighbourhoods.py", "max_issues_repo_name": "wmkouw/cc-smoothprior", "max_issues_repo_head_hexsha": "653079f201c8bce570dacb3479f4270ebe0de953", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "experiments/tests/test_neighbourhoods.py", "max_forks_repo_name": "wmkouw/cc-smoothprior", "max_forks_repo_head_hexsha": "653079f201c8bce570dacb3479f4270ebe0de953", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.7777777778, "max_line_length": 76, "alphanum_fraction": 0.6282798834, "include": true, "reason": "import numpy,import scipy", "num_tokens": 439}
|
import os
import numpy as np
from tensorflow.examples.tutorials import mnist
class Dataset(object):
def __init__(self, images, labels=None):
self._images = images.reshape(images.shape[0], -1)
self._labels = labels
self._epochs_completed = -1
self._num_examples = images.shape[0]
# shuffle on first run
self._index_in_epoch = self._num_examples
@property
def images(self):
return self._images
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
@property
def epochs_completed(self):
return self._epochs_completed
def next_batch(self, batch_size):
"""Return the next `batch_size` examples from this data set."""
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Shuffle the data
perm = np.arange(self._num_examples)
np.random.shuffle(perm)
self._images = self._images[perm]
if self._labels is not None:
self._labels = self._labels[perm]
# Start next epoch
start = 0
self._index_in_epoch = batch_size
assert batch_size <= self._num_examples
end = self._index_in_epoch
if self._labels is None:
return self._images[start:end], None
else:
return self._images[start:end], self._labels[start:end]
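# Minimal usage sketch (names illustrative): the first next_batch() call always
# shuffles, because _index_in_epoch starts out equal to _num_examples.
#   ds = Dataset(images, labels)
#   xb, yb = ds.next_batch(64)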
class MnistDataset(object):
def __init__(self):
data_directory = "MNIST"
if not os.path.exists(data_directory):
os.makedirs(data_directory)
dataset = mnist.input_data.read_data_sets(data_directory)
self.train = dataset.train
        # make sure that each digit class has exactly 10 samples
sup_images = []
sup_labels = []
rnd_state = np.random.get_state()
np.random.seed(0)
for cat in range(10):
ids = np.where(self.train.labels == cat)[0]
np.random.shuffle(ids)
sup_images.extend(self.train.images[ids[:10]])
sup_labels.extend(self.train.labels[ids[:10]])
np.random.set_state(rnd_state)
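        # Saving and restoring the global RNG state keeps the seeded
        # 10-per-class subset reproducible without disturbing the caller's
        # random stream.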
self.supervised_train = Dataset(
np.asarray(sup_images),
np.asarray(sup_labels),
)
self.test = dataset.test
self.validation = dataset.validation
self.image_dim = 28 * 28
self.image_shape = (28, 28, 1)
def transform(self, data):
return data
def inverse_transform(self, data):
return data
class ShapeDataset(object):
def __init__(self, images, labels):
self._images = images.reshape(images.shape[0], -1)
self._labels = labels
self.image_dim = 28 * 28
self.image_shape = (28, 28, 1)
self._epochs_completed = -1
self._num_examples = images.shape[0]
# shuffle on first run
self._index_in_epoch = self._num_examples
@property
def images(self):
return self._images
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
@property
def epochs_completed(self):
return self._epochs_completed
def next_batch(self, batch_size):
"""Return the next `batch_size` examples from this data set."""
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Shuffle the data
perm = np.arange(self._num_examples)
np.random.shuffle(perm)
self._images = self._images[perm]
if self._labels is not None:
self._labels = self._labels[perm]
# Start next epoch
start = 0
self._index_in_epoch = batch_size
assert batch_size <= self._num_examples
end = self._index_in_epoch
if self._labels is None:
return self._images[start:end], None
else:
return self._images[start:end], self._labels[start:end]
def inverse_transform(self, data):
return data
|
{"hexsha": "67ba10d171212fb4f549e1bb24aaaca559fd2049", "size": 4412, "ext": "py", "lang": "Python", "max_stars_repo_path": "vanilla_InfoGAN/infogan/misc/datasets.py", "max_stars_repo_name": "lbechberger/LearningConceputalDimensions", "max_stars_repo_head_hexsha": "332d0f520faad2e5788d658cb4f4b9cc9cfbb15d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2018-07-18T06:34:45.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-30T07:43:31.000Z", "max_issues_repo_path": "vanilla_InfoGAN/infogan/misc/datasets.py", "max_issues_repo_name": "lbechberger/LearningConceputalDimensions", "max_issues_repo_head_hexsha": "332d0f520faad2e5788d658cb4f4b9cc9cfbb15d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vanilla_InfoGAN/infogan/misc/datasets.py", "max_forks_repo_name": "lbechberger/LearningConceputalDimensions", "max_forks_repo_head_hexsha": "332d0f520faad2e5788d658cb4f4b9cc9cfbb15d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-08-17T20:01:51.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-17T19:12:15.000Z", "avg_line_length": 30.8531468531, "max_line_length": 71, "alphanum_fraction": 0.606980961, "include": true, "reason": "import numpy", "num_tokens": 981}
|
#include <boost/fusion/include/filter_if.hpp>
|
{"hexsha": "6d10d7403d31dadab721b5e5cfc0cdedbbcf1a50", "size": 46, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_fusion_include_filter_if.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_fusion_include_filter_if.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_fusion_include_filter_if.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 23.0, "max_line_length": 45, "alphanum_fraction": 0.8043478261, "num_tokens": 10}
|
program main
use plantfem
implicit none
type(FEMDomain_) :: domain
call domain%create(meshtype="Cube",x_num=10,y_num=10,z_num=10)
call domain%resize(x=1.0d0, y=3.0d0, z=10.0d0)
call domain%json(name="domain.json")
call domain%msh(name="domain.msh")
end program main
|
{"hexsha": "53d0635a8745e3daf914aa9f25714dc8f190e754", "size": 296, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "Tutorial/playon_fem/ex0014_CreateMeshEx6.f90", "max_stars_repo_name": "kazulagi/plantfem_min", "max_stars_repo_head_hexsha": "ba7398c031636644aef8acb5a0dad7f9b99fcb92", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 21, "max_stars_repo_stars_event_min_datetime": "2020-06-21T08:21:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-13T04:28:30.000Z", "max_issues_repo_path": "Tutorial/fem/ex0014_CreateMeshEx6.f90", "max_issues_repo_name": "kazulagi/plantFEM_binary", "max_issues_repo_head_hexsha": "32acf059a6d778307211718c2a512ff796b81c52", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-05-08T05:20:06.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T05:39:29.000Z", "max_forks_repo_path": "Tutorial/fem/ex0014_CreateMeshEx6.f90", "max_forks_repo_name": "kazulagi/plantFEM_binary", "max_forks_repo_head_hexsha": "32acf059a6d778307211718c2a512ff796b81c52", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-10-20T18:28:59.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-15T08:35:25.000Z", "avg_line_length": 24.6666666667, "max_line_length": 66, "alphanum_fraction": 0.6858108108, "num_tokens": 100}
|
# invert the ITMIX 2 with IV for ESA
include("itmix-setup.jl") # run this first on single process to make sure all precompilation this through
# before the parallel run starts
if nprocs()<2
if Sys.CPU_CORES>=17
        addprocs(17) # enough workers for the 17 ITMIX experiments (0:16) below
else
addprocs(Sys.CPU_CORES÷2 + 1) # /2 to get to physical cores
end
end
@everywhere begin
const debugging = true
include("itmix-setup.jl")
end
# whether doing a testing, semi-testing or production run
runtyp = ["test", "testmid", "prodlow", "prod"][3]
runtyp == "test" && println("\n\nTEST RUN !!!!!!!!\n\n")  # runtyp is a String, so compare against a String
fit_target = [BM.FitTarget.h, BM.FitTarget.h_iv, BM.FitTarget.length][2] # kept global to avoid an odd bug below
pl_kws = Dict{Symbol,Any}()
dir = "results_Werder/esa-14nov"
skip_iv_point_measurements = false
for gid in BM.ITMIXGlacier.(BM.itmix_glaciers_iv, 2)
if skip_iv_point_measurements && gid in BM.ITMIXGlacier.(BM.itmix_glaciers_iv_point, 2)
continue
end
println("\n\n============================ Running Glacier $(BM.getname(gid))")
for rm_iv in [true, false]
println("\n\n============================ rm_iv = $(rm_iv)")
println("\n\n============================")
gl,gb,pp,pm,pn,pl = BM.init_forward(gid; pl_kws...);
if nprocs()>1
pmap(expnr->run_one_exp(expnr, gl, gb, pp, pm, pn, pl, runtyp, fit_target; rm_iv=rm_iv, dir=dir), 0:16)
else
map(expnr->run_one_exp(expnr, gl, gb, pp, pm, pn, pl, runtyp, fit_target; rm_iv=rm_iv, dir=dir), 0:16)
        end
end
end
|
{"hexsha": "1437a4e838ed6aeaf6bd33edffa3c0f07345433d", "size": 1628, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "scripts/itmix2-parallel-ESA.jl", "max_stars_repo_name": "mauro3/BITEmodel.jl", "max_stars_repo_head_hexsha": "897eca85fc3c3b736ef49e23850b8f4bd6f2806a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2019-09-23T00:07:26.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-02T18:32:49.000Z", "max_issues_repo_path": "scripts/itmix2-parallel-ESA.jl", "max_issues_repo_name": "mauro3/BITEmodel.jl", "max_issues_repo_head_hexsha": "897eca85fc3c3b736ef49e23850b8f4bd6f2806a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/itmix2-parallel-ESA.jl", "max_forks_repo_name": "mauro3/BITEmodel.jl", "max_forks_repo_head_hexsha": "897eca85fc3c3b736ef49e23850b8f4bd6f2806a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-11-27T17:01:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-18T02:47:41.000Z", "avg_line_length": 34.6382978723, "max_line_length": 120, "alphanum_fraction": 0.6068796069, "num_tokens": 496}
|
import cProfile
import datetime as dt
import numpy as np
from tests.datasynthesis import unit_function_pattern
from qalatgir import fill_missing
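# The block below runs fill_missing once before profiling; this reads as a
# deliberate warm-up pass (imports, caches) so the cProfile numbers at the
# end reflect steady-state behaviour.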
original_data = unit_function_pattern(dt.timedelta(minutes=5))
missed_period = slice(11 * 12, 13 * 12)
deleted = original_data.iloc[missed_period]['value'].copy()
original_data.loc[missed_period, 'value'] = np.nan
fill_missing(original_data, 5)
original_data = unit_function_pattern(dt.timedelta(minutes=5))
missed_period = slice(11 * 12, 13 * 12)
deleted = original_data.iloc[missed_period]['value'].copy()
original_data.loc[missed_period, 'value'] = np.nan
cProfile.run('fill_missing(original_data, 5)', sort='cumtime')  # cProfile.run prints its report itself and returns None
|
{"hexsha": "e76728142df7aee9c7216f3e8ba4286bbd345781", "size": 679, "ext": "py", "lang": "Python", "max_stars_repo_path": "bottleneck_analysis.py", "max_stars_repo_name": "boof-tech/qalatgir", "max_stars_repo_head_hexsha": "4f5adfc1bb4f82c1c5478fb228b4121d7f9784ce", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bottleneck_analysis.py", "max_issues_repo_name": "boof-tech/qalatgir", "max_issues_repo_head_hexsha": "4f5adfc1bb4f82c1c5478fb228b4121d7f9784ce", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bottleneck_analysis.py", "max_forks_repo_name": "boof-tech/qalatgir", "max_forks_repo_head_hexsha": "4f5adfc1bb4f82c1c5478fb228b4121d7f9784ce", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.3333333333, "max_line_length": 69, "alphanum_fraction": 0.7849779087, "include": true, "reason": "import numpy", "num_tokens": 175}
|
import random
from math import exp
from tqdm import trange
import numpy as np
import sys
from graphical_model_learning.scores import MemoizedDecomposableScore
from graphical_model_learning.algorithms import permutation2dag
from graphical_model_learning.samplers.proposals import adjacent_transposition_proposer
sys.path.insert(1, "C:/Users/skarn/OneDrive/Documents/MIT/year_3/SuperUROP/causaldag")
def minimal_imap_mcmc(
initial_perm,
initial_dag,
ci_tester,
proposer,
scorer,
burn=1000,
num_steps=10000,
thin=1,
progress=False,
verbose=False
):
"""
Get DAG samples from the approximate posterior over minimal IMAPs.
Parameters
----------
    initial_perm:
        The permutation from which the Markov chain starts.
    initial_dag:
        A minimal IMAP consistent with initial_perm, used as the chain's
        starting DAG.
ci_tester:
A conditional independence tester, which has a method is_ci taking two elements i and j, and a conditioning set
C, that returns True/False.
proposer:
A function that proposes new permutations from the current permutation.
scorer:
A function that evaluates the log-likelihood of a given DAG.
burn:
Number of burn-in steps.
num_steps:
Total number of steps to run the Markov chain.
thin:
The thinning rate, i.e., how many steps between taking samples.
Returns
-------
List[DAG]
sampled DAGs
"""
current_perm, current_dag = initial_perm, initial_dag
current_score = scorer.get_score(current_dag)
samples = []
r = trange if progress else range
for step in r(num_steps):
if step >= burn and (step - burn) % thin == 0:
samples.append((current_dag, current_perm))
proposal_dag, proposal_perm = proposer(current_dag, current_perm, ci_tester)
proposal_score = scorer.get_score(proposal_dag)
accept = proposal_score > current_score or random.random() < exp(proposal_score - current_score)
if verbose: print(proposal_perm, exp(proposal_score - current_score))
if accept:
current_dag, current_perm, current_score = proposal_dag, proposal_perm, proposal_score
return samples
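# Note on the acceptance rule above: with log-scores s (current) and s'
# (proposal), the move is accepted with probability min(1, exp(s' - s)); the
# `proposal_score > current_score` disjunct merely short-circuits the exp()
# call when acceptance is already certain.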
def collect_stats_mcmc(
true_dag,
initial_perm,
initial_dag,
ci_tester,
proposer,
scorer,
num_steps=100,
burn=1,
progress=False,
verbose=True):
    samples = minimal_imap_mcmc(
        initial_perm,
        initial_dag,
        ci_tester,
        proposer,
        scorer,
        num_steps=num_steps,
        burn=burn,
        progress=progress,
        verbose=verbose
    )
desired_arcs = true_dag.arcs
total_samples = len(samples)
correct_samples = np.sum([samples[i][0].arcs == desired_arcs for i in range(total_samples)])
stats = {'samples': samples, 'fraction_correct': correct_samples/total_samples}
return stats
if __name__ == '__main__':
from graphical_models import DAG
from graphical_models.rand import rand_weights
from conditional_independence import partial_correlation_suffstat, MemoizedCI_Tester
    from graphical_model_learning import partial_correlation_test, local_gaussian_bge_score, local_bayesian_regression_bge_score  # the last score is used below but was never imported; assumed to live in the same package
d = DAG(arcs={(0, 1), (2, 1)})
# d = DAG(arcs={(0, 1), (0, 2), (0, 3), (0, 4), (1, 2), (1, 3), (1, 4), (2, 3), (2, 4), (3, 4)})
g = rand_weights(d)
samples = g.sample(1000)
suffstat = partial_correlation_suffstat(samples)
suffstat['samples'] = samples
initial_perm = [1, 2, 0]
ci_tester = MemoizedCI_Tester(partial_correlation_test, suffstat, alpha=.05)
initial_dag = permutation2dag(initial_perm, ci_tester)
# scorer = MemoizedDecomposableScore(local_gaussian_bge_score, suffstat)
scorer_bge = MemoizedDecomposableScore(local_gaussian_bge_score, suffstat)
num_meta_iterations = 30
stats_bges = [collect_stats_mcmc(
d,
initial_perm,
initial_dag,
ci_tester,
adjacent_transposition_proposer,
scorer_bge,
num_steps=100,
burn=1,
progress=False,
verbose=False
) for i in range(num_meta_iterations)]
bge_fraction_correct = np.mean([stats_bge["fraction_correct"] for stats_bge in stats_bges])
print("BGe fraction correct: ", bge_fraction_correct)
scorer_ibge = MemoizedDecomposableScore(local_bayesian_regression_bge_score, suffstat)
stats_ibges = [collect_stats_mcmc(
d,
initial_perm,
initial_dag,
ci_tester,
adjacent_transposition_proposer,
scorer_ibge,
num_steps=100,
burn=1,
progress=False,
verbose=False
) for i in range(num_meta_iterations)]
ibge_fraction_correct = np.mean([stats_ibge["fraction_correct"] for stats_ibge in stats_ibges])
print("I-BGe fraction correct: ", ibge_fraction_correct)
|
{"hexsha": "8fadb49ae8fb0502e0933bce25a6132b0dfb9199", "size": 4789, "ext": "py", "lang": "Python", "max_stars_repo_path": "graphical_model_learning/samplers/minimal_imap_mcmc.py", "max_stars_repo_name": "uhlerlab/graphical_model_learning", "max_stars_repo_head_hexsha": "19a1885af073b35d1f9b16585482af30d4db7264", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-09-12T13:41:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T14:44:04.000Z", "max_issues_repo_path": "graphical_model_learning/samplers/minimal_imap_mcmc.py", "max_issues_repo_name": "uhlerlab/graphical_model_learning", "max_issues_repo_head_hexsha": "19a1885af073b35d1f9b16585482af30d4db7264", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "graphical_model_learning/samplers/minimal_imap_mcmc.py", "max_forks_repo_name": "uhlerlab/graphical_model_learning", "max_forks_repo_head_hexsha": "19a1885af073b35d1f9b16585482af30d4db7264", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-02-03T04:05:41.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-12T13:41:26.000Z", "avg_line_length": 31.5065789474, "max_line_length": 119, "alphanum_fraction": 0.672165379, "include": true, "reason": "import numpy", "num_tokens": 1177}
|
import numpy as np
from openmdao.api import ExplicitComponent
class Shaft(ExplicitComponent):
"""Calculates power balance for shaft"""
def initialize(self):
        self.options.declare('num_ports', default=2,
                             desc="number of shaft connections to make")
def setup(self):
num_ports = self.options['num_ports']
self.add_input('Nmech', val = 1000.0, units="rpm")
self.add_input('HPX', val = 0.0, units='hp')
self.add_input('fracLoss', val = 0.0)
self.add_output('trq_in', val=1.0, units='ft*lbf')
self.add_output('trq_out', val=1.0, units='ft*lbf')
self.add_output('trq_net', val=1.0, units='ft*lbf')
self.add_output('pwr_in', val=1.0, units='hp')
self.add_output('pwr_in_real', val=1.0, units='hp')
self.add_output('pwr_out', val=1.0, units='hp')
self.add_output('pwr_out_real', val=1.0, units='hp')
self.add_output('pwr_net', val=1.0, units='hp')
HP_to_FT_LBF_per_SEC = 550
self.convert = 2. * np.pi / 60. / HP_to_FT_LBF_per_SEC
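        # pwr[hp] = trq[ft*lbf] * Nmech[rpm] * self.convert:
        # rpm -> rad/s via 2*pi/60, then (ft*lbf)/s -> hp via 1/550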
self.trq_vars = []
for i in range(num_ports):
trq_var_name = 'trq_{:d}'.format(i)
self.add_input(trq_var_name, val=0., units='ft*lbf')
self.trq_vars.append(trq_var_name)
self.declare_partials(['trq_in', 'trq_out', 'pwr_in', 'pwr_out'], trq_var_name)
self.declare_partials('trq_net', '*')
self.declare_partials('pwr_net', '*')
self.declare_partials(['pwr_in', 'pwr_out', 'pwr_in_real', 'pwr_out_real'], '*')
def compute(self, inputs, outputs):
fracLoss = inputs['fracLoss']
HPX = inputs['HPX']
Nmech = inputs['Nmech']
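        # Sign convention (as implied by the accumulation below): ports that
        # drive the shaft report trq >= 0 (summed into trq_in); loads report
        # trq < 0 (summed into trq_out).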
trq_in = 0
trq_out = 0
for trq_var in self.trq_vars:
trq = inputs[trq_var]
if trq >= 0:
trq_in += trq
else:
trq_out += trq
trq_net = trq_in * (1. - fracLoss) + trq_out - HPX / (Nmech * self.convert)
outputs['trq_net'] = trq_net
outputs['trq_in'] = trq_in
outputs['trq_out'] = trq_out
outputs['pwr_in'] = trq_in * Nmech * self.convert
outputs['pwr_out'] = trq_out * Nmech * self.convert
outputs['pwr_net'] = trq_net * Nmech * self.convert
outputs['pwr_in_real'] = trq_in * (1. - fracLoss) * Nmech * self.convert
outputs['pwr_out_real'] = trq_out * Nmech * self.convert - HPX
def compute_partials(self, inputs, J):
num_ports = self.options['num_ports']
PortTrqs = [inputs['trq_%d'%i] for i in range(num_ports)]
fracLoss = inputs['fracLoss']
HPX = inputs['HPX']
Nmech = inputs['Nmech']
trq_in = 0
trq_out = 0
for trq_var in self.trq_vars:
trq = inputs[trq_var]
if trq >= 0:
trq_in += trq
else:
trq_out += trq
J['trq_net', 'Nmech'] = HPX * Nmech ** (-2.) / self.convert
J['trq_net', 'HPX'] = -1. / (Nmech * self.convert)
J['trq_net', 'fracLoss'] = -trq_in
J['pwr_in', 'Nmech'] = trq_in * self.convert
J['pwr_out', 'Nmech'] = trq_out * self.convert
J['pwr_in_real', 'Nmech'] = trq_in * self.convert * (1 - fracLoss)
J['pwr_in_real', 'fracLoss'] = -trq_in * self.convert * Nmech
J['pwr_out_real', 'Nmech'] = trq_out * self.convert
J['pwr_out_real', 'HPX'] = -1
J['pwr_net', 'Nmech'] = trq_in * \
(1 - fracLoss) * self.convert + trq_out * self.convert
J['pwr_net', 'HPX'] = -1
J['pwr_net', 'fracLoss'] = -trq_in * Nmech * self.convert
for i in range(num_ports):
trq_var_name = 'trq_%d'%i
if PortTrqs[i] >= 0:
J['trq_in', trq_var_name]= 1.0
J['trq_out', trq_var_name]= 0.0
J['trq_net', trq_var_name]= 1 - fracLoss
J['pwr_in', trq_var_name]= Nmech * self.convert
J['pwr_out', trq_var_name] = 0.0
J['pwr_in_real', trq_var_name]= Nmech * self.convert * (1 - fracLoss)
J['pwr_out_real', trq_var_name] = 0.0
J['pwr_net', trq_var_name]= Nmech * \
self.convert * (1 - fracLoss)
elif PortTrqs[i] < 0:
J['trq_out', trq_var_name] = 1.0
J['trq_in', trq_var_name] = 0.0
J['trq_net', trq_var_name] = 1.0
J['pwr_in', trq_var_name]= 0.
J['pwr_out', trq_var_name] = Nmech * self.convert
J['pwr_in_real', trq_var_name]= 0.
J['pwr_out_real', trq_var_name] = Nmech * self.convert
J['pwr_net', trq_var_name] = Nmech * self.convert
if __name__ == "__main__":
from openmdao.api import Problem, Group
p = Problem()
p.model = Group()
    p.model.add_subsystem("shaft", Shaft(num_ports=10))  # options must be passed by keyword
p.setup()
p.run_model()
#print(p['shaft.PortTrqs'])
|
{"hexsha": "676f0569aa7d452dd82e00dfe25022084cb3f0a5", "size": 5008, "ext": "py", "lang": "Python", "max_stars_repo_path": "pycycle/elements/shaft.py", "max_stars_repo_name": "naylor-b/pyCycle", "max_stars_repo_head_hexsha": "787743b39b17443631debb145a976b0ccdee43ab", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 38, "max_stars_repo_stars_event_min_datetime": "2019-08-12T15:27:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-27T16:34:51.000Z", "max_issues_repo_path": "pycycle/elements/shaft.py", "max_issues_repo_name": "naylor-b/pyCycle", "max_issues_repo_head_hexsha": "787743b39b17443631debb145a976b0ccdee43ab", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 16, "max_issues_repo_issues_event_min_datetime": "2019-11-07T17:39:54.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-01T14:59:48.000Z", "max_forks_repo_path": "pycycle/elements/shaft.py", "max_forks_repo_name": "naylor-b/pyCycle", "max_forks_repo_head_hexsha": "787743b39b17443631debb145a976b0ccdee43ab", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 35, "max_forks_repo_forks_event_min_datetime": "2019-08-12T15:27:37.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T16:25:33.000Z", "avg_line_length": 33.8378378378, "max_line_length": 91, "alphanum_fraction": 0.5439297125, "include": true, "reason": "import numpy", "num_tokens": 1573}
|
[STATEMENT]
lemma increasing_Bseq_subseq_iff:
assumes "\<And>x y. x \<le> y \<Longrightarrow> norm (f x :: 'a::real_normed_vector) \<le> norm (f y)" "strict_mono g"
shows "Bseq (\<lambda>x. f (g x)) \<longleftrightarrow> Bseq f"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Bseq (\<lambda>x. f (g x)) = Bseq f
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. Bseq (\<lambda>x. f (g x)) \<Longrightarrow> Bseq f
2. Bseq f \<Longrightarrow> Bseq (\<lambda>x. f (g x))
[PROOF STEP]
assume "Bseq (\<lambda>x. f (g x))"
[PROOF STATE]
proof (state)
this:
Bseq (\<lambda>x. f (g x))
goal (2 subgoals):
1. Bseq (\<lambda>x. f (g x)) \<Longrightarrow> Bseq f
2. Bseq f \<Longrightarrow> Bseq (\<lambda>x. f (g x))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
Bseq (\<lambda>x. f (g x))
[PROOF STEP]
obtain K where K: "\<And>x. norm (f (g x)) \<le> K"
[PROOF STATE]
proof (prove)
using this:
Bseq (\<lambda>x. f (g x))
goal (1 subgoal):
1. (\<And>K. (\<And>x. norm (f (g x)) \<le> K) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
unfolding Bseq_def
[PROOF STATE]
proof (prove)
using this:
\<exists>K>0. \<forall>n. norm (f (g n)) \<le> K
goal (1 subgoal):
1. (\<And>K. (\<And>x. norm (f (g x)) \<le> K) \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
norm (f (g ?x)) \<le> K
goal (2 subgoals):
1. Bseq (\<lambda>x. f (g x)) \<Longrightarrow> Bseq f
2. Bseq f \<Longrightarrow> Bseq (\<lambda>x. f (g x))
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
norm (f (g ?x)) \<le> K
goal (2 subgoals):
1. Bseq (\<lambda>x. f (g x)) \<Longrightarrow> Bseq f
2. Bseq f \<Longrightarrow> Bseq (\<lambda>x. f (g x))
[PROOF STEP]
fix x :: nat
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. Bseq (\<lambda>x. f (g x)) \<Longrightarrow> Bseq f
2. Bseq f \<Longrightarrow> Bseq (\<lambda>x. f (g x))
[PROOF STEP]
from filterlim_subseq[OF assms(2)]
[PROOF STATE]
proof (chain)
picking this:
filterlim g sequentially sequentially
[PROOF STEP]
obtain y where "g y \<ge> x"
[PROOF STATE]
proof (prove)
using this:
filterlim g sequentially sequentially
goal (1 subgoal):
1. (\<And>y. x \<le> g y \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (auto simp: filterlim_at_top eventually_at_top_linorder)
[PROOF STATE]
proof (state)
this:
x \<le> g y
goal (2 subgoals):
1. Bseq (\<lambda>x. f (g x)) \<Longrightarrow> Bseq f
2. Bseq f \<Longrightarrow> Bseq (\<lambda>x. f (g x))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
x \<le> g y
[PROOF STEP]
have "norm (f x) \<le> norm (f (g y))"
[PROOF STATE]
proof (prove)
using this:
x \<le> g y
goal (1 subgoal):
1. norm (f x) \<le> norm (f (g y))
[PROOF STEP]
using assms(1)
[PROOF STATE]
proof (prove)
using this:
x \<le> g y
?x \<le> ?y \<Longrightarrow> norm (f ?x) \<le> norm (f ?y)
goal (1 subgoal):
1. norm (f x) \<le> norm (f (g y))
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
norm (f x) \<le> norm (f (g y))
goal (2 subgoals):
1. Bseq (\<lambda>x. f (g x)) \<Longrightarrow> Bseq f
2. Bseq f \<Longrightarrow> Bseq (\<lambda>x. f (g x))
[PROOF STEP]
also
[PROOF STATE]
proof (state)
this:
norm (f x) \<le> norm (f (g y))
goal (2 subgoals):
1. Bseq (\<lambda>x. f (g x)) \<Longrightarrow> Bseq f
2. Bseq f \<Longrightarrow> Bseq (\<lambda>x. f (g x))
[PROOF STEP]
have "norm (f (g y)) \<le> K"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. norm (f (g y)) \<le> K
[PROOF STEP]
by (rule K)
[PROOF STATE]
proof (state)
this:
norm (f (g y)) \<le> K
goal (2 subgoals):
1. Bseq (\<lambda>x. f (g x)) \<Longrightarrow> Bseq f
2. Bseq f \<Longrightarrow> Bseq (\<lambda>x. f (g x))
[PROOF STEP]
finally
[PROOF STATE]
proof (chain)
picking this:
norm (f x) \<le> K
[PROOF STEP]
have "norm (f x) \<le> K"
[PROOF STATE]
proof (prove)
using this:
norm (f x) \<le> K
goal (1 subgoal):
1. norm (f x) \<le> K
[PROOF STEP]
.
[PROOF STATE]
proof (state)
this:
norm (f x) \<le> K
goal (2 subgoals):
1. Bseq (\<lambda>x. f (g x)) \<Longrightarrow> Bseq f
2. Bseq f \<Longrightarrow> Bseq (\<lambda>x. f (g x))
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
norm (f ?x2) \<le> K
goal (2 subgoals):
1. Bseq (\<lambda>x. f (g x)) \<Longrightarrow> Bseq f
2. Bseq f \<Longrightarrow> Bseq (\<lambda>x. f (g x))
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
norm (f ?x2) \<le> K
[PROOF STEP]
show "Bseq f"
[PROOF STATE]
proof (prove)
using this:
norm (f ?x2) \<le> K
goal (1 subgoal):
1. Bseq f
[PROOF STEP]
by (rule BseqI')
[PROOF STATE]
proof (state)
this:
Bseq f
goal (1 subgoal):
1. Bseq f \<Longrightarrow> Bseq (\<lambda>x. f (g x))
[PROOF STEP]
qed (use Bseq_subseq[of f g] in simp_all)
|
{"llama_tokens": 2071, "file": null, "length": 26}
|
# Testing reference values and precisions
# Each test block of varr and parr should be followed by an append to refVals, refPrecs arrays.
# e.g.
# refVals=[]
# refPrecs=[]
#
# varr = ..........
# par = ..........
#
# append!(refVals ,[ varr ] )
# append!(refPrecs,[ parr ] )
#
# varr = ..........
# par = ..........
#
# append!(refVals ,[ varr ] )
# append!(refPrecs,[ parr ] )
#
# varr = ..........
# par = ..........
#
# append!(refVals ,[ varr ] )
# append!(refPrecs,[ parr ] )
#
# etc.....
#
# Now for real!
#
#! format: off
refVals=[]
refPrecs=[]
# SC ========== Test number 1 reference values and precision match template. =======
# SC ========== /Users/chrishill/projects/clima/cm/test/Ocean/HydrostaticBoussinesq/test_ocean_gyre.jl test reference values ======================================
# BEGIN SCPRINT
# varr - reference values (from reference run)
# parr - digits match precision (hand edit as needed)
#
# [
# [ MPIStateArray Name, Field Name, Maximum, Minimum, Mean, Standard Deviation ],
# [ : : : : : : ],
# ]
varr = [
[ "Q", "u[1]", -2.18427865357219835873e-02, 4.54905817704320050709e-02, 2.91799084468619303323e-03, 9.86739717788472255056e-03 ],
[ "Q", "u[2]", -6.47928098321387119229e-02, 7.44690631237251432495e-02, -1.82439830964300966215e-03, 1.02482752029024841434e-02 ],
[ "Q", :η, -6.35241759974374819997e-01, 6.25677877693153861038e-01, -8.60068718305024041901e-04, 2.24583755068219675932e-01 ],
[ "Q", :θ, 9.03726335555428482264e-05, 9.03968461560725344839e+00, 2.49953258705220227043e+00, 2.19711762918947606238e+00 ],
[ "s_aux", :y, 0.00000000000000000000e+00, 4.00000000000000046566e+06, 2.00000000000000000000e+06, 1.18025270281469495967e+06 ],
[ "s_aux", :w, -4.50036075461459223737e-05, 4.09915220804811021062e-05, 3.06722602595587297903e-07, 8.22999489798410955438e-06 ],
[ "s_aux", :pkin, -9.00231747934270076783e-01, 0.00000000000000000000e+00, -3.32080874523538072118e-01, 2.56218992025050162908e-01 ],
[ "s_aux", :wz0, -2.22688579102610652283e-05, 1.52895210358102938127e-05, -2.78589672590168561413e-08, 7.72520757253795920232e-06 ],
[ "s_gflux", "ν∇u[1]", -1.84915426791050691892e-03, 1.81823541139391206892e-03, 4.41670652876216560588e-08, 1.43003334026535461722e-04 ],
[ "s_gflux", "ν∇u[2]", -2.18923622988041743920e-03, 1.83543406028729679316e-03, 3.98744828991449433208e-06, 3.02040505665129290977e-04 ],
[ "s_gflux", "ν∇u[3]", -1.31369743560772895408e-05, 6.60039477945146449384e-06, -1.12119891567145960325e-08, 9.06617548358093396365e-07 ],
[ "s_gflux", "ν∇u[4]", -3.29092487463896604671e-03, 3.71582885889340866492e-03, -2.53728229769815408283e-06, 2.97060576409856780689e-04 ],
[ "s_gflux", "ν∇u[5]", -3.81669587050501284836e-03, 7.74606451320633592264e-04, -5.95763172118155189140e-06, 2.09071039491200258605e-04 ],
[ "s_gflux", "ν∇u[6]", -7.41189897881484784109e-06, 1.26149616846000803353e-05, 1.41878743800085061125e-08, 1.12070628757683659611e-06 ],
[ "s_gflux", "κ∇θ[1]", -5.86968584224644812338e-05, 4.34652887121820283387e-05, -2.82719382567508372835e-06, 8.15301391557321884090e-06 ],
[ "s_gflux", "κ∇θ[2]", -3.70308432284375849564e-05, 3.11721545144766831389e-03, 9.81447035540739714035e-04, 8.31872571479874375368e-04 ],
[ "s_gflux", "κ∇θ[3]", -2.15846132870457054810e-05, 2.00382267962942220408e-05, -5.17436031104969784121e-07, 1.79150072112032684306e-06 ],
]
parr = [
[ "Q", "u[1]", 12, 12, 12, 12 ],
[ "Q", "u[2]", 12, 12, 12, 12 ],
[ "Q", :η, 12, 12, 12, 12 ],
[ "Q", :θ, 12, 12, 12, 12 ],
[ "s_aux", :y, 12, 12, 12, 12 ],
[ "s_aux", :w, 12, 12, 12, 12 ],
[ "s_aux", :pkin, 12, 12, 12, 12 ],
[ "s_aux", :wz0, 12, 12, 12, 12 ],
[ "s_gflux", "ν∇u[1]", 12, 12, 12, 12 ],
[ "s_gflux", "ν∇u[2]", 12, 12, 12, 12 ],
[ "s_gflux", "ν∇u[3]", 12, 12, 12, 12 ],
[ "s_gflux", "ν∇u[4]", 12, 12, 12, 12 ],
[ "s_gflux", "ν∇u[5]", 12, 12, 12, 12 ],
[ "s_gflux", "ν∇u[6]", 12, 12, 12, 12 ],
[ "s_gflux", "κ∇θ[1]", 12, 12, 12, 12 ],
[ "s_gflux", "κ∇θ[2]", 12, 12, 12, 12 ],
[ "s_gflux", "κ∇θ[3]", 12, 12, 8, 12 ],
]
# END SCPRINT
append!(refVals ,[ varr ] )
append!(refPrecs,[ parr ] )
# SC ========== Test number 2 reference values and precision match template. =======
# SC ========== /Users/chrishill/projects/clima/cm/test/Ocean/HydrostaticBoussinesq/test_ocean_gyre.jl test reference values ======================================
# BEGIN SCPRINT
# varr - reference values (from reference run)
# parr - digits match precision (hand edit as needed)
#
# [
# [ MPIStateArray Name, Field Name, Maximum, Minimum, Mean, Standard Deviation ],
# [ : : : : : : ],
# ]
varr = [
[ "Q", "u[1]", -2.14403161060213384714e-02, 4.51092912722281316751e-02, 2.91883577365915263327e-03, 9.85543151135863208789e-03 ],
[ "Q", "u[2]", -6.47849908268208068973e-02, 7.44377079062340796245e-02, -1.82528856707940708402e-03, 1.02465626974943338490e-02 ],
[ "Q", :η, -6.35241717401733962944e-01, 6.25675643032199912952e-01, -8.59997171611655590248e-04, 2.24578663698960928619e-01 ],
[ "Q", :θ, 1.21624650983828366162e-04, 9.05348870526675320036e+00, 2.49953463844110279624e+00, 2.19723475068906504148e+00 ],
[ "s_aux", :y, 0.00000000000000000000e+00, 4.00000000000000046566e+06, 2.00000000000000000000e+06, 1.18025270281469495967e+06 ],
[ "s_aux", :w, -4.48750663401728297594e-05, 4.07425796079212936126e-05, 3.05580091823959372610e-07, 8.20754136359875149074e-06 ],
[ "s_aux", :pkin, -9.00231018468760080253e-01, 0.00000000000000000000e+00, -3.32080814803801249724e-01, 2.56218661345592568779e-01 ],
[ "s_aux", :wz0, -2.24168211854045644460e-05, 1.54464254869175437296e-05, -2.71999565492576589218e-08, 7.76164707621753630128e-06 ],
[ "s_gflux", "ν∇u[1]", -1.85093041460835305498e-03, 1.81880838666876047845e-03, 4.56526424711389789871e-08, 1.42643910766357428534e-04 ],
[ "s_gflux", "ν∇u[2]", -2.20819366719671563229e-03, 1.84128108384750657951e-03, 3.98074575776293245019e-06, 3.03797417171225480129e-04 ],
[ "s_gflux", "ν∇u[3]", -1.32036367846675870225e-05, 6.56099008285145167548e-06, -1.24087351933463812712e-08, 9.05500396408119435374e-07 ],
[ "s_gflux", "ν∇u[4]", -3.28419921150994737619e-03, 3.70704618050693088249e-03, -2.54827541969113706709e-06, 2.96228406753828749521e-04 ],
[ "s_gflux", "ν∇u[5]", -3.80879251696829247351e-03, 7.74834565383260352560e-04, -5.93513695165015881065e-06, 2.08479602570907525654e-04 ],
[ "s_gflux", "ν∇u[6]", -7.39839704966590930855e-06, 1.26017207509591005658e-05, 1.46129449482955510494e-08, 1.11733219690338497597e-06 ],
[ "s_gflux", "κ∇θ[1]", -5.81601257251870891614e-05, 4.70677319813339798652e-05, -2.82659431063543911260e-06, 8.16581551142350108366e-06 ],
[ "s_gflux", "κ∇θ[2]", -3.76891063975906055088e-05, 3.10874693302757654570e-03, 9.82036601802479310747e-04, 8.31189786431030404844e-04 ],
[ "s_gflux", "κ∇θ[3]", -2.42394160392341078493e-06, 8.68196014640797840416e-08, -5.04918136536487334668e-07, 3.01622429419160335704e-07 ],
]
parr = [
[ "Q", "u[1]", 12, 12, 12, 12 ],
[ "Q", "u[2]", 12, 12, 12, 12 ],
[ "Q", :η, 12, 12, 12, 12 ],
[ "Q", :θ, 12, 12, 12, 12 ],
[ "s_aux", :y, 12, 12, 12, 12 ],
[ "s_aux", :w, 12, 12, 12, 12 ],
[ "s_aux", :pkin, 12, 12, 12, 12 ],
[ "s_aux", :wz0, 12, 12, 12, 12 ],
[ "s_gflux", "ν∇u[1]", 12, 12, 12, 12 ],
[ "s_gflux", "ν∇u[2]", 12, 12, 12, 12 ],
[ "s_gflux", "ν∇u[3]", 12, 12, 12, 12 ],
[ "s_gflux", "ν∇u[4]", 12, 12, 12, 12 ],
[ "s_gflux", "ν∇u[5]", 12, 12, 12, 12 ],
[ "s_gflux", "ν∇u[6]", 12, 12, 12, 12 ],
[ "s_gflux", "κ∇θ[1]", 12, 12, 12, 12 ],
[ "s_gflux", "κ∇θ[2]", 12, 12, 12, 12 ],
[ "s_gflux", "κ∇θ[3]", 12, 8, 12, 12 ],
]
# END SCPRINT
append!(refVals ,[ varr ] )
append!(refPrecs,[ parr ] )
# SC ========== Test number 3 reference values and precision match template. =======
# SC ========== /Users/chrishill/projects/clima/cm/test/Ocean/HydrostaticBoussinesq/test_ocean_gyre.jl test reference values ======================================
# BEGIN SCPRINT
# varr - reference values (from reference run)
# parr - digits match precision (hand edit as needed)
#
# [
# [ MPIStateArray Name, Field Name, Maximum, Minimum, Mean, Standard Deviation ],
# [ : : : : : : ],
# ]
varr = [
[ "Q", "u[1]", -1.32150247347356725180e-01, 1.64031203423611798842e-01, 4.86341857180362744512e-03, 1.48290239846339700486e-02 ],
[ "Q", "u[2]", -6.36612598877540902809e-02, 7.44560153894480020176e-02, -2.47624053318063700596e-03, 1.31303104153666864301e-02 ],
[ "Q", :η, -6.17793649659350796455e-01, 6.06966232332265209770e-01, -1.29090689600396902117e-03, 2.25023233970694097605e-01 ],
[ "Q", :θ, 8.59363830767245827184e-05, 9.03903412174035025828e+00, 2.49947426680983575409e+00, 2.19708020497952460914e+00 ],
[ "s_aux", :y, 0.00000000000000000000e+00, 4.00000000000000046566e+06, 2.00000000000000000000e+06, 1.18025270281469495967e+06 ],
[ "s_aux", :w, -4.88109548294750840891e-05, 5.07779930498253729947e-05, 3.71427081618204492464e-07, 9.36953994676888197318e-06 ],
[ "s_aux", :pkin, -9.00072683420330310611e-01, 0.00000000000000000000e+00, -3.32075420474812310623e-01, 2.56210240101286457204e-01 ],
[ "s_aux", :wz0, -2.37163738698016553002e-05, 1.61967572887668093411e-05, -4.64606200633942812947e-08, 7.86254275820167091446e-06 ],
[ "s_gflux", "ν∇u[1]", -6.67376825614485365279e-03, 6.97979211528434052153e-03, 1.01948640467980356077e-07, 2.29183131022463946721e-04 ],
[ "s_gflux", "ν∇u[2]", -7.69310192750550202179e-03, 1.37445482375157909845e-03, -3.35376028298812290740e-05, 5.28895496282001859913e-04 ],
[ "s_gflux", "ν∇u[3]", -1.70129300483239544585e-05, 1.30704917153533933284e-05, -4.30738980896587680255e-08, 1.59157632234111246367e-06 ],
[ "s_gflux", "ν∇u[4]", -4.05738110420349878948e-03, 4.20907825808012042018e-03, 2.59008171395293793600e-06, 4.12110908407640325318e-04 ],
[ "s_gflux", "ν∇u[5]", -4.15559862073769641783e-03, 1.11805092953295753984e-03, -7.36416840356856789053e-06, 2.39482266912490057102e-04 ],
[ "s_gflux", "ν∇u[6]", -7.43240009691175527648e-06, 2.23076045923041977150e-05, 2.75166886765055948798e-08, 1.43297925696224871009e-06 ],
[ "s_gflux", "κ∇θ[1]", -7.28126206584962977356e-05, 6.51042269918416155244e-05, -3.06828578003712164183e-06, 9.88453091533718813987e-06 ],
[ "s_gflux", "κ∇θ[2]", -5.64043404057886982382e-05, 3.11699982926543850761e-03, 9.81313644046314260266e-04, 8.32013174688638057486e-04 ],
[ "s_gflux", "κ∇θ[3]", -2.16059510376949300253e-05, 2.05358490375227276093e-05, -5.17225691641381943229e-07, 1.79077714232301003982e-06 ],
]
parr = [
[ "Q", "u[1]", 12, 12, 12, 12 ],
[ "Q", "u[2]", 12, 12, 12, 12 ],
[ "Q", :η, 12, 12, 12, 12 ],
[ "Q", :θ, 12, 12, 12, 12 ],
[ "s_aux", :y, 12, 12, 12, 12 ],
[ "s_aux", :w, 12, 12, 12, 12 ],
[ "s_aux", :pkin, 12, 12, 12, 12 ],
[ "s_aux", :wz0, 12, 12, 12, 12 ],
[ "s_gflux", "ν∇u[1]", 12, 12, 12, 12 ],
[ "s_gflux", "ν∇u[2]", 12, 12, 12, 12 ],
[ "s_gflux", "ν∇u[3]", 12, 12, 12, 12 ],
[ "s_gflux", "ν∇u[4]", 12, 12, 12, 12 ],
[ "s_gflux", "ν∇u[5]", 12, 12, 12, 12 ],
[ "s_gflux", "ν∇u[6]", 12, 12, 12, 12 ],
[ "s_gflux", "κ∇θ[1]", 12, 12, 12, 12 ],
[ "s_gflux", "κ∇θ[2]", 12, 12, 12, 12 ],
[ "s_gflux", "κ∇θ[3]", 12, 12, 12, 12 ],
]
# END SCPRINT
# SC ====================================================================================
append!(refVals ,[ varr ] )
append!(refPrecs,[ parr ] )
# SC ========== Test number 4 reference values and precision match template. =======
# SC ========== /Users/chrishill/projects/clima/cm/test/Ocean/HydrostaticBoussinesq/test_ocean_gyre.jl test reference values ======================================
# BEGIN SCPRINT
# varr - reference values (from reference run)
# parr - digits match precision (hand edit as needed)
#
# [
# [ MPIStateArray Name, Field Name, Maximum, Minimum, Mean, Standard Deviation ],
# [ : : : : : : ],
# ]
varr = [
[ "Q", "u[1]", -1.32522061560302495398e-01, 1.64403805704383537689e-01, 4.86426609935955867742e-03, 1.48297097518645653452e-02 ],
[ "Q", "u[2]", -6.36533081952946194759e-02, 7.44240286788056537581e-02, -2.47705914433662445495e-03, 1.31287947739917917195e-02 ],
[ "Q", :η, -6.17792146223299143415e-01, 6.06964210100974344009e-01, -1.29086897946112909297e-03, 2.25017541338622450997e-01 ],
[ "Q", :θ, 1.18930697909414831350e-04, 9.05193063653280205472e+00, 2.49947692790997599843e+00, 2.19719805926361999582e+00 ],
[ "s_aux", :y, 0.00000000000000000000e+00, 4.00000000000000046566e+06, 2.00000000000000000000e+06, 1.18025270281469495967e+06 ],
[ "s_aux", :w, -4.86355979078713029592e-05, 5.05698562144389179547e-05, 3.70131833873677610995e-07, 9.34929694591741612729e-06 ],
[ "s_aux", :pkin, -9.00073156073595503912e-01, 0.00000000000000000000e+00, -3.32075359710396145196e-01, 2.56209888633661342361e-01 ],
[ "s_aux", :wz0, -2.38160453952588820092e-05, 1.63295706147758099968e-05, -4.63837554347600732956e-08, 7.90783852649017554050e-06 ],
[ "s_gflux", "ν∇u[1]", -6.68560102683574044441e-03, 6.98986179365855032908e-03, 1.00393045025810862916e-07, 2.29127846391889087837e-04 ],
[ "s_gflux", "ν∇u[2]", -7.69995573676401749708e-03, 1.37839162179005680388e-03, -3.35981134949950310548e-05, 5.29965375899606985950e-04 ],
[ "s_gflux", "ν∇u[3]", -1.70723021047607414786e-05, 1.30922704334956021486e-05, -4.44618327893026796269e-08, 1.59336218473792970699e-06 ],
[ "s_gflux", "ν∇u[4]", -4.05946426934079233201e-03, 4.21052254828468888043e-03, 2.59778123025053437326e-06, 4.12726714325575502491e-04 ],
[ "s_gflux", "ν∇u[5]", -4.14814071152546075955e-03, 1.11726677539041160600e-03, -7.34353368996733004402e-06, 2.38907876999768631733e-04 ],
[ "s_gflux", "ν∇u[6]", -7.41889880459672764628e-06, 2.22738260107801488963e-05, 2.78343092988202756278e-08, 1.42972197860415487431e-06 ],
[ "s_gflux", "κ∇θ[1]", -7.25681212396385411870e-05, 7.16011471902109551376e-05, -3.07008530876072845326e-06, 9.89983434920985542473e-06 ],
[ "s_gflux", "κ∇θ[2]", -5.79706743991101401507e-05, 3.10853728745289804858e-03, 9.81905426072233198168e-04, 8.31329782146290126484e-04 ],
[ "s_gflux", "κ∇θ[3]", -3.24307611623586362264e-06, 1.48205642064209931587e-06, -5.05122832861326802479e-07, 3.05070684012206338078e-07 ],
]
parr = [
[ "Q", "u[1]", 12, 12, 12, 12 ],
[ "Q", "u[2]", 12, 12, 12, 12 ],
[ "Q", :η, 12, 12, 12, 12 ],
[ "Q", :θ, 12, 12, 12, 12 ],
[ "s_aux", :y, 12, 12, 12, 12 ],
[ "s_aux", :w, 12, 12, 12, 12 ],
[ "s_aux", :pkin, 12, 12, 12, 12 ],
[ "s_aux", :wz0, 12, 12, 12, 12 ],
[ "s_gflux", "ν∇u[1]", 12, 12, 12, 12 ],
[ "s_gflux", "ν∇u[2]", 12, 12, 12, 12 ],
[ "s_gflux", "ν∇u[3]", 12, 12, 12, 12 ],
[ "s_gflux", "ν∇u[4]", 12, 12, 12, 12 ],
[ "s_gflux", "ν∇u[5]", 12, 12, 12, 12 ],
[ "s_gflux", "ν∇u[6]", 12, 12, 12, 12 ],
[ "s_gflux", "κ∇θ[1]", 12, 12, 12, 12 ],
[ "s_gflux", "κ∇θ[2]", 12, 12, 12, 12 ],
[ "s_gflux", "κ∇θ[3]", 12, 12, 12, 12 ],
]
# END SCPRINT
# SC ====================================================================================
append!(refVals ,[ varr ] )
append!(refPrecs,[ parr ] )
#! format: on
|
{"hexsha": "d0ab567ce460722c90a3819637f6d60efda7f2d1", "size": 16700, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/Ocean/HydrostaticBoussinesq/test_ocean_gyre_refvals.jl", "max_stars_repo_name": "leios/CLIMA", "max_stars_repo_head_hexsha": "44c45eb762b8dc4c5af091079f2d65c024cb8d27", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/Ocean/HydrostaticBoussinesq/test_ocean_gyre_refvals.jl", "max_issues_repo_name": "leios/CLIMA", "max_issues_repo_head_hexsha": "44c45eb762b8dc4c5af091079f2d65c024cb8d27", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/Ocean/HydrostaticBoussinesq/test_ocean_gyre_refvals.jl", "max_forks_repo_name": "leios/CLIMA", "max_forks_repo_head_hexsha": "44c45eb762b8dc4c5af091079f2d65c024cb8d27", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-02-18T14:26:29.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-18T14:26:29.000Z", "avg_line_length": 67.6113360324, "max_line_length": 163, "alphanum_fraction": 0.5867065868, "num_tokens": 7595}
|
#!/usr/bin/env python
# Software License Agreement (MIT License)
#
# Copyright (c) 2020, tri_star
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Author: Meiying Qin, Jake Brawer
import os
import re
import shutil
import glob
import json
import numpy as np
import rospy
from tri_star import transformation_util
DIRNAME_ARCHIVE = "archive_{}"
"""
manage files/dirs
"""
def get_filenames_in_dir(dir_path, ext=None):
if ext:
dir_path += "/*." + ext
else:
if not dir_path.endswith("/"):
dir_path += "/*"
return [os.path.basename(i) for i in glob.glob(dir_path)]
def get_dirnames_in_dir(dir_path):
all_content = get_all_in_dir(dir_path)
abs_dir = [i for i in all_content if os.path.isdir(i)]
return [os.path.basename(i) for i in abs_dir]
def get_all_in_dir(dir_path):
return [os.path.abspath(i) for i in glob.glob(dir_path + "/*")]
def archive(base_data_dir):
# if nothing to archive, then do not do anything
to_archive = False
for name in get_all_in_dir(base_data_dir):
        if re.search(get_re_from_file_name(DIRNAME_ARCHIVE), os.path.basename(name)) is None: # find a file or folder that is not an existing archive
to_archive = True
break
if not to_archive:
return
archive_index = get_index_in_dir(base_data_dir, get_re_from_file_name(DIRNAME_ARCHIVE))
archive_dir_name = DIRNAME_ARCHIVE.format(archive_index)
archive_dir_path = os.path.join(base_data_dir, archive_dir_name)
create_dir(archive_dir_path)
contents = get_all_in_dir(base_data_dir)
for content in contents:
if re.search(get_re_from_file_name(DIRNAME_ARCHIVE), os.path.basename(content)) is None: # not one of the archives
shutil.move(content, archive_dir_path)
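# After archive(base_data_dir) runs, the directory holds only archive_1/,
# archive_2/, ...; everything else has been moved into the newest archive_<N>/.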
# the default is either a directory with a number as the name, like 1,2,3
# or a file with the name 1.txt, 2.subl, etc.
def get_index_in_dir(dir_path, file_name_template=r"^[0-9]+$|^[0-9]+\.\S+$"):  # raw string avoids invalid-escape warnings
list_file_name = get_filenames_in_dir(dir_path)
used_index = []
for file_name in list_file_name:
if not re.search(file_name_template, file_name) is None: # find the files that matches with it
# get the number in the string
matched = re.findall("[0-9]+", file_name)[0]
number = str_to_int(matched)
if number is not None:
used_index.append(number)
if len(used_index) == 0:
return 1
return max(used_index) + 1
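# e.g. a directory containing "1.txt" and "3.txt" yields 4; a directory with
# no matching names yields 1.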
def get_re_from_file_name(filename):
return "^" + filename.replace("{}", "[0-9]+") + "$" # add v and $ to match the entire string
# create all the directories on the path
def create_dir(dir_path):
#https://stackoverflow.com/questions/273192/how-can-i-safely-create-a-nested-directory
try:
os.makedirs(dir_path)
except OSError:
if not os.path.isdir(dir_path):
raise Exception("{} is not a directory path".format(dir_path))
"""
i/o related
"""
# https://stackoverflow.com/questions/26646362/numpy-array-is-not-json-serializable
class ToolUseEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
        elif isinstance(obj, np.bool_):  # np.bool is a removed alias in modern NumPy
return bool(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, transformation_util.AngleGroup):
return obj.to_json()
return json.JSONEncoder.default(self, obj)
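# Usage sketch (illustrative): json.dumps({"pose": np.eye(4)}, cls=ToolUseEncoder)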
# variable type constants (TYPE_LIST is defined with the collection types
# below; the rest are referenced by variable_to_string_no_name and
# convert_variable, so they must stay defined)
TYPE_STR = "string"
TYPE_MATRIX = "matrix"
TYPE_ARRAY = "array"
TYPE_NESTED_LIST = "nested_list"
TYPE_INT = "int"
TYPE_FLOAT = "float"
TYPE_LIST = "list"
TYPE_NUMPY = "numpy"
TYPE_ANGLEGROUP = "anglegroup"
def str_to_int(string):
string = string.strip()
number = None
try:
if string == "None":
number = None
else:
number = int(string)
except ValueError as e:
pass
return number
def str_to_float(string):
string = string.strip()
number = None
try:
if string == "None":
number = None
else:
number = float(string)
except ValueError as e:
pass
return number
def str_to_npmatrix(string): # e.g., "[[1 2] [3 5]]"
string = string.strip()
if string == "None":
return None
value = None
current_list = None
lists = []
number_str = ""
for char in string:
if char == "[":
if value is None:
value = []
current_list = value
lists.append(current_list)
else:
new_list = []
current_list.append(new_list)
current_list = new_list
lists.append(current_list)
elif char == " ":
if number_str == "":
pass
else:
number = str_to_float(number_str)
current_list.append(number)
number_str = ""
elif char == "]":
if number_str != "":
number = str_to_float(number_str)
current_list.append(number)
number_str = ""
lists.pop()
if len(lists) != 0:
current_list = lists[-1]
else:
number_str += char
return np.array(value)
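# e.g. str_to_npmatrix("[[1 2] [3 5]]") -> array([[1., 2.], [3., 5.]])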
def str_to_nparray(string): # e.g., specifically numpy array with shape (n,), like [1 2 3]
value = string.strip()
value = value.replace("[", "")
value = value.replace("]", "")
value = value.split()
value = [str_to_float(i) for i in value]
return np.array(value)
def nparray_to_str(matrix):
value = str(matrix.tolist()).replace(",", " ")
value = value.replace("\n", "")
return value
def variable_to_string_no_name(variable, variable_collection_type=None):
content = ""
if variable_collection_type is None:
if isinstance(variable, np.ndarray):
content += nparray_to_str(variable) + "\n"
else:
variable = str(variable).replace("\n", "")
content += str(variable) + "\n"
elif variable_collection_type == TYPE_LIST:
content += str(len(variable)) + "\n"
for element in variable:
if isinstance(element, np.ndarray):
content += nparray_to_str(element) + "\n"
else:
element = str(element).replace("\n", "")
content += str(element) + "\n"
elif variable_collection_type == TYPE_NESTED_LIST: # 2 layers
content += str(len(variable)) + "\n"
for element in variable:
content += variable_to_string_no_name(element, TYPE_LIST)
return content
# for saving purposes
def variable_to_string(name, variable, variable_collection_type=None):
content = name + "\n"
content += variable_to_string_no_name(variable, variable_collection_type)
return content
# variable is a str
def convert_variable(variable, variable_type):
variable = variable.strip()
if variable == "None":
variable = None
elif variable_type == TYPE_STR:
variable = variable
elif variable_type == TYPE_INT:
variable = str_to_int(variable)
elif variable_type == TYPE_FLOAT:
variable = str_to_float(variable)
elif variable_type == TYPE_MATRIX:
variable = str_to_npmatrix(variable)
elif variable_type == TYPE_ARRAY:
variable = str_to_nparray(variable)
return variable
def read_variable(file_path, name, variable_type=None, variable_collection_type=None):
json_result = {}
with open(file_path, "r") as read_file:
json_result = json.load(read_file)
variable = json_result[name]
if variable_collection_type == TYPE_LIST:
if variable_type == TYPE_NUMPY:
for i in range(len(variable)):
variable[i] = np.array(variable[i])
else:
if variable_type == TYPE_NUMPY:
variable = np.array(variable)
elif variable_type == TYPE_ANGLEGROUP:
variable = transformation_util.AngleGroup.from_json(variable)
return variable
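# --- Usage sketch (illustrative, not part of the original module) ---
# Round-trips a small matrix through the string format used above.
if __name__ == "__main__":
    _m = np.array([[1.0, 2.0], [3.0, 5.0]])
    _s = nparray_to_str(_m)  # "[[1.0  2.0]  [3.0  5.0]]"
    assert np.allclose(_m, str_to_npmatrix(_s))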
|
{"hexsha": "0d10c0a6b260728a85875fc9f67a82948de0b43f", "size": 9371, "ext": "py", "lang": "Python", "max_stars_repo_path": "tri_star/include/tri_star/file_util.py", "max_stars_repo_name": "ScazLab/Frontiers_Robot_Tool_Use", "max_stars_repo_head_hexsha": "ebace49e88562c18b3b967ec5360a4cec4f8fe56", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tri_star/include/tri_star/file_util.py", "max_issues_repo_name": "ScazLab/Frontiers_Robot_Tool_Use", "max_issues_repo_head_hexsha": "ebace49e88562c18b3b967ec5360a4cec4f8fe56", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tri_star/include/tri_star/file_util.py", "max_forks_repo_name": "ScazLab/Frontiers_Robot_Tool_Use", "max_forks_repo_head_hexsha": "ebace49e88562c18b3b967ec5360a4cec4f8fe56", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.8741496599, "max_line_length": 122, "alphanum_fraction": 0.6312026465, "include": true, "reason": "import numpy", "num_tokens": 2100}
|
#ifndef MITAMA_PANIC_HPP
#define MITAMA_PANIC_HPP
#include <stdexcept>
#include <boost/format.hpp>
#include <variant>
#include <utility>
#include <string>
#include <string_view>
namespace mitama {
class macro_use_tag_t{};
inline static constexpr macro_use_tag_t macro_use{};
class runtime_panic : public std::runtime_error
{
public:
template <class... Args>
runtime_panic(boost::format fmt, Args &&... args) noexcept
: std::runtime_error((fmt % ... % args).str()) {}
template <class... Args>
explicit runtime_panic(macro_use_tag_t, const char *func, int line, std::string fmt, Args &&... args) noexcept
: std::runtime_error(
std::string{"runtime panicked at '"} + (boost::format(fmt) % ... % [](auto&& arg [[maybe_unused]]){
using namespace std::string_view_literals;
if constexpr (std::is_same_v<std::decay_t<decltype(arg)>, std::monostate>) {
return "()"sv;
}
else {
return std::forward<decltype(arg)>(arg);
}
}(args)).str() +
(boost::format("', %1%:%2%") % std::string{func} % line).str()) {}
};
}
#define PANIC(...) \
throw ::mitama::runtime_panic { ::mitama::macro_use, __FILE__, __LINE__, __VA_ARGS__ }
#endif
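// --- Usage sketch (illustrative, not part of the original header) ---
// PANIC forwards its arguments through boost::format and throws
// mitama::runtime_panic with "file:line" context appended, e.g.:
//
//   try { PANIC("unexpected value: %1%", 42); }
//   catch (const mitama::runtime_panic& e) {
//     // e.what() reads roughly:
//     //   runtime panicked at 'unexpected value: 42', main.cpp:3
//   }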
|
{"hexsha": "bfd7bb6126805349b97d793ffde72f1ab474a012", "size": 1284, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/mitama/panic.hpp", "max_stars_repo_name": "agate-pris/mitama-cpp-result", "max_stars_repo_head_hexsha": "9d94f3c9b5722892496ee7c63833fe5f12392b89", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/mitama/panic.hpp", "max_issues_repo_name": "agate-pris/mitama-cpp-result", "max_issues_repo_head_hexsha": "9d94f3c9b5722892496ee7c63833fe5f12392b89", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/mitama/panic.hpp", "max_forks_repo_name": "agate-pris/mitama-cpp-result", "max_forks_repo_head_hexsha": "9d94f3c9b5722892496ee7c63833fe5f12392b89", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1818181818, "max_line_length": 112, "alphanum_fraction": 0.6113707165, "num_tokens": 319}
|
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 16 14:39:30 2020
@author: simran kaur
"""
#importing libraries
import numpy as np
import pandas as pd
import sys
import datawig
def missing(data):
    if data.shape[0] == 0:
        print("empty dataset")
        return data
col_null=data.columns[data.isnull().any()]
data_out=pd.DataFrame(0,index=np.arange(len(data)),columns=col_null)
pstatement=[]
for nul_col in col_null:
cnull=data[nul_col].isnull()
cwnull=data[nul_col].notnull()
imputer=datawig.SimpleImputer(data.columns[data.columns!=nul_col],nul_col,'imputer_model')
imputer.fit(data[cwnull])
final=imputer.predict(data[cnull])
data_out[nul_col]=final[nul_col+'_imputed']
pstatement.append("number of missing values replaced in "+ str(nul_col) + " is "+ str(final.shape[0]))
data = data.fillna(data_out)
print("\n\n\n")
for i in pstatement:
print("\n",i)
return data
def main():
    if len(sys.argv) != 2:
        print("Incorrect parameters. Input format: python <programName> <InputDataFile>")
        exit(1)
else:
data=pd.read_csv(sys.argv[1])
        missing(data).to_csv(sys.argv[1], index=False) # overwrite the input file; index=False avoids adding an extra index column
if __name__ == "__main__":
main()
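# --- Usage sketch (illustrative) ---
#   python missing_values.py dataset.csv
# imputes every column containing NaNs with datawig and overwrites dataset.csv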
|
{"hexsha": "45980d5debee01bd1d715b1b7da511c2a2840496", "size": 1258, "ext": "py", "lang": "Python", "max_stars_repo_path": "missing_values-101703547-simran_kaur/missing_values.py", "max_stars_repo_name": "simrankaur7575/missing_values-101703547-simran_kaur", "max_stars_repo_head_hexsha": "5d293a7ea8a6aa73e427f4008cf9dc6fa3d9a1df", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "missing_values-101703547-simran_kaur/missing_values.py", "max_issues_repo_name": "simrankaur7575/missing_values-101703547-simran_kaur", "max_issues_repo_head_hexsha": "5d293a7ea8a6aa73e427f4008cf9dc6fa3d9a1df", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "missing_values-101703547-simran_kaur/missing_values.py", "max_forks_repo_name": "simrankaur7575/missing_values-101703547-simran_kaur", "max_forks_repo_head_hexsha": "5d293a7ea8a6aa73e427f4008cf9dc6fa3d9a1df", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.7659574468, "max_line_length": 110, "alphanum_fraction": 0.6430842607, "include": true, "reason": "import numpy", "num_tokens": 336}
|
import numpy as np
import cv2
import os
from matplotlib import pyplot as plt
from tqdm import tqdm
from features_palmoil import DS_aux
class PalmOilDataset(DS_aux):
def __init__(self, args,
label_code={'No_OilPalm':0,
'Has_OilPalm':1}):
super().__init__(args,label_code)
self.stats_calc = {
'r_energy' : self.get_glcm_metrics,
'r_correlation': self.get_glcm_metrics,
'r_contrast' : self.get_glcm_metrics,
'r_homogeneity': self.get_glcm_metrics,
'g_energy' : self.get_glcm_metrics,
'h_correlation': self.get_glcm_metrics,
's_correlation': self.get_glcm_metrics,
's_contrast' : self.get_glcm_metrics
}
def norm_features(self, train_set, val_set=[]):
        for i in range(train_set.shape[-1]): # iterate over the features of the set being normalized
feat_mean = np.mean(train_set[...,i])
feat_std = np.std(train_set[...,i])
train_set[...,i] = train_set[...,i] - feat_mean
train_set[...,i] = train_set[...,i] / feat_std
if not len(val_set) == 0:
val_set[...,i] = val_set[...,i] - feat_mean
val_set[...,i] = val_set[...,i] / feat_std
if not len(val_set) == 0:
return train_set, val_set
else:
return train_set
def calculate_features(self, img):
features = np.zeros((8))
img_hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
glcm_dict = {
'r':self.get_glcm(img[...,0]),
'g':self.get_glcm(img[...,1]),
'h':self.get_glcm(img_hsv[...,0]),
's':self.get_glcm(img_hsv[...,1])
}
for idx, (stat, calc_func) in enumerate(self.stats_calc.items()):
channel = stat[0]
channel_glcm = glcm_dict[channel]
feat_val = calc_func(stat[2::],glcm=channel_glcm)
features[idx] = feat_val
return features
def calc_set_feats(self, img_files, init_set_feats):
"""
Read images, calculate features, store it in array, return array
"""
for idx, img_file in enumerate(tqdm(img_files)):
img_path = os.path.join(self.imgs_dir,img_file)
img = plt.imread(img_path)
img_feats = self.calculate_features(img)
init_set_feats[idx] = img_feats
return init_set_feats
def generate_features(self, fold, gen_full_data = False):
fold_info = self.folds[fold]
init_train_feats = np.zeros((len(fold_info['train']),8)) # 8 features were chosen
init_val_feats = np.zeros((len(fold_info['val']),8))
print(f"Calculating train set features for {fold}")
init_train_feats = self.calc_set_feats(fold_info['train'], init_train_feats)
print(f"Calculating validation set features for {fold}")
init_val_feats = self.calc_set_feats(fold_info['val'], init_val_feats)
self.train = init_train_feats
self.val = init_val_feats
self.train_labels = fold_info['train_labels']
self.val_labels = fold_info['val_labels']
if gen_full_data:
self.full_data = np.concatenate((self.train,self.val),axis=0)
self.full_data = self.norm_features(self.full_data)
self.full_data_labels = np.concatenate((self.train_labels,self.val_labels),axis=0)
self.train, self.val = self.norm_features(self.train, self.val)
def gen_test_set(self):
init_test_feats = np.zeros((len(self.imgs_test),8)) #labels: self.test_labels
init_test_feats = self.calc_set_feats(self.imgs_test, init_test_feats)
self.test = init_test_feats
self.test = self.norm_features(self.test)
def calc_clss_weights(self):
weight_dict = {}
total_num_samples = len(self.train_labels)
for clss_lbl, clss_val in self.label_code.items():
clss_num_samples = len(np.where(self.train_labels == clss_val)[0])
weight_dict[clss_val] = 1 - (clss_num_samples/total_num_samples)
return weight_dict
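# --- Usage sketch (illustrative; `args` is whatever namespace DS_aux expects,
# and "fold_0" is an assumed fold key) ---
# ds = PalmOilDataset(args)
# ds.generate_features("fold_0")   # fills ds.train / ds.val with normalized GLCM features
# weights = ds.calc_clss_weights() # per-class weights from the training label balance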
|
{"hexsha": "8237b225a5ce58f604be02e55aa81e7cea737276", "size": 4334, "ext": "py", "lang": "Python", "max_stars_repo_path": "palm_oil_ds.py", "max_stars_repo_name": "MartimChaves/glcm_sat_img", "max_stars_repo_head_hexsha": "d56ddb41890f0e63840487ca71f070d62e23b698", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-01-31T17:27:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-04T03:21:41.000Z", "max_issues_repo_path": "palm_oil_ds.py", "max_issues_repo_name": "MartimChaves/glcm_sat_img", "max_issues_repo_head_hexsha": "d56ddb41890f0e63840487ca71f070d62e23b698", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "palm_oil_ds.py", "max_forks_repo_name": "MartimChaves/glcm_sat_img", "max_forks_repo_head_hexsha": "d56ddb41890f0e63840487ca71f070d62e23b698", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.6964285714, "max_line_length": 94, "alphanum_fraction": 0.5816797416, "include": true, "reason": "import numpy", "num_tokens": 1001}
|
#include "clstm.h"
#include <assert.h>
#include <iostream>
#include <vector>
#include <memory>
#include <math.h>
#include <Eigen/Dense>
#include <string>
#include <sstream>
#include <fstream>
#include <iostream>
#include "multidim.h"
#include "pymulti.h"
#include "extras.h"
using std_string = std::string;
#define string std_string
using std::vector;
using std::shared_ptr;
using std::make_shared;
using std::unique_ptr;
using std::to_string;
using std::make_pair;
using std::cout;
using std::stoi;
using namespace Eigen;
using namespace ocropus;
using namespace pymulti;
struct Image2Image : public ITrainable {
mdarray<float> input, d_input, output, d_output;
int idepth = -1;
int odepth = -1;
Network net;
void setLearningRate(Float lr, Float momentum) {
net->setLearningRate(lr, momentum);
}
};
inline int clip(int x, int hi) {
if (x > hi) return hi;
if (x < 0) return 0;
return x;
}
void batches_of_image_0(Sequence &batches, mdarray<float> &image, int depth,
int border = 0) {
int d0 = image.dim(0), d1 = image.dim(1), d2 = image.dim(2);
assert(depth == image.dim(2));
int batch_depth = (2 * border + 1) * d2;
batches.resize(d0);
for (int i = 0; i < d0; i++) {
batches[i].resize(batch_depth, d1);
for (int j = 0; j < d1; j++) {
int l = 0;
for (int r = -border; r <= border; r++) {
for (int k = 0; k < d2; k++)
batches[i](l++, j) = image(clip(i + r, d0), j, k);
}
assert(l == batch_depth);
}
}
}
void image_of_batches_0(mdarray<float> &image, Sequence &batches, int depth,
int border = 0) {
int d0 = batches.size();
int d1 = COLS(batches[0]);
int d2 = depth;
assert((2 * border + 1) * depth == ROWS(batches[0]));
image.resize(d0, d1, d2);
for (int i = 0; i < d0; i++) {
for (int j = 0; j < d1; j++) {
int l = 0;
for (int r = -border; r <= border; r++) {
for (int k = 0; k < d2; k++)
if (i + r >= 0 && i + r < d0) image(i + r, j, k) = batches[i](l++, j);
}
assert(l == ROWS(batches[i]));
}
}
}
struct Vstrips : public Image2Image {
int border = 0;
void initialize() {
this->odepth = iattr("noutput");
this->idepth = iattr("ninput");
net.reset(make_LSTM1());
net->init(odepth, (2 * border + 1) * idepth);
}
void forward() {
assert(input.rank() == 3);
assert(input.dim(2) == idepth);
batches_of_image_0(net->inputs, input, idepth, border);
// print("inputs", net->inputs.size(), ROWS(net->inputs[0]),
// COLS(net->inputs[0]), "/", net->ninput());
net->forward();
image_of_batches_0(output, net->outputs, odepth);
}
void backward() {
assert(d_output.rank() == 3);
assert(output.dim(2) == odepth);
batches_of_image_0(net->d_outputs, d_output, odepth);
net->backward();
image_of_batches_0(d_input, net->d_inputs, idepth, border);
}
void update() { net->update(); }
};
int main1(int argc, char **argv) {
try {
mdarray<float> input, output;
Sequence seq;
read_png(input, argv[1]);
print("input", input.dim(0), input.dim(1), input.dim(2));
print("irange", input.min(), input.max());
batches_of_image_0(seq, input, 3);
print("seq", seq.size(), ROWS(seq[0]), COLS(seq[0]));
image_of_batches_0(output, seq, 3);
write_png(argv[2], input);
} catch (const char *msg) {
print("ERROR:", msg);
}
  return 0;
}
int main2(int argc, char **argv) {
try {
shared_ptr<Vstrips> net;
net.reset(new Vstrips());
net->border = 0;
net->init(3, 3);
read_png(net->input, argv[1]);
net->forward();
write_png(argv[2], net->input);
} catch (const char *msg) {
print("ERROR:", msg);
}
  return 0;
}
int main3(int argc, char **argv) {
double lrate = getrenv("lrate", 1e-3);
double momentum = getrenv("momentum", 0.9);
vector<string> files;
string line;
std::ifstream stream(argv[1]);
while (getline(stream, line)) files.push_back(line);
try {
shared_ptr<Vstrips> net;
net.reset(new Vstrips());
net->border = 0;
net->init(3, 3);
net->setLearningRate(lrate, momentum);
for (int trial = 0; trial < 1000000; trial++) {
mdarray<float> image, out;
string file = files[trial % files.size()];
print(trial, file);
read_png(image, file.c_str());
print("image", image.dim(0), image.dim(1), image.dim(2));
net->input = image;
print("input", net->input.dim(0), net->input.dim(1), net->input.dim(2));
print("irange", net->input.min(), net->input.max());
net->forward();
print("output", net->output.dim(0), net->output.dim(1),
net->output.dim(2));
print("orange", net->output.min(), net->output.max());
out = net->output;
out.clip(0.0, 1.0);
write_png("temp.png", out);
net->d_output = image;
net->d_output -= net->output;
net->backward();
net->update();
}
} catch (const char *msg) {
print("ERROR:", msg);
}
  return 0;
}
double sqr(double x) { return x * x; }
double norm(Sequence &seq) {
double err = 0.0;
int dim = ROWS(seq[0]), bs = COLS(seq[0]);
for (int t = 0; t < seq.size(); t++)
for (int i = 0; i < dim; i++)
for (int j = 0; j < bs; j++) err += sqr(seq[t](i, j));
return err;
}
void threshold(Sequence &seq, double threshold) {
int dim = ROWS(seq[0]);
int bs = COLS(seq[0]);
for (int t = 0; t < seq.size(); t++)
for (int i = 0; i < dim; i++)
for (int j = 0; j < bs; j++) seq[t](i, j) = (seq[t](i, j) > threshold);
}
int main4(int argc, char **argv) {
double lrate = getrenv("lrate", 1e-4);
double momentum = getrenv("momentum", 0.9);
try {
Network net;
net.reset(make_LSTM1());
net->init(2, 10, 2);
net->setLearningRate(lrate, momentum);
for (int trial = 0; trial < 1000000; trial++) {
Sequence seq;
seq.resize(20);
for (int t = 0; t < seq.size(); t++) seq[t] = Mat::Random(2, 1);
threshold(seq, 0.5);
set_inputs(net.get(), seq);
net->forward();
set_targets(net.get(), seq);
if (trial % 100 == 0) print(trial, norm(net->d_outputs));
net->backward();
net->update();
if (trial % 10000 == 0) {
Sequence &out = net->outputs;
for (int t = 0; t < seq.size(); t++)
print(t, ":", seq[t](0, 0), seq[t](1, 0), " / ", out[t](0, 0),
out[t](1, 0));
}
}
} catch (const char *msg) {
print("ERROR:", msg);
}
  return 0;
}
int main5(int argc, char **argv) {
double lrate = getrenv("lrate", 1e-3);
double momentum = getrenv("momentum", 0.9);
try {
Network lstm;
lstm.reset(make_LSTM1());
lstm->init(2, 2, 2);
lstm->setLearningRate(lrate, momentum);
shared_ptr<Vstrips> net;
net.reset(new Vstrips());
net->border = 0;
net->net = lstm;
net->idepth = 2;
net->odepth = 2;
for (int trial = 0; trial < 1000000; trial++) {
mdarray<float> input, output;
Sequence seq;
seq.resize(20);
for (int t = 0; t < seq.size(); t++)
seq[t] = Mat::Random(2, 1).cwiseAbs();
threshold(seq, 0.5);
input.resize((int)seq.size(), (int)COLS(seq[0]), (int)ROWS(seq[0]));
for (int t = 0; t < input.dim(0); t++)
for (int i = 0; i < input.dim(1); i++)
for (int j = 0; j < input.dim(2); j++) input(t, i, j) = seq[t](j, i);
net->input = input;
net->forward();
output = net->output;
net->d_output = input;
net->d_output -= output;
if (trial % 10000 == 0) {
print("in", input.min(), input.max(), "out", output.min(), output.max(),
"err", net->d_output.norm());
for (int t = 0; t < input.dim(0); t++)
print(t, ":", input(t, 0, 0), input(t, 0, 1), " / ", output(t, 0, 0),
output(t, 0, 1));
}
net->backward();
net->update();
}
} catch (const char *msg) {
print("ERROR:", msg);
}
  return 0;
}
#ifndef MAIN
#define MAIN main4
#endif
int main(int argc, char **argv) { return MAIN(argc, argv); }
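// --- Build note (illustrative) ---
// MAIN picks the entry point at compile time; with a g++-style toolchain the
// image-training loop can be selected via a preprocessor define, e.g.:
//   g++ -DMAIN=main3 clstmimg.cc ...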
|
{"hexsha": "bc635c48a61aa2df00839ccd12f15868a4ad6f3d", "size": 8015, "ext": "cc", "lang": "C++", "max_stars_repo_path": "OLD/clstmimg.cc", "max_stars_repo_name": "gilteunchoi/clstm", "max_stars_repo_head_hexsha": "e87843c9f32345d899768d801a92871c210a8054", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 848.0, "max_stars_repo_stars_event_min_datetime": "2015-01-16T13:16:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T14:07:21.000Z", "max_issues_repo_path": "OLD/clstmimg.cc", "max_issues_repo_name": "gilteunchoi/clstm", "max_issues_repo_head_hexsha": "e87843c9f32345d899768d801a92871c210a8054", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 135.0, "max_issues_repo_issues_event_min_datetime": "2015-01-21T10:17:13.000Z", "max_issues_repo_issues_event_max_datetime": "2020-01-04T18:07:24.000Z", "max_forks_repo_path": "OLD/clstmimg.cc", "max_forks_repo_name": "gilteunchoi/clstm", "max_forks_repo_head_hexsha": "e87843c9f32345d899768d801a92871c210a8054", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 250.0, "max_forks_repo_forks_event_min_datetime": "2015-01-15T02:57:02.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-01T13:25:21.000Z", "avg_line_length": 28.7275985663, "max_line_length": 80, "alphanum_fraction": 0.5555832813, "num_tokens": 2565}
|
import numpy as np
import pandas as pd
import glob
from functools import reduce
def load_df(path : str):
    ext = path.split(".")[-1]
    if ext == "csv":
        df = pd.read_csv(path)
    elif ext == "xlsx":
        df = pd.read_excel(path, engine="openpyxl")
    else:
        # report unsupported extensions explicitly instead of masking
        # genuine read errors behind a bare except
        print("File format not supported!")
        return None
    if "Unnamed: 0" in df:
        df.drop("Unnamed: 0", axis=1, inplace=True)
    return df
def split_df(df, has_ts=False, ts_start=0):
    # value columns sit at the opposite column parity from the timestamps (0 <-> 1)
    sequence_start = abs(1 - ts_start)
if has_ts:
xs = [df[col].dropna().to_numpy() for col in df.copy().iloc[:,ts_start::2]]
ys = [df[col].dropna().to_numpy() for col in df.copy().iloc[:,sequence_start::2]]
return xs, ys
else:
xs = [np.arange(len(df[col].dropna())) for col in df]
ys = [df[col].dropna().to_numpy() for col in df.copy()]
return xs, ys
def check_pairing(sequence):
    # True only if every (timestamp, value) column pair has matching lengths
    is_pair_all = True
    for ts, val in zip(sequence[0], sequence[1]):
        is_pair_all = is_pair_all and (len(ts) == len(val))
    return is_pair_all
def interpolate(sequence, cols, align_max:bool=True, ts_name:str="Time[s]", align_length:int=1000):
max_xs = reduce(lambda a, b : a if len(a) > len(b) else b, sequence[0])
if align_max:
align_length = len(max_xs)
    max_val = max(np.max(xs) for xs in sequence[0])
    min_val = min(np.min(xs) for xs in sequence[0])
target_xs = np.linspace(min_val, max_val, num=align_length)
print(f"\ntarget timestamp : length {align_length}/ min {min_val}/ max {max_val}")
result = {key:np.interp(target_xs, sequence[0][i], sequence[1][i]) for i, key in enumerate(cols)}
result[ts_name] = target_xs
res_df = pd.DataFrame(data=result)
return res_df
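# --- Usage sketch (illustrative; the file name and column names are assumptions) ---
# df = load_df("measurements.xlsx")              # alternating time/value columns
# xs, ys = split_df(df, has_ts=True, ts_start=0)
# aligned = interpolate((xs, ys), cols=["ch0", "ch1"])
# aligned.to_csv("aligned.csv", index=False)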
|
{"hexsha": "e73cfd9ff45ea1b53cc5c3dfee1f19c207ffc746", "size": 1889, "ext": "py", "lang": "Python", "max_stars_repo_path": "util.py", "max_stars_repo_name": "bigbreadguy/Data_Sequence_Intepolator", "max_stars_repo_head_hexsha": "8381cd354d7f5b5424451672c9428971856d1579", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-13T09:14:07.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-13T09:14:07.000Z", "max_issues_repo_path": "util.py", "max_issues_repo_name": "bigbreadguy/Data_Sequence_Interpolator", "max_issues_repo_head_hexsha": "8381cd354d7f5b5424451672c9428971856d1579", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "util.py", "max_forks_repo_name": "bigbreadguy/Data_Sequence_Interpolator", "max_forks_repo_head_hexsha": "8381cd354d7f5b5424451672c9428971856d1579", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.78, "max_line_length": 101, "alphanum_fraction": 0.6071995765, "include": true, "reason": "import numpy", "num_tokens": 530}
|
import numpy as np
from zero_play.game_state import GameState
from zero_play.playout import Playout
from zero_play.tictactoe.state import TicTacToeState
class TakeOneTwiceGame(GameState):
""" Silly game for testing multiple moves in a turn.
The game starts with a numerical value, and each player makes two moves in
each turn, where a move is subtracting one or two from the value. Win if you
bring the value to zero.
"""
game_name = 'Take One Twice'
def __init__(self, value: int, move_count: int = 0):
super().__init__()
self.value = value
self.move_count = move_count
def __repr__(self):
return f'TakeOneTwiceGame({self.value}, {self.move_count})'
def __eq__(self, other) -> bool:
if not isinstance(other, TakeOneTwiceGame):
return False
return (self.value, self.move_count % 4) == (other.value,
other.move_count % 4)
def get_valid_moves(self) -> np.ndarray:
""" Possible moves are subtracting 1 and subtracting 2. """
return np.array([self.value >= 1, self.value >= 2], dtype=bool)
def display(self, show_coordinates: bool = False) -> str:
return f'{self.move_count}: {self.value}'
def display_move(self, move: int) -> str:
return str(move+1)
def get_move_count(self) -> int:
return self.move_count
def get_spaces(self) -> np.ndarray:
        return np.array([self.value, self.move_count+1]) # np.ndarray(shape) would allocate an uninitialized array
def parse_move(self, text: str) -> int:
return int(text) - 1
def make_move(self, move: int) -> 'GameState':
return TakeOneTwiceGame(self.value-move-1, self.move_count+1)
def calculate_player(self, move_count: int) -> int:
if move_count % 4 < 2:
return self.X_PLAYER
return self.O_PLAYER
def is_win(self, player: int) -> bool:
if self.value > 0:
return False
previous_move = self.move_count - 1
previous_player = self.calculate_player(previous_move)
return player == previous_player
def get_active_player(self) -> int:
return self.calculate_player(self.move_count)
def test_simulate_finished_game():
start_board = TicTacToeState("""\
XXX
OO.
...
""")
expected_value = -1
playout = Playout()
value = playout.simulate(start_board)
assert value == expected_value
def test_simulate_finished_game_for_o_player():
start_board = TicTacToeState("""\
XX.
OOO
.X.
""")
expected_value = -1
playout = Playout()
value = playout.simulate(start_board)
assert value == expected_value
def test_simulate_wins():
np.random.seed(0)
start_board = TicTacToeState("""\
XOX
XO.
O..
""")
iteration_count = 100
expected_value_total = -iteration_count / 3
expected_low = expected_value_total * 1.1
expected_high = expected_value_total * 0.9
playout = Playout()
value_total = 0
for _ in range(iteration_count):
value = playout.simulate(start_board)
value_total += value
assert expected_low < value_total < expected_high
def test_simulate_wins_and_losses():
np.random.seed(0)
start_board = TicTacToeState("""\
XOX
XO.
..O
""")
iteration_count = 200
expected_value_total = iteration_count / 3
expected_low = expected_value_total * 0.9
expected_high = expected_value_total * 1.1
playout = Playout()
value_total = 0
for _ in range(iteration_count):
value = playout.simulate(start_board)
value_total += value
assert expected_low < value_total < expected_high
def test_two_moves_per_turn():
start_board = TakeOneTwiceGame(2)
playout = Playout()
iteration_count = 10
expected_value_total = 10
value_total = 0
for _ in range(iteration_count):
value = playout.simulate(start_board)
value_total += value
assert value_total == expected_value_total
def test_long_simulation():
start_state = TakeOneTwiceGame(1500)
playout = Playout()
value = playout.simulate(start_state)
assert value == 1
|
{"hexsha": "b8815e1fcaa8ae70039fd2945bc0d601ff43f710", "size": 4125, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_playout.py", "max_stars_repo_name": "donkirkby/zero-play", "max_stars_repo_head_hexsha": "15e3afa950037cfd1f373ee4943cd8b42d4c82c9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-04-30T15:44:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-07T18:37:21.000Z", "max_issues_repo_path": "tests/test_playout.py", "max_issues_repo_name": "donkirkby/zero-play", "max_issues_repo_head_hexsha": "15e3afa950037cfd1f373ee4943cd8b42d4c82c9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 84, "max_issues_repo_issues_event_min_datetime": "2019-05-07T04:37:10.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-04T18:17:57.000Z", "max_forks_repo_path": "tests/test_playout.py", "max_forks_repo_name": "donkirkby/zero-play", "max_forks_repo_head_hexsha": "15e3afa950037cfd1f373ee4943cd8b42d4c82c9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-04-07T18:37:25.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-07T18:37:25.000Z", "avg_line_length": 25.78125, "max_line_length": 80, "alphanum_fraction": 0.6593939394, "include": true, "reason": "import numpy", "num_tokens": 1026}
|
import csv
import os
import numpy
# returns: dict keyed by unique id
# value: dict mapping column name -> value
# example: {2066053: {'affiliation': 'KAIST', 'name': 'myname'}}
def load_single_file(input_file, limit_keys=None):
with open(input_file, 'r', encoding='utf-8') as read_file:
reader = csv.reader(read_file)
column = reader.__next__()
column = [x.strip().lower() for x in column]
id_table = dict()
for line in reader:
            if len(line) == 0:
continue
val = dict()
key = None
prep_key = []
continue_id = True
for (col, item) in zip(column, line):
if continue_id and ('id' in col) and not ('ids' in col):
if item.isdigit():
prep_key.append(int(item))
else:
prep_key.append(str(item))
continue
else:
continue_id = False
val[col] = item
if(len(prep_key) == 1):
key = prep_key[0]
else:
key = tuple(prep_key)
if limit_keys is not None:
                if key not in limit_keys:
continue
id_table[key] = val
return id_table
def load_all(directory = '.', file_list = None):
return_data = dict()
    if file_list is None:
file_list = ['Author', 'Conference', 'Journal', 'Paper', 'PaperAuthor', 'Test', 'Train', 'Valid', 'ValidSolution']
for name in file_list:
file_name = os.path.join(directory, '{}.csv'.format(name))
return_data[name] = load_single_file(file_name)
return return_data
def load_title_lda(total_data):
lda_title_keyword = load_single_file('keyword_table.csv')
word_topic_dict = dict()
with open('lda_output.txt', 'r', encoding='utf-8') as read_file:
for line in read_file.readlines():
splitted = line.split()
word = splitted[0]
topics = splitted[1:]
topic_sum = float(sum([int(x) for x in topics]))
value_list = [ (float(x)/topic_sum) for x in topics]
value_vector = numpy.array(value_list)
word_topic_dict[int(word)] = value_vector
total_data['title_keyword'] = lda_title_keyword
total_data['title_keyword_topic'] = word_topic_dict
|
{"hexsha": "ba3e13b7ce24c7a04b344f768b97e73a431ca4eb", "size": 1976, "ext": "py", "lang": "Python", "max_stars_repo_path": "loader.py", "max_stars_repo_name": "leeopop/2015-CS570-Project", "max_stars_repo_head_hexsha": "12cb0dd3e20d8a8861290a095ad64abd6f34d6f9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "loader.py", "max_issues_repo_name": "leeopop/2015-CS570-Project", "max_issues_repo_head_hexsha": "12cb0dd3e20d8a8861290a095ad64abd6f34d6f9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "loader.py", "max_forks_repo_name": "leeopop/2015-CS570-Project", "max_forks_repo_head_hexsha": "12cb0dd3e20d8a8861290a095ad64abd6f34d6f9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.0588235294, "max_line_length": 116, "alphanum_fraction": 0.6705465587, "include": true, "reason": "import numpy", "num_tokens": 546}
|
from torch.utils.data import Dataset
from external.vqa.vqa import VQA
import re
import os
# import skimage.io as io
from PIL import Image
import numpy as np
import collections
import pickle
import torch
def _get_majority_ans(answers):
answers = list(map(lambda x: x['answer'], answers))
counter = collections.Counter(answers)
majority = counter.most_common()[0][0]
return majority
def _build_question_dictionary(vqa, min_thre=0):
"""
:param vqa: VQA instance
:param min_thre: only words that occur more than this number of times will be put in vocab
:return: word-index dictionary
"""
counter = collections.defaultdict(int)
for i, q in vqa.qqa.items():
words = re.findall(r"[\w']+", q['question'])
for word in words:
counter[word] += 1
question_dict = {}
indx = 0
for word, num in counter.items():
if num > min_thre:
question_dict[word] = indx
indx += 1
return question_dict
def _build_answer_dictionary(vqa, min_thre=0):
"""
:param vqa: VQA instance
:param min_thre: minimal times for an answer appearing in the dataset
:return: answer sequence - index dictionary
"""
counter = collections.defaultdict(int)
for ques_idx in vqa.getQuesIds():
answers = vqa.qa[ques_idx]['answers']
answer = _get_majority_ans(answers)
counter[answer] += 1
ans_dict = {}
indx = 0
for ans, num in counter.items():
if num > min_thre:
ans_dict[ans] = indx
indx += 1
return ans_dict
def _encode_question(sentence, dictionary, max_question_length=26):
"""
:param sentence: question sentence
:param dictionary: word - index dictionary
    :param max_question_length: max length of a question, in number of words; questions longer than this get clipped
:return: M x N one-hot torch tensor (M is the max number of words; N is the number of vocabularies)
"""
words = re.findall(r"[\w']+", sentence)
encode = torch.zeros((max_question_length, len(dictionary))).type(torch.FloatTensor)
for i, word in enumerate(words):
if i >= max_question_length:
break
if word in dictionary.keys():
encode[i, dictionary[word]] = 1
return encode
def _encode_answer(sentence, dictionary):
"""
:param sentence: answer sentence
:param: answer - index dictionary
:return: indices
"""
# encode = torch.zeros((len(dictionary) + 1)).type(torch.LongTensor)
idx = len(dictionary)
if sentence in dictionary.keys():
idx = dictionary[sentence]
return idx
class VqaDataset(Dataset):
"""
Load the VQA dataset using the VQA python API. We provide the necessary subset in the External folder, but you may
want to reference the full repo (https://github.com/GT-Vision-Lab/VQA) for usage examples.
"""
def __init__(self, image_dir, question_json_file_path, annotation_json_file_path, image_filename_pattern,
is_training=True, transform=None):
"""
Args:
image_dir (string): Path to the directory with COCO images
question_json_file_path (string): Path to the json file containing the question data
annotation_json_file_path (string): Path to the json file containing the annotations mapping images, questions, and
answers together
image_filename_pattern (string): The pattern the filenames of images in this dataset use (eg "COCO_train2014_{}.jpg")
"""
self.vqa = VQA(annotation_json_file_path, question_json_file_path)
self.ques_idx_list = self.vqa.getQuesIds()
self.image_dir = image_dir
self.image_filename_pattern = image_filename_pattern
if os.path.exists('ques_dictionary.pkl'):
with open('ques_dictionary.pkl', 'rb') as f:
self.dictionary = pickle.load(f)
else:
if is_training:
self.dictionary = _build_question_dictionary(self.vqa)
with open('ques_dictionary.pkl', 'wb') as f:
pickle.dump(self.dictionary, f)
else:
raise "No dictionary built from training dataset!"
if os.path.exists('ans_dictionary.pkl'):
with open('ans_dictionary.pkl', 'rb') as f:
self.answers = pickle.load(f)
else:
if is_training:
self.answers = _build_answer_dictionary(self.vqa)
with open('ans_dictionary.pkl', 'wb') as f:
pickle.dump(self.answers, f)
else:
raise "No answer list built from training dataset!"
# print(self.dictionary)
# print(self.answers)
self.image_transform = transform
def __len__(self):
return len(self.ques_idx_list)
def __getitem__(self, idx):
ques_idx = self.ques_idx_list[idx]
ann = self.vqa.loadQA(ques_idx)[0]
image_id = ann['image_id']
image_name = self.image_filename_pattern.format(str(image_id).zfill(12))
image_path = os.path.join(self.image_dir, image_name)
if os.path.splitext(image_path)[1] == '.npy':
image = np.load(image_path).T
else:
image = Image.open(image_path).convert('RGB')
image = self.image_transform(image)
question = self.vqa.qqa[ques_idx]['question']
answers = ann['answers']
best_answer = _get_majority_ans(answers)
return {
'image': image,
'image_path': image_name,
'question': question,
'answer': best_answer,
'question_encoding': _encode_question(question, self.dictionary),
'answer_encoding': _encode_answer(best_answer, self.answers),
}
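# --- Usage sketch (illustrative; the paths, file names, and transform are assumptions) ---
# from torchvision import transforms
# ds = VqaDataset(
#     image_dir="data/train2014",
#     question_json_file_path="data/questions_train2014.json",
#     annotation_json_file_path="data/annotations_train2014.json",
#     image_filename_pattern="COCO_train2014_{}.jpg",
#     transform=transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()]),
# )
# sample = ds[0]   # dict with 'image', 'question_encoding', 'answer_encoding', ...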
|
{"hexsha": "4e85733979a2e5d5e62e38b00f2a8a6898c53cd2", "size": 5879, "ext": "py", "lang": "Python", "max_stars_repo_path": "student_code/vqa_dataset.py", "max_stars_repo_name": "Jmq14/VQA", "max_stars_repo_head_hexsha": "109a426eba8384c8e624f263ff6f52591dfc9153", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-04-26T13:13:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-25T04:02:37.000Z", "max_issues_repo_path": "student_code/vqa_dataset.py", "max_issues_repo_name": "Jmq14/VQA", "max_issues_repo_head_hexsha": "109a426eba8384c8e624f263ff6f52591dfc9153", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "student_code/vqa_dataset.py", "max_forks_repo_name": "Jmq14/VQA", "max_forks_repo_head_hexsha": "109a426eba8384c8e624f263ff6f52591dfc9153", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-10-08T13:03:09.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-08T13:03:09.000Z", "avg_line_length": 35.4156626506, "max_line_length": 129, "alphanum_fraction": 0.6285082497, "include": true, "reason": "import numpy", "num_tokens": 1318}
|
Describe RyanJoseph here.
OK, welllll...he lives in a Fountain Circle apartment with 3 other dudes and a Users/YawenChen girl.
|
{"hexsha": "971edf23b10d9a800d4e07a2309b522b7415baae", "size": 128, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/RyanJoseph.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/RyanJoseph.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/RyanJoseph.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.6666666667, "max_line_length": 101, "alphanum_fraction": 0.796875, "num_tokens": 33}
|
# This is a list of utility functions for dealing with image data in napari.
# todo: as these are not clEsperanto-specific, we may want to split them out
#       and ship them as a separate package
import numpy as np
from napari import Viewer
from typing_extensions import Annotated
import napari
from napari.layers import Image, Labels, Layer
from napari_tools_menu import register_function, register_action
LayerInput = Annotated[Layer, {"label": "Image"}]
@register_function(menu="Utilities > Convert to Numpy")
def convert_to_numpy(layer : LayerInput) -> Layer:
if isinstance(layer, Labels):
return Labels(np.asarray(layer.data), name="np " + layer.name)
else:
return Image(np.asarray(layer.data), name="np " + layer.name)
def convert_image_to_labels(layer : Image) -> Layer:
return Labels(np.asarray(layer.data).astype(int), name="Labels " + layer.name)
def convert_labels_to_image(layer : Labels) -> Layer:
return Image(np.asarray(layer.data), name="Image " + layer.name)
@register_function(menu="Utilities > Convert 3D stack to 2D timelapse")
def convert_to_2d_timelapse(layer : LayerInput) -> Layer:
if isinstance(layer, Labels):
return Labels(np.expand_dims(layer.data, axis=1), name="2d+t " + layer.name)
else:
return Image(np.expand_dims(layer.data, axis=1), name="2d+t " + layer.name)
def make_labels_editable(labels : Labels) -> Labels:
return Labels(np.asarray(labels.data), name="np " + labels.name)
def reset_brightness_contrast(image: Image):
import pyclesperanto_prototype as cle
data = image.data
if "dask" in str(type(data)): # ugh
data = np.asarray(data)
image.contrast_limits = (data.min(), data.max())
def auto_brightness_contrast(image: Image, lower_percentile : float = 1, upper_percentile : float = 99):
data = np.asarray(image.data)
lp = np.percentile(data, lower_percentile)
up = np.percentile(data, upper_percentile)
image.contrast_limits = (lp, up)
@register_action(menu="Visualization > Reset Brightness / contrast (to min / max) on all selected image layers")
def reset_brightness_contrast_selected_image_layers(viewer):
for layer in viewer.layers.selection:
if isinstance(layer, napari.layers.Image):
reset_brightness_contrast(layer)
@register_action(menu="Visualization > Auto Brightness / contrast (1% .. 99% percentile) on all selected image layers")
def auto_brightness_contrast_selected_image_layers(viewer):
for layer in viewer.layers.selection:
if isinstance(layer, napari.layers.Image):
auto_brightness_contrast(layer, lower_percentile=1, upper_percentile=99)
def auto_brightness_contrast_all_images(napari_viewer : Viewer, lower_percentile : float = 1, upper_percentile : float = 99):
for layer in napari_viewer.layers:
if isinstance(layer, Image):
data = np.asarray(layer.data)
lp = np.percentile(data, lower_percentile)
up = np.percentile(data, upper_percentile)
layer.contrast_limits = (lp, up)
napari_viewer.window.remove_dock_widget(auto_brightness_contrast_all_images.native)
def split_stack(image : Image, napari_viewer : Viewer, axis : int = 0):
data = np.asarray(image.data)
for i in range(data.shape[axis]):
napari_viewer.add_image(data.take(i, axis), name=image.name + "[" + str(i) + "]")
napari_viewer.window.remove_dock_widget(split_stack.native)
@register_function(menu="Utilities > Set voxel size")
def set_voxel_size(image : LayerInput, voxel_width : float = 1, voxel_height : float = 1, voxel_depth : float = 1):
image.scale = [voxel_depth, voxel_height, voxel_width]
@register_function(menu="Utilities > Set voxel size of all layers")
def set_voxel_size_of_all_layers(napari_viewer : Viewer, voxel_width : float = 1, voxel_height : float = 1, voxel_depth : float = 1):
for layer in napari_viewer.layers:
layer.scale = [voxel_depth, voxel_height, voxel_width]
napari_viewer.window.remove_dock_widget(set_voxel_size_of_all_layers.native)
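# --- Usage sketch (illustrative; assumes a running napari session) ---
# viewer = napari.Viewer()
# layer = viewer.add_image(np.random.random((16, 64, 64)))
# auto_brightness_contrast(layer)    # stretch contrast to the 1%..99% percentiles
# set_voxel_size(layer, voxel_width=0.5, voxel_height=0.5, voxel_depth=2.0)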
|
{"hexsha": "0297aea537f7173b341ae74e72865255802067b4", "size": 4062, "ext": "py", "lang": "Python", "max_stars_repo_path": "napari_pyclesperanto_assistant/_convert_to_numpy.py", "max_stars_repo_name": "kevinyamauchi/napari_pyclesperanto_assistant", "max_stars_repo_head_hexsha": "b068b1d89ee21c4448ab6a99c9fb2faabb127456", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "napari_pyclesperanto_assistant/_convert_to_numpy.py", "max_issues_repo_name": "kevinyamauchi/napari_pyclesperanto_assistant", "max_issues_repo_head_hexsha": "b068b1d89ee21c4448ab6a99c9fb2faabb127456", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "napari_pyclesperanto_assistant/_convert_to_numpy.py", "max_forks_repo_name": "kevinyamauchi/napari_pyclesperanto_assistant", "max_forks_repo_head_hexsha": "b068b1d89ee21c4448ab6a99c9fb2faabb127456", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.6774193548, "max_line_length": 133, "alphanum_fraction": 0.7277203348, "include": true, "reason": "import numpy", "num_tokens": 952}
|
\section{Dataset description}
\subsection{Email example (Malware detection)}
The second scenario evaluated in this article is related to the detection and analysis of malicious emails, and hence to the detection of compromised user accounts. We assume that we have a collection of emails, a spam classifier, and a display showing the graph of emails. We used ForceAtlas2, a continuous graph layout algorithm \cite{Jacomy2014}, to compute the locations of emails in the display (see Figure \ref{Fig:EnronData}).
We use the Enron email dataset, which has been made public by Cohen \cite{Klimt2004}, and for which Shetty and Adibi \cite{Shetty2004} performed a set of
cleansing tasks, mainly by removing a large number of duplicate emails. The final Enron dataset contains 252,759 emails sent from 17,527 users\footnote{http://www.ahschulz.de/enron-email-data/}.
\subsection{Reddit example (Detection of illegal transactions)}
The third scenario is related to the detection and monitoring of discussions about deals in illegal merchandise on SilkRoad via Reddit. Here, we again assume a monitoring agent, a classifier that labels discussions as related to illegal transactions or not, and a display showing all discussions (laid out with the ForceAtlas2 algorithm, as shown in Figure \ref{Fig:RedditData}).
The Reddit dataset we used was crawled between October 2013 and January 2014, and contains 98,777 discussions.
%We provide more detailed statistics about the datasets described in
%Table~\ref{table:featureStatistics}.
We assume that for each of the considered scenarios, we have a third-party application-specific tool that predicts a probability score $S(j)$ that each information element $j$ is relevant in the context of that scenario, i.e., an email being malicious, a tweet being related to a natural disaster, or a discussion being related to an illegal transaction. This probability scoring function is task-specific and assumed to be given.
For relevance prediction in our experiments, we use a noisy corruption of ground truth in order to synthetically evaluate a range of scenarios in terms of predicted relevance noise level.
We also explicitly vary the number of relevant information elements in our ground truth to assess how performance varies as a function of class imbalance. Hence, to set up a scenario with 20\% of relevant elements, we randomly select 20\% of the dataset and assign a label value $l=1$ to each element in that set, and $l=0$ to each element outside of it. Then, for each element $j$, we assign the probability $S(j)$ of that element being relevant for the scenario by introducing a random noise signal ratio as follows:
\begin{equation}
S(j) = \lambda \, l + (1-\lambda) \, r \; ,
\end{equation}
%we propose to simply randomly select an element $j$,
%To simulate the
%Finally, we propose to compute a global element score value $S(i)$ by introducing a noise signal ratio as follows:
where $r$ is a random noise value drawn uniformly from the range $[0,1]$, and $\lambda$ is a weighting parameter satisfying $0.5 \leq \lambda \leq 1$, which controls the signal-to-noise ratio in the final probability value. Note that for $\lambda=1$, $S(j)$ gives the ground truth probability value (perfect classifier), whereas for $\lambda=0.5$, $S(j)$ returns a completely random probability value (random classifier).
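For example, with $\lambda=0.9$, a relevant element ($l=1$) that draws noise $r=0.3$ receives $S(j)=0.9 \cdot 1 + 0.1 \cdot 0.3 = 0.93$, whereas an irrelevant element ($l=0$) with the same draw receives $S(j)=0.03$.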
For each dataset and given proportion of positive examples, the evaluation was carried out by averaging over 10 independent runs that each select random relevant documents according to the designated proportion. We report the average ground truth F1-Score (i.e., ground truth is known in the experimental setting).
\begin{figure}[H]
\begin{centering}
\includegraphics[width=8.5cm]{imgs/legend1}
\par\end{centering}
\begin{centering}
\subfigure[$\lambda=0.6$.]{\includegraphics[width=2.9cm]{imgs/Enron_results/f1_performance_posrate_2\lyxdot 0_0\lyxdot 6}}\subfigure[$\lambda=0.9$.]{\includegraphics[width=2.9cm]{imgs/Enron_results/f1_performance_posrate_2\lyxdot 0_0\lyxdot 9}}\subfigure[$\lambda=1$.]{\includegraphics[width=2.9cm]{imgs/Enron_results/f1_performance_posrate_2\lyxdot 0_1\lyxdot 0}}
\par\end{centering}
\caption{Performance on Enron with 2\% of positive data.}
\label{fig:F1_vs_Data_Enron}
\end{figure}
\begin{figure}[H]
\begin{centering}
\includegraphics[width=8.5cm]{imgs/legend1}
\par\end{centering}
\begin{centering}
\subfigure[$\lambda=0.6$.]{\includegraphics[width=2.9cm]{imgs/Reddit_results/f1_performance_posrate_2\lyxdot 0_0\lyxdot 6}}\subfigure[$\lambda=0.9$.]{\includegraphics[width=2.9cm]{imgs/Reddit_results/f1_performance_posrate_2\lyxdot 0_0\lyxdot 9}}\subfigure[$\lambda=1$.]{\includegraphics[width=2.9cm]{imgs/Reddit_results/f1_performance_posrate_2\lyxdot 0_1\lyxdot 0}}
\par\end{centering}
\caption{Performance on Reddit with 2\% of positive data.}
\label{fig:F1_vs_Data_Reddit}
\end{figure}
\begin{figure}[H]
\begin{centering}
\includegraphics[width=8.5cm]{imgs/legend1}
\par\end{centering}
\begin{centering}
\subfigure[Enron dataset.]{\includegraphics[width=2.9cm]{imgs/Enron_results/f1_performance_posrate_10\lyxdot 0_Data_150}}\subfigure[Reddit dataset.]{\includegraphics[width=2.9cm]{imgs/Reddit_results/f1_performance_posrate_10\lyxdot 0_Data_150}}
\par\end{centering}
\caption{Performance: 10.0\% of positive data and \#data=150.}
\label{fig:F1_vs_Lambda}
\end{figure}
\begin{figure}[H]
\begin{centering}
\includegraphics[width=8.5cm]{imgs/legend1}
\par\end{centering}
\begin{centering}
\subfigure[$\lambda=0.6$.]{\includegraphics[width=4.2cm]{imgs/Enron_results/f1_performance_lambda_0\lyxdot 6_Data_150}}\subfigure[$\lambda=0.9$.]{\includegraphics[width=4.2cm]{imgs/Enron_results/f1_performance_lambda_0\lyxdot 9_Data_150}}
\par\end{centering}
\caption{Performance on Enron dataset with \#data=150.}
\label{fig:F1_vs_Pos_Enron}
\end{figure}
\begin{figure}[H]
\begin{centering}
\includegraphics[width=8.5cm]{imgs/legend1}
\par\end{centering}
\begin{centering}
\subfigure[$\lambda=0.6$.]{\includegraphics[width=4.25cm]{imgs/Reddit_results/f1_performance_lambda_0\lyxdot 6_Data_150}}\subfigure[$\lambda=0.9$.]{\includegraphics[width=4.25cm]{imgs/Reddit_results/f1_performance_lambda_0\lyxdot 9_Data_150}}
\par\end{centering}
\caption{Performance on Reddit dataset with \#data=150.}
\label{fig:F1_vs_Pos_Reddit}
\end{figure}
\begin{figure}[H]
\begin{centering}
\includegraphics[width=8.5cm]{imgs/legend2}
\par\end{centering}
\begin{centering}
\subfigure[Enron dataset.]{\includegraphics[width=2.9cm]{imgs/Enron_results/time_posrate_2\lyxdot 0_0\lyxdot 9}}\subfigure[Reddit dataset.]{\includegraphics[width=2.9cm]{imgs/Reddit_results/time_posrate_2\lyxdot 0_0\lyxdot 9}}
\par\end{centering}
\caption{Time complexity: 2\% positive data and $\lambda=0.9$.}
\label{fig:Time_vs_Data}
\end{figure}
\begin{figure}[H]
\begin{centering}
\includegraphics[width=8.5cm]{imgs/legend2}
\par\end{centering}
\begin{centering}
\subfigure[Enron dataset.]{\includegraphics[width=2.9cm]{imgs/Enron_results/time_posrate_2\lyxdot 0_Data_150}}\subfigure[Reddit dataset.]{\includegraphics[width=2.9cm]{imgs/Reddit_results/time_posrate_2\lyxdot 0_Data_150}}
\par\end{centering}
\caption{Time complexity: 2\% positive data and \#data = 150.}
\label{fig:Time_vs_Lambda}
\end{figure}
\begin{figure}[H]
\begin{centering}
\includegraphics[width=8.5cm]{imgs/legend2}
\par\end{centering}
\begin{centering}
\subfigure[$\lambda=0.6$.]{\includegraphics[width=2.9cm]{imgs/Enron_results/time_lambda_0\lyxdot 6_Data_150}}\subfigure[$\lambda=0.8$.]{\includegraphics[width=2.9cm]{imgs/Enron_results/time_lambda_0\lyxdot 8_Data_150}}\subfigure[$\lambda=1$.]{\includegraphics[width=2.9cm]{imgs/Enron_results/time_lambda_1\lyxdot 0_Data_150}}
\par\end{centering}
\caption{Time complexity on Enron with \#data=150.}
\label{fig:Time_vs_Pos_Enron}
\end{figure}
\begin{figure}[H]
\begin{centering}
\includegraphics[width=8.5cm]{imgs/legend2}
\par\end{centering}
\begin{centering}
\subfigure[$\lambda=0.6$.]{\includegraphics[width=2.9cm]{imgs/Reddit_results/time_lambda_0\lyxdot 6_Data_150}}\subfigure[$\lambda=0.8$.]{\includegraphics[width=2.9cm]{imgs/Reddit_results/time_lambda_0\lyxdot 8_Data_150}}\subfigure[$\lambda=1$.]{\includegraphics[width=2.9cm]{imgs/Reddit_results/time_lambda_1\lyxdot 0_Data_150}}
\par\end{centering}
\caption{Time complexity on Reddit with \#data=150.}
\label{fig:Time_vs_Pos_Reddit}
\end{figure}
|
{"hexsha": "6f177d3bb9dbfde4bbc343d0d529191c138fb923", "size": 8399, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Documents/CHIIR2019/Appendix.tex", "max_stars_repo_name": "D3Mlab/visir", "max_stars_repo_head_hexsha": "cd1860984dee8d7aba368857e734ad11c14124c8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-10T07:40:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-10T07:40:04.000Z", "max_issues_repo_path": "Documents/CHIIR2019/Appendix.tex", "max_issues_repo_name": "D3Mlab/viz-ir", "max_issues_repo_head_hexsha": "cd1860984dee8d7aba368857e734ad11c14124c8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Documents/CHIIR2019/Appendix.tex", "max_forks_repo_name": "D3Mlab/viz-ir", "max_forks_repo_head_hexsha": "cd1860984dee8d7aba368857e734ad11c14124c8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 64.6076923077, "max_line_length": 520, "alphanum_fraction": 0.7889034409, "num_tokens": 2532}
|
import json
import numpy as np
import re
from collections import defaultdict as dd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.decomposition import PCA
from sklearn.naive_bayes import GaussianNB
from sklearn import svm
#####################preprocessing###################
fp = open('devfile.json')
data = []
target = []
for line in fp:
instance = json.loads(line)
target.append(instance['lang'])
## data.append(instance['text'])
if 'location' in instance.keys():
data.append(instance['text']+instance['location'])
else:
data.append(instance['text']+'unknown')
fp1 = open('testfile.json')
for line in fp1:
instance = json.loads(line)
target.append(instance['lang'])
## data.append(instance['text'])
if 'location' in instance.keys():
data.append(instance['text']+instance['location'])
else:
data.append(instance['text']+'unknown')
categories = {'ar': 1,
'bg': 2,
'de': 3,
'en': 4,
'es': 5,
'fa': 6,
'fr': 7,
'he': 8,
'hi': 9,
'it': 10,
'ja': 11,
'ko': 12,
'mr': 13,
'ne': 14,
'nl': 15,
'ru': 16,
'th': 17,
'uk': 18,
'ur': 19,
'zh': 20,
'unk': 21}
# map each language code to its integer id
target = np.array([categories[t] for t in target])
print(target)
data = np.array(data)
fp.close()
fp1.close()
hv = HashingVectorizer(n_features=2000, token_pattern=r'\b\w+\b',ngram_range=(1,5), analyzer='char_wb')
X = hv.transform(data.ravel()).toarray()
transformer = TfidfTransformer(smooth_idf=False)
X2 = transformer.fit_transform(X).toarray()
########################classifier starts now###################
# the hard-coded index 8899 marks the boundary between the dev-file instances
# (used for training) and the test-file instances (used for evaluation)
'''GaussianNB'''
clf1 = GaussianNB()
clf1.fit(X2[:8899], target[:8899])
score = clf1.score(X2[8899:], target[8899:])
print('NB score = '+str(score))
'''Decision tree'''
one_r = DecisionTreeClassifier()
one_r.fit(X2[:8899], target[:8899])
score = one_r.score(X2[8899:], target[8899:])
print('Decision tree score = '+str(score))
'''SVM'''
clf2 = svm.LinearSVC()
clf2.fit(X2[:8899], target[:8899])
score = clf2.score(X2[8899:], target[8899:])
print('SVM score = '+str(score))
|
{"hexsha": "3319f4052fbd67f018f47df29c8c104458c068be", "size": 2645, "ext": "py", "lang": "Python", "max_stars_repo_path": "createfile.py", "max_stars_repo_name": "abigailyuan/LIDproj", "max_stars_repo_head_hexsha": "3e34c4d78b89c9513182ab064dc4b3858f59a1d2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "createfile.py", "max_issues_repo_name": "abigailyuan/LIDproj", "max_issues_repo_head_hexsha": "3e34c4d78b89c9513182ab064dc4b3858f59a1d2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "createfile.py", "max_forks_repo_name": "abigailyuan/LIDproj", "max_forks_repo_head_hexsha": "3e34c4d78b89c9513182ab064dc4b3858f59a1d2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.4408602151, "max_line_length": 103, "alphanum_fraction": 0.5969754253, "include": true, "reason": "import numpy", "num_tokens": 660}
|
import copy
import faulthandler
import logging
import os
import platform
import sys
from typing import List
import hydra
import numpy as np
import pytorch_lightning as pl
import torch
from hydra.utils import instantiate
from pytorch_lightning import Callback, LightningDataModule, LightningModule, Trainer
from pytorch_lightning.loggers import LightningLoggerBase
from torchvision.transforms import Compose
from src.lib.config import Config, register_configs
from src.utils import utils
logger = logging.getLogger(__name__)
handler = logging.StreamHandler(stream=sys.stdout)
logger.addHandler(handler)
def handle_exception(exc_type, exc_value, exc_traceback):
if issubclass(exc_type, KeyboardInterrupt):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
return
logger.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))
sys.excepthook = handle_exception
faulthandler.enable()
# sometimes Windows and matplotlib do not play well together, so configure the environment for plt:
if platform.system() == "Windows":
os.environ["KMP_DUPLICATE_LIB_OK"] = "True"
# register the structured configs:
register_configs()
# set up advanced logging:
@hydra.main(config_name="config", config_path="conf")
def main(cfg: Config):
    # main is split in two so that an undecorated version exists for integration tests
return inner_main(cfg)
def inner_main(cfg: Config):
utils.extras(cfg) # check if debug is activated and if so, change some trainer settings
utils.set_log_levels(cfg.log_level)
log = utils.get_logger(cfg.log_level)
# Pretty print config using Rich library
if cfg.print_config:
utils.print_config(cfg, resolve=True) # prints the complete hydra config to std-out
torch.manual_seed(cfg.random_seed) # set random seed
pl.seed_everything(cfg.random_seed)
np.random.seed(cfg.random_seed)
# Init Lightning callbacks
callbacks: List[Callback] = []
if "callbacks" in cfg:
for _, cb_conf in cfg["callbacks"].items():
if "_target_" in cb_conf:
log.info(f"Instantiating callback <{cb_conf._target_}>")
callbacks.append(hydra.utils.instantiate(cb_conf))
    # Init Lightning loggers ('loggers' avoids shadowing the module-level logger)
    loggers: List[LightningLoggerBase] = []
    if "logger" in cfg:
        for _, lg_conf in cfg["logger"].items():
            if "_target_" in lg_conf:
                log.info(f"Instantiating logger <{lg_conf._target_}>")
                loggers.append(hydra.utils.instantiate(lg_conf))
# Init Transformations
train_transforms: Compose = hydra.utils.instantiate(cfg.datamodule.train_transforms)
valid_transforms: Compose = hydra.utils.instantiate(cfg.datamodule.valid_transforms)
# Init Lightning datamodule
log.info(f"Instantiating datamodule <{cfg.datamodule._target_}>")
datamodule: LightningDataModule = hydra.utils.instantiate(
cfg.datamodule,
train_transforms=train_transforms,
valid_transforms=valid_transforms,
dataset=cfg.datamodule.dataset,
is_ddp=cfg.strategy is not None,
)
datamodule.setup() # manually set up the datamodule here, so an example batch can be drawn
# generate example input array:
for batch in datamodule.train_dataloader():
example_input, _ = batch
break
log.info(f"Size of one batch is: {example_input.element_size() * example_input.nelement() / 2**20} mb")
# Init Lightning model
log.info(f"Instantiating model <{cfg.lightning_module._target_}>")
model: LightningModule = hydra.utils.instantiate(
cfg.lightning_module,
optimizer=cfg.optimizer,
scheduler=cfg.scheduler,
model=cfg.model,
loss=cfg.loss,
example_input_array=example_input.detach().cpu(),
batch_size=cfg.datamodule.batch_size,
)
# load the state dict if one is provided (has to be provided for finetuning classifier in simclr):
device = "cuda" if cfg.trainer.accelerator == "gpu" else "cpu"
if cfg.load_state_dict is not None:
log.info(f"Loading model weights from {cfg.load_state_dict}")
net = copy.deepcopy(model.model.cpu())
# check state dict before loading:
this_state_dict = model.model.state_dict().copy()
len_old_state_dict = len(this_state_dict)
log.info(f"Old state dict has {len_old_state_dict} entries.")
try:
new_state_dict = torch.load(cfg.load_state_dict, map_location=torch.device(device))
except Exception as e:
            log.error(f"Failed to load state dict from {cfg.load_state_dict}: {e}")
raise e
missing_keys, unexpected_keys = net.load_state_dict(new_state_dict, strict=True)
log.warning(f"Missing keys: {missing_keys}")
log.warning(f"Unexpected keys: {unexpected_keys}")
state_dict_error_count = 0
for state_key, state in net.state_dict().items():
if this_state_dict[state_key].allclose(state, atol=1e-12, rtol=1e-12):
log.error(
f"Loaded state dict params for layer '{state_key}' are same as random initialized one ("
f"Might be due to caching, if you just restarted the same model twice!)"
)
state_dict_error_count += 1
if state_dict_error_count > 0:
log.warning(
f"{state_dict_error_count} state entries are the same after init. "
f"(From a total of {len_old_state_dict} items)"
)
model.model = copy.deepcopy(net.model.to(device))
del net
log.info(f"Successfully loaded model weights from {cfg.load_state_dict}")
# log hparam metrics to tensorboard:
log.info("Logging hparams to tensorboard")
hydra_params = utils.log_hyperparameters(config=cfg, model=model)
    for this_logger in loggers:
if "tensorboard" in str(this_logger):
log.info("Add hparams to tensorboard")
this_logger.log_hyperparams(hydra_params, {"hp/loss": 0, "hp/epoch": 0})
else:
this_logger.log_hyperparams(hydra_params)
# Send some parameters from config to all lightning loggers
log.info("Logging hyperparameters to lightning!")
model.hydra_params = hydra_params
# Init Trainer:
log.info(f"Instantiating trainer <{cfg.trainer._target_}>")
trainer: Trainer = instantiate(
        cfg.trainer, strategy=cfg.strategy, logger=loggers, callbacks=callbacks, _convert_="partial"
)
# if activated in the config, start the pytorch lightning automatic batch-size and lr tuning process
if cfg.auto_tune:
log.info("Starting tuning the model")
trainer.tune(model, datamodule)
log.info("Starting training")
trainer.fit(model, datamodule) # the actual training of the NN
# Print path to best checkpoint
    # best_model_path is an empty string (not None) when no checkpoint was saved
    if trainer.checkpoint_callback is not None and trainer.checkpoint_callback.best_model_path:
log.info(f"Best checkpoint path:\n{trainer.checkpoint_callback.best_model_path}")
# if at some point optuna is used, then some metric has to be returned, which optuna can optimize
return trainer.callback_metrics["hp/loss"].item()
if __name__ == "__main__":
log = utils.get_logger()
log.info("Starting python script")
main()
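
The structured config consumed by this script lives in src.lib.config and is not shown in this file. As a point of reference only, the sketch below is a hypothetical minimal version of what register_configs() might register: the field names mirror the attributes inner_main reads, and the ConfigStore call is Hydra's real API, but the actual Config dataclass may differ.

from dataclasses import dataclass
from typing import Any, Optional

from hydra.core.config_store import ConfigStore

@dataclass
class Config:
    # hypothetical fields, mirroring the attributes accessed in inner_main
    random_seed: int = 42
    log_level: str = "INFO"
    print_config: bool = True
    auto_tune: bool = False
    load_state_dict: Optional[str] = None
    strategy: Optional[str] = None
    trainer: Any = None           # each of these is expected to carry a _target_
    datamodule: Any = None        # so hydra.utils.instantiate() can build it
    lightning_module: Any = None
    optimizer: Any = None
    scheduler: Any = None
    model: Any = None
    loss: Any = None

def register_configs() -> None:
    # register under the name used by @hydra.main(config_name="config")
    cs = ConfigStore.instance()
    cs.store(name="config", node=Config)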
|
{"hexsha": "68eaa2511d30e95b33c6b3700598e8c8102f462d", "size": 7249, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "m-dml/hydra_template_project", "max_stars_repo_head_hexsha": "6186c548ad877232e4a4e0510ca81f49a59f69e2", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2022-01-27T14:27:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T05:41:13.000Z", "max_issues_repo_path": "main.py", "max_issues_repo_name": "m-dml/hydra_template_project", "max_issues_repo_head_hexsha": "6186c548ad877232e4a4e0510ca81f49a59f69e2", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "m-dml/hydra_template_project", "max_forks_repo_head_hexsha": "6186c548ad877232e4a4e0510ca81f49a59f69e2", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-28T02:42:20.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-28T02:42:20.000Z", "avg_line_length": 37.7552083333, "max_line_length": 108, "alphanum_fraction": 0.6945785626, "include": true, "reason": "import numpy", "num_tokens": 1642}
|
from __future__ import print_function
import numpy as np
import os
from .json_utils import write_to_json
class TweenParams(object):
"""A class to store tween parameters and make an output file"""
def __init__(
self,
coords=None,
duration=5,
loop=True,
filename=None):
"""Create a new tween parameter object, allowing the user to press :code:`t` from within
the webapp to move between keyframe camera locations smoothly and automatically.
:param coords: keyframe camera coordinates, list of positions that camera
will move between. 3 acceptable input formats:
* [x,y,z] single keyframe
* [[x1,y1,z1],[x2,y2,z2],...] multiple keyframes
* [x1,y1,z1,x2,y2,z2,...] multiple flattened keyframes,
defaults to []
:type coords: list of float
:param duration: duration to approach keyframe in seconds. 3 acceptable input formats:
* d single duration (will be repeated)
* [d] single duration in list (will be repeated)
* [d1,d2,...] multiple durations (corresponding to number of keyframes or
raises an error),
defaults to 5
:type duration: float/list of float
:param loop: flag to loop after reaching the last keyframe, defaults to True
:type loop: bool, optional
:param filename: name of tween file :code:`.json` file,
defaults to ``'TweenParams.json'``
:type filename: str, optional
"""
## initialize containers
self.coordss = np.array([]).reshape(0,3)
        ## broadcast a scalar duration to one entry per keyframe
        try: iter(duration)
        except TypeError: duration = np.repeat(duration, np.size(coords)//3)
self.durations = np.array([])
## store loop flag
self.loop = bool(loop)
## add keyframes if any were passed
if coords is not None:
self.addKeyframe(coords,duration)
## bind filename so js knows where to look
self.filename = 'TweenParams.json' if filename is None else filename
def addKeyframe(
self,
coords,
duration):
"""
Adds a new keyframe to an existing TweenParams object.
:param coords: keyframe camera coordinates, list of positions that camera
            will move between. 3 acceptable input formats:
* [x,y,z] single keyframe
* [[x1,y1,z1],[x2,y2,z2],...] multiple keyframes
* [x1,y1,z1,x2,y2,z2,...] multiple flattened keyframes
:type coords: list of float
:param duration: duration to approach keyframe, 3 acceptable input formats:
* d single duration (will be repeated)
* [d] single duration in list (will be repeated)
* [d1,d2,...] multiple durations (corresponding to number of keyframes or
raises an error)
:type duration: float/list of float
:raises np.AxisError: if len of coords is not divisible by 3
:raises np.AxisError: if len of durations does not match len of coords
"""
try:
## cast to numpy array and reorder coords at the same time for
## convenient input
coords = np.array(coords).reshape(-1,3)
except:
raise np.AxisError("coords should either be a 2d Nx3 numpy array or"+
"a 3N list/array.")
## convert duration to a 1d numpy array, however it was passed
duration = np.array(duration).reshape(-1)
        if duration.shape[0] == 1: duration = np.repeat(duration, coords.shape[0])
## ensure there is a duration per keyframe transition
## TODO: shouldn't durations be 1 less than coordss?
if duration.shape[0]!=coords.shape[0]:
raise np.AxisError(
"Mismatching coords and duration shape (%d,%d)"%(
coords.shape[0],
duration.shape[0]))
self.coordss = np.append(self.coordss,coords,axis=0)
self.durations = np.append(self.durations,duration)
def outputToDict(self):
"""Converts stored data into a single python dictionary.
:return: tween_params_dict
:rtype: dict
"""
xs,ys,zs = self.coordss.T
keyframe_dicts = [
{'x':xs[i],'y':ys[i],'z':zs[i]}
for i in range(xs.shape[0])]
rotation_dicts = [
{'x':0,'y':0,'z':0}
for i in range(xs.shape[0])]
tween_params_dict = {
'position':keyframe_dicts,
'rotation':rotation_dicts,
'duration':self.durations,
'loop':self.loop,
'loaded':True
}
return tween_params_dict
def outputToJSON(
self,
JSONdir,
JSON_prefix='',
loud=1,
write_to_disk=True,
not_reader=True):
""" Saves the current tween parameters to a JSON file.
        :param JSONdir: the sub-directory that will contain your JSON files, relative
            to your :code:`$HOME` directory, defaults to :code:`$HOME/<JSON_prefix>`
:type JSONdir: str, optional
:param JSON_prefix: Prefix for any :code:`.json` files created, :code:`.json` files will be of the format:
:code:`<JSON_prefix><self.filename>.json`, defaults to ''
:type JSON_prefix: str, optional
:param loud: flag to print status information to the console, defaults to True
:type loud: bool, optional
:param write_to_disk: flag that controls whether data is saved to disk (:code:`True`)
or only converted to a string and returned (:code:`False`), defaults to True
:type write_to_disk: bool, optional
:param not_reader: flag for whether to print the Reader :code:`filenames.json` warning, defaults to True
        :type not_reader: bool, optional
:return: filename, JSON(tween_params_dict) (either a filename if
            written to disk or a JSON string)
:rtype: str, str
"""
tween_params_dict = self.outputToDict()
        ## note: JSON_prefix is currently not prepended to the filename here
        filename = os.path.join(JSONdir,self.filename)
if loud and not_reader:
print("You will need to add this tween params filename to"+
" filenames.json if this was not called by a Reader instance.")
return filename,write_to_json(
tween_params_dict,
filename if write_to_disk else None) ## None-> returns a string
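
A short usage sketch of TweenParams follows; the import path, camera coordinates, durations, and output directory are illustrative assumptions (the directory is assumed to exist), while the call signatures follow the docstrings above.

import os
from firefly.data_reader.tween import TweenParams  # hypothetical import path

# two keyframes, each approached over 5 seconds, looping forever
tween = TweenParams(coords=[[0, 0, 100], [100, 0, 0]], duration=5, loop=True)

# append a third keyframe with its own 10 second transition
tween.addKeyframe([0, 100, 0], 10)

# write TweenParams.json into an illustrative output directory
out_dir = os.path.join(os.environ.get('HOME', '.'), 'my_firefly_data')
filename, _ = tween.outputToJSON(out_dir, not_reader=True)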
|
{"hexsha": "63ad2508e7058e346578f1421af98e3d5e6f318b", "size": 6550, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/firefly/data_reader/tween.py", "max_stars_repo_name": "agurvich/firefly", "max_stars_repo_head_hexsha": "60c8df088d7ab73071171e9efa6e235a6d072624", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/firefly/data_reader/tween.py", "max_issues_repo_name": "agurvich/firefly", "max_issues_repo_head_hexsha": "60c8df088d7ab73071171e9efa6e235a6d072624", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/firefly/data_reader/tween.py", "max_forks_repo_name": "agurvich/firefly", "max_forks_repo_head_hexsha": "60c8df088d7ab73071171e9efa6e235a6d072624", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.2215568862, "max_line_length": 114, "alphanum_fraction": 0.5960305344, "include": true, "reason": "import numpy", "num_tokens": 1483}
|