| text (string, lengths 0–1.25M) | meta (string, lengths 47–1.89k) |
|---|---|
(* Property from Productive Use of Failure in Inductive Proof,
Andrew Ireland and Alan Bundy, JAR 1996.
This Isabelle theory is produced using the TIP tool offered at the following website:
https://github.com/tip-org/tools
This file was originally provided as part of the TIP benchmark at the following website:
https://github.com/tip-org/benchmarks
Yutaka Nagashima at CIIRC, CTU changed the TIP output theory file slightly
to make it compatible with Isabelle2017.
Some proofs were added by Yutaka Nagashima.*)
theory TIP_prop_20
imports "../../Test_Base"
begin
datatype 'a list = nil2 | cons2 "'a" "'a list"
datatype Nat = Z | S "Nat"
fun x :: "'a list => 'a list => 'a list" where
"x (nil2) z = z"
| "x (cons2 z2 xs) z = cons2 z2 (x xs z)"
fun length :: "'a list => Nat" where
"length (nil2) = Z"
| "length (cons2 z xs) = S (length xs)"
fun even :: "Nat => bool" where
"even (Z) = True"
| "even (S (Z)) = False"
| "even (S (S z2)) = even z2"
theorem property0 :
"even (length (x y y))"
oops
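(* A possible proof sketch (not part of the original TIP output): first prove the
   auxiliary lemma "length (x xs (cons2 z ys)) = S (length (x xs ys))" by induction
   on xs, then prove property0 by induction on y; in the step case that lemma
   rewrites the goal to "even (S (S (length (x ys ys))))", which reduces to the
   induction hypothesis by the definition of even. *)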
end
|
{"author": "data61", "repo": "PSL", "sha": "2a71eac0db39ad490fe4921a5ce1e4344dc43b12", "save_path": "github-repos/isabelle/data61-PSL", "path": "github-repos/isabelle/data61-PSL/PSL-2a71eac0db39ad490fe4921a5ce1e4344dc43b12/UR/TIP_with_Proof/Prod/Prod/TIP_prop_20.thy"}
|
subroutine choose_model
use bio_MOD
implicit none
integer :: i
namelist /Model/ Stn, Model_ID, nutrient_uptake, grazing_formulation
character(len=100) :: format_string
! Open the namelist file and read the model configuration (station name, model ID, etc.).
open(namlst,file='Model.nml',status='old',action='read')
read(namlst,nml=Model)
close(namlst)
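! A Model.nml file is expected to look roughly like this (values illustrative only):
!   &Model
!     Stn                 = 'HOT',
!     Model_ID            = 1,
!     nutrient_uptake     = 1,
!     grazing_formulation = 1,
!   /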
if (Model_ID==NPZDdisc) then
write(6,*) 'Inflexible NPZD discrete model selected!'
NPHY = 20
else if (Model_ID==NPZDdiscFe) then
write(6,*) 'Inflexible NPZD discrete model with Iron selected!'
NPHY = 20
do_IRON = .TRUE.
else if (Model_ID==Geiderdisc) then
write(6,*) 'Geider discrete model selected!'
NPHY = 20
else if (Model_ID==EFTdisc) then
write(6,*) 'Flexible discrete model selected!'
NPHY = 20
else if (Model_ID==EFTdiscFe) then
   write(6,*) 'Flexible discrete model with Iron selected!'
NPHY = 20
do_IRON = .TRUE.
else if (Model_ID==NPZDFix) then
write(6,*) 'Inflexible NPZD model selected!'
NPHY = 1
else if (Model_ID==NPZDFixIRON) then
write(6,*) 'Inflexible NPZD model with Iron selected!'
do_IRON = .TRUE.
NPHY = 1
else if (Model_ID==Geidersimple) then
write(6,*) 'Geider simple model selected!'
NPHY = 1
else if (Model_ID==GeidsimIRON) then
write(6,*) 'Geider simple model with Iron selected!'
do_IRON = .TRUE.
NPHY = 1
else if (Model_ID==EFTsimple) then
write(6,*) 'Flexible simple model selected!'
NPHY = 1
else if (Model_ID==EFTsimIRON) then
write(6,*) 'Flexible simple model with Iron selected!'
do_IRON = .TRUE.
NPHY = 1
else if (Model_ID==EFTcont) then
   write(6,*) 'Flexible continuous model selected!'
NPHY = 1
endif
allocate(iPHY(NPHY))
allocate(iCHL(NPHY))
allocate(oPHY(NPHY))
allocate(oCHL(NPHY))
allocate(omuNet(NPHY))
allocate(oLno3(NPHY))
allocate(oSI(NPHY))
allocate(oGraz(NPHY))
allocate(oD_PHY(NPHY))
allocate(oD_CHL(NPHY))
allocate(otheta(NPHY))
allocate(oQN(NPHY))
do i=1,NPHY
iPHY(i) = i + iNO3
enddo
iZOO = iPHY(NPHY)+1
iDET = iZOO+1
if (Model_ID==Geiderdisc .or. Model_ID==Geidersimple .or. Model_ID==GeidsimIRON) then
do i=1,NPHY
iCHL(i) = i + iDET
enddo
NVAR = iCHL(NPHY)
else if(Model_ID==EFTcont) then
iPMU = iDET + 1
iVAR = iPMU + 1
NVAR = iVAR
else
NVAR = iDET
endif
allocate(Vars(NVAR,nlev))
Vars(:,:)=0d0
if (Model_ID==Geiderdisc .or. Model_ID==Geidersimple .or. Model_ID==GeidsimIRON) then
NVsinkterms = 1 + NPHY * 2 ! Include phyto and Chl
else
NVsinkterms = 1 + NPHY
endif
allocate(Windex(NVsinkterms))
do i=1,NPHY
Windex(i)=iPHY(i)
if (Model_ID==Geiderdisc .or. Model_ID==Geidersimple .or. Model_ID==GeidsimIRON) then
Windex(i+NPHY)=iCHL(i)
endif
enddo
Windex(NVsinkterms)=iDET
! Output array matrices (the order must be consistent with Vars)
do i=1,NPHY
oPHY(i)=i+oNO3
enddo
oZOO =oPHY(NPHY)+1
oDET =oZOO+1
if(Model_ID==EFTcont) then
oPMU=oDET+1
oVAR=oPMU+1
do i=1,NPHY
oCHL(i) = i + oVAR
enddo
else
do i=1,NPHY
oCHL(i) = i + oDET
enddo
endif
! The above must match the i** indices
oPHYt=oCHL(NPHY)+1
oCHLt=oPHYt+1
if (Model_ID==Geiderdisc .or. Model_ID==NPZDdisc .or. Model_ID==EFTdisc&
.or.Model_ID==EFTcont) then
do i=1,4
oCHLs(i)=oCHLt+i
enddo
endif
!allocate(ow_p(NPHY))
if (Model_ID==Geiderdisc .or. Model_ID==NPZDdisc .or. Model_ID==EFTdisc&
.or.Model_ID==EFTcont) then
do i=1,NPHY
omuNet(i)= oCHLs(4) + i
enddo
else
do i=1,NPHY
omuNet(i)= oCHLt + i
enddo
end if
do i=1,NPHY
oGraz(i) = omuNet(NPHY) + i
enddo
do i=1,NPHY
oLno3(i) = oGraz(NPHY) + i
enddo
do i=1,NPHY
oSI(i)=oLno3(NPHY)+i
enddo
do i=1,NPHY
oQN(i)=oSI(NPHY)+i
enddo
do i=1,NPHY
otheta(i)=oQN(NPHY)+i
enddo
oZ2N=otheta(NPHY)+1
oD2N=oZ2N+1
oPPt=oD2N+1
oD_NO3=oPPt +1
do i=1,NPHY
oD_PHY(i)=oD_NO3+i
enddo
oD_ZOO=oD_PHY(NPHY)+1
oD_DET=oD_ZOO+1
if (Model_ID==Geiderdisc .or. Model_ID==Geidersimple .or. Model_ID==GeidsimIRON) then
do i=1,NPHY
      oD_CHL(i)=oD_DET+i
enddo
Nout=oD_CHL(NPHY)
else if(Model_ID==EFTcont) then
oD_PMU=oD_DET+1
oD_VAR=oD_PMU+1
Nout=oD_VAR
else
Nout=oD_DET
endif
allocate(Varout(Nout,nlev), stat = AllocateStatus)
IF (AllocateStatus /= 0) STOP "*** Error in allocating Varout ***"
allocate(Labelout(Nout+ ow ))
Labelout(oTemp )='Temp'
Labelout(oPAR )='PAR '
Labelout(oAks )='Aks '
Labelout(oDFe )='DFe '
Labelout(ow )='w '
Labelout(oNO3+ow)='NO3 '
do i=1,NPHY
if (i < 10) then
format_string = "(A3,I1)"
else
format_string = "(A3,I2)"
endif
write(Labelout(oPHY(i) +ow), format_string) 'PHY',i
write(Labelout(oSI(i) +ow), format_string) 'SI_',i
write(Labelout(oQN(i) +ow), format_string) 'QN_',i
write(Labelout(oLno3(i) +ow), format_string) 'Lno',i
write(Labelout(otheta(i)+ow), format_string) 'The',i
write(Labelout(oCHL(i) +ow), format_string) 'CHL',i
enddo
Labelout(oZOO +ow)='ZOO'
Labelout(oDET +ow)='DET'
if (Model_ID==EFTcont) then
Labelout(oPMU+ow)='PMU'
Labelout(oVAR+ow)='VAR'
Labelout(oD_PMU+ow)='D_PMU'
Labelout(oD_VAR+ow)='D_VAR'
endif
Labelout(oPHYt+ow)='PHY_T'
Labelout(oCHLt+ow)='CHL_T'
do i=1,NPHY
if (i < 10) then
format_string = "(A3,I1)"
else
format_string = "(A3,I2)"
endif
write(Labelout(omuNet(i) + ow), format_string) 'muN',i
write(Labelout(oGraz(i) + ow), format_string) 'Gra',i
write(Labelout(oD_PHY(i) + ow), format_string) 'D_P',i
if (Model_ID==GeidsimIRON.or.Model_ID==Geiderdisc .or. Model_ID==Geidersimple) then
write(Labelout(oD_CHL(i) + ow), format_string) 'DCH',i
endif
enddo
Labelout(oZ2N + ow)='Z2N'
Labelout(oD2N + ow)='D2N'
Labelout(oPPt + ow)='NPP_T'
Labelout(oD_NO3+ ow)='D_NO3'
Labelout(oD_ZOO+ ow)='D_ZOO'
Labelout(oD_DET+ ow)='D_DET'
if(Model_ID==Geiderdisc.or.Model_ID==NPZDdisc .or.Model_ID==EFTdisc .or.&
Model_ID==EFTcont) then
do i=1,4
if (i < 10) then
format_string = "(A4,I1)"
else
format_string = "(A4,I2)"
endif
write(Labelout(oCHLs(i) +ow), format_string) 'CHLs',i
enddo
endif
do i = 1, Nout+ow
write(6,*) 'Labelout(',i,') = ',trim(Labelout(i))
enddo
! Initialize parameters
! Indices for parameters that will be used in MCMC
! For EFT models, the affinity approach is not used for now
! Need to have tradeoffs for maximal growth rate (mu0) and Kn
! Common parameters:
iEp = 1
iEz = iEp + 1
imu0 = iEz + 1
iaI0 = imu0 + 1
iQ0N = iaI0 + 1
if (nutrient_uptake .eq. 1) then
iKN = iQ0N + 1
igmax = iKN + 1
elseif (nutrient_uptake.eq.2) then
iA0N = iQ0N + 1
igmax = iA0N + 1
endif
ikp = igmax + 1
iwDET = ikp + 1
irdN = iwDET + 1
imz = irdN + 1
if (Model_ID==NPZDFix.or.Model_ID==NPZDdisc.or.Model_ID==NPZDFixIRON) then
itheta = imz + 1
if (Model_ID==NPZDFix) then
NPar = itheta
else if(Model_ID == NPZDFixIRON) then
iKFe = itheta + 1
NPar = iKFe
else
ialphamu=itheta+1
if (nutrient_uptake.eq.1) then
ialphaKN=ialphamu+1
if(kill_the_winner) then
ialphaG=ialphaKN+1
NPar =ialphaG
else
NPar =ialphaKN
endif
elseif(nutrient_uptake.eq.2) then
ialphaA =ialphamu+1
if(kill_the_winner) then
ialphaG=ialphaA+1
NPar =ialphaG
else
NPar =ialphaA
endif
endif
endif
else if(Model_ID==Geiderdisc.or.Model_ID==EFTdisc.or.Model_ID==EFTcont) &
then
ialphamu =imz + 1
if (nutrient_uptake.eq.1) then
ialphaKN =ialphamu+1
if(kill_the_winner) then
ialphaG=ialphaKN+1
NPar =ialphaG
else
NPar =ialphaKN
endif
elseif(nutrient_uptake.eq.2) then
ialphaA =ialphamu+1
if(kill_the_winner) then
ialphaG=ialphaA+1
NPar =ialphaG
else
NPar =ialphaA
endif
endif
else if (Model_ID==GeidsimIRON .or. Model_ID==EFTsimIRON) then
iKFe = imz + 1
NPar = iKFe
else
NPar = imz
endif
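! For example, with Model_ID = NPZDFix and nutrient_uptake = 1 the indices above
! resolve to iEp=1, iEz=2, imu0=3, iaI0=4, iQ0N=5, iKN=6, igmax=7, ikp=8,
! iwDET=9, irdN=10, imz=11, itheta=12, so NPar = 12.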
write(6,'(I2,1x,A)') NPar,'parameters in total to be estimated.'
allocate(params(NPar))
allocate(ParamLabel(NPar))
ParamLabel(imu0) = 'mu0hat '
if (Model_ID == NPZDdisc) then
! Update parameters on Nov. 9 2016
params(imu0) = 2.5D0
elseif (Model_ID == EFTdisc) then
params(imu0) = 5d0
else
params(imu0) = 2.5d0
endif
if(Model_ID==NPZDdisc.or.Model_ID==Geiderdisc.or. &
Model_ID==EFTdisc .or.Model_ID==EFTcont) then
ParamLabel(ialphamu) = 'alphamu'
if (Model_ID .eq. NPZDdisc) then
params(ialphamu) = 0.08
else
params(ialphamu) = 0.1
endif
if (nutrient_uptake.eq.1) then
ParamLabel(ialphaKN)= 'alphaKN'
if (Model_ID .eq. NPZDdisc) then
params(ialphaKN)= 0.26
else
params(ialphaKN)= 0.3
endif
else if (nutrient_uptake.eq.2) then
ParamLabel(ialphaA) = 'alphaA'
params(ialphaA) = -0.3
endif
endif
if (nutrient_uptake .eq. 1) then
ParamLabel(iKN ) = 'KN '
if (Model_ID .eq. NPZDdisc) then
params(iKN ) = 0.11d0
else
params(iKN ) = 1d0
endif
else if (nutrient_uptake .eq. 2) then
ParamLabel(iA0N ) = 'A0N '
params(iA0N) = 1d0
endif
ParamLabel(igmax ) = 'gmax '
params(igmax ) = 1d0
ParamLabel(iwDET ) = 'wDET '
params(iwDET ) = 2D0
ParamLabel(ikp ) = 'kp '
params(ikp ) = 5d-1
ParamLabel(irdn ) = 'rdn '
params(irdn ) = 0.1d0
ParamLabel(imz ) = 'mz '
params(imz ) = 0.15D0
ParamLabel(iEp ) = 'Ep '
params(iEp ) = 0.5
ParamLabel(iEz ) = 'Ez '
params(iEz ) = 0.6
if(Model_ID==NPZDdisc.or.Model_ID==NPZDFix .or. Model_ID==NPZDFixIRON) then
ParamLabel(itheta)='theta'
params(itheta) =0.24
endif
if(Model_ID==GeidsimIRON .or. Model_ID==EFTsimIRON .or. Model_ID==NPZDFixIRON) then
ParamLabel(iKFe) ='KFe'
params(iKFe) =0.08
endif
ParamLabel(iQ0N ) = 'Q0N '
if (Model_ID == NPZDdisc .or. Model_ID == NPZDFix .or. Model_ID == NPZDFixIRON) then
params(iQ0N ) = 0.15
else
params(iQ0N ) = 0.076
endif
ParamLabel(iaI0 ) = 'aI0 '
if (Model_ID .eq. NPZDdisc) then
params(iaI0 ) = 0.01
else
params(iaI0 ) = 0.2
endif
if((Model_ID==NPZDdisc.or.Model_ID==Geiderdisc.or. &
Model_ID==EFTdisc .or.Model_ID==EFTcont).and.kill_the_winner) then
ParamLabel(ialphaG ) = 'alphaG '
params(ialphaG)=1.1
endif
if (Model_ID==NPZDdisc.or.Model_ID==Geiderdisc.or. &
Model_ID==EFTdisc) then
call assign_PMU
endif
do i = 1, NPar
write(6,*) 'ParamLabel(',i,') = ',trim(ParamLabel(i))
enddo
end subroutine choose_model
|
{"hexsha": "7cb523d2dc5221dc0bd8e0d81dfc1a31c160f4e5", "size": 10713, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "DRAM/NPZDFix_sRun/HOT_g2/KTW/Compile/choose_model.f90", "max_stars_repo_name": "BingzhangChen/NPZDFeCONT", "max_stars_repo_head_hexsha": "0083d46dadc4fb8fed8728816755678da8294e16", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "DRAM/NPZDFix_sRun/HOT_g2/KTW/Compile/choose_model.f90", "max_issues_repo_name": "BingzhangChen/NPZDFeCONT", "max_issues_repo_head_hexsha": "0083d46dadc4fb8fed8728816755678da8294e16", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "DRAM/NPZDFix_sRun/HOT_g2/KTW/Compile/choose_model.f90", "max_forks_repo_name": "BingzhangChen/NPZDFeCONT", "max_forks_repo_head_hexsha": "0083d46dadc4fb8fed8728816755678da8294e16", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.9892703863, "max_line_length": 88, "alphanum_fraction": 0.6360496593, "num_tokens": 4180}
|
from numpy.polynomial.polynomial import Polynomial
from functools import reduce
class CustomPolynomial(Polynomial):
    def __init__(self, coefficients):
        """
        input: coefficients is a list of the form [a_0, a_1, ..., a_n]
        """
        super(CustomPolynomial, self).__init__(coefficients)
        # self.coef is populated by the parent class; self.degree holds the
        # number of coefficients (polynomial degree + 1), matching its use below.
        self.degree = len(self.coef)
    @classmethod
    def fromfilename(cls, *coefficients):
        # Note: despite its name, this alternative constructor takes the
        # coefficients directly as positional arguments.
        return cls(list(coefficients))
def __call__(self, x):
"""
        Override __call__ for more efficient evaluation.
"""
# return reduce(lambda a, b: a + b, map(lambda y: y[1] * (x ** y[0]), enumerate(self.coef)))
result = 0
for i in range(self.degree):
result = result + self.coef[i] * (x ** i)
return result
def __str__(self):
res = ""
if self.coef[0] < 0:
res += " - " + str(-self.coef[0])
else:
res += str(self.coef[0])
for i in range(1, self.degree):
coeff = self.coef[i]
if coeff < 0:
res += " - " + str(-coeff) + "x^" + str(i)
else:
res += " + " + str(coeff) + "x^" + str(i)
return res
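if __name__ == "__main__":
    # Illustrative check only (not part of the original module):
    # p(x) = 1 + 2x^2, so p(3) should evaluate to 19.
    p = CustomPolynomial([1, 0, 2])
    print(p(3))
    print(p)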
|
{"hexsha": "910370d94e866c846db335ad257e2d14fdaac34b", "size": 1253, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/share/polynomial.py", "max_stars_repo_name": "bingrao/mapreduce_sss", "max_stars_repo_head_hexsha": "be469b86f3fc133e1586149f0b850a76cb85f216", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/share/polynomial.py", "max_issues_repo_name": "bingrao/mapreduce_sss", "max_issues_repo_head_hexsha": "be469b86f3fc133e1586149f0b850a76cb85f216", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/share/polynomial.py", "max_forks_repo_name": "bingrao/mapreduce_sss", "max_forks_repo_head_hexsha": "be469b86f3fc133e1586149f0b850a76cb85f216", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.4772727273, "max_line_length": 100, "alphanum_fraction": 0.5195530726, "include": true, "reason": "from numpy", "num_tokens": 326}
|
/*
* This file is part of SRS project.
*
* SRS is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* SRS is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with SRS. If not, see <http://www.gnu.org/licenses/>.
*
* Created by: Yifang Sun, Jianbin Qin
* Last modified by: Yifang Sun, Jianbin Qin
*/
#include <iostream>
#include <fstream>
#include <stdio.h>
#include <getopt.h>
#include <cstdlib>
#include <dirent.h>
#include <vector>
#include <time.h>
#include <sys/time.h>
#include <boost/math/distributions/chi_squared.hpp>
#include "SRSInMemory.h"
#define no_argument 0
#define required_argument 1
#define optional_argument 2
void usage();
bool file_exists(const char *);
bool dir_exists(const char *foldername);
double cal_thres(double c, double p_thres, int m);
template<class T>
void query_workload(SRS_In_Memory<T> * searcher, int k, int t, double thres,
char *query_file_path, char *ground_truth_file_path,
char *output_file_path);
float diff_timeval(timeval t1, timeval t2) {
return (float) (t1.tv_sec - t2.tv_sec) + (t1.tv_usec - t2.tv_usec) * 1e-6;
}
int main(int argc, char * argv[]) {
const struct option longopts[] ={
{"help", no_argument, 0, 'h'},
{"page-size", required_argument, 0, 'b'},
{"approximation-ratio", required_argument, 0, 'c'},
{"dimension", required_argument, 0, 'd'},
{"seed", required_argument, 0, 'e'},
{"ground-truth-file-path", required_argument, 0, 'g'},
{"index-dir-path", required_argument, 0, 'i'},
{"is-index", no_argument, 0, 'I'},
{"k", required_argument, 0, 'k'},
{"m", required_argument, 0, 'm'},
{"cardinality", required_argument, 0, 'n'},
{"output-file-path", required_argument, 0, 'o'},
{"query-file-path", required_argument, 0, 'q'},
{"is-query", no_argument, 0, 'Q'},
{"threshold", required_argument, 0, 'r'},
{"dataset-file-path", required_argument, 0, 's'},
{"max-number-of-points", required_argument, 0, 't'},
{"data-type", required_argument, 0, 'y'},
{0, 0, 0, 0},
};
srand(1000);
int index;
int iarg = 0;
opterr = 1; //getopt error message (off: 0)
int d = -1;
long long n = -1;
int k = -1;
int b = -1;
int m = -1;
int t = -1;
double c = -1.0;
double p_thres = -1.0;
char ground_truth_file_path[100] = "";
char query_file_path[100] = "";
char data_file_path[100] = "";
char output_file_path[100] = ""; //output file, currently not used.
char index_dir_path[100] = "";
char data_type[100] = "";
bool is_valid_command = true;
bool is_index = false;
bool is_query = false;
bool is_integer = true;
while (iarg != -1) {
iarg = getopt_long(argc, argv, "b:c:d:e:g:i:k:m:n:o:q:r:s:t:y:hIQ",
longopts, &index);
switch (iarg) {
case 'b':
if (optarg) {
b = atoi(optarg);
}
break;
case 'c':
if (optarg) {
c = atof(optarg);
}
break;
case 'd':
if (optarg) {
d = atoi(optarg);
}
break;
case 'e':
if (optarg) {
        srand(atoi(optarg));
      }
break;
case 'g':
if (optarg) {
strcpy(ground_truth_file_path, optarg);
}
break;
case 'h':
usage();
return 0;
case 'i':
if (optarg) {
strcpy(index_dir_path, optarg);
}
break;
case 'I':
is_index = true;
break;
case 'k':
if (optarg) {
k = atoi(optarg);
}
break;
case 'm':
if (optarg) {
m = atoi(optarg);
}
break;
case 'n':
if (optarg) {
n = atoi(optarg);
}
break;
case 'o':
if (optarg) {
strcpy(output_file_path, optarg);
}
break;
case 'q':
if (optarg) {
strcpy(query_file_path, optarg);
}
break;
case 'Q':
is_query = true;
break;
case 'r':
if (optarg) {
p_thres = atof(optarg);
}
break;
case 's':
if (optarg) {
strcpy(data_file_path, optarg);
}
break;
case 't':
if (optarg) {
t = atoi(optarg);
}
break;
case 'y':
if (optarg) {
if (strcmp(optarg, "f") == 0) {
is_integer = false;
} else if (strcmp(optarg, "i") != 0) {
is_valid_command = false;
}
}
break;
}
}
if (is_index == is_query) {
is_valid_command = false;
}
if (is_index) {
if (!is_valid_command || d < 0 || m < 0 || n < 0
|| !file_exists(data_file_path) || !dir_exists(index_dir_path)) {
is_valid_command = false;
} else if (b < 0) {
if (is_integer) {
SRS_In_Memory<int> * indexer = new SRS_In_Memory<int>(index_dir_path);
indexer->build_index(n, d, m, data_file_path);
delete indexer;
} else {
SRS_In_Memory<float> * indexer = new SRS_In_Memory<float>(
index_dir_path);
indexer->build_index(n, d, m, data_file_path);
delete indexer;
}
} else {
printf("use R-tree here\n");
}
}
if (is_query) {
if (!is_valid_command || k < 0 || c < 1 || t < 0 || p_thres < 0
|| !file_exists(query_file_path) || !file_exists(ground_truth_file_path)
|| !dir_exists(index_dir_path)) {
is_valid_command = false;
} else {
char * type = new char[10];
float * temp = readParamFile(index_dir_path, n, d, m, b, type);
delete[] temp;
if (b == -1) {
if (strcmp(type, "int") == 0) {
SRS_In_Memory<int> * searcher = new SRS_In_Memory<int>(
index_dir_path);
searcher->restore_index();
query_workload(searcher, k, t, cal_thres(c, p_thres, m),
query_file_path, ground_truth_file_path,
output_file_path);
delete searcher;
} else if (strcmp(type, "float") == 0) {
SRS_In_Memory<float> * searcher = new SRS_In_Memory<float>(
index_dir_path);
searcher->restore_index();
query_workload(searcher, k, t, cal_thres(c, p_thres, m),
query_file_path, ground_truth_file_path,
output_file_path);
delete searcher;
}
delete[] type;
} else {
printf("use R-tree here\n");
}
}
}
if (!is_valid_command) {
usage();
}
return 0;
}
void usage() {
printf("SRS-Mem (v1.0)\n");
printf("Options\n");
//printf("-b {value}\tpage size in bytes (need to be a multiple of 4)\n");
printf("-c {value}\tapproximation ratio (>= 1)\n");
printf("-d {value}\tdimensionality of data\n");
printf("-e {value}\tseed for random generators\n");
printf("-g {string}\tground truth file\n");
printf("-i {string}\tsrs index path (dir)\n");
printf("-I (function)\tindex data\n");
printf("-k {value}\tnumber of neighbors wanted\n");
printf("-m {value}\tdimensionality of the projected space\n");
printf("-n {value}\tcardinality\n");
//printf("-o {string}\toutput file\n");
printf("-q {string}\tquery file\n");
printf("-Q (function)\tprocess queries\n");
printf("-r {value}\tthreshold of early termination condition\n");
printf("-s {string}\tdataset file\n");
printf("-t {value}\tmaximum number of verify points\n");
printf(
"-y {string}\tdata type (i: integer; f: floating number), default value: integer\n");
printf("\n");
printf("Usage:\n");
// printf("Index data (using R-tree)\n");
// printf("-I -b -d -i -m -n -s\n");
printf("Index data (using cover-tree)\n");
printf("-I -d -i -m -n -s [-y]\n");
printf("Process queries\n");
printf("-Q -c -g -i -k -q -r -t\n");
}
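// Example invocations (file names and values are illustrative only):
//   ./srs -I -d 128 -n 1000000 -m 6 -s dataset.bin -i ./index/ -y f
//   ./srs -Q -c 4 -k 10 -t 1000 -r 0.8 -q queries.bin -g ground_truth.txt -i ./index/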
bool file_exists(const char *filename) {
std::ifstream ifile(filename);
if (!ifile) {
fprintf(stderr, "cannot open file %s\n", filename);
}
return bool(ifile);
}
bool dir_exists(const char *dirname) {
DIR * dir = opendir(dirname);
if (dir) {
closedir(dir);
return true;
}
fprintf(stderr, "cannot open dir %s\n", dirname);
return false;
}
template<class T>
void query_workload(SRS_In_Memory<T> * searcher, int k, int t, double thres,
char *query_file_path, char *ground_truth_file_path,
char *output_file_path) {
typedef typename Accumulator<T>::Type ResultType;
int qn, d, gn, gk, tmp;
FILE *qfp = fopen(query_file_path, "r");
FILE *gfp = fopen(ground_truth_file_path, "r");
fscanf(qfp, "%d %d", &qn, &d);
fscanf(gfp, "%d %d", &gn, &gk);
if (gn != qn || gk < k) {
fprintf(
stderr,
"Ground truth file not correct! Please re-generate ground truth with correct parameters.\n");
return;
}
double overall_ratio = 0.0;
double overall_time = 0.0;
T * query = new T[d];
float * gt = new float[gk];
std::vector<res_pair_raw<ResultType> > res;
for (int i = 0; i < qn; ++i) {
fscanf(qfp, "%d", &tmp);
for (int j = 0; j < d; ++j) {
fscanf(qfp, type_format<T>::format(), &query[j]);
}
fscanf(gfp, "%d", &tmp);
for (int j = 0; j < gk; ++j) {
fscanf(gfp, "%f", >[j]);
}
timeval start;
gettimeofday(&start, NULL);
searcher->knn_search(query, k, t + k - 1, thres, res);
timeval end;
gettimeofday(&end, NULL);
overall_time += diff_timeval(end, start);
double ratio = 0.0;
std::sort(res.begin(), res.end());
for (int j = 0; j < k; ++j) {
ratio += sqrt(res[j].dist) / gt[j];
}
// de-comment the following line to print out the average ratio for each query
// printf("%f\n", ratio / k);
overall_ratio += ratio;
}
//printf("=================\n");
printf("Overall Ratio: %f\n", overall_ratio / k / qn);
printf("Average Time (s): %f\n", overall_time / qn);
delete[] query;
delete[] gt;
fclose(qfp);
fclose(gfp);
}
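// cal_thres appears to turn the early-termination probability p_thres into a
// squared distance threshold in the m-dimensional projected space: the
// chi-squared(m) quantile at p_thres divided by c^2 (it returns -1, i.e.
// disabled, when p_thres >= 1).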
double cal_thres(double c, double p_thres, int m) {
if (p_thres >= 1) {
return -1;
}
boost::math::chi_squared chi(m);
return boost::math::quantile(chi, p_thres) / c / c;
}
|
{"hexsha": "6440e9390befe096d1ed1afce3d577dd5df1a324", "size": 11144, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "SRS/srs.cpp", "max_stars_repo_name": "1flei/lccs-lsh", "max_stars_repo_head_hexsha": "c903e557587de366e6ad67e8681b04061989bd6f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8.0, "max_stars_repo_stars_event_min_datetime": "2020-07-18T18:49:43.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-13T13:34:51.000Z", "max_issues_repo_path": "SRS/srs.cpp", "max_issues_repo_name": "HuangQiang/lccs-lsh", "max_issues_repo_head_hexsha": "08a74e198f9068b07f67d08369ac5daa790ee106", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SRS/srs.cpp", "max_forks_repo_name": "HuangQiang/lccs-lsh", "max_forks_repo_head_hexsha": "08a74e198f9068b07f67d08369ac5daa790ee106", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2020-07-06T08:16:50.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-17T02:39:23.000Z", "avg_line_length": 29.4036939314, "max_line_length": 101, "alphanum_fraction": 0.5381371141, "num_tokens": 3052}
|
from imutils import paths
import numpy as np
import imutils
import cv2
from random import randint
new_image = cv2.imread('./dataset/%d.png' %randint(0, 1000))
# loop over the image(s); here a single randomly chosen image is processed
for i in range(1):
cv2.imwrite("org.jpg", new_image)
# Load the image and convert it to grayscale
image = new_image
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Add some extra padding around the image
image = cv2.copyMakeBorder(image, 20, 20, 20, 20, cv2.BORDER_CONSTANT)
# threshold the image (convert it to pure black and white)
ret,thresh = cv2.threshold(image, 115, 255, cv2.THRESH_BINARY)
dilation = cv2.erode(thresh, (17,17), iterations = 1)
dilation = cv2.bilateralFilter(dilation, 3, 75, 75)
dilation = cv2.fastNlMeansDenoising(dilation, None, 15, 15, 7)
dilation = cv2.dilate(dilation, (31,31), iterations = 1)
ret,dilation = cv2.threshold(dilation, 10, 255, cv2.THRESH_BINARY)
# Show the annotated image
cv2.imshow("Output", dilation)
cv2.imwrite("preprocess.jpg", dilation)
cv2.waitKey()
    # find the contours (continuous blobs of pixels) in the image
contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # imutils.grab_contours unwraps the findContours return value across OpenCV 2/3/4
    contours = imutils.grab_contours(contours)
letter_image_regions = []
# Now we can loop through each of the four contours and extract the letter
# inside of each one
for contour in contours:
# Get the rectangle that contains the contour
(x, y, w, h) = cv2.boundingRect(contour)
# Compare the width and height of the contour to detect letters that
# are conjoined into one chunk
if w > 3 and h > 3 and w < 30:
# This is a normal letter by itself
letter_image_regions.append((x, y, w, h))
# If we found more or less than 4 letters in the captcha, our letter extraction
    # didn't work correctly. Skip the image instead of saving bad training data!
if len(letter_image_regions) != 5:
continue
# Sort the detected letter images based on the x coordinate to make sure
# we are processing them from left-to-right so we match the right image
# with the right letter
letter_image_regions = sorted(letter_image_regions, key=lambda x: x[0])
# Create an output image and a list to hold our predicted letters
output = cv2.merge([image] * 3)
predictions = []
    # loop over the letters
for letter_bounding_box in letter_image_regions:
# Grab the coordinates of the letter in the image
x, y, w, h = letter_bounding_box
# Extract the letter from the original image with a 2-pixel margin around the edge
letter_image = image[y - 2:y + h + 2, x - 2:x + w + 2]
# Re-size the letter image to 20x20 pixels to match training data
#letter_image = resize_to_fit(letter_image, 20, 20)
# Turn the single image into a 4d list of images to make Keras happy
letter_image = np.expand_dims(letter_image, axis=2)
letter_image = np.expand_dims(letter_image, axis=0)
# Ask the neural network to make a prediction
#prediction = model.predict(letter_image)
# Convert the one-hot-encoded prediction back to a normal letter
#letter = lb.inverse_transform(prediction)[0]
#predictions.append(letter)
# draw the prediction on the output image
cv2.rectangle(output, (x - 2, y - 2), (x + w + 4, y + h + 4), (0, 255, 0), 1)
#cv2.putText(output, letter, (x - 5, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 255, 0), 2)
# Print the captcha's text
captcha_text = "".join(predictions)
print("CAPTCHA text is: {}".format(captcha_text))
# Show the annotated image
cv2.imshow("Output", output)
cv2.imwrite("contour.jpg", output)
cv2.waitKey()
|
{"hexsha": "a16f21417cf743b9021c6e49918e21d53ba57b57", "size": 3921, "ext": "py", "lang": "Python", "max_stars_repo_path": "contributed/Course-Project_winter96-097/captcha_break/show/contour.py", "max_stars_repo_name": "moh3n9595/class.vision", "max_stars_repo_head_hexsha": "cbcc65fd1f226273d26e44576ca7c3950faea75c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "contributed/Course-Project_winter96-097/captcha_break/show/contour.py", "max_issues_repo_name": "moh3n9595/class.vision", "max_issues_repo_head_hexsha": "cbcc65fd1f226273d26e44576ca7c3950faea75c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "contributed/Course-Project_winter96-097/captcha_break/show/contour.py", "max_forks_repo_name": "moh3n9595/class.vision", "max_forks_repo_head_hexsha": "cbcc65fd1f226273d26e44576ca7c3950faea75c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.7019230769, "max_line_length": 100, "alphanum_fraction": 0.6702371844, "include": true, "reason": "import numpy", "num_tokens": 1052}
|
#!/bin/bash
# -*- mode: julia -*-
#=
JULIA="${JULIA:-julia --color=yes --startup-file=no}"
export JULIA_PROJECT="$(dirname ${BASH_SOURCE[0]})"
set -ex
${JULIA} -e 'using Pkg; Pkg.instantiate()'
export JULIA_LOAD_PATH="@"
exec ${JULIA} "${BASH_SOURCE[0]}" "$@"
=#
import BangBang
import JSON
import Literate
import LiterateTest
import LoadAllPackages
import OnlineStats
import Random
using Documenter
using Transducers
EXAMPLE_PAGES = [
"Tutorial: Missing values" => "tutorials/tutorial_missings.md",
"Tutorial: Parallelism" => "tutorials/tutorial_parallel.md",
"Parallel word count" => "tutorials/words.md",
"Upgrade to new `|>` of Transducers.jl 0.4.39" => "howto/upgrade-to-ixf.md",
"Empty result handling" => "howto/empty_result_handling.md",
"Writing transducers" => "howto/transducers.md",
"Writing reducibles" => "howto/reducibles.md",
"Useful patterns" => "howto/useful_patterns.md",
]
LoadAllPackages.loadall(joinpath((@__DIR__), "Project.toml"))
function transducers_literate(;
inputbase = joinpath(@__DIR__, "..", "examples"),
outputbase = joinpath(@__DIR__, "src"),
examples = EXAMPLE_PAGES,
kwargs...,
)
for (_, outpath) in examples
name, = splitext(basename(outpath))
inputfile = joinpath(inputbase, "$name.jl")
outputdir = joinpath(outputbase, dirname(outpath))
if name == "words"
config = Dict()
else
config = LiterateTest.config()
end
Literate.markdown(
inputfile,
outputdir;
config = config,
documenter = true,
kwargs...,
)
end
end
function transducers_rm_duplicated_docs()
shareddocs =
Docs.Binding.(Ref(Transducers), [:whenstart, :whencomplete, :whencombine]) .=>
Ref(Docs.Binding(Transducers, :wheninit))
for (dup, canonical) in shareddocs
@info "Simplifying docstring: $dup => $canonical"
pop!(Docs.meta(dup.mod), dup, nothing)
canstr = only(values(Docs.meta(canonical.mod)[canonical].docs))
txt = "See [`$(canonical.var)`](@ref $canonical)"
@eval dup.mod $Docs.@doc $txt $(dup.var)
end
end
function transducers_redirection_mapping()
mapping = [
# old page => new page
"manual" => "reference/manual",
"interface" => "reference/interface",
]
old_examples = [
"tutorials/tutorial_missings",
"tutorials/tutorial_parallel",
"tutorials/words",
"howto/empty_result_handling",
"howto/transducers",
"howto/reducibles",
]
for e in old_examples
push!(mapping, joinpath("examples", basename(e)) => e)
end
return mapping
end
function transducers_make_redirections(;
mapping = transducers_redirection_mapping(),
build = joinpath((@__DIR__), "build"),
)
for (old, new) in mapping
oldpath = joinpath(build, old)
newpath = joinpath(build, new)
relurl = relpath(newpath, oldpath)
html = """<meta http-equiv="refresh" content="0; url=$relurl"/>"""
mkpath(oldpath)
write(joinpath(oldpath, "index.html"), html)
end
end
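# For example (illustrative), the mapping "manual" => "reference/manual" writes
# build/manual/index.html whose only content is
# <meta http-equiv="refresh" content="0; url=../reference/manual"/>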
function should_push_preview(event_path = get(ENV, "GITHUB_EVENT_PATH", nothing))
event_path === nothing && return false
event = JSON.parsefile(event_path)
pull_request = get(event, "pull_request", nothing)
pull_request === nothing && return false
labels = [x["name"] for x in pull_request["labels"]]
# https://developer.github.com/v3/activity/events/types/#pullrequestevent
yes = "push_preview" in labels
if yes
@info "Trying to push preview as label `push_preview` is specified." labels
else
@info "Not pushing preview as label `push_preview` is not specified." labels
end
return yes
end
Random.seed!(1234)
transducers_rm_duplicated_docs()
transducers_literate()
examples = EXAMPLE_PAGES
strict = get(ENV, "CI", "false") == "true"
doctest = get(ENV, "CI", "false") == "true"
tutorials = filter(((_, path),) -> startswith(path, "tutorials/"), examples)
howto = filter(((_, path),) -> startswith(path, "howto/"), examples)
@assert issetequal(union(tutorials, howto), examples)
@info "`makedocs` with" strict doctest
makedocs(;
modules = [Transducers],
pages = [
"Home" => "index.md",
"Reference" =>
["Manual" => "reference/manual.md", "Interface" => "reference/interface.md"],
"Tutorials" => tutorials,
"How-to guides" => howto,
"Explanation" => [
"Parallelism" => "parallelism.md", # TODO: merge this to index.md
"Comparison to iterators" => "explanation/comparison_to_iterators.md",
"Glossary" => "explanation/glossary.md",
"State machines" => "explanation/state_machines.md",
hide("Internals" => "explanation/internals.md"),
],
],
repo = "https://github.com/JuliaFolds/Transducers.jl/blob/{commit}{path}#L{line}",
sitename = "Transducers.jl",
authors = "Takafumi Arakaki",
strict = strict,
doctest = doctest,
)
transducers_make_redirections()
deploydocs(;
repo = "github.com/JuliaFolds/Transducers.jl",
push_preview = should_push_preview(),
)
|
{"hexsha": "7ba538e5c6a36199ae4386c851c280493817dcad", "size": 5252, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "docs/make.jl", "max_stars_repo_name": "maartenvd/Transducers.jl", "max_stars_repo_head_hexsha": "1e5e530241370bda913988417c33d396630182dc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 185, "max_stars_repo_stars_event_min_datetime": "2020-06-05T03:56:37.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T22:54:59.000Z", "max_issues_repo_path": "docs/make.jl", "max_issues_repo_name": "maartenvd/Transducers.jl", "max_issues_repo_head_hexsha": "1e5e530241370bda913988417c33d396630182dc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 283, "max_issues_repo_issues_event_min_datetime": "2018-12-29T14:33:16.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-29T17:36:28.000Z", "max_forks_repo_path": "docs/make.jl", "max_forks_repo_name": "maartenvd/Transducers.jl", "max_forks_repo_head_hexsha": "1e5e530241370bda913988417c33d396630182dc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2020-07-02T09:11:26.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-04T12:08:58.000Z", "avg_line_length": 31.2619047619, "max_line_length": 89, "alphanum_fraction": 0.6351865956, "num_tokens": 1367}
|
# -------------------------------------------------------------------------------------------------
# scientific
import numpy as np
# -------------------------------------------------------------------------------------------------
# system
from math import sqrt
from PyQuantum.Common.html import *
from PyQuantum.Bipartite.Cavity import Cavity
import copy
# -------------------------------------------------------------------------------------------------
# Common
from PyQuantum.Common.Matrix import *
from PyQuantum.Common.Assert import *
from PyQuantum.Common.Print import *
# -------------------------------------------------------------------------------------------------
import pandas as pd
class Hamiltonian:
# ---------------------------------------------------------------------------------------------
def __init__(self, capacity, cavity):
Assert(isinstance(cavity, Cavity), "cavity is not Cavity", cf())
Assert(isinstance(capacity, int), "capacity is not int", cf())
Assert(capacity > 0, "capacity <=0", cf())
self.cavity = cavity
self.D = {}
# ------------
n = cavity.n
wc = cavity.wc
wa = cavity.wa
g = cavity.g
# ------------
_min = min(capacity, n)
self.states = {}
# ---------------------------------------
self.init_states(capacity, n)
# ---------------------------------------
self.size = len(self.states)
self.matrix = Matrix(self.size, self.size, dtype=np.complex128)
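        # Basis states are pairs [i1, i2] of excitations in the two atomic
        # ensembles, with the remaining capacity - (i1 + i2) quanta in the
        # cavity mode. The loops below set diagonal elements to
        # (capacity - (i1 + i2)) * wc + (i1 + i2) * wa, couple states that
        # differ by exactly one excitation with strength
        # g * sqrt(photon number) * sqrt((n - m) * (m + 1)), and leave all
        # other entries zero.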
i = 1
for i1 in range(0, _min + 1):
for i2 in range(0, min(n, capacity - i1) + 1):
j = 1
for j1 in range(0, _min + 1):
for j2 in range(0, min(n, capacity - j1) + 1):
if i1 != j1:
p = [i1, j1]
elif i2 != j2:
p = [i2, j2]
else:
p = [1, 2]
mi = min(p[0], p[1])
kappa = sqrt((n - mi) * (mi + 1))
if abs(i1 - j1) + abs(i2 - j2) == 1:
_max = max(capacity - i1 - i2, capacity - j1 - j2)
self.matrix.data[i - 1, j - 1] = g * \
sqrt(max(capacity - i1 - i2,
capacity - j1 - j2)) * kappa
elif abs(i1 - j1) + abs(i2 - j2) == 0:
self.matrix.data[i - 1, j -
1] = (capacity - (i1 + i2)) * wc + (i1 + i2) * wa
else:
self.matrix.data[i - 1, j - 1] = 0
j += 1
i += 1
# ---------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------
def get_index(self, state):
for k, v in self.states.items():
if v == state:
return k
# ---------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------
def to_csv(self, filename):
self.matrix.to_csv(filename)
# ---------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------
def get_states(self):
return self.states
def init_states(self, capacity, n):
_min = min(capacity, n)
count = 0
for i1 in range(0, _min + 1):
for i2 in range(0, min(n, capacity - i1) + 1):
self.states[count] = [i1, i2]
count += 1
# ---------------------------------------------------------------------------------------------
def print_states(self, title="States:"):
print(title, color="green")
print()
for k, v in self.states.items():
print(v)
print()
# ---------------------------------------------------------------------------------------------
class St:
def __init__(self, cv):
self.capacity = cv.capacity
self.n = cv.n
self.n1 = 0
self.n2 = 0
def inc(self):
if self.n2 < self.n and self.n1 + self.n2 < self.capacity:
self.n2 += 1
else:
self.n2 = 0
if self.n1 < self.n and self.n1 + self.n2 < self.capacity:
self.n1 += 1
else:
return False
return True
def print(self):
print("[" + str(self.n1) + "," + str(self.n2) + "]")
|
{"hexsha": "bf8c099ebe6efa4dbb0e34e60ebc2d42776a4114", "size": 4870, "ext": "py", "lang": "Python", "max_stars_repo_path": "build/lib/PyQuantum/Bipartite/Hamiltonian.py", "max_stars_repo_name": "alexfmsu/pyquantum", "max_stars_repo_head_hexsha": "78b09987cbfecf549e67b919bb5cb2046b21ad44", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "build/lib/PyQuantum/Bipartite/Hamiltonian.py", "max_issues_repo_name": "alexfmsu/pyquantum", "max_issues_repo_head_hexsha": "78b09987cbfecf549e67b919bb5cb2046b21ad44", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "build/lib/PyQuantum/Bipartite/Hamiltonian.py", "max_forks_repo_name": "alexfmsu/pyquantum", "max_forks_repo_head_hexsha": "78b09987cbfecf549e67b919bb5cb2046b21ad44", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-07-28T08:40:06.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-16T23:04:58.000Z", "avg_line_length": 32.9054054054, "max_line_length": 99, "alphanum_fraction": 0.3094455852, "include": true, "reason": "import numpy", "num_tokens": 961}
|
# PyZX - Python library for quantum circuit rewriting
# and optimization using the ZX-calculus
# Copyright (C) 2018 - Aleks Kissinger and John van de Wetering
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
if __name__ == '__main__':
sys.path.append('..')
from ..graph.graph import Graph
#from pyzx.graph.base import BaseGraph # TODO fix the right graph import - one of many - right backend etc
try:
import numpy as np
except:
np = None
SQUARE = "square"
LINE = "line"
FULLY_CONNNECTED = "fully_connected"
CIRCLE = "circle"
IBM_QX2 = "ibm_qx2"
IBM_QX3 = "ibm_qx3"
IBM_QX4 = "ibm_qx4"
IBM_QX5 = "ibm_qx5"
IBM_Q20_TOKYO = "ibm_q20_tokyo"
RIGETTI_16Q_ASPEN = "rigetti_16q_aspen"
RIGETTI_8Q_AGAVE = "rigetti_8q_agave"
architectures = [SQUARE, CIRCLE, FULLY_CONNNECTED, LINE, IBM_QX4, IBM_QX2, IBM_QX3, IBM_QX5, IBM_Q20_TOKYO, RIGETTI_8Q_AGAVE, RIGETTI_16Q_ASPEN]
dynamic_size_architectures = [FULLY_CONNNECTED, LINE, CIRCLE, SQUARE]
debug = False
class Architecture():
def __init__(self, name, coupling_graph=None, coupling_matrix=None, backend=None):
"""
Class that represents the architecture of the qubits to be taken into account when routing.
:param coupling_graph: a PyZX Graph representing the architecture, optional
:param coupling_matrix: a 2D numpy array representing the adjacency of the qubits, from which the Graph is created, optional
:param backend: The PyZX Graph backend to be used when creating it from the adjacency matrix, optional
"""
self.name = name
if coupling_graph is None:
self.graph = Graph(backend=backend)
else:
self.graph = coupling_graph
if coupling_matrix is not None:
# build the architecture graph
n = coupling_matrix.shape[0]
self.vertices = self.graph.add_vertices(n)
edges = [(self.vertices[row], self.vertices[col]) for row in range(n) for col in range(n) if
coupling_matrix[row, col] == 1]
self.graph.add_edges(edges)
else:
self.vertices = [v for v in self.graph.vertices()]
self.pre_calc_distances()
self.qubit_map = [i for i, v in enumerate(self.vertices)]
self.n_qubits = len(self.vertices)
def pre_calc_distances(self):
self.distances = {"upper": [self.floyd_warshall(until, upper=True) for until, v in enumerate(self.vertices)],
"full": [self.floyd_warshall(until, upper=False) for until, v in enumerate(self.vertices)]}
def to_quil_device(self):
# Only required here
import networkx as nx
from pyquil.device import NxDevice
edges = [edge for edge in self.graph.edges() if edge[0] in self.vertices]
topology = nx.from_edgelist(edges)
device = NxDevice(topology)
return device
def visualize(self, filename=None):
import networkx as nx
import matplotlib.pyplot as plt
plt.switch_backend('agg')
g = nx.Graph()
g.add_nodes_from(self.vertices)
g.add_edges_from(self.graph.edges())
nx.draw(g, with_labels=True, font_weight='bold')
if filename is None:
filename = self.name + ".png"
plt.savefig(filename)
def floyd_warshall(self, exclude_excl, upper=True):
"""
Implementation of the Floyd-Warshall algorithm to calculate the all-pair distances in a given graph
:param exclude_excl: index up to which qubit should be excluded from the distances
:param upper: whether use bidirectional edges or only ordered edges (src, tgt) such that src > tgt, default True
:return: a dict with for each pair of qubits in the graph, a tuple with their distance and the corresponding shortest path
"""
# https://en.wikipedia.org/wiki/Floyd%E2%80%93Warshall_algorithm
distances = {}
vertices = self.vertices[exclude_excl:] if upper else self.vertices[:exclude_excl + 1]
for edge in self.graph.edges():
src, tgt = self.graph.edge_st(edge)
if src in vertices and tgt in vertices:
if upper:
distances[(src, tgt)] = (1, [(src, tgt)])
distances[(tgt, src)] = (1, [(tgt, src)])
elif src > tgt:
distances[(src, tgt)] = (1, [(src, tgt)])
else:
distances[(tgt, src)] = (1, [(tgt, src)])
for v in vertices:
distances[(v, v)] = (0, [])
for i, v0 in enumerate(vertices):
for j, v1 in enumerate(vertices if upper else vertices[:i + 1]):
for v2 in vertices if upper else vertices[: i + j + 1]:
if (v0, v1) in distances.keys():
if (v1, v2) in distances.keys():
if (v0, v2) not in distances.keys() or distances[(v0, v2)][0] > distances[(v0, v1)][0] + \
distances[(v1, v2)][0]:
distances[(v0, v2)] = (distances[(v0, v1)][0] + distances[(v1, v2)][0],
distances[(v0, v1)][1] + distances[(v1, v2)][1])
if upper:
distances[(v2, v0)] = (distances[(v0, v1)][0] + distances[(v1, v2)][0],
distances[(v2, v1)][1] + distances[(v1, v0)][1])
return distances
def steiner_tree(self, start, nodes, upper=True):
"""
Approximates the steiner tree given the architecture, a root qubit and the other qubits that should be present.
This is done using the pre-calculated all-pairs shortest distance and Prim's algorithm for creating a minimum spanning tree
:param start: The index of the root qubit to be used
:param nodes: The indices of the other qubits that should be present in the steiner tree
:param upper: Whether the steiner tree is used for creating an upper triangular matrix or a full reduction.
:yields: First yields all edges from the tree top-to-bottom, finished with None, then yields all edges from the tree bottom-up, finished with None.
"""
        # Approximated by calculating the all-pairs shortest paths and then solving the minimum spanning tree over the subset of vertices and their respective shortest paths.
# https://en.wikipedia.org/wiki/Steiner_tree_problem#Approximating_the_Steiner_tree
        # The all-pairs shortest paths are pre-calculated and the minimum spanning tree is solved with Prim's algorithm
# https://en.wikipedia.org/wiki/Prim%27s_algorithm
# returns an iterator that walks the steiner tree, yielding (adj_node, leaf) pairs. If the walk is finished, it yields None
state = [start, [n for n in nodes]]
root = start
# TODO deal with qubit mapping
vertices = [root]
edges = []
debug and print(root, upper, nodes)
distances = self.distances["upper"][root] if upper else self.distances["full"][root]
steiner_pnts = []
while nodes != []:
options = [(node, v, *distances[(v, node)]) for node in nodes for v in (vertices + steiner_pnts) if
(v, node) in distances.keys()]
best_option = min(options, key=lambda x: x[2])
debug and print("Adding to tree: vertex ", best_option[0], "Edges ", best_option[3])
vertices.append(best_option[0])
edges.extend(best_option[3])
steiner = [v for edge in best_option[3] for v in edge if v not in vertices]
debug and print(steiner)
steiner_pnts.extend(steiner)
nodes.remove(best_option[0])
edges = set(edges) # remove duplicates
if debug:
print("edges:", edges)
print("nodes:", vertices)
print("steiner points:", steiner_pnts)
# First go through the tree to find and remove zeros
state += [[e for e in edges], [v for v in vertices], [s for s in steiner_pnts]]
vs = {root}
n_edges = len(edges)
yielded_edges = set()
debug_count = 0
yield_count = 0
warning = 0
while len(yielded_edges) < n_edges:
es = [e for e in edges for v in vs if e[0] == v]
old_vs = [v for v in vs]
yielded = False
for edge in es:
yield edge
vs.add(edge[1])
if edge in yielded_edges:
print("DOUBLE yielding! - should not be possible!")
yielded_edges.add(edge)
yielded = True
yield_count += 1
[vs.remove(v) for v in old_vs]
if not yielded:
debug and print("leaf!")
debug_count += 1
if debug_count > len(vertices):
print("infinite loop!", warning)
warning += 1
if yield_count > len(edges):
print("Yielded more edges than existing... This should not be possible!", warning)
warning += 1
if warning > 5:
print(state, yielded_edges)
# input("note it down")
break
yield None
# Walk the tree bottom up to remove all ones.
yield_count = 0
while len(edges) > 0:
# find leaf nodes:
debug and print(vertices, steiner_pnts, edges)
vs_to_consider = [vertex for vertex in vertices if vertex not in [e0 for e0, e1 in edges]] + \
[vertex for vertex in steiner_pnts if vertex not in [e0 for e0, e1 in edges]]
yielded = False
for v in vs_to_consider:
# Get the edge that is connected to this leaf node
for edge in [e for e in edges if e[1] == v]:
yield edge
edges.remove(edge)
yielded = True
yield_count += 1
# yield map(lambda i: self.qubit_map[i], edge)
if not yielded:
print("Infinite loop!", warning)
warning += 1
if yield_count > n_edges:
print("Yielded more edges than existing again... This should not be possible!!", warning)
warning += 1
if warning > 10:
print(state, edges, yield_count)
# input("Note it down!")
break
yield None
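# Illustrative (hypothetical) consumption of the two-phase generator above, assuming
# `arch` is an Architecture and `root`/`nodes` are valid qubit indices:
#   gen = arch.steiner_tree(root, nodes)
#   top_down = list(iter(lambda: next(gen), None))   # edges walked root -> leaves
#   bottom_up = list(iter(lambda: next(gen), None))  # edges walked leaves -> root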
def dynamic_size_architecture_name(base_name, n_qubits):
return str(n_qubits) + "q-" + base_name
def connect_vertices_in_line(vertices):
return [(vertices[i], vertices[i+1]) for i in range(len(vertices)-1)]
def connect_vertices_as_grid(width, height, vertices):
if len(vertices) != width * height:
raise KeyError("To make a grid, you need vertices exactly equal to width*height, but got %d=%d*%d." % (len(vertices), width, height))
edges = connect_vertices_in_line(vertices)
horizontal_lines = [vertices[i*width: (i+1)*width] for i in range(height)]
for line1, line2 in zip(horizontal_lines, horizontal_lines[1:]):
new_edges = [(v1, v2) for v1, v2 in zip(line1[:-1], reversed(line2[1:]))]
edges.extend(new_edges)
return edges
def create_line_architecture(n_qubits, backend=None, **kwargs):
graph = Graph(backend=backend)
vertices = graph.add_vertices(n_qubits)
edges = connect_vertices_in_line(vertices)
graph.add_edges(edges)
name = dynamic_size_architecture_name(LINE, n_qubits)
return Architecture(name=name, coupling_graph=graph, backend=backend, **kwargs)
def create_circle_architecture(n_qubits, backend=None, **kwargs):
graph = Graph(backend=backend)
vertices = graph.add_vertices(n_qubits)
edges = connect_vertices_in_line(vertices)
edges.append((vertices[-1], vertices[0]))
graph.add_edges(edges)
name = dynamic_size_architecture_name(CIRCLE, n_qubits)
return Architecture(name=name, coupling_graph=graph, backend=backend, **kwargs)
def create_square_architecture(n_qubits, backend=None, **kwargs):
# No floating point errors
sqrt_qubits = 0
for n in range(n_qubits):
if n_qubits == n**2:
sqrt_qubits = n
if n**2 > n_qubits:
break
if sqrt_qubits == 0:
raise KeyError("Sqaure architecture requires a square number of qubits, but got " + str(n_qubits))
graph = Graph(backend=backend)
vertices = graph.add_vertices(n_qubits)
edges = connect_vertices_as_grid(sqrt_qubits, sqrt_qubits, vertices)
graph.add_edges(edges)
name = dynamic_size_architecture_name(SQUARE, n_qubits)
return Architecture(name=name, coupling_graph=graph, backend=backend, **kwargs)
"""
def create_9q_square_architecture(**kwargs):
m = np.array([
[0, 1, 0, 0, 0, 1, 0, 0, 0],
[1, 0, 1, 0, 1, 0, 0, 0, 0],
[0, 1, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 1, 0, 0, 0, 1],
[0, 1, 0, 1, 0, 1, 0, 1, 0],
[1, 0, 0, 0, 1, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 0, 1, 0, 0, 0, 1, 0]
])
return Architecture(name=SQUARE_9Q, coupling_matrix=m, **kwargs)
def create_5q_line_architecture(**kwargs):
m = np.array([
[0, 1, 1, 0, 0],
[1, 0, 1, 0, 0],
[1, 1, 0, 1, 0],
[0, 0, 1, 0, 1],
[0, 0, 0, 1, 0]
])
return Architecture(name=LINE_5Q, coupling_matrix=m, **kwargs)
"""
def create_ibm_qx2_architecture(**kwargs):
m = np.array([
[0, 1, 1, 0, 0],
[1, 0, 1, 0, 0],
[1, 1, 0, 1, 1],
[0, 0, 1, 0, 1],
[0, 0, 1, 1, 0]
])
return Architecture(IBM_QX2, coupling_matrix=m, **kwargs)
def create_ibm_qx4_architecture(**kwargs):
m = np.array([
[0, 1, 1, 0, 0],
[1, 0, 1, 0, 0],
[1, 1, 0, 1, 1],
[0, 0, 1, 0, 1],
[0, 0, 1, 1, 0]
])
return Architecture(IBM_QX4, coupling_matrix=m, **kwargs)
def create_ibm_qx3_architecture(**kwargs):
m = np.array([
#0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
[0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], #0
[1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], #1
[0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], #2
[0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], #3
[0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], #4
[1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], #5
[0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1], #6
[0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0], #7
[0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0], #8
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0], #9
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0], #10
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0], #11
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0], #12
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0], #13
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1], #14
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0] #15
])
return Architecture(IBM_QX3, coupling_matrix=m, **kwargs)
def create_ibm_qx5_architecture(**kwargs):
m = np.array([
#0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], #0
[1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0], #1
[0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0], #2
[0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0], #3
[0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0], #4
[0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0], #5
[0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0], #6
[0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0], #7
[0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0], #8
[0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0], #9
[0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0], #10
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0], #11
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0], #12
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0], #13
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1], #14
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0] #15
])
return Architecture(IBM_QX5, coupling_matrix=m, **kwargs)
def create_ibm_q20_tokyo_architecture(backend=None, **kwargs):
graph = Graph(backend=backend)
vertices = graph.add_vertices(20)
edges = connect_vertices_as_grid(5, 4, vertices)
cross_edges = [
(1, 7), (2, 8),
(3, 5), (4, 6),
(6, 12), (7, 13),
(8, 10), (9, 11),
(11, 17), (12, 18),
(13, 15), (14, 16)
]
edges.extend([(vertices[v1], vertices[v2]) for v1, v2 in cross_edges])
graph.add_edges(edges)
return Architecture(name=IBM_Q20_TOKYO, coupling_graph=graph, backend=backend, **kwargs)
def create_rigetti_16q_aspen_architecture(backend=None, **kwargs):
graph = Graph(backend=backend)
vertices = graph.add_vertices(16)
edges = connect_vertices_in_line(vertices)
extra_edges = [(0, 7), (8, 15), (15, 0)]
edges += [(vertices[v1], vertices[v2]) for v1, v2 in extra_edges]
graph.add_edges(edges)
return Architecture(RIGETTI_16Q_ASPEN, coupling_graph=graph, backend=backend, **kwargs)
def create_rigetti_8q_agave_architecture(**kwargs):
m = np.array([
[0, 1, 0, 0, 0, 0, 0, 1],
[1, 0, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 1, 0]
])
return Architecture(RIGETTI_8Q_AGAVE, coupling_matrix=m, **kwargs)
def create_fully_connected_architecture(n_qubits=None, **kwargs):
if n_qubits is None:
print("Warning: size is not given for the fully connected architecuture, using 9 as default.")
n_qubits = 9
m = np.ones(shape=(n_qubits, n_qubits))
for i in range(n_qubits):
m[i][i] = 0
name = dynamic_size_architecture_name(FULLY_CONNNECTED, n_qubits)
return Architecture(name, coupling_matrix=m, **kwargs)
def create_architecture(name, **kwargs):
# Source Rigetti architectures: https://www.rigetti.com/qpu # TODO create the architectures from names in pyquil.list_quantum_computers() <- needs mapping
# Source IBM architectures: http://iic.jku.at/files/eda/2018_tcad_mapping_quantum_circuit_to_ibm_qx.pdf
# IBM architectures are currently ignoring CNOT direction.
if isinstance(name, Architecture):
return name
if name == SQUARE:
return create_square_architecture(**kwargs)
elif name == LINE:
return create_line_architecture(**kwargs)
elif name == FULLY_CONNNECTED:
return create_fully_connected_architecture(**kwargs)
elif name == CIRCLE:
return create_circle_architecture(**kwargs)
elif name == IBM_QX2:
return create_ibm_qx2_architecture(**kwargs)
elif name == IBM_QX3:
return create_ibm_qx3_architecture(**kwargs)
elif name == IBM_QX4:
return create_ibm_qx4_architecture(**kwargs)
elif name == IBM_QX5:
return create_ibm_qx5_architecture(**kwargs)
elif name == IBM_Q20_TOKYO:
return create_ibm_q20_tokyo_architecture(**kwargs)
elif name == RIGETTI_16Q_ASPEN:
return create_rigetti_16q_aspen_architecture(**kwargs)
elif name == RIGETTI_8Q_AGAVE:
return create_rigetti_8q_agave_architecture(**kwargs)
else:
raise KeyError("name" + str(name) + "not recognized as architecture name. Please use one of", *architectures)
def colored_print_9X9(np_array):
"""
Prints a 9x9 numpy array with colors representing the distances in a 9x9 square architecture
:param np_array: the array
"""
if np_array.shape == (9,9):
CRED = '\033[91m '
CEND = '\033[0m '
CGREEN = '\33[32m '
CYELLOW = '\33[33m '
CBLUE = '\33[34m '
CWHITE = '\33[37m '
CVIOLET = '\33[35m '
color = [CBLUE, CGREEN, CVIOLET, CYELLOW, CRED]
layout = [[0,1,2,3,2,1,2,3,4],
[1,0,1,2,1,2,3,2,3],
[2,1,0,1,2,3,4,3,2],
[3,2,1,0,1,2,3,2,1],
[2,1,2,1,0,1,2,1,2],
[1,2,3,2,1,0,1,2,3],
[2,3,4,3,2,1,0,1,2],
[3,2,3,2,1,2,1,0,1],
[4,3,2,1,2,3,2,1,0]]
for i, l in enumerate(layout):
print('[', ', '.join([(color[c] + '1' if v ==1 else CWHITE + '0') for c, v in zip(l, np_array[i])]), CEND, ']')
else:
print(np_array)
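# Hedged usage sketch (not part of the original module): any 9x9 array of 0/1
# entries can be rendered with the colour map above, e.g.
#
#     m = np.random.randint(0, 2, size=(9, 9))
#     colored_print_9X9(m)
#
# Arrays of any other shape fall back to the plain print() branch.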
if __name__ == '__main__':
n_qubits = 25
for name in dynamic_size_architectures:
arch = create_architecture(name, n_qubits=n_qubits)
arch.visualize()
arch = create_architecture(IBM_Q20_TOKYO)
arch.visualize()
|
{"hexsha": "49ef72ad4acb6bd0159c2f6fe17e52b9ad41a8af", "size": 21566, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyzx/routing/architecture.py", "max_stars_repo_name": "VladMoldoveanu/pyzx", "max_stars_repo_head_hexsha": "8a7d7dabf8492f570fc3f0851bcdb7377144dbc4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyzx/routing/architecture.py", "max_issues_repo_name": "VladMoldoveanu/pyzx", "max_issues_repo_head_hexsha": "8a7d7dabf8492f570fc3f0851bcdb7377144dbc4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyzx/routing/architecture.py", "max_forks_repo_name": "VladMoldoveanu/pyzx", "max_forks_repo_head_hexsha": "8a7d7dabf8492f570fc3f0851bcdb7377144dbc4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.8333333333, "max_line_length": 174, "alphanum_fraction": 0.5697857739, "include": true, "reason": "import numpy,import networkx", "num_tokens": 7114}
|
import h5py
import logging
import numpy as np
from pybind_isce3.core import LUT2d, DateTime, Orbit, Attitude, EulerAngles
from pybind_isce3.product import RadarGridParameters
from pybind_isce3.geometry import DEMInterpolator
from pybind_nisar.h5 import set_string
from pybind_nisar.types import complex32
from pybind_nisar.products.readers.Raw import Raw
from pybind_nisar.workflows.h5_prep import add_geolocation_grid_cubes_to_hdf5
log = logging.getLogger("SLCWriter")
# TODO refactor isce::io::setRefEpoch
def time_units(epoch: DateTime) -> str:
# XXX isce::io::*RefEpoch don't parse or serialize fraction.
if epoch.frac != 0.0:
raise ValueError("Reference epoch must have integer seconds.")
date = "{:04d}-{:02d}-{:02d}".format(epoch.year, epoch.month, epoch.day)
time = "{:02d}:{:02d}:{:02d}".format(epoch.hour, epoch.minute, epoch.second)
return "seconds since " + date + " " + time
class SLC(h5py.File):
def __init__(self, *args, band="LSAR", product="RSLC", **kw):
super().__init__(*args, **kw)
self.band = band
self.product = product
self.root = self.create_group(f"/science/{band}/{product}")
self.idpath = f"/science/{band}/identification"
self.attrs["Conventions"] = np.string_("CF-1.7")
self.attrs["contact"] = np.string_("nisarops@jpl.nasa.gov")
self.attrs["institution"] = np.string_("NASA JPL")
self.attrs["mission_name"] = np.string_("NISAR")
self.attrs["reference_document"] = np.string_("TBD")
self.attrs["title"] = np.string_("NISAR L1 RSLC Product")
def create_dataset(self, *args, **kw):
log.debug(f"Creating dataset {args[0]}")
return super().create_dataset(*args, **kw)
def create_group(self, *args, **kw):
log.debug(f"Creating group {args[0]}")
return super().create_group(*args, **kw)
def set_parameters(self, dop: LUT2d, epoch: DateTime, frequency='A'):
log.info(f"Saving Doppler for frequency {frequency}")
g = self.root.require_group("metadata/processingInformation/parameters")
# Actual LUT goes into a subdirectory, not created by serialization.
name = f"frequency{frequency}"
fg = g.require_group(name)
dop.save_to_h5(g, f"{name}/dopplerCentroid", epoch, "Hz")
# TODO veff, fmrate not used anywhere afaict except product io.
v = np.zeros_like(dop.data)
g.require_dataset("effectiveVelocity", v.shape, v.dtype, data=v)
fg.require_dataset("azimuthFMRate", v.shape, v.dtype, data=v)
# TODO weighting, ref height
if "rangeChirpWeighting" not in g:
g.require_dataset("rangeChirpWeighting", v.shape, np.float32,
data=v)
if "referenceTerrainHeight" not in g:
ref_terrain_height = np.zeros((v.shape[0]))
g.require_dataset("referenceTerrainHeight", (v.shape[0],),
np.float32, data=ref_terrain_height)
# TODO populate processingInformation/algorithms
algorithms_ds = self.root.require_group("metadata/processingInformation/algorithms")
algorithms_dataset_list = ["ISCEVersion",
"SWSTCorrection",
"azimuthCompression",
"azimuthPresumming",
"dopplerCentroidEstimation",
"driftCompensator",
"elevationAntennaPatternCorrection",
"internalCalibration",
"patchProcessing",
"postProcessing",
"rangeCellMigration",
"rangeCompression",
"rangeDependentGainCorrection",
"rangeReferenceFunctionGenerator",
"rangeSpreadingLossCorrection",
"secondaryRangeCompression"]
for algorithm in algorithms_dataset_list:
if algorithm in g:
continue
algorithms_ds.require_dataset(algorithm, (), 'S27',
data=np.string_(""))
# TODO populate processingInformation/inputs
inputs_ds = self.root.require_group("metadata/processingInformation/inputs")
inputs_dataset_list = ["l0bGranules",
"orbitFiles",
"attitudeFiles",
"auxcalFiles",
"configFiles",
"demFiles"]
for inp in inputs_dataset_list:
if inp in g:
continue
inputs_ds.require_dataset(inp, (), 'S1', data=np.string_(""))
def swath(self, frequency="A") -> h5py.Group:
return self.root.require_group(f"swaths/frequency{frequency}")
def add_polarization(self, frequency="A", pol="HH"):
assert len(pol) == 2 and pol[0] in "HVLR" and pol[1] in "HV"
g = self.swath(frequency)
name = "listOfPolarizations"
if name in g:
pols = np.array(g[name])
assert(pol not in pols)
pols = np.append(pols, [pol])
del g[name]
else:
pols = np.array([pol], dtype="S2")
dset = g.create_dataset(name, data=pols)
desc = f"List of polarization layers with frequency{frequency}"
dset.attrs["description"] = np.string_(desc)
def create_image(self, frequency="A", pol="HH", **kw) -> h5py.Dataset:
log.info(f"Creating SLC image for frequency={frequency} pol={pol}")
assert len(pol) == 2 and pol[0] in "HVLR" and pol[1] in "HV"
self.add_polarization(frequency, pol)
kw.setdefault("dtype", complex32)
dset = self.swath(frequency).create_dataset(pol, **kw)
dset.attrs["description"] = np.string_(f"Focused SLC image ({pol})")
dset.attrs["units"] = np.string_("DN")
return dset
def update_swath(self, t: np.array, epoch: DateTime, r: np.array,
fc: float, frequency="A"):
g = self.swath(frequency)
# Time scale is in parent of group. Use require_dataset to assert
# matching time scale on repeated calls.
d = g.parent.require_dataset("zeroDopplerTime", t.shape, t.dtype, data=t)
d.attrs["units"] = np.string_(time_units(epoch))
d.attrs["description"] = np.string_(
"CF compliant dimension associated with azimuth time")
d = g.parent.require_dataset("zeroDopplerTimeSpacing", (), float)
d[()] = t[1] - t[0]
d.attrs["units"] = np.string_("seconds")
d.attrs["description"] = np.string_("Time interval in the along track"
" direction for raster layers. This is same as the spacing between"
" consecutive entries in the zeroDopplerTime array")
d = g.require_dataset("slantRange", r.shape, r.dtype, data=r)
d.attrs["units"] = np.string_("meters")
d.attrs["description"] = np.string_("CF compliant dimension associated"
" with slant range")
d = g.require_dataset("slantRangeSpacing", (), float)
d[()] = r[1] - r[0]
d.attrs["units"] = np.string_("meters")
d.attrs["description"] = np.string_("Slant range spacing of grid. Same"
" as difference between consecutive samples in slantRange array")
d = g.require_dataset("processedCenterFrequency", (), float)
d[()] = fc
d.attrs["units"] = np.string_("Hz")
d.attrs["description"] = np.string_("Center frequency of the processed"
" image in Hz")
# TODO other parameters filled with bogus values for now, no units
g.require_dataset("acquiredCenterFrequency", (), float)[()] = fc
g.require_dataset("acquiredRangeBandwidth", (), float)[()] = 20e6
g.require_dataset("nominalAcquisitionPRF", (), float)[()] = 1910.
g.require_dataset("numberOfSubSwaths", (), int)[()] = 1
g.require_dataset("processedAzimuthBandwidth", (), float)[()] = 1200.
g.require_dataset("processedRangeBandwidth", (), float)[()] = 20e6
g.require_dataset("sceneCenterAlongTrackSpacing", (), float)[()] = 4.
g.require_dataset("sceneCenterGroundRangeSpacing", (), float)[()] = 12.
d = g.require_dataset("validSamplesSubSwath1", (len(t), 2), 'int32')
d[:] = (0, len(r))
def set_orbit(self, orbit: Orbit, accel=None, type="Custom"):
log.info("Writing orbit to SLC")
g = self.root.require_group("metadata/orbit")
orbit.save_to_h5(g)
# interpMethod not in L1 spec. Delete it?
# Add description attributes. Should these go in saveToH5 method?
g["time"].attrs["description"] = np.string_("Time vector record. This"
" record contains the time corresponding to position, velocity,"
" acceleration records")
g["position"].attrs["description"] = np.string_("Position vector"
" record. This record contains the platform position data with"
" respect to WGS84 G1762 reference frame")
g["velocity"].attrs["description"] = np.string_("Velocity vector"
" record. This record contains the platform velocity data with"
" respect to WGS84 G1762 reference frame")
# Orbit source/type
d = g.require_dataset("orbitType", (), "S10", data=np.string_(type))
d.attrs["description"] = np.string_("PrOE (or) NOE (or) MOE (or) POE"
" (or) Custom")
# acceleration not stored in isce3 Orbit class.
if accel is None:
log.warning("Populating orbit/acceleration with zeros")
accel = np.zeros_like(orbit.velocity)
shape = orbit.velocity.shape
if accel.shape != shape:
raise ValueError("Acceleration dims must match orbit fields.")
d = g.require_dataset("acceleration", shape, float, data=accel)
d.attrs["description"] = np.string_("Acceleration vector record. This"
" record contains the platform acceleration data with respect to"
" WGS84 G1762 reference frame")
d.attrs["units"] = np.string_("meters per second squared")
def set_attitude(self, attitude: Attitude, epoch: DateTime, type="Custom"):
log.info("Writing attitude to SLC")
g = self.root.require_group("metadata/attitude")
d = g.require_dataset("attitudeType", (), "S10", data=np.string_(type))
d.attrs["description"] = np.string_("PrOE (or) NOE (or) MOE (or) POE"
" (or) Custom")
t = np.asarray(attitude.time)
d = g.require_dataset("time", t.shape, t.dtype, data=t)
d.attrs["description"] = np.string_("Time vector record. This record"
" contains the time corresponding to attitude and quaternion"
" records")
d.attrs["units"] = np.string_(time_units(epoch))
# TODO attitude rates
n = len(attitude.time)
qdot = np.zeros((n, 3))
d = g.require_dataset("angularVelocity", (n,3), float, data=qdot)
d.attrs["units"] = np.string_("radians per second")
d.attrs["description"] = np.string_("Attitude angular velocity vectors"
" (wx, wy, wz)")
qv = np.array([[q.w, q.x, q.y, q.z] for q in attitude.quaternions])
d = g.require_dataset("quaternions", qv.shape, qv.dtype, data=qv)
d.attrs["units"] = np.string_("unitless")
d.attrs["description"] = np.string_("Attitude quaternions"
" (q0, q1, q2, q3)")
rpy = np.asarray([[e.roll, e.pitch, e.yaw] for e in
[EulerAngles(q) for q in attitude.quaternions]])
d = g.require_dataset("eulerAngles", rpy.shape, rpy.dtype, data=rpy)
d.attrs["units"] = np.string_("radians")
d.attrs["description"] = np.string_("Attitude Euler angles"
" (roll, pitch, yaw)")
def copy_identification(self, raw: Raw, track: int = 0, frame: int = 0,
polygon: str = None, start_time: DateTime = None,
end_time: DateTime = None):
"""Copy the identification metadata from a L0B product. Bounding
polygon and start/end time will be updated if not None.
"""
log.info(f"Populating identification based on {raw.filename}")
# Most parameters are just copies of input ID.
if self.idpath in self.root:
del self.root[self.idpath]
with h5py.File(raw.filename, 'r', libver='latest', swmr=True) as fd:
self.root.copy(fd[raw.IdentificationPath], self.idpath)
g = self.root[self.idpath]
# Of course product type is different.
d = set_string(g, "productType", self.product)
d.attrs["description"] = np.string_("Product type")
# L0B doesn't know about track/frame, so have to add it.
d = g.require_dataset("trackNumber", (), 'uint8', data=track)
d.attrs["units"] = np.string_("unitless")
d.attrs["description"] = np.string_("Track number")
d = g.require_dataset("frameNumber", (), 'uint16', data=frame)
d.attrs["units"] = np.string_("unitless")
d.attrs["description"] = np.string_("Frame number")
# Polygon different due to reskew and possibly multiple input L0Bs.
if polygon is not None:
d = set_string(g, "boundingPolygon", polygon)
d.attrs["epsg"] = 4326
d.attrs["description"] = np.string_("OGR compatible WKT"
" representation of bounding polygon of the image")
d.attrs["ogr_geometry"] = np.string_("polygon")
else:
log.warning("SLC bounding polygon not updated. Using L0B polygon.")
# Start/end time can be customized via runconfig and generally are
# different anyway due to reskew.
if start_time is not None:
d = set_string(g, "zeroDopplerStartTime", start_time.isoformat())
d.attrs["description"] = np.string_("Azimuth start time of product")
else:
log.warning("SLC start time not updated. Using L0B start time.")
if end_time is not None:
d = set_string(g, "zeroDopplerEndTime", end_time.isoformat())
d.attrs["description"] = np.string_("Azimuth stop time of product")
else:
log.warning("SLC end time not updated. Using L0B end time.")
def set_geolocation_grid(self, orbit: Orbit, grid: RadarGridParameters,
doppler: LUT2d, epsg=4326, dem=DEMInterpolator(),
threshold=1e-8, maxiter=50, delta_range=10.0):
log.info(f"Creating geolocationGrid.")
# TODO Get DEM stats. Until then just span all Earthly values.
heights = np.linspace(-500, 9000, 20)
# Figure out decimation factors that give < 500 m spacing.
max_spacing = 500.
t = (grid.sensing_mid +
(grid.ref_epoch - orbit.reference_epoch).total_seconds())
_, v = orbit.interpolate(t)
dx = np.linalg.norm(v) / grid.prf
tskip = int(np.floor(max_spacing / dx))
rskip = int(np.floor(max_spacing / grid.range_pixel_spacing))
grid = grid[::tskip, ::rskip]
group_name = f"{self.root.name}/metadata/geolocationGrid"
rslc_doppler = LUT2d() # RSLCs are zero-Doppler by definition
# Change spelling of geo2rdr params
tol = dict(
threshold_geo2rdr = threshold,
numiter_geo2rdr = maxiter,
delta_range = delta_range,
)
add_geolocation_grid_cubes_to_hdf5(self, group_name, grid, heights,
orbit, doppler, rslc_doppler, epsg, **tol)
def add_calibration_section(self, frequency, pol,
az_time_orig_vect: np.array,
epoch: DateTime,
slant_range_orig_vect: np.array):
assert len(pol) == 2 and pol[0] in "HVLR" and pol[1] in "HV"
calibration_section_sampling = 50
g = self.root.require_group("metadata/calibrationInformation")
if "zeroDopplerTime" in g:
t = g['zeroDopplerTime']
else:
t = az_time_orig_vect[0:-1:calibration_section_sampling]
d = g.require_dataset("zeroDopplerTime", t.shape, t.dtype, data=t)
d.attrs["units"] = np.string_(time_units(epoch))
d.attrs["description"] = np.string_(
"CF compliant dimension associated with azimuth time")
if "slantRange" in g:
r = g['slantRange']
else:
r = slant_range_orig_vect[0:-1:calibration_section_sampling]
d = g.require_dataset("slantRange", r.shape, r.dtype, data=r)
d.attrs["units"] = np.string_("meters")
d.attrs["description"] = np.string_("CF compliant dimension associated"
" with slant range")
dummy_array = np.ones((t.size, r.size), dtype=np.float32)
if "geometry/beta0" not in g:
d = g.require_dataset(f"geometry/beta0", dummy_array.shape,
np.float32, data=dummy_array)
d.attrs["description"] = np.string_(
"2D LUT to convert DN to beta 0 assuming as a function"
" of zero doppler time and slant range")
if "geometry/sigma0" not in g:
d = g.require_dataset(f"geometry/sigma0", dummy_array.shape,
np.float32, data=dummy_array)
d.attrs["description"] = np.string_(
"2D LUT to convert DN to sigma 0 assuming as a function"
" of zero doppler time and slant range")
if "geometry/gamma0" not in g:
d = g.require_dataset(f"geometry/gamma0", dummy_array.shape,
np.float32, data=dummy_array)
d.attrs["description"] = np.string_(
"2D LUT to convert DN to gamma 0 as a function of zero"
" doppler time and slant range")
d = g.require_dataset(
f"frequency{frequency}/{pol}/elevationAntennaPattern",
dummy_array.shape, np.float32, data=dummy_array)
d.attrs["description"] = np.string_(
"Complex two-way elevation antenna pattern")
dummy_array = np.zeros((t.size, r.size))
d = g.require_dataset(
f"frequency{frequency}/{pol}/nes0",
dummy_array.shape, np.float32, data=dummy_array)
d.attrs["description"] = np.string_(
"Thermal noise equivalent sigma0")
|
{"hexsha": "5a846aafbf6b2f6a8adbcc53ab400aceb5cd427b", "size": 19071, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/packages/pybind_nisar/products/writers/SLC.py", "max_stars_repo_name": "isce-framework/isce3", "max_stars_repo_head_hexsha": "59cdd2c659a4879367db5537604b0ca93d26b372", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 64, "max_stars_repo_stars_event_min_datetime": "2019-08-06T19:22:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T17:11:46.000Z", "max_issues_repo_path": "python/packages/pybind_nisar/products/writers/SLC.py", "max_issues_repo_name": "isce-framework/isce3", "max_issues_repo_head_hexsha": "59cdd2c659a4879367db5537604b0ca93d26b372", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2020-09-01T22:46:53.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-04T00:05:28.000Z", "max_forks_repo_path": "python/packages/pybind_nisar/products/writers/SLC.py", "max_forks_repo_name": "isce-framework/isce3", "max_forks_repo_head_hexsha": "59cdd2c659a4879367db5537604b0ca93d26b372", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 29, "max_forks_repo_forks_event_min_datetime": "2019-08-05T21:40:55.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T00:17:03.000Z", "avg_line_length": 50.1868421053, "max_line_length": 92, "alphanum_fraction": 0.583241571, "include": true, "reason": "import numpy", "num_tokens": 4424}
|
'''
Test Sciris printing functions.
'''
import numpy as np
import sciris as sc
def test_colorize():
sc.heading('Test text colorization')
sc.colorize(showhelp=True)
print('Simple example:')
sc.colorize('green', 'hi')
print('More complicated example:')
sc.colorize(['yellow', 'bgblack'])
print('Hello world')
print('Goodbye world')
sc.colorize('reset') # Colorize all output in between
bluearray = sc.colorize(color='blue', string=str(range(5)), output=True)
print("This should be blue: " + bluearray)
print('This should be a rainbow:')
sc.printred( 'This should be red')
sc.printyellow( 'This should be yellow')
sc.printgreen( 'This should be green')
sc.printcyan( 'This should be cyan')
sc.printblue( 'This should be blue')
sc.printmagenta('This should be magenta')
return
def test_printing():
sc.heading('Test printing functions')
example = sc.prettyobj()
example.data = sc.vectocolor(10)
print('sc.pr():')
sc.pr(example)
print('sc.pp():')
sc.pp(example.data)
string = sc.pp(example.data, doprint=False)
print('sc.printdata():')
sc.printdata(example.data)
o = sc.objdict()
print('\nTesting print')
sc.printv('test print', 1, 2)
print('\nTesting objatt')
o.att = sc.objatt(o)
print('\nTesting prepr')
sc.prepr(o)
print('\nTesting printarr')
sc.printarr(np.random.rand(3,4,5))
print('\nTesting printvars')
a = range(5)
b = 'example'
sc.printvars(locals(), ['a','b'], color='green')
print('\nTesting slacknotification')
sc.slacknotification(webhook='http://invalid.hooks.slack.com.test', message='Test notification to nowhere')
print('↑↑↑ Will raise an error since it is not a valid webhook, this is OK')
print('\nTesting printtologfile')
sc.printtologfile('Test message')
print('\nTesting sigfig')
sc.sigfig(np.random.rand(10), SI=True, sep='.')
print('\nTesting capture')
str1 = 'I am string 1'
str2 = 'I am string 2'
with sc.capture() as txt1:
print(str1)
txt2 = sc.capture().start()
print(str2)
txt2.stop()
# print() appends a newline character which we have to remove for the comparison
assert txt1.rstrip() == str1
assert txt2.rstrip() == str2
return o
def test_prepr():
sc.heading('Test pretty representation of an object')
n_attrs = 500
myobj = sc.prettyobj()
for i in range(n_attrs):
key = f'attr{i:03d}'
setattr(myobj, key, i**2)
print(myobj)
print('Testing pretty representation of an object using slots')
class Foo:
__slots__ = ['bar']
def __init__(self):
self.bar = 1
x = Foo()
print(sc.prepr(x))
print(sc.prepr(x, maxtime=0))
class Bar:
def skip(self): pass
print(sc.prepr(Bar()))
for tf in [True, False]:
sc.objrepr(x, showid=tf, showmeth=tf, showprop=tf, showatt=tf)
return myobj
def test_progress_bar():
sc.heading('Progress bar and percent complete')
n = 50
for i in range(n):
sc.progressbar(i+1, n)
sc.timedsleep(0.5/n, verbose=False)
for i in range(n):
sc.percentcomplete(i, n, stepsize=10) # will print on every 50th iteration
return i
#%% Run as a script
if __name__ == '__main__':
sc.tic()
bluearray = test_colorize()
printing = test_printing()
myobj = test_prepr()
ind = test_progress_bar()
sc.toc()
print('Done.')
|
{"hexsha": "f88127afd2cbfc6499e07dd46845200b513c1a6f", "size": 3527, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_printing.py", "max_stars_repo_name": "optimamodel/sciris", "max_stars_repo_head_hexsha": "fc0148fd9352e443a1c9b1a790275bc2904b30b1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_printing.py", "max_issues_repo_name": "optimamodel/sciris", "max_issues_repo_head_hexsha": "fc0148fd9352e443a1c9b1a790275bc2904b30b1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2018-03-27T21:47:13.000Z", "max_issues_repo_issues_event_max_datetime": "2018-08-28T00:50:00.000Z", "max_forks_repo_path": "tests/test_printing.py", "max_forks_repo_name": "optimamodel/sciris", "max_forks_repo_head_hexsha": "fc0148fd9352e443a1c9b1a790275bc2904b30b1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-09-05T07:57:39.000Z", "max_forks_repo_forks_event_max_datetime": "2018-09-05T07:57:39.000Z", "avg_line_length": 23.357615894, "max_line_length": 111, "alphanum_fraction": 0.6195066629, "include": true, "reason": "import numpy", "num_tokens": 953}
|
#include <boost/fusion/container/vector/vector20.hpp>
|
{"hexsha": "f6501b4c598f6eb8c7ce212555f1601b78ab02b8", "size": 54, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_fusion_container_vector_vector20.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_fusion_container_vector_vector20.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_fusion_container_vector_vector20.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 27.0, "max_line_length": 53, "alphanum_fraction": 0.8148148148, "num_tokens": 11}
|
import numpy as np
import matplotlib.pyplot as plt
# plt.switch_backend('agg')
def plot_sequence_heatmap(data, filename=None, title=None, x_label="Positions", y_label="Nucleotides", y_ticks=None):
""" Plots heatmap of sequence matrix (rows corresponds nucleotides, columns correspond to genomic location). """
if y_ticks is None:
y_ticks = ['A', 'C', 'G', 'T', '-', 'N']
plt.figure(figsize=(20, 5))
plt.imshow(data, interpolation="nearest", cmap='Blues')
plt.xticks([], [])
plt.xlabel(x_label)
plt.yticks(np.arange(data.shape[0]), y_ticks[0:data.shape[0]])
plt.ylabel(y_label)
plt.ylim([data.shape[0] - 0.5, -0.5])
#plt.colorbar()
if title is not None:
plt.title(title)
if filename is not None:
plt.savefig(filename)
else:
plt.show()
if __name__ == '__main__':
print("Creating dummy sequence matrix...")
data = np.random.randint(0, 10, size=(6, 50))
# Display the marginal one-hot sequence matrix
plot_sequence_heatmap(data, title="Example Heatmap")
# Save the marginal one-hot sequence matrix
print("Saving the matrix heatmap...")
plot_sequence_heatmap(data, filename="plots/heatmap_example_dummy.png", title="Example Heatmap")
|
{"hexsha": "24c816312c8e24bc26efd768cb84122db947f7e3", "size": 1250, "ext": "py", "lang": "Python", "max_stars_repo_path": "virusnn/plotting.py", "max_stars_repo_name": "Lamicc/Corona_mutants", "max_stars_repo_head_hexsha": "89ab80da65f23e42906e0ab57da6ca50f001cdfb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "virusnn/plotting.py", "max_issues_repo_name": "Lamicc/Corona_mutants", "max_issues_repo_head_hexsha": "89ab80da65f23e42906e0ab57da6ca50f001cdfb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "virusnn/plotting.py", "max_forks_repo_name": "Lamicc/Corona_mutants", "max_forks_repo_head_hexsha": "89ab80da65f23e42906e0ab57da6ca50f001cdfb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.0512820513, "max_line_length": 117, "alphanum_fraction": 0.6648, "include": true, "reason": "import numpy", "num_tokens": 312}
|
########################################################################################################################
# Module: tests/test_resampling.py
# Description: Tests for resampling schemes.
#
# Web: https://github.com/SamDuffield/bayesian-traffic
########################################################################################################################
import unittest
import numpy as np
import numpy.testing as npt
from bmm.src.inference.particles import MMParticles
from bmm.src.inference import resampling
class TestMultinomial(unittest.TestCase):
def test_array_trivial(self):
array = np.arange(10)
weights = np.zeros(10)
weights[0] = 1
npt.assert_array_equal(resampling.multinomial(array, weights), np.zeros(10))
def test_array_repeated(self):
array = np.arange(10)
weights = np.arange(1, 11)
weights = weights / weights.sum()
repeated_resample = np.array([resampling.multinomial(array, weights) for _ in range(10000)])
empirical_weights = np.array([(repeated_resample == i).mean() for i in array])
npt.assert_array_almost_equal(weights, empirical_weights, decimal=2)
def test_list_trivial(self):
tlist = [a for a in range(10)]
weights = np.zeros(10)
weights[0] = 1
self.assertEqual(resampling.multinomial(tlist, weights), [0 for _ in range(10)])
def test_list_repeated(self):
tlist = [a for a in range(10)]
weights = np.arange(1, 11)
weights = weights / weights.sum()
repeated_resample = np.array([resampling.multinomial(tlist, weights) for _ in range(10000)])
empirical_weights = np.array([(repeated_resample == i).mean() for i in tlist])
npt.assert_array_almost_equal(weights, empirical_weights, decimal=2)
def test_mmparticles_trivial(self):
init_array = np.zeros((3, 6))
init_array += np.arange(3).reshape(3, 1)
mmp = MMParticles(init_array)
weights = np.array([0, 1, 0])
mmp_resampled = resampling.multinomial(mmp, weights)
for i in range(3):
npt.assert_array_equal(mmp_resampled[i], np.array([[0, 1, 1, 1, 1, 1, 1, 0]]))
def test_mmparticles_repeated(self):
init_array = np.zeros((10, 6))
init_array += np.arange(10).reshape(10, 1)
mmp = MMParticles(init_array)
weights = np.arange(1, 11)
weights = weights / weights.sum()
repeated_resample = [resampling.multinomial(mmp, weights) for _ in range(10000)]
repeated_resample_arr = np.array([p.particles for p in repeated_resample])[:, :, 0, 1]
empirical_weights = np.array([(repeated_resample_arr == i).mean() for i in np.arange(10)])
npt.assert_array_almost_equal(weights, empirical_weights, decimal=2)
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "9c039df8a7f4895fef06d6e909523c2b94d63064", "size": 2864, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_resampling.py", "max_stars_repo_name": "SamDuffield/bayesian-map-matching", "max_stars_repo_head_hexsha": "f1557f584d4411f1ee77665dae3644111b3a3a42", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-12-09T10:39:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T19:09:06.000Z", "max_issues_repo_path": "tests/test_resampling.py", "max_issues_repo_name": "SamDuffield/bayesian-map-matching", "max_issues_repo_head_hexsha": "f1557f584d4411f1ee77665dae3644111b3a3a42", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2021-09-21T17:43:24.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-11T14:52:25.000Z", "max_forks_repo_path": "tests/test_resampling.py", "max_forks_repo_name": "SamDuffield/bayesian-map-matching", "max_forks_repo_head_hexsha": "f1557f584d4411f1ee77665dae3644111b3a3a42", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-05-03T12:29:07.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-06T16:10:25.000Z", "avg_line_length": 41.5072463768, "max_line_length": 120, "alphanum_fraction": 0.6068435754, "include": true, "reason": "import numpy", "num_tokens": 696}
|
(*:maxLineLen=78:*)
theory Sessions
imports Base
begin
chapter \<open>Isabelle sessions and build management \label{ch:session}\<close>
text \<open>
An Isabelle \<^emph>\<open>session\<close> consists of a collection of related theories that may
be associated with formal documents (\chref{ch:present}). There is also a
notion of \<^emph>\<open>persistent heap\<close> image to capture the state of a session,
similar to object-code in compiled programming languages. Thus the concept
of session resembles that of a ``project'' in common IDE environments, but
the specific name emphasizes the connection to interactive theorem proving:
the session wraps up the results of user interaction with the prover in a
persistent form.
Application sessions are built on a given parent session, which may be built
recursively on other parents. Following this path in the hierarchy
eventually leads to some major object-logic session like \<open>HOL\<close>, which itself
is based on \<open>Pure\<close> as the common root of all sessions.
Processing sessions may take considerable time. Isabelle build management
helps to organize this efficiently. This includes support for parallel build
jobs, in addition to the multithreaded theory and proof checking that is
already provided by the prover process itself.
\<close>
section \<open>Session ROOT specifications \label{sec:session-root}\<close>
text \<open>
Session specifications reside in files called \<^verbatim>\<open>ROOT\<close> within certain
directories, such as the home locations of registered Isabelle components or
additional project directories given by the user.
The ROOT file format follows the lexical conventions of the \<^emph>\<open>outer syntax\<close>
of Isabelle/Isar, see also @{cite "isabelle-isar-ref"}. This defines common
forms like identifiers, names, quoted strings, verbatim text, nested
comments etc. The grammar for @{syntax chapter_def}, @{syntax chapter_entry}
and @{syntax session_entry} is given as syntax diagram below. Each ROOT file
may contain multiple specifications like this. Chapters help to organize
browser info (\secref{sec:info}), but have no formal meaning. The default
chapter is ``\<open>Unsorted\<close>''. Chapter definitions, which are optional, allow to
associate additional information.
Isabelle/jEdit @{cite "isabelle-jedit"} includes a simple editing mode
\<^verbatim>\<open>isabelle-root\<close> for session ROOT files, which is enabled by default for any
file of that name.
\<^rail>\<open>
@{syntax_def chapter_def}: @'chapter_definition' @{syntax name} \<newline>
groups? description?
;
@{syntax_def chapter_entry}: @'chapter' @{syntax name}
;
@{syntax_def session_entry}: @'session' @{syntax system_name} groups? dir? '=' \<newline>
(@{syntax system_name} '+')? description? options? \<newline>
sessions? directories? (theories*) \<newline>
(document_theories?) (document_files*) \<newline>
(export_files*) (export_classpath?)
;
groups: '(' (@{syntax name} +) ')'
;
dir: @'in' @{syntax embedded}
;
description: @'description' @{syntax text}
;
options: @'options' opts
;
opts: '[' ( (@{syntax name} '=' value | @{syntax name}) + ',' ) ']'
;
value: @{syntax name} | @{syntax real}
;
sessions: @'sessions' (@{syntax system_name}+)
;
directories: @'directories' (dir+)
;
theories: @'theories' opts? (theory_entry+)
;
theory_entry: @{syntax system_name} ('(' @'global' ')')?
;
document_theories: @'document_theories' (@{syntax name}+)
;
document_files: @'document_files' ('(' dir ')')? (@{syntax embedded}+)
;
export_files: @'export_files' ('(' dir ')')? ('[' nat ']')? \<newline>
(@{syntax embedded}+)
;
export_classpath: @'export_classpath' (@{syntax embedded}*)
\<close>
\<^descr> \isakeyword{chapter{\isacharunderscorekeyword}definition}~\<open>A (groups)\<close>
associates a collection of groups with chapter \<open>A\<close>. All sessions that belong
to this chapter will automatically become members of these groups.
\<^descr> \isakeyword{session}~\<open>A = B + body\<close> defines a new session \<open>A\<close> based on
parent session \<open>B\<close>, with its content given in \<open>body\<close> (imported sessions and
theories). Note that a parent (like \<open>HOL\<close>) is mandatory in practical
applications: only Isabelle/Pure can bootstrap itself from nothing.
All such session specifications together describe a hierarchy (graph) of
sessions, with globally unique names. The new session name \<open>A\<close> should be
sufficiently long and descriptive to stand on its own in a potentially large
library.
\<^descr> \isakeyword{session}~\<open>A (groups)\<close> indicates a collection of groups where
the new session is a member. Group names are uninterpreted and merely follow
certain conventions. For example, the Isabelle distribution tags some
important sessions by the group name called ``\<open>main\<close>''. Other projects may
invent their own conventions, but this requires some care to avoid clashes
within this unchecked name space.
\<^descr> \isakeyword{session}~\<open>A\<close>~\isakeyword{in}~\<open>dir\<close> specifies an explicit
directory for this session; by default this is the current directory of the
\<^verbatim>\<open>ROOT\<close> file.
All theory files are located relative to the session directory. The prover
process is run within that directory, which is also its current working
directory.
\<^descr> \isakeyword{description}~\<open>text\<close> is a free-form description for this
session (or chapter), e.g. for presentation purposes.
\<^descr> \isakeyword{options}~\<open>[x = a, y = b, z]\<close> defines separate options
(\secref{sec:system-options}) that are used when processing this session,
but \<^emph>\<open>without\<close> propagation to child sessions. Note that \<open>z\<close> abbreviates \<open>z =
true\<close> for Boolean options.
\<^descr> \isakeyword{sessions}~\<open>names\<close> specifies sessions that are \<^emph>\<open>imported\<close> into
the current name space of theories. This allows to refer to a theory \<open>A\<close>
from session \<open>B\<close> by the qualified name \<open>B.A\<close> --- although it is loaded again
into the current ML process, which is in contrast to a theory that is
already present in the \<^emph>\<open>parent\<close> session.
Theories that are imported from other sessions are excluded from the current
session document.
\<^descr> \isakeyword{directories}~\<open>dirs\<close> specifies additional directories for
import of theory files via \isakeyword{theories} within \<^verbatim>\<open>ROOT\<close> or
\<^theory_text>\<open>imports\<close> within a theory; \<open>dirs\<close> are relative to the main session
directory (cf.\ \isakeyword{session} \dots \isakeyword{in}~\<open>dir\<close>). These
directories need to be exclusively assigned to a unique session, without
implicit sharing of file-system locations.
\<^descr> \isakeyword{theories}~\<open>options names\<close> specifies a block of theories that
are processed within an environment that is augmented by the given options,
in addition to the global session options given before. Any number of blocks
of \isakeyword{theories} may be given. Options are only active for each
\isakeyword{theories} block separately.
A theory name that is followed by \<open>(\<close>\isakeyword{global}\<open>)\<close> is treated
literally in other session specifications or theory imports --- the normal
situation is to qualify theory names by the session name; this ensures
globally unique names in big session graphs. Global theories are usually the
entry points to major logic sessions: \<open>Pure\<close>, \<open>Main\<close>, \<open>Complex_Main\<close>,
\<open>HOLCF\<close>, \<open>IFOL\<close>, \<open>FOL\<close>, \<open>ZF\<close>, \<open>ZFC\<close> etc. Regular Isabelle applications
should not claim any global theory names.
\<^descr> \isakeyword{document_theories}~\<open>names\<close> specifies theories from other
sessions that should be included in the generated document source directory.
These theories need to be explicit imports in the current session, or
implicit imports from the underlying hierarchy of parent sessions. The
generated \<^verbatim>\<open>session.tex\<close> file is not affected: the session's {\LaTeX} setup
needs to \<^verbatim>\<open>\input{\<close>\<open>\<dots>\<close>\<^verbatim>\<open>}\<close> generated \<^verbatim>\<open>.tex\<close> files separately.
\<^descr> \isakeyword{document_files}~\<open>(\<close>\isakeyword{in}~\<open>base_dir) files\<close> lists
source files for document preparation, typically \<^verbatim>\<open>.tex\<close> and \<^verbatim>\<open>.sty\<close> for
{\LaTeX}. Only these explicitly given files are copied from the base
directory to the document output directory, before formal document
processing is started (see also \secref{sec:tool-document}). The local path
structure of the \<open>files\<close> is preserved, which allows to reconstruct the
original directory hierarchy of \<open>base_dir\<close>. The default \<open>base_dir\<close> is
\<^verbatim>\<open>document\<close> within the session root directory.
\<^descr> \isakeyword{export_files}~\<open>(\<close>\isakeyword{in}~\<open>target_dir) [number]
patterns\<close> specifies theory exports that may get written to the file-system,
e.g. via @{tool_ref build} with option \<^verbatim>\<open>-e\<close> (\secref{sec:tool-build}). The
\<open>target_dir\<close> specification is relative to the session root directory; its
default is \<^verbatim>\<open>export\<close>. Exports are selected via \<open>patterns\<close> as in @{tool_ref
export} (\secref{sec:tool-export}). The number given in brackets (default:
0) specifies the prefix of elements that should be removed from each name:
it allows to reduce the resulting directory hierarchy at the risk of
overwriting files due to loss of uniqueness.
\<^descr> \isakeyword{export_classpath}~\<open>patterns\<close> specifies export artifacts that
should be included into the local Java/Scala classpath of this session
context. This is only relevant for tools that allow dynamic loading of
service classes (\secref{sec:scala-build}), while most other Isabelle/Scala
tools require global configuration during system startup. An empty list of
\<open>patterns\<close> defaults to \<^verbatim>\<open>"*:classpath/*.jar"\<close>, which fits to the naming
convention of JAR modules produced by the Isabelle/Isar command
\<^theory_text>\<open>scala_build_generated_files\<close> @{cite "isabelle-isar-ref"}.
\<close>
subsubsection \<open>Examples\<close>
text \<open>
See \<^file>\<open>~~/src/HOL/ROOT\<close> for a diversity of practically relevant situations,
although it uses relatively complex quasi-hierarchic naming conventions like
\<^verbatim>\<open>HOL-SPARK\<close>, \<^verbatim>\<open>HOL-SPARK-Examples\<close>. An alternative is to use unqualified
names that are relatively long and descriptive, as in the Archive of Formal
Proofs (\<^url>\<open>https://isa-afp.org\<close>), for example.
\<close>
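text \<open>
A minimal, purely hypothetical \<^verbatim>\<open>ROOT\<close> entry, assuming a session directory
\<^verbatim>\<open>example\<close> with theories \<open>Thy_A\<close> and \<open>Thy_B\<close> and a {\LaTeX} root file, could
look as follows:
@{verbatim [display]
\<open>session Example_Session in "example" = HOL +
  description "A small example session."
  options [document = pdf, document_output = "output"]
  sessions
    "HOL-Library"
  theories
    Thy_A
    Thy_B
  document_files
    "root.tex"\<close>}
\<close>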
section \<open>System build options \label{sec:system-options}\<close>
text \<open>
See \<^file>\<open>~~/etc/options\<close> for the main defaults provided by the Isabelle
distribution. Isabelle/jEdit @{cite "isabelle-jedit"} includes a simple
editing mode \<^verbatim>\<open>isabelle-options\<close> for this file-format.
The following options are particularly relevant to build Isabelle sessions,
in particular with document preparation (\chref{ch:present}).
\<^item> @{system_option_def "browser_info"} controls output of HTML browser
info, see also \secref{sec:info}.
\<^item> @{system_option_def "document"} controls document output for a
particular session or theory; \<^verbatim>\<open>document=pdf\<close> or \<^verbatim>\<open>document=true\<close> means
enabled, \<^verbatim>\<open>document=""\<close> or \<^verbatim>\<open>document=false\<close> means disabled (especially
for particular theories).
\<^item> @{system_option_def "document_output"} specifies an alternative
directory for generated output of the document preparation system; the
default is within the @{setting "ISABELLE_BROWSER_INFO"} hierarchy as
explained in \secref{sec:info}. See also @{tool mkroot}, which generates a
default configuration with output readily available to the author of the
document.
\<^item> @{system_option_def "document_echo"} informs about document file names
during session presentation.
\<^item> @{system_option_def "document_variants"} specifies document variants as
a colon-separated list of \<open>name=tags\<close> entries. The default name is
\<^verbatim>\<open>document\<close>, without additional tags.
Tags are specified as a comma-separated list of modifier/name pairs and
tell {\LaTeX} how to interpret certain Isabelle command regions:
``\<^verbatim>\<open>+\<close>\<open>foo\<close>'' (or just ``\<open>foo\<close>'') means to keep, ``\<^verbatim>\<open>-\<close>\<open>foo\<close>'' to drop,
and ``\<^verbatim>\<open>/\<close>\<open>foo\<close>'' to fold text tagged as \<open>foo\<close>. The builtin default is
equivalent to the tag specification
``\<^verbatim>\<open>+document,+theory,+proof,+ML,+visible,-invisible,+important,+unimportant\<close>'';
see also the {\LaTeX} macros \<^verbatim>\<open>\isakeeptag\<close>, \<^verbatim>\<open>\isadroptag\<close>, and
\<^verbatim>\<open>\isafoldtag\<close>, in \<^file>\<open>~~/lib/texinputs/isabelle.sty\<close>.
In contrast, \<^verbatim>\<open>document_variants=document:outline=/proof,/ML\<close> indicates
two documents: the one called \<^verbatim>\<open>document\<close> with default tags, and the other
called \<^verbatim>\<open>outline\<close> where proofs and ML sections are folded.
Document variant names are just a matter of conventions. It is also
possible to use different document variant names (without tags) for
different document root entries, see also \secref{sec:tool-document}.
\<^item> @{system_option_def "document_tags"} specifies alternative command tags
as a comma-separated list of items: either ``\<open>command\<close>\<^verbatim>\<open>%\<close>\<open>tag\<close>'' for a
specific command, or ``\<^verbatim>\<open>%\<close>\<open>tag\<close>'' as default for all other commands. This
is occasionally useful to control the global visibility of commands via
session options (e.g.\ in \<^verbatim>\<open>ROOT\<close>).
\<^item> @{system_option_def "document_comment_latex"} enables regular {\LaTeX}
\<^verbatim>\<open>comment.sty\<close>, instead of the historic version for plain {\TeX}
(default). The latter is much faster, but in conflict with {\LaTeX}
classes like Dagstuhl
LIPIcs\<^footnote>\<open>\<^url>\<open>https://github.com/dagstuhl-publishing/styles\<close>\<close>.
\<^item> @{system_option_def "document_bibliography"} explicitly enables the use
of \<^verbatim>\<open>bibtex\<close>; the default is to check the presence of \<^verbatim>\<open>root.bib\<close>, but it
could have a different name.
\<^item> @{system_option_def "document_heading_prefix"} specifies a prefix for
the {\LaTeX} macro names generated from Isar commands like \<^theory_text>\<open>chapter\<close>,
\<^theory_text>\<open>section\<close> etc. The default is \<^verbatim>\<open>isamarkup\<close>, e.g. \<^theory_text>\<open>section\<close> becomes
\<^verbatim>\<open>\isamarkupsection\<close>.
\<^item> @{system_option_def "threads"} determines the number of worker threads
for parallel checking of theories and proofs. The default \<open>0\<close> means that a
sensible maximum value is determined by the underlying hardware. For
machines with many cores or with hyperthreading, this sometimes requires
manual adjustment (on the command-line or within personal settings or
preferences, not within a session \<^verbatim>\<open>ROOT\<close>).
\<^item> @{system_option_def "condition"} specifies a comma-separated list of
process environment variables (or Isabelle settings) that are required for
the subsequent theories to be processed. Conditions are considered
``true'' if the corresponding environment value is defined and non-empty.
\<^item> @{system_option_def "timeout"} and @{system_option_def "timeout_scale"}
specify a real wall-clock timeout for the session as a whole: the two
values are multiplied and taken as the number of seconds. Typically,
@{system_option "timeout"} is given for individual sessions, and
@{system_option "timeout_scale"} as global adjustment to overall hardware
performance.
The timer is controlled outside the ML process by the JVM that runs
Isabelle/Scala. Thus it is relatively reliable in canceling processes that
get out of control, even if there is a deadlock without CPU time usage.
\<^item> @{system_option_def "profiling"} specifies a mode for global ML
profiling. Possible values are the empty string (disabled), \<^verbatim>\<open>time\<close> for
\<^ML>\<open>profile_time\<close> and \<^verbatim>\<open>allocations\<close> for \<^ML>\<open>profile_allocations\<close>.
Results appear near the bottom of the session log file.
\<^item> @{system_option_def system_log} specifies an optional log file for
low-level messages produced by \<^ML>\<open>Output.system_message\<close> in
Isabelle/ML; the standard value ``\<^verbatim>\<open>-\<close>'' refers to console progress of the
build job.
\<^item> @{system_option_def "system_heaps"} determines the directories for
session heap images: \<^path>\<open>$ISABELLE_HEAPS\<close> is the user directory and
\<^path>\<open>$ISABELLE_HEAPS_SYSTEM\<close> the system directory (usually within the
Isabelle application). For \<^verbatim>\<open>system_heaps=false\<close>, heaps are stored in the
user directory and may be loaded from both directories. For
\<^verbatim>\<open>system_heaps=true\<close>, store and load happens only in the system directory.
The @{tool_def options} tool prints Isabelle system options. Its
command-line usage is:
@{verbatim [display]
\<open>Usage: isabelle options [OPTIONS] [MORE_OPTIONS ...]
Options are:
-b include $ISABELLE_BUILD_OPTIONS
-g OPTION get value of OPTION
-l list options
-x FILE export to FILE in YXML format
Report Isabelle system options, augmented by MORE_OPTIONS given as
arguments NAME=VAL or NAME.\<close>}
The command line arguments provide additional system options of the form
\<open>name\<close>\<^verbatim>\<open>=\<close>\<open>value\<close> or \<open>name\<close> for Boolean options.
Option \<^verbatim>\<open>-b\<close> augments the implicit environment of system options by the ones
of @{setting ISABELLE_BUILD_OPTIONS}, cf.\ \secref{sec:tool-build}.
Option \<^verbatim>\<open>-g\<close> prints the value of the given option. Option \<^verbatim>\<open>-l\<close> lists all
options with their declaration and current value.
Option \<^verbatim>\<open>-x\<close> specifies a file to export the result in YXML format, instead
of printing it in human-readable form.
\<close>
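text \<open>
For example, the following invocations (with made-up option values) print the
current value of @{system_option "threads"}, list all options together with
the defaults from @{setting ISABELLE_BUILD_OPTIONS}, and export the result to
a YXML file while overriding two options on the command line:
@{verbatim [display]
\<open>  isabelle options -g threads
  isabelle options -b -l
  isabelle options -x options.yxml document=pdf threads=4\<close>}
\<close>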
section \<open>Invoking the build process \label{sec:tool-build}\<close>
text \<open>
The @{tool_def build} tool invokes the build process for Isabelle sessions.
It manages dependencies between sessions, related sources of theories and
auxiliary files, and target heap images. Accordingly, it runs instances of
the prover process with optional document preparation. Its command-line
usage is:\<^footnote>\<open>Isabelle/Scala provides the same functionality via
\<^scala_method>\<open>isabelle.Build.build\<close>.\<close>
@{verbatim [display]
\<open>Usage: isabelle build [OPTIONS] [SESSIONS ...]
Options are:
-B NAME include session NAME and all descendants
-D DIR include session directory and select its sessions
-N cyclic shuffling of NUMA CPU nodes (performance tuning)
-P DIR enable HTML/PDF presentation in directory (":" for default)
-R refer to requirements of selected sessions
-S soft build: only observe changes of sources, not heap images
-X NAME exclude sessions from group NAME and all descendants
-a select all sessions
-b build heap images
-c clean build
-d DIR include session directory
-e export files from session specification into file-system
-f fresh build
-g NAME select session group NAME
-j INT maximum number of parallel jobs (default 1)
-k KEYWORD check theory sources for conflicts with proposed keywords
-l list session source files
-n no build -- test dependencies only
-o OPTION override Isabelle system OPTION (via NAME=VAL or NAME)
-v verbose
-x NAME exclude session NAME and all descendants
Build and manage Isabelle sessions, depending on implicit settings:
ISABELLE_TOOL_JAVA_OPTIONS="..."
ISABELLE_BUILD_OPTIONS="..."
ML_PLATFORM="..."
ML_HOME="..."
ML_SYSTEM="..."
ML_OPTIONS="..."\<close>}
\<^medskip>
Isabelle sessions are defined via session ROOT files as described in
(\secref{sec:session-root}). The totality of sessions is determined by
collecting such specifications from all Isabelle component directories
(\secref{sec:components}), augmented by more directories given via options
\<^verbatim>\<open>-d\<close>~\<open>DIR\<close> on the command line. Each such directory may contain a session
\<^verbatim>\<open>ROOT\<close> file with several session specifications.
Any session root directory may refer recursively to further directories of
the same kind, by listing them in a catalog file \<^verbatim>\<open>ROOTS\<close> line-by-line. This
helps to organize large collections of session specifications, or to make
\<^verbatim>\<open>-d\<close> command line options persistent (e.g.\ in
\<^verbatim>\<open>$ISABELLE_HOME_USER/ROOTS\<close>).
\<^medskip>
The subset of sessions to be managed is determined via individual \<open>SESSIONS\<close>
given as command-line arguments, or session groups that are given via one or
more options \<^verbatim>\<open>-g\<close>~\<open>NAME\<close>. Option \<^verbatim>\<open>-a\<close> selects all sessions. The build tool
takes session dependencies into account: the set of selected sessions is
completed by including all ancestors.
\<^medskip>
One or more options \<^verbatim>\<open>-B\<close>~\<open>NAME\<close> specify base sessions to be included (all
descendants wrt.\ the session parent or import graph).
\<^medskip>
One or more options \<^verbatim>\<open>-x\<close>~\<open>NAME\<close> specify sessions to be excluded (all
descendants wrt.\ the session parent or import graph). Option \<^verbatim>\<open>-X\<close> is
analogous to this, but excluded sessions are specified by session group
membership.
\<^medskip>
Option \<^verbatim>\<open>-R\<close> reverses the selection in the sense that it refers to its
requirements: all ancestor sessions excluding the original selection. This
allows to prepare the stage for some build process with different options,
before running the main build itself (without option \<^verbatim>\<open>-R\<close>).
\<^medskip>
Option \<^verbatim>\<open>-D\<close> is similar to \<^verbatim>\<open>-d\<close>, but selects all sessions that are defined
in the given directories.
\<^medskip>
Option \<^verbatim>\<open>-S\<close> indicates a ``soft build'': the selection is restricted to
those sessions that have changed sources (according to actually imported
theories). The status of heap images is ignored.
\<^medskip>
The build process depends on additional options
(\secref{sec:system-options}) that are passed to the prover eventually. The
settings variable @{setting_ref ISABELLE_BUILD_OPTIONS} allows to provide
additional defaults, e.g.\ \<^verbatim>\<open>ISABELLE_BUILD_OPTIONS="document=pdf
threads=4"\<close>. Moreover, the environment of system build options may be
augmented on the command line via \<^verbatim>\<open>-o\<close>~\<open>name\<close>\<^verbatim>\<open>=\<close>\<open>value\<close> or \<^verbatim>\<open>-o\<close>~\<open>name\<close>,
which abbreviates \<^verbatim>\<open>-o\<close>~\<open>name\<close>\<^verbatim>\<open>=true\<close> for Boolean or string options.
Multiple occurrences of \<^verbatim>\<open>-o\<close> on the command-line are applied in the given
order.
\<^medskip>
Option \<^verbatim>\<open>-P\<close> enables PDF/HTML presentation in the given directory, where
``\<^verbatim>\<open>-P:\<close>'' refers to the default @{setting_ref ISABELLE_BROWSER_INFO} (or
@{setting_ref ISABELLE_BROWSER_INFO_SYSTEM}). This applies only to
explicitly selected sessions; note that option \<^verbatim>\<open>-R\<close> allows to select all
requirements separately.
\<^medskip>
Option \<^verbatim>\<open>-b\<close> ensures that heap images are produced for all selected
sessions. By default, images are only saved for inner nodes of the hierarchy
of sessions, as required for other sessions to continue later on.
\<^medskip>
Option \<^verbatim>\<open>-c\<close> cleans the selected sessions (all descendants wrt.\ the session
parent or import graph) before performing the specified build operation.
\<^medskip>
Option \<^verbatim>\<open>-e\<close> executes the \isakeyword{export_files} directives from the ROOT
specification of all explicitly selected sessions: the status of the session
build database needs to be OK, but the session could have been built
earlier. Using \isakeyword{export_files}, a session may serve as abstract
interface for add-on build artefacts, but these are only materialized on
explicit request: without option \<^verbatim>\<open>-e\<close> there is no effect on the physical
file-system yet.
\<^medskip>
Option \<^verbatim>\<open>-f\<close> forces a fresh build of all selected sessions and their
requirements.
\<^medskip>
Option \<^verbatim>\<open>-n\<close> omits the actual build process after the preparatory stage
(including optional cleanup). Note that the return code always indicates the
status of the set of selected sessions.
\<^medskip>
Option \<^verbatim>\<open>-j\<close> specifies the maximum number of parallel build jobs (prover
processes). Each prover process is subject to a separate limit of parallel
worker threads, cf.\ system option @{system_option_ref threads}.
\<^medskip>
Option \<^verbatim>\<open>-N\<close> enables cyclic shuffling of NUMA CPU nodes. This may help
performance tuning on Linux servers with separate CPU/memory modules.
\<^medskip>
Option \<^verbatim>\<open>-v\<close> increases the general level of verbosity.
\<^medskip>
Option \<^verbatim>\<open>-l\<close> lists the source files that contribute to a session.
\<^medskip>
Option \<^verbatim>\<open>-k\<close> specifies a newly proposed keyword for outer syntax. It is
possible to use option \<^verbatim>\<open>-k\<close> repeatedly to check multiple keywords. The
theory sources are checked for conflicts wrt.\ this hypothetical change of
syntax, e.g.\ to reveal occurrences of identifiers that need to be quoted.
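\<^medskip>
For illustration, a hypothetical invocation (the keyword names below are
made up) that checks two candidate keywords against all sessions could look
like this:
@{verbatim [display] \<open> isabelle build -a -k my_keyword -k my_command\<close>}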
\<close>
subsubsection \<open>Examples\<close>
text \<open>
Build a specific logic image:
@{verbatim [display] \<open> isabelle build -b HOLCF\<close>}
\<^smallskip>
Build the main group of logic images:
@{verbatim [display] \<open> isabelle build -b -g main\<close>}
\<^smallskip>
Build all descendants (and requirements) of \<^verbatim>\<open>FOL\<close> and \<^verbatim>\<open>ZF\<close>:
@{verbatim [display] \<open> isabelle build -B FOL -B ZF\<close>}
\<^smallskip>
Build all sessions where sources have changed (ignoring heaps):
@{verbatim [display] \<open> isabelle build -a -S\<close>}
\<^smallskip>
Provide a general overview of the status of all Isabelle sessions, without
building anything:
@{verbatim [display] \<open> isabelle build -a -n -v\<close>}
\<^smallskip>
Build all sessions with HTML browser info and PDF document preparation:
@{verbatim [display] \<open> isabelle build -a -o browser_info -o document\<close>}
\<^smallskip>
Build all sessions with a maximum of 8 parallel prover processes and 4
worker threads each (on a machine with many cores):
@{verbatim [display] \<open> isabelle build -a -j8 -o threads=4\<close>}
\<^smallskip>
Build some session images with cleanup of their descendants, while retaining
their ancestry:
@{verbatim [display] \<open> isabelle build -b -c HOL-Library HOL-Algebra\<close>}
\<^smallskip>
HTML/PDF presentation for sessions that happen to be properly built already,
without rebuilding anything except the missing browser info:
@{verbatim [display] \<open> isabelle build -a -n -o browser_info\<close>}
\<^smallskip>
Clean all sessions without building anything:
@{verbatim [display] \<open> isabelle build -a -n -c\<close>}
\<^smallskip>
Build all sessions from some other directory hierarchy, according to the
settings variable \<^verbatim>\<open>AFP\<close> that happens to be defined inside the Isabelle
environment:
@{verbatim [display] \<open> isabelle build -D '$AFP'\<close>}
\<^smallskip>
Inform about the status of all sessions required for AFP, without building
anything yet:
@{verbatim [display] \<open> isabelle build -D '$AFP' -R -v -n\<close>}
\<close>
section \<open>Print messages from build database \label{sec:tool-log}\<close>
text \<open>
The @{tool_def "log"} tool prints prover messages from the build
database of the given session. Its command-line usage is:
@{verbatim [display]
\<open>Usage: isabelle log [OPTIONS] [SESSIONS ...]
Options are:
-H REGEX filter messages by matching against head
-M REGEX filter messages by matching against body
-T NAME restrict to given theories (multiple options possible)
-U output Unicode symbols
-m MARGIN margin for pretty printing (default: 76.0)
-o OPTION override Isabelle system OPTION (via NAME=VAL or NAME)
-v print all messages, including information etc.
Print messages from the build database of the given sessions, without any
checks against current sources nor session structure: results from old
sessions or failed builds can be printed as well.
Multiple options -H and -M are conjunctive: all given patterns need to
match. Patterns match any substring, but ^ or $ may be used to match the
start or end explicitly.\<close>}
The specified session databases are taken as-is, without formal checking
against current sources: There is \<^emph>\<open>no\<close> implicit build process involved, so
it is possible to retrieve error messages from a failed session as well. The
order of messages follows the source positions of source files; thus the
result is mostly deterministic, independent of the somewhat erratic
evaluation of parallel processing.
\<^medskip> Option \<^verbatim>\<open>-o\<close> can be used to change system options, as in @{tool build}
(\secref{sec:tool-build}). This may affect the storage space for the build
database, notably via @{system_option system_heaps}, or @{system_option
build_database_server} and its relatives.
\<^medskip> Option \<^verbatim>\<open>-T\<close> restricts output to given theories: multiple entries are
possible by repeating this option on the command-line. The default is to
refer to \<^emph>\<open>all\<close> theories used in the original session build process.
\<^medskip> Options \<^verbatim>\<open>-m\<close> and \<^verbatim>\<open>-U\<close> modify pretty printing and output of Isabelle
symbols. The default is for an old-fashioned ASCII terminal at 80 characters
per line (76 + 4 characters to prefix warnings or errors).
\<^medskip> Option \<^verbatim>\<open>-v\<close> prints all messages from the session database that are
normally inlined into the source text, including information messages etc.
\<^medskip> Options \<^verbatim>\<open>-H\<close> and \<^verbatim>\<open>-M\<close> filter messages according to their header or body
content, respectively. The header follows a very basic format that makes it
easy to match message kinds (e.g. \<^verbatim>\<open>Warning\<close> or \<^verbatim>\<open>Error\<close>) and file names
(e.g. \<^verbatim>\<open>src/HOL/Nat.thy\<close>). The body is usually pretty-printed, but for
matching it is treated like one long line: blocks are ignored and breaks are
turned into plain spaces (according to their formal width).
The syntax for patterns follows regular expressions of the Java
platform.\<^footnote>\<open>\<^url>\<open>https://docs.oracle.com/en/java/javase/17/docs/api/java.base/java/util/regex/Pattern.html\<close>\<close>
\<close>
subsubsection \<open>Examples\<close>
text \<open>
Print messages from theory \<^verbatim>\<open>HOL.Nat\<close> of session \<^verbatim>\<open>HOL\<close>, using Unicode
rendering of Isabelle symbols and a margin of 100 characters:
@{verbatim [display] \<open> isabelle log -T HOL.Nat -U -m 100 HOL\<close>}
Print warnings about ambiguous input (inner syntax) of session
\<^verbatim>\<open>HOL-Library\<close>, which is built beforehand:
@{verbatim [display] \<open> isabelle build HOL-Library
isabelle log -H "Warning" -M "Ambiguous input" HOL-Library\<close>}
Print all errors from all sessions, e.g. from a partial build of
Isabelle/AFP:
@{verbatim [display] \<open> isabelle log -H "Error" $(isabelle sessions -a -d AFP/thys)\<close>}
\<close>
section \<open>Retrieve theory exports \label{sec:tool-export}\<close>
text \<open>
The @{tool_def "export"} tool retrieves theory exports from the session
database. Its command-line usage is: @{verbatim [display]
\<open>Usage: isabelle export [OPTIONS] SESSION
Options are:
-O DIR output directory for exported files (default: "export")
-d DIR include session directory
-l list exports
-n no build of session
-o OPTION override Isabelle system OPTION (via NAME=VAL or NAME)
-p NUM prune path of exported files by NUM elements
-x PATTERN extract files matching pattern (e.g. "*:**" for all)
List or export theory exports for SESSION: named blobs produced by
isabelle build. Option -l or -x is required; option -x may be repeated.
The PATTERN language resembles glob patterns in the shell, with ? and *
(both excluding ":" and "/"), ** (excluding ":"), and [abc] or [^abc],
and variants {pattern1,pattern2,pattern3}.\<close>}
\<^medskip>
The specified session is updated via @{tool build}
(\secref{sec:tool-build}), with the same options \<^verbatim>\<open>-d\<close>, \<^verbatim>\<open>-o\<close>. The option
\<^verbatim>\<open>-n\<close> suppresses the implicit build process: it means that a potentially
outdated session database is used!
\<^medskip>
Option \<^verbatim>\<open>-l\<close> lists all stored exports, with compound names
\<open>theory\<close>\<^verbatim>\<open>:\<close>\<open>name\<close>.
\<^medskip>
Option \<^verbatim>\<open>-x\<close> extracts stored exports whose compound name matches the given
pattern. Note that wild cards ``\<^verbatim>\<open>?\<close>'' and ``\<^verbatim>\<open>*\<close>'' do not match the
separators ``\<^verbatim>\<open>:\<close>'' and ``\<^verbatim>\<open>/\<close>''; the wild card \<^verbatim>\<open>**\<close> matches over directory
name hierarchies separated by ``\<^verbatim>\<open>/\<close>''. Thus the pattern ``\<^verbatim>\<open>*:**\<close>'' matches
\<^emph>\<open>all\<close> theory exports. Multiple options \<^verbatim>\<open>-x\<close> refer to the union of all
specified patterns.
Option \<^verbatim>\<open>-O\<close> specifies an alternative output directory for option \<^verbatim>\<open>-x\<close>: the
default is \<^verbatim>\<open>export\<close> within the current directory. Each theory creates its
own sub-directory hierarchy, using the session-qualified theory name.
Option \<^verbatim>\<open>-p\<close> specifies the number of elements that should be pruned from
each name: this makes it possible to reduce the resulting directory
hierarchy, at the risk of overwriting files due to loss of uniqueness.
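\<^medskip>
For illustration, a hypothetical invocation based on the options above could
first list the exports of session \<^verbatim>\<open>HOL\<close> and then extract all of them into
the default \<^verbatim>\<open>export\<close> directory:
@{verbatim [display] \<open> isabelle export -l HOL
 isabelle export -x '*:**' HOL\<close>}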
\<close>
section \<open>Dump PIDE session database \label{sec:tool-dump}\<close>
text \<open>
The @{tool_def "dump"} tool dumps information from the cumulative PIDE
session database (which is processed on the spot). Its command-line usage
is: @{verbatim [display]
\<open>Usage: isabelle dump [OPTIONS] [SESSIONS ...]
Options are:
-A NAMES dump named aspects (default: ...)
-B NAME include session NAME and all descendants
-D DIR include session directory and select its sessions
-O DIR output directory for dumped files (default: "dump")
-R refer to requirements of selected sessions
-X NAME exclude sessions from group NAME and all descendants
-a select all sessions
-b NAME base logic image (default "Pure")
-d DIR include session directory
-g NAME select session group NAME
-o OPTION override Isabelle system OPTION (via NAME=VAL or NAME)
-v verbose
-x NAME exclude session NAME and all descendants
Dump cumulative PIDE session database, with the following aspects:
...\<close>}
\<^medskip> Options \<^verbatim>\<open>-B\<close>, \<^verbatim>\<open>-D\<close>, \<^verbatim>\<open>-R\<close>, \<^verbatim>\<open>-X\<close>, \<^verbatim>\<open>-a\<close>, \<^verbatim>\<open>-d\<close>, \<^verbatim>\<open>-g\<close>, \<^verbatim>\<open>-x\<close> and the
remaining command-line arguments specify sessions as in @{tool build}
(\secref{sec:tool-build}): the cumulative PIDE database of all their loaded
theories is dumped to the output directory of option \<^verbatim>\<open>-O\<close> (default: \<^verbatim>\<open>dump\<close>
in the current directory).
\<^medskip> Option \<^verbatim>\<open>-b\<close> specifies an optional base logic image, for improved
scalability of the PIDE session. Its theories are only processed if it is
included in the overall session selection.
\<^medskip> Option \<^verbatim>\<open>-o\<close> overrides Isabelle system options as for @{tool build}
(\secref{sec:tool-build}).
\<^medskip> Option \<^verbatim>\<open>-v\<close> increases the general level of verbosity.
\<^medskip> Option \<^verbatim>\<open>-A\<close> specifies named aspects of the dump, as a comma-separated
list. The default is to dump all known aspects, as given in the command-line
usage of the tool. The underlying Isabelle/Scala operation
\<^scala_method>\<open>isabelle.Dump.dump\<close> takes aspects as user-defined
operations on the final PIDE state and document version. This makes it
possible to imitate Prover IDE rendering under program control.
\<close>
subsubsection \<open>Examples\<close>
text \<open>
Dump all Isabelle/ZF sessions (which are rather small):
@{verbatim [display] \<open> isabelle dump -v -B ZF\<close>}
\<^smallskip>
Dump the quite substantial \<^verbatim>\<open>HOL-Analysis\<close> session, with full bootstrap
from Isabelle/Pure:
@{verbatim [display] \<open> isabelle dump -v HOL-Analysis\<close>}
\<^smallskip>
Dump all sessions connected to HOL-Analysis, using main Isabelle/HOL as
basis:
@{verbatim [display] \<open> isabelle dump -v -b HOL -B HOL-Analysis\<close>}
This results in uniform PIDE markup for everything, except for the
Isabelle/Pure bootstrap process itself. Producing that on the spot requires
several GB of heap space, both for the Isabelle/Scala and Isabelle/ML
process (in 64bit mode). Here are some relevant settings (\secref{sec:boot})
for such ambitious applications:
@{verbatim [display]
\<open> ISABELLE_TOOL_JAVA_OPTIONS="-Xms4g -Xmx32g -Xss16m"
ML_OPTIONS="--minheap 4G --maxheap 32G"
\<close>}
\<close>
section \<open>Update theory sources based on PIDE markup \label{sec:tool-update}\<close>
text \<open>
The @{tool_def "update"} tool updates theory sources based on markup that is
produced from a running PIDE session (similar to @{tool dump}
\secref{sec:tool-dump}). Its command-line usage is: @{verbatim [display]
\<open>Usage: isabelle update [OPTIONS] [SESSIONS ...]
Options are:
-B NAME include session NAME and all descendants
-D DIR include session directory and select its sessions
-R refer to requirements of selected sessions
-X NAME exclude sessions from group NAME and all descendants
-a select all sessions
-b NAME base logic image (default "Pure")
-d DIR include session directory
-g NAME select session group NAME
-o OPTION override Isabelle system OPTION (via NAME=VAL or NAME)
-u OPT override "update" option: shortcut for "-o update_OPT"
-v verbose
-x NAME exclude session NAME and all descendants
Update theory sources based on PIDE markup.\<close>}
\<^medskip> Options \<^verbatim>\<open>-B\<close>, \<^verbatim>\<open>-D\<close>, \<^verbatim>\<open>-R\<close>, \<^verbatim>\<open>-X\<close>, \<^verbatim>\<open>-a\<close>, \<^verbatim>\<open>-d\<close>, \<^verbatim>\<open>-g\<close>, \<^verbatim>\<open>-x\<close> and the
remaining command-line arguments specify sessions as in @{tool build}
(\secref{sec:tool-build}) or @{tool dump} (\secref{sec:tool-dump}).
\<^medskip> Option \<^verbatim>\<open>-b\<close> specifies an optional base logic image, for improved
scalability of the PIDE session. Its theories are only processed if it is
included in the overall session selection.
\<^medskip> Option \<^verbatim>\<open>-v\<close> increases the general level of verbosity.
\<^medskip> Option \<^verbatim>\<open>-o\<close> overrides Isabelle system options as for @{tool build}
(\secref{sec:tool-build}). Option \<^verbatim>\<open>-u\<close> refers to specific \<^verbatim>\<open>update\<close>
options, by relying on naming convention: ``\<^verbatim>\<open>-u\<close>~\<open>OPT\<close>'' is a shortcut for
``\<^verbatim>\<open>-o\<close>~\<^verbatim>\<open>update_\<close>\<open>OPT\<close>''.
\<^medskip> The following \<^verbatim>\<open>update\<close> options are supported:
\<^item> @{system_option update_inner_syntax_cartouches} to update inner syntax
(types, terms, etc.)~to use cartouches, instead of double-quoted strings
or atomic identifiers. For example, ``\<^theory_text>\<open>lemma \<doublequote>x =
x\<doublequote>\<close>'' is replaced by ``\<^theory_text>\<open>lemma \<open>x = x\<close>\<close>'', and ``\<^theory_text>\<open>assume
A\<close>'' is replaced by ``\<^theory_text>\<open>assume \<open>A\<close>\<close>''.
\<^item> @{system_option update_mixfix_cartouches} to update mixfix templates to
use cartouches instead of double-quoted strings. For example, ``\<^theory_text>\<open>(infixl
\<doublequote>+\<doublequote> 65)\<close>'' is replaced by ``\<^theory_text>\<open>(infixl \<open>+\<close>
65)\<close>''.
\<^item> @{system_option update_control_cartouches} to update antiquotations to
use the compact form with control symbol and cartouche argument. For
example, ``\<open>@{term \<doublequote>x + y\<doublequote>}\<close>'' is replaced by
``\<open>\<^term>\<open>x + y\<close>\<close>'' (the control symbol is literally \<^verbatim>\<open>\<^term>\<close>.)
\<^item> @{system_option update_path_cartouches} to update file-system paths to
use cartouches: this depends on language markup provided by semantic
processing of parsed input.
It is also possible to produce custom updates in Isabelle/ML, by reporting
\<^ML>\<open>Markup.update\<close> with the precise source position and a replacement
text. This operation should be made conditional on specific system options,
similar to the ones above. Searching the above option names in ML sources of
\<^dir>\<open>$ISABELLE_HOME/src/Pure\<close> provides some examples.
Updates can conflict by producing nested or overlapping edits: this may
require running @{tool update} multiple times.
\<close>
subsubsection \<open>Examples\<close>
text \<open>
Update some cartouche notation in all theory sources required for session
\<^verbatim>\<open>HOL-Analysis\<close> (and ancestors):
@{verbatim [display] \<open> isabelle update -u mixfix_cartouches HOL-Analysis\<close>}
\<^smallskip> Update the same for all application sessions based on \<^verbatim>\<open>HOL-Analysis\<close> ---
using its image as starting point (for reduced resource requirements):
@{verbatim [display] \<open> isabelle update -u mixfix_cartouches -b HOL-Analysis -B HOL-Analysis\<close>}
\<^smallskip> Update sessions that build on \<^verbatim>\<open>HOL-Proofs\<close>, which need to be run
separately with special options as follows:
@{verbatim [display] \<open> isabelle update -u mixfix_cartouches -l HOL-Proofs -B HOL-Proofs
-o record_proofs=2\<close>}
\<^smallskip> See also the end of \secref{sec:tool-dump} for hints on increasing
Isabelle/ML heap sizes for very big PIDE processes that include many
sessions, notably from the Archive of Formal Proofs.
\<close>
section \<open>Explore sessions structure\<close>
text \<open>
The @{tool_def "sessions"} tool explores the sessions structure. Its
command-line usage is:
@{verbatim [display]
\<open>Usage: isabelle sessions [OPTIONS] [SESSIONS ...]
Options are:
-B NAME include session NAME and all descendants
-D DIR include session directory and select its sessions
-R refer to requirements of selected sessions
-X NAME exclude sessions from group NAME and all descendants
-a select all sessions
-b follow session build dependencies (default: source imports)
-d DIR include session directory
-g NAME select session group NAME
-x NAME exclude session NAME and all descendants
Explore the structure of Isabelle sessions and print result names in
topological order (on stdout).\<close>}
Arguments and options for session selection resemble @{tool build}
(\secref{sec:tool-build}).
\<close>
subsubsection \<open>Examples\<close>
text \<open>
All sessions of the Isabelle distribution:
@{verbatim [display] \<open> isabelle sessions -a\<close>}
\<^medskip>
Sessions that are imported by \<^verbatim>\<open>ZF\<close>:
@{verbatim [display] \<open> isabelle sessions ZF\<close>}
\<^medskip>
Sessions that are required to build \<^verbatim>\<open>ZF\<close>:
@{verbatim [display] \<open> isabelle sessions -b ZF\<close>}
\<^medskip>
Sessions that are based on \<^verbatim>\<open>ZF\<close> (and imported by it):
@{verbatim [display] \<open> isabelle sessions -B ZF\<close>}
\<^medskip>
All sessions of Isabelle/AFP (based in directory \<^path>\<open>AFP\<close>):
@{verbatim [display] \<open> isabelle sessions -D AFP/thys\<close>}
\<^medskip>
Sessions required by Isabelle/AFP (based in directory \<^path>\<open>AFP\<close>):
@{verbatim [display] \<open> isabelle sessions -R -D AFP/thys\<close>}
\<close>
section \<open>Synchronize source repositories and session images for Isabelle and AFP\<close>
text \<open>
The @{tool_def sync} tool synchronizes a local Isabelle and AFP source
repository, possibly with prebuilt \<^verbatim>\<open>.jar\<close> files and session images. Its
command-line usage is: @{verbatim [display]
\<open>Usage: isabelle sync [OPTIONS] TARGET
Options are:
-A ROOT include AFP with given root directory (":" for $AFP_BASE)
-H purge heaps directory on target
-I NAME include session heap image and build database
(based on accidental local state)
-J preserve *.jar files
-P protect spaces in target file names: more robust, less portable
-S PATH SSH control path for connection multiplexing
-T thorough treatment of file content and directory times
-a REV explicit AFP revision (default: state of working directory)
-n no changes: dry-run
-p PORT SSH port
-r REV explicit revision (default: state of working directory)
-v verbose
Synchronize Isabelle + AFP repositories, based on "isabelle hg_sync".\<close>}
The approach is to apply @{tool hg_sync} (see \secref{sec:tool-hg-sync}) on
the underlying Isabelle repository, and an optional AFP repository.
Consequently, the Isabelle installation needs to be a Mercurial repository
clone: a regular download of the Isabelle distribution is not sufficient!
On the target side, AFP is placed into @{setting ISABELLE_HOME} as immediate
sub-directory with the literal name \<^verbatim>\<open>AFP\<close>; thus it can be easily included
elsewhere, e.g. @{tool build}~\<^verbatim>\<open>-d\<close>~\<^verbatim>\<open>'~~/AFP'\<close> on the remote side.
\<^medskip> Options \<^verbatim>\<open>-P\<close>, \<^verbatim>\<open>-S\<close>, \<^verbatim>\<open>-T\<close>, \<^verbatim>\<open>-n\<close>, \<^verbatim>\<open>-p\<close>, \<^verbatim>\<open>-v\<close> are the same as the
underlying @{tool hg_sync}.
\<^medskip> Options \<^verbatim>\<open>-r\<close> and \<^verbatim>\<open>-a\<close> are the same as option \<^verbatim>\<open>-r\<close> for @{tool hg_sync},
but for the Isabelle and AFP repositories, respectively. The AFP version is
only used if a corresponding repository is given via option \<^verbatim>\<open>-A\<close>, either
with explicit root directory, or as \<^verbatim>\<open>-A:\<close> to refer to \<^verbatim>\<open>$AFP_BASE\<close> (this
assumes AFP as component of the local Isabelle installation). If no AFP
repository is given, an existing \<^verbatim>\<open>AFP\<close> directory on the target remains
unchanged.
\<^medskip> Option \<^verbatim>\<open>-J\<close> uploads existing \<^verbatim>\<open>.jar\<close> files from the working directory,
which are usually Isabelle/Scala/Java modules under control of @{tool
scala_build} via \<^verbatim>\<open>etc/build.props\<close> (see also \secref{sec:scala-build}).
Thus the dependency management is accurate: bad uploads will be rebuilt
eventually (or ignored). This might fail for very old Isabelle versions,
when going into the past via option \<^verbatim>\<open>-r\<close>: here it is better to omit option
\<^verbatim>\<open>-J\<close> and thus purge \<^verbatim>\<open>.jar\<close> files on the target (because they do not belong
to the repository).
\<^medskip> Option \<^verbatim>\<open>-I\<close> uploads a collection of session images. The set of \<^verbatim>\<open>-I\<close>
options specifies the end-points in the session build graph, including all
required ancestors. The result collection is uploaded using the underlying
\<^verbatim>\<open>rsync\<close> policies, so unchanged images are not sent again. Session images
are assembled within the target \<^verbatim>\<open>heaps\<close> directory: this scheme fits
together with @{tool build}~\<^verbatim>\<open>-o system_heaps\<close>. Images are taken as-is from
the local Isabelle installation, regardless of option \<^verbatim>\<open>-r\<close>. Upload of bad
images could waste time and space, but running e.g. @{tool build} on the
target will check dependencies accurately and rebuild outdated images on
demand.
\<^medskip> Option \<^verbatim>\<open>-H\<close> tells the underlying \<^verbatim>\<open>rsync\<close> process to purge the \<^verbatim>\<open>heaps\<close>
directory on the target, before uploading new images via option \<^verbatim>\<open>-I\<close>. The
default is to work monotonically: old material that is not overwritten
remains unchanged. Over time, this may lead to unused garbage, due to
changes in session names or the Poly/ML version. Option \<^verbatim>\<open>-H\<close> helps to avoid
wasting file-system space.
\<close>
subsubsection \<open>Examples\<close>
text \<open>
For quick testing of Isabelle + AFP on a remote machine, upload changed
sources, jars, and local sessions images for \<^verbatim>\<open>HOL\<close>:
@{verbatim [display] \<open> isabelle sync -A: -I HOL -J testmachine:test/isabelle_afp\<close>}
Assuming that the local \<^verbatim>\<open>HOL\<close> hierarchy is up-to-date, and the local
and remote ML platforms coincide, a remote @{tool build} will proceed
without building \<^verbatim>\<open>HOL\<close> again.
\<^medskip> Here is a variation for extra-clean testing of Isabelle + AFP: no option
\<^verbatim>\<open>-J\<close>, but option \<^verbatim>\<open>-T\<close> to disable the default ``quick check'' of \<^verbatim>\<open>rsync\<close>
(which only inspects file sizes and date stamps); existing heaps are
deleted:
@{verbatim [display] \<open> isabelle sync -A: -T -H testmachine:test/isabelle_afp\<close>}
\<close>
end
|
{"author": "m-fleury", "repo": "isabelle-emacs", "sha": "756c662195e138a1941d22d4dd7ff759cbf6b6b9", "save_path": "github-repos/isabelle/m-fleury-isabelle-emacs", "path": "github-repos/isabelle/m-fleury-isabelle-emacs/isabelle-emacs-756c662195e138a1941d22d4dd7ff759cbf6b6b9/src/Doc/System/Sessions.thy"}
|
# -*- coding: utf-8 -*
import numpy as np
import preprocess
import logging
import math
import random
import io
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger("paddle")
logger.setLevel(logging.INFO)
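# Descriptive note (added comment): NumpyRandomInt draws random integers from
# the inclusive range [a, b] out of a pre-generated buffer, so the NumPy RNG
# is only invoked once per buf_size requests; the buffer is refilled after it
# has been fully consumed.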
class NumpyRandomInt(object):
def __init__(self, a, b, buf_size=1000):
self.idx = 0
self.buffer = np.random.random_integers(a, b, buf_size)
self.a = a
self.b = b
def __call__(self):
if self.idx == len(self.buffer):
self.buffer = np.random.random_integers(self.a, self.b,
len(self.buffer))
self.idx = 0
result = self.buffer[self.idx]
self.idx += 1
return result
class Word2VecReader(object):
def __init__(self,
dict_path,
data_path,
filelist,
trainer_id,
trainer_num,
window_size=5):
self.window_size_ = window_size
self.data_path_ = data_path
self.filelist = filelist
self.trainer_id = trainer_id
self.trainer_num = trainer_num
word_all_count = 0
id_counts = []
word_id = 0
with io.open(dict_path, 'r', encoding='utf-8') as f:
for line in f:
word, count = line.split()[0], int(line.split()[1])
word_id += 1
id_counts.append(count)
word_all_count += count
self.word_all_count = word_all_count
self.corpus_size_ = word_all_count
self.dict_size = len(id_counts)
self.id_counts_ = id_counts
print("corpus_size:", self.corpus_size_)
self.id_frequencys = [
float(count) / word_all_count for count in self.id_counts_
]
print("dict_size = " + str(self.dict_size) + " word_all_count = " + str(
word_all_count))
self.random_generator = NumpyRandomInt(1, self.window_size_ + 1)
def get_context_words(self, words, idx):
"""
        Get the context word list of the target word.
        words: the words of the current line
        idx: index of the target word
        The window size is sampled from self.random_generator on each call.
"""
target_window = self.random_generator()
start_point = idx - target_window # if (idx - target_window) > 0 else 0
if start_point < 0:
start_point = 0
end_point = idx + target_window
targets = words[start_point:idx] + words[idx + 1:end_point + 1]
return targets
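    # Worked example (added comment): with words = [10, 11, 12, 13, 14],
    # idx = 2 and a sampled target_window of 2, start_point = 0 and
    # end_point = 4, so the returned context is [10, 11, 13, 14] -- the
    # target word at idx itself is skipped.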
def train(self):
def nce_reader():
for file in self.filelist:
with io.open(
self.data_path_ + "/" + file, 'r',
encoding='utf-8') as f:
logger.info("running data in {}".format(self.data_path_ +
"/" + file))
count = 1
for line in f:
if self.trainer_id == count % self.trainer_num:
word_ids = [int(w) for w in line.split()]
for idx, target_id in enumerate(word_ids):
context_word_ids = self.get_context_words(
word_ids, idx)
for context_id in context_word_ids:
yield [target_id], [context_id]
count += 1
return nce_reader
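# Illustrative usage sketch (added; the file paths and trainer settings below
# are assumptions for demonstration only, not part of the original script):
#
#   reader = Word2VecReader("data/word_count_dict.txt", "data/corpus",
#                           ["part-0"], trainer_id=0, trainer_num=1)
#   nce_reader = reader.train()
#   for i, (target, context) in enumerate(nce_reader()):
#       print(target, context)
#       if i >= 4:
#           break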
|
{"hexsha": "92d51706abce63463943189ee1d620248cd07126", "size": 3553, "ext": "py", "lang": "Python", "max_stars_repo_path": "demo/quant/quant_embedding/reader.py", "max_stars_repo_name": "ZichaoGuo/PaddleSlim", "max_stars_repo_head_hexsha": "2550fb4ec86aee6155c1c8a2c9ab174e239918a3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 926, "max_stars_repo_stars_event_min_datetime": "2019-12-16T05:06:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T07:22:10.000Z", "max_issues_repo_path": "demo/quant/quant_embedding/reader.py", "max_issues_repo_name": "ZichaoGuo/PaddleSlim", "max_issues_repo_head_hexsha": "2550fb4ec86aee6155c1c8a2c9ab174e239918a3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 327, "max_issues_repo_issues_event_min_datetime": "2019-12-16T06:04:31.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T11:08:18.000Z", "max_forks_repo_path": "demo/quant/quant_embedding/reader.py", "max_forks_repo_name": "ZichaoGuo/PaddleSlim", "max_forks_repo_head_hexsha": "2550fb4ec86aee6155c1c8a2c9ab174e239918a3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 262, "max_forks_repo_forks_event_min_datetime": "2020-01-02T02:19:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T04:56:16.000Z", "avg_line_length": 33.2056074766, "max_line_length": 80, "alphanum_fraction": 0.5187165775, "include": true, "reason": "import numpy", "num_tokens": 763}
|
# Authors: Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import pyeparse as pp
fname = '../pyeparse/tests/data/test_raw.edf'
raw = pp.read_raw(fname)
# visualize initial calibration
raw.plot_calibration(title='5-Point Calibration')
# create heatmap
raw.plot_heatmap(start=3., stop=60.)
# find events and epoch data
events = raw.find_events('SYNCTIME', event_id=1)
tmin, tmax, event_id = -0.5, 1.5, 1
epochs = pp.Epochs(raw, events=events, event_id=event_id, tmin=tmin,
tmax=tmax)
# access pandas data frame and plot single epoch
fig, ax = plt.subplots()
ax.plot(epochs[3].get_data('xpos')[0], epochs[3].get_data('ypos')[0])
# iterate over and access numpy arrays.
# find epochs without loss of tracking / blinks
print(len([e for e in epochs if not np.isnan(e).any()]))
fig, ax = plt.subplots()
ax.set_title('Superimposed saccade responses')
n_trials = 12 # first 12 trials
for epoch in epochs[:n_trials]:
ax.plot(epochs.times * 1e3, epoch[0].T)
time_mask = epochs.times > 0
times = epochs.times * 1e3
fig, ax = plt.subplots()
ax.plot(times[time_mask], epochs.data[0, 0, time_mask])
ax.set_title('Post baseline saccade (X, pos)')
# plot single trials
epochs.plot(picks=['xpos'], draw_discrete='saccades')
plt.show()
|
{"hexsha": "a8d4d348ca415ef24037df093d1f3905ba41e84d", "size": 1328, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/plot_from_raw_to_epochs.py", "max_stars_repo_name": "Eric89GXL/pyeparse", "max_stars_repo_head_hexsha": "c1907c39276aacb0fad80034d69b537b07f82786", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/plot_from_raw_to_epochs.py", "max_issues_repo_name": "Eric89GXL/pyeparse", "max_issues_repo_head_hexsha": "c1907c39276aacb0fad80034d69b537b07f82786", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/plot_from_raw_to_epochs.py", "max_forks_repo_name": "Eric89GXL/pyeparse", "max_forks_repo_head_hexsha": "c1907c39276aacb0fad80034d69b537b07f82786", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.56, "max_line_length": 69, "alphanum_fraction": 0.7138554217, "include": true, "reason": "import numpy", "num_tokens": 388}
|
''' Applying Adversarial Auto-encoder for Estimating Human Walking Gait Index
BSD 2-Clause "Simplified" License
Author: Trong-Nguyen Nguyen'''
import argparse, sys, os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from utils import *
from ops import *
### https://github.com/rothk/Stabilizing_GANs
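# Descriptive note (added comment): following Roth et al., "Stabilizing
# Training of GANs through Regularization" (2017), the function below
# penalizes the squared gradient norm of the discriminator logits w.r.t. the
# inputs, weighted by (1 - D)^2 for samples from the prior and by D^2 for
# encoded samples, and returns the mean of both terms.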
def Discriminator_Regularizer(D1_logits, D1_arg, D2_logits, D2_arg):
D1 = tf.nn.sigmoid(D1_logits)
D2 = tf.nn.sigmoid(D2_logits)
grad_D1_logits = tf.gradients(D1_logits, D1_arg)[0]
grad_D2_logits = tf.gradients(D2_logits, D2_arg)[0]
grad_D1_logits_norm = tf.norm(tf.reshape(grad_D1_logits, [mb_size,-1]), axis=1, keepdims=True)
grad_D2_logits_norm = tf.norm(tf.reshape(grad_D2_logits, [mb_size,-1]), axis=1, keepdims=True)
print(grad_D1_logits_norm.shape)
print(D1.shape)
reg_D1 = tf.multiply(tf.square(1.0-D1), tf.square(grad_D1_logits_norm))
reg_D2 = tf.multiply(tf.square(D2), tf.square(grad_D2_logits_norm))
disc_regularizer = tf.reduce_mean(reg_D1 + reg_D2)
return disc_regularizer
''' constant parameters '''
mb_size = 512
z_dim = 16
X_dim = 256
h_dim = 96
lr = 1e-3
segment_lengths = np.array([1, 10, 20, 30, 40, 50, 60, 80, 100, 120, 150, 200, 240, 300, 400, 600, 1200])
#segment_lengths = np.array([10, 21])
tf.set_random_seed(1989)
np.random.seed(1989)
def plot(samples):
fig = plt.figure(figsize=(8, 8))
gs = gridspec.GridSpec(8, 8)
gs.update(wspace=0.05, hspace=0.05)
for i, sample in enumerate(samples):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
plt.imshow(sample.reshape(16, 16), cmap='Greys_r')
return fig
def xavier_init(size):
in_dim = size[0]
out_dim = size[1]
xavier_stddev = tf.sqrt(6. / (in_dim + out_dim))
return tf.random_normal(shape=size, stddev=xavier_stddev)
''' suggested epoch ranges '''
split_epochs = np.array([200,300])
l1o_epochs = np.array([[100,200],[100,200],[120,220],[100,200],[80,180],[100,200],[100,200],[100,200],[95,195]])
def processing(data, training_subjects, test_subjects, epoch_start, epoch_end, n_epoch, overlapping = False, save_samples = False, result_file = None, plot_losses = False):
'''Q(z|X)'''
X = tf.placeholder(tf.float32, shape=[None, X_dim])
z = tf.placeholder(tf.float32, shape=[None, z_dim])
Q_W1 = tf.Variable(xavier_init([X_dim, h_dim]))
Q_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
Q_W2 = tf.Variable(xavier_init([h_dim, z_dim]))
Q_b2 = tf.Variable(tf.zeros(shape=[z_dim]))
theta_Q = [Q_W1, Q_W2, Q_b1, Q_b2]
def Q(X):
h = lrelu(tf.matmul(X, Q_W1) + Q_b1, leak = 0.1, name = 'h_Q')
z = tf.matmul(h, Q_W2) + Q_b2
return z
'''P(X|z)'''
P_W1 = tf.Variable(xavier_init([z_dim, h_dim]))
P_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
P_W2 = tf.Variable(xavier_init([h_dim, X_dim]))
P_b2 = tf.Variable(tf.zeros(shape=[X_dim]))
theta_P = [P_W1, P_W2, P_b1, P_b2]
def P(z):
h = lrelu(tf.matmul(z, P_W1) + P_b1, leak = 0.1, name = 'h_P')
logits = tf.matmul(h, P_W2) + P_b2
prob = tf.nn.sigmoid(logits)
return prob, logits
'''D(z)'''
D_W1 = tf.Variable(xavier_init([z_dim, h_dim]))
D_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
D_W2 = tf.Variable(xavier_init([h_dim, 1]))
D_b2 = tf.Variable(tf.zeros(shape=[1]))
theta_D = [D_W1, D_W2, D_b1, D_b2]
def D(z):
h = lrelu(tf.matmul(z, D_W1) + D_b1, leak = 0.1, name = 'h_D')
logits = tf.matmul(h, D_W2) + D_b2
prob = tf.nn.sigmoid(logits)
return prob, logits
'''Losses and solvers'''
z_sample = Q(X)
recon_X, logits = P(z_sample)
# Sample from random z
X_samples, _ = P(z)
D_real, D_real_logits = D(z)
D_fake, D_fake_logits = D(z_sample)
    gamma = tf.placeholder_with_default(2.0, shape=())  # for annealing
d_reg = Discriminator_Regularizer(D_real_logits, z, D_fake_logits, z_sample)
D_loss = -tf.reduce_mean(tf.log(D_real) + tf.log(1. - D_fake)) + (gamma/2.0)*d_reg
G_loss = -tf.reduce_mean(tf.log(D_fake))
recon_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=X))
AE_solver = tf.train.AdamOptimizer(lr).minimize(recon_loss, var_list=theta_P + theta_Q)
D_solver = tf.train.GradientDescentOptimizer(lr).minimize(D_loss, var_list=theta_D)
G_solver = tf.train.AdamOptimizer(lr).minimize(G_loss, var_list=theta_Q)
'''processing'''
print('training subjects: ' + str(training_subjects))
training_img_normal = data[training_subjects, 0]
test_img_normal = data[test_subjects, 0]
test_img_abnormal = data[test_subjects, 1:]
'''flatten data to 2D matrix'''
training_img_normal = training_img_normal.reshape((-1,256))
test_img_normal = test_img_normal.reshape((-1,256))
test_img_abnormal = test_img_abnormal.reshape((-1,256))
print('data shape:')
print(training_img_normal.shape)
print(test_img_normal.shape)
print(test_img_abnormal.shape)
print('')
''' assessment: n_seg_len * 7 quantities * n_considered_epoch'''
results_prob, results_disc, results_dist, results_dist_prob, results_dist_disc, results_full = \
[np.zeros((len(segment_lengths), 7, epoch_end - epoch_start + 1)) for i in range(6)]
training_losses = np.zeros((n_epoch, 3))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# default values for annealing
alpha = 0.005
gamma0 = 2.0
T = n_epoch * int(np.ceil(training_img_normal.shape[0] / mb_size))
t = 0
for epoch in range(n_epoch):
indices = get_batch(training_img_normal.shape[0], mb_size)
tmp_losses = np.zeros(3)
for it in range(indices.shape[0]):
t += 1
X_mb = training_img_normal[indices[it]]
z_mb = np.random.randn(mb_size, z_dim)
_, recon_loss_curr = sess.run([AE_solver, recon_loss], feed_dict={X: X_mb, gamma: gamma0 * alpha**(t/T)})
_, D_loss_curr = sess.run([D_solver, D_loss], feed_dict={X: X_mb, z: z_mb})
_, G_loss_curr = sess.run([G_solver, G_loss], feed_dict={X: X_mb})
tmp_losses += np.array([D_loss_curr, G_loss_curr, recon_loss_curr])
tmp_losses /= indices.shape[0]
training_losses[epoch,:] = tmp_losses
print('Epoch %3d: D_loss %.3f, G_loss %.3f, Recon_loss %.3f' % (epoch + 1, tmp_losses[0], tmp_losses[1], tmp_losses[2]))
z_generated = np.random.randn(64, z_dim)
if save_samples and (epoch + 1) % 50 == 0:
if not os.path.exists('sampling/'):
os.makedirs('sampling/')
samples = sess.run(X_samples, feed_dict={z: z_generated})
fig = plot(samples)
plt.savefig('sampling/{}.png'.format(str(epoch+1).zfill(3)), bbox_inches='tight')
plt.close(fig)
if (epoch+1) < epoch_start or (epoch+1) > epoch_end:
continue
result_idx = epoch + 1 - epoch_start
max_exp_log = np.exp(log_likelihood(z_dim, np.zeros((1, z_dim))))
assert len(max_exp_log) == 1
max_exp_log = max_exp_log[0]
#print(max_exp_log)
'''assessment by Prob(z, Gaussian)'''
z_abnormal = z_sample.eval({X: test_img_abnormal})
z_normal = z_sample.eval({X: test_img_normal})
prob_abnormal = np.exp(log_likelihood(z_dim, z_abnormal))/max_exp_log
assert np.amax(prob_abnormal) <= 1.0 and np.amin(prob_abnormal) >= 0.0
prob_normal = np.exp(log_likelihood(z_dim, z_normal))/max_exp_log
assert np.amax(prob_normal) <= 1.0 and np.amin(prob_normal) >= 0.0
re = assessment_full(-prob_abnormal, -prob_normal, segment_lengths, calc_mean = True, overlapping = overlapping)
results_prob[:,:,result_idx] = re
'''assessment by D(z|X)'''
disc_abnormal = D_fake.eval({X: test_img_abnormal}).reshape(-1)
disc_normal = D_fake.eval({X: test_img_normal}).reshape(-1)
re = assessment_full(-disc_abnormal, -disc_normal, segment_lengths, calc_mean = True, overlapping = overlapping)
results_disc[:,:,result_idx] = re
'''assessment by Dist(X, X_hat)'''
X_hat_abnormal = recon_X.eval({X: test_img_abnormal})
X_hat_normal = recon_X.eval({X: test_img_normal})
diff_abnormal = (np.mean((X_hat_abnormal - test_img_abnormal)**2, axis = 1))**0.5
diff_normal = (np.mean((X_hat_normal - test_img_normal)**2, axis = 1))**0.5
re = assessment_full(diff_abnormal, diff_normal, segment_lengths, calc_mean = True, overlapping = overlapping)
results_dist[:,:,result_idx] = re
'''use reconstruction error and weight'''
### weight calculation ###
X_hat_train = recon_X.eval({X: training_img_normal})
prob_train = np.exp(log_likelihood(z_dim, z_sample.eval({X: training_img_normal})))/max_exp_log
assert np.amax(prob_train) <= 1.0 and np.amin(prob_train) >= 0.0
diff_train = (np.mean((X_hat_train - training_img_normal)**2, axis = 1))**0.5
disc_train = D_fake.eval({X: training_img_normal}).reshape(-1)
p = 0.125
prob_normal, prob_abnormal, prob_train = prob_normal**p, prob_abnormal**p, prob_train**p
### (option) weighted sum: prob + dist ###
W_diff1, W_prob1 = weight_calc2(diff_train, prob_train)
seq_abnormal = W_diff1 * diff_abnormal + W_prob1 * prob_abnormal
seq_normal = W_diff1 * diff_normal + W_prob1 * prob_normal
re = assessment_full(seq_abnormal, seq_normal, segment_lengths, calc_mean = True, overlapping = overlapping)
results_dist_prob[:,:,result_idx] = re
### (option) weighted sum: disc + dist ###
W_diff2, W_disc2 = weight_calc2(diff_train, disc_train)
seq_abnormal = W_diff2 * diff_abnormal + W_disc2 * disc_abnormal
seq_normal = W_diff2 * diff_normal + W_disc2 * disc_normal
re = assessment_full(seq_abnormal, seq_normal, segment_lengths, calc_mean = True, overlapping = overlapping)
results_dist_disc[:,:,result_idx] = re
### (option) weighted sum ###
W_prob, W_diff, W_disc = weight_calc3(prob_train, diff_train, disc_train)
seq_abnormal = W_diff * diff_abnormal + W_prob * prob_abnormal + W_disc * disc_abnormal
seq_normal = W_diff * diff_normal + W_prob * prob_normal + W_disc * disc_normal
re = assessment_full(seq_abnormal, seq_normal, segment_lengths, calc_mean = True, overlapping = overlapping)
results_full[:,:,result_idx] = re
print('\naverage results:')
prob_mean, prob_std = calc_mean_std_along_epoch(results_prob, segment_lengths, str_title = '\nResults probability')
disc_mean, disc_std = calc_mean_std_along_epoch(results_disc, segment_lengths, str_title = '\nResults discriminator')
dist_mean, dist_std = calc_mean_std_along_epoch(results_dist, segment_lengths, str_title = '\nResults reconstruction')
dist_prob_mean, dist_prob_std = calc_mean_std_along_epoch(results_dist_prob, segment_lengths, str_title = '\nResults dist + prob')
dist_disc_mean, dist_disc_std = calc_mean_std_along_epoch(results_dist_disc, segment_lengths, str_title = '\nResults dist + disc')
full_mean, full_std = calc_mean_std_along_epoch(results_full, segment_lengths, str_title = '\nResults combination')
if plot_losses:
#np.savetxt('training_losses.csv', training_losses, delimiter = ',')
plot_training_losses(training_losses, epoch_start, epoch_end)
# return
results_mean = np.array([t.T for t in np.transpose(np.dstack((prob_mean, disc_mean, dist_mean, dist_prob_mean, dist_disc_mean, full_mean)))])
results_std = np.array([t.T for t in np.transpose(np.dstack((prob_std, disc_std, dist_std, dist_prob_std, dist_disc_std, full_std)))])
return results_mean, results_std
def main(argv):
'''usage: python3 main.py -l 0 -o 0 -s 1 -f results.csv'''
parser = argparse.ArgumentParser(description = 'cylindrical histogram AAE')
parser.add_argument('-l', '--l1o', help = 'leave-one-out', default = -1)
parser.add_argument('-o', '--overlap', help = 'use overlapping segments (sliding window)', default = 0)
parser.add_argument('-s', '--sampling', help = 'save sampled histograms', default = 1)
parser.add_argument('-f', '--file', help = 'file saving AUC results', default = None)
args = vars(parser.parse_args())
l1o = bool(int(args['l1o']))
overlapping = bool(int(args['overlap']))
save_samples = bool(int(args['sampling']))
result_file = args['file']
''' load histogram data '''
loaded = np.load('../../dataset/DIRO_normalized_hists.npz')
data = loaded['data']
n_subject, n_gait, n_frame = data.shape[:3]
if not l1o:
epoch_start, epoch_end = split_epochs
n_epoch = epoch_end + 210
separation = loaded['split']
training_subjects = np.where(separation == 'train')[0]
test_subjects = np.where(separation == 'test')[0]
results_mean, results_std = processing(data, training_subjects, test_subjects, epoch_start, epoch_end, n_epoch, \
overlapping = overlapping, save_samples = save_samples, result_file = result_file, plot_losses = True)
else:
results_mean_l1o = [s for s in range(n_subject)]
for s in np.arange(n_subject):
epoch_start, epoch_end = l1o_epochs[s]
n_epoch = epoch_end + 10
test_subjects = np.array([s])
training_subjects = np.setdiff1d(list(range(n_subject)), test_subjects)
results_mean_l1o[s], _ = processing(data, training_subjects, test_subjects, epoch_start, epoch_end, n_epoch, \
overlapping = overlapping, save_samples = save_samples, result_file = result_file, plot_losses = False)
tf.reset_default_graph()
tf.set_random_seed(1989)
np.random.seed(1989)
if l1o:
results_mean = np.mean(results_mean_l1o, axis = 0)
results_std = np.std(results_mean_l1o, axis = 0)
print('segment lengths: ' + str(segment_lengths))
print('results_mean to plot:')
print(results_mean.shape)
print(results_mean)
print('\nresults_std to plot:')
print(results_std.shape)
print(results_std)
'''plot AUC with different segment lengths'''
lw = 1.7
plt.figure(2)
plt.subplot(211)
plt.plot(segment_lengths, results_mean[0,:,0], color = 'r', marker = '.', linewidth = lw, label = 'prob')
plt.plot(segment_lengths, results_mean[1,:,0], color = 'g', marker = '.', linewidth = lw, label = 'disc')
plt.plot(segment_lengths, results_mean[2,:,0], color = 'b', marker = '.', linewidth = lw, label = 'dist')
plt.xlabel('segment length')
plt.ylabel('AUC')
plt.legend(loc = 'lower right')
plt.subplot(212)
plt.plot(segment_lengths, results_mean[2,:,0], color = 'b', marker = '.', linewidth = lw, label = 'dist')
plt.plot(segment_lengths, results_mean[3,:,0], color = 'c', marker = '.', linewidth = lw, label = 'dist_prob')
plt.plot(segment_lengths, results_mean[4,:,0], color = 'm', marker = '.', linewidth = lw, label = 'dist_disc')
plt.plot(segment_lengths, results_mean[5,:,0], color = 'y', marker = '.', linewidth = lw, label = 'full')
plt.xlabel('segment length')
plt.ylabel('AUC')
plt.legend(loc = 'lower right')
'''plot EER with different segment lengths'''
lw = 1.7
plt.figure(3)
plt.subplot(211)
plt.plot(segment_lengths, results_mean[0,:,1], color = 'r', marker = '.', linewidth = lw, label = 'prob')
plt.plot(segment_lengths, results_mean[1,:,1], color = 'g', marker = '.', linewidth = lw, label = 'disc')
plt.plot(segment_lengths, results_mean[2,:,1], color = 'b', marker = '.', linewidth = lw, label = 'dist')
plt.xlabel('segment length')
plt.ylabel('EER')
plt.legend(loc = 'upper right')
plt.subplot(212)
plt.plot(segment_lengths, results_mean[2,:,1], color = 'b', marker = '.', linewidth = lw, label = 'dist')
plt.plot(segment_lengths, results_mean[3,:,1], color = 'c', marker = '.', linewidth = lw, label = 'dist_prob')
plt.plot(segment_lengths, results_mean[4,:,1], color = 'm', marker = '.', linewidth = lw, label = 'dist_disc')
plt.plot(segment_lengths, results_mean[5,:,1], color = 'y', marker = '.', linewidth = lw, label = 'full')
plt.xlabel('segment length')
plt.ylabel('EER')
plt.legend(loc = 'upper right')
plt.show()
if __name__ == '__main__':
main(sys.argv)
|
{"hexsha": "b94d2f3eea60966f2ea03874c58826c6adf331e8", "size": 17059, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "nguyetn89/cylindrical-histogram-AAE", "max_stars_repo_head_hexsha": "fa88ef321402d8e7ad6bf6af4a6e86685c2d63f9", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-18T15:07:33.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-18T15:07:33.000Z", "max_issues_repo_path": "main.py", "max_issues_repo_name": "nguyetn89/cylindrical-histogram-AAE", "max_issues_repo_head_hexsha": "fa88ef321402d8e7ad6bf6af4a6e86685c2d63f9", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "nguyetn89/cylindrical-histogram-AAE", "max_forks_repo_head_hexsha": "fa88ef321402d8e7ad6bf6af4a6e86685c2d63f9", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.9811320755, "max_line_length": 172, "alphanum_fraction": 0.6445864353, "include": true, "reason": "import numpy", "num_tokens": 4539}
|
@testset "parser" begin
b = Bobby.fen_to_bitboard(
"rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1")
@test b.free == ~b.taken
@test_throws ArgumentError Bobby.fen_to_bitboard(
"rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBN")
end
|
{"hexsha": "696aa905adeee290c14c26d2e0c218b21b5adc25", "size": 265, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test_parser.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/Bobby.jl-bd34264e-e812-11e8-1ee8-bfb20fea2fb4", "max_stars_repo_head_hexsha": "9814a1d987fcf64e61b1626395e6e221a09b6004", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/test_parser.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/Bobby.jl-bd34264e-e812-11e8-1ee8-bfb20fea2fb4", "max_issues_repo_head_hexsha": "9814a1d987fcf64e61b1626395e6e221a09b6004", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/test_parser.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/Bobby.jl-bd34264e-e812-11e8-1ee8-bfb20fea2fb4", "max_forks_repo_head_hexsha": "9814a1d987fcf64e61b1626395e6e221a09b6004", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.4444444444, "max_line_length": 67, "alphanum_fraction": 0.6754716981, "num_tokens": 116}
|
function s = spm_existfile(filename)
% Check if a file exists on disk - a compiled routine
% FORMAT S = SPM_EXISTFILE(FILENAME)
% FILENAME - filename (can also be a relative or full pathname to a file)
% S - logical scalar, true if the file exists and false otherwise
%_______________________________________________________________________
%
% This compiled routine is equivalent to:
% >> s = exist(filename,'file') == 2;
% and was written for speed purposes. The differences in behaviour are:
% * spm_existfile returns true for directory names
% * spm_existfile does not look in MATLAB's search path
% * spm_existfile returns false for an existing file that does not have
% read permission
%_______________________________________________________________________
% Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging
% Guillaume Flandin
% $Id: spm_existfile.m 3100 2009-05-06 19:00:39Z guillaume $
%-This is merely the help file for the compiled routine
%error('spm_existfile.c not compiled - see Makefile')
persistent runonce
if isempty(runonce)
warning('spm_existfile is not compiled for your platform.');
runonce = 1;
end
s = exist(filename,'file') > 0;
|
{"author": "leoliuf", "repo": "MRiLab", "sha": "5cdcf1f7b67759700685d3a26ffeb70e55325567", "save_path": "github-repos/MATLAB/leoliuf-MRiLab", "path": "github-repos/MATLAB/leoliuf-MRiLab/MRiLab-5cdcf1f7b67759700685d3a26ffeb70e55325567/External/MatrixUser2.2/External/spm8/spm_existfile.m"}
|
import json
import logging
import os
import re
import string
from collections import Counter
from scipy.cluster.hierarchy import ward, dendrogram
import nltk
from nltk.stem.snowball import SnowballStemmer
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.manifold import MDS
from sklearn.metrics.pairwise import cosine_similarity
from aarhus.aarhus import custom_stopwords
import time
logging.basicConfig(format='%(asctime)s : %(levelname)s :: %(message)s', level=logging.DEBUG)
def strip_proppers(text):
    # first tokenize by sentence, then by word to ensure that punctuation is caught as its own token
tokens = [word for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)
if word.islower()]
return "".join(
[" " + i if not i.startswith("'") and i not in string.punctuation else i for i in tokens]).strip()
def tokenize_and_stem(text):
    # first tokenize by sentence, then by word to ensure that punctuation is caught as its own token
tokens = [word for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]
filtered_tokens = []
# filter out any tokens not containing letters (e.g., numeric tokens, raw punctuation)
for token in tokens:
if re.search('[a-zA-Z]', token):
filtered_tokens.append(token)
# todo remove touchup list ?
if True:
result = [stemmer.stem(t) for t in filtered_tokens]
else:
stems = [stemmer.stem(t) for t in filtered_tokens]
result = [stem for stem in stems if stem not in touchup_list]
return result
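# Worked example (added comment): for the input "Running 3 quick tests!", the
# tokenizer above keeps only tokens containing letters and the Snowball
# stemmer reduces them, yielding roughly ['run', 'quick', 'test'].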
def clean_address(arg_value):
result = str(arg_value)
for token in clean_address_tokens:
if token in result:
result = result.replace(token, ' ')
return result.lower().strip()
start_time = time.time()
stemmer = SnowballStemmer("english")
touchup_list = custom_stopwords.get_specific_stopwords()
# https://groups.google.com/forum/#!topic/microsoft.public.outlookexpress.general/oig7-xNFISg
clean_address_tokens = ['=?us-ascii?Q?', '=0D=0A_=28', '=?utf-8?Q?', '=29?=', '=0D=0A']
with open('./sklearn_kmeans_clustering.json') as data_file:
data = json.load(data_file)
logging.debug(data)
input_folder = data['input_folder']
max_file_count = data['max_file_count']
# model_file_name = data['model_file_name']
cluster_count = data['cluster_count']
random_state = data['random_state']
max_df = data['max_df']
min_df = data['min_df']
max_features = data['max_features']
target_encoding = 'utf-8'
stopwords = custom_stopwords.get_stopwords()
file_names = [os.path.join(root, current) for root, subdirectories, files in os.walk(input_folder) for current in files]
logging.debug('we have %d files', len(file_names))
# truncate
if max_file_count < len(file_names) and max_file_count != -1:
file_names = file_names[:max_file_count]
logging.debug('we are using %d files', len(file_names))
documents = [open(file_name, 'r').read() for file_name in file_names]
tfidf_vectorizer = TfidfVectorizer(max_df=max_df, max_features=max_features, min_df=min_df, stop_words='english',
use_idf=True, tokenizer=tokenize_and_stem, ngram_range=(1, 2), decode_error='ignore',
strip_accents='ascii')
tfidf_matrix = tfidf_vectorizer.fit_transform(documents)
model = KMeans(n_clusters=cluster_count, random_state=random_state)
model.fit(tfidf_matrix)
# todo write terms to a file for later viewing
terms = tfidf_vectorizer.get_feature_names()
logging.debug('we have %d terms/feature names' % len(terms))
terms_out_file = 'sklearn_kmeans_terms.csv'
with open(terms_out_file, 'w') as terms_out_fp:
for item in terms:
terms_out_fp.write("%s\n" % item)
logging.debug('wrote terms to %s' % terms_out_file)
clusters = model.labels_.tolist()
logging.debug('files: %d cluster values: %d' % (len(file_names), len(clusters)))
cluster_counter = Counter()
for item in clusters:
cluster_counter[item] += 1
logging.debug(cluster_counter)
logging.debug('smallest cluster has %d items; largest cluster has %d items' % (
min(cluster_counter.values()), max(cluster_counter.values())))
result = {}
for item in zip(file_names, clusters):
short_file_name = os.path.basename(item[0])
result[short_file_name] = item[1]
pass
cluster_out_file = 'sklearn_kmeans_clusters.json'
with open(cluster_out_file, 'w') as fp:
json.dump(result, fp)
logging.debug('wrote clusters to %s' % cluster_out_file)
if False:
distances = 1 - cosine_similarity(tfidf_matrix)
logging.debug('computed distances usng cosine similarity')
MDS()
# convert two components as we're plotting points in a two-dimensional plane
# "precomputed" because we provide a distance matrix
# we will also specify `random_state` so the plot is reproducible.
mds = MDS(n_components=2, dissimilarity="precomputed", random_state=random_state)
pos = mds.fit_transform(distances) # shape (n_components, n_samples)
linkage_matrix = ward(distances) # define the linkage_matrix using ward clustering pre-computed distances
logging.debug('got linkage matrix')
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(30, 40)) # set size
ax = dendrogram(linkage_matrix, orientation="right") # ), labels=titles);
plt.tick_params( \
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom='off', # ticks along the bottom edge are off
top='off', # ticks along the top edge are off
labelbottom='off')
plt.tight_layout() # show plot with tight layout
# uncomment below to save figure
plt.savefig('ward_clusters.png', dpi=200) # save figure as ward_clusters
pass
finish_time = time.time()
elapsed_hours, elapsed_remainder = divmod(finish_time - start_time, 3600)
elapsed_minutes, elapsed_seconds = divmod(elapsed_remainder, 60)
logging.info(
"Elapsed time: {:0>2}:{:0>2}:{:05.2f}".format(int(elapsed_hours), int(elapsed_minutes), elapsed_seconds))
|
{"hexsha": "1728d739f826af0d758ffed486c8473e3ce159cb", "size": 6158, "ext": "py", "lang": "Python", "max_stars_repo_path": "aarhus/sklearn_kmeans_clustering.py", "max_stars_repo_name": "mikedelong/aarhus", "max_stars_repo_head_hexsha": "0c0e94fadd65be8428fe3bd2c92928e1b23fc2a1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "aarhus/sklearn_kmeans_clustering.py", "max_issues_repo_name": "mikedelong/aarhus", "max_issues_repo_head_hexsha": "0c0e94fadd65be8428fe3bd2c92928e1b23fc2a1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 7, "max_issues_repo_issues_event_min_datetime": "2017-01-13T19:04:57.000Z", "max_issues_repo_issues_event_max_datetime": "2017-01-23T14:10:53.000Z", "max_forks_repo_path": "aarhus/sklearn_kmeans_clustering.py", "max_forks_repo_name": "mikedelong/aarhus", "max_forks_repo_head_hexsha": "0c0e94fadd65be8428fe3bd2c92928e1b23fc2a1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.0116959064, "max_line_length": 120, "alphanum_fraction": 0.7163039948, "include": true, "reason": "from scipy", "num_tokens": 1485}
|
// Copyright 2008 by BBN Technologies Corp.
// All Rights Reserved.
#include "Generic/common/leak_detection.h" // This must be the first #include
#include "Generic/common/SessionLogger.h"
#include "Generic/common/NullSessionLogger.h"
#include "Generic/common/ConsoleSessionLogger.h"
#include "Generic/common/OutputUtil.h"
#include "Generic/common/UnrecoverableException.h"
#include "Generic/common/UnexpectedInputException.h"
#include "Generic/common/InternalInconsistencyException.h"
#include "Generic/common/UnicodeUtil.h"
#include "Generic/common/ParamReader.h"
#include "Generic/common/Symbol.h"
#include <string.h>
#include <iostream>
#include <boost/algorithm/string.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/functional/hash.hpp>
#include <boost/foreach.hpp>
using namespace std;
// Destructors for SessionLogger{Unsetter,Deleter,Restorer}.
// See header file and http://wiki.d4m.bbn.com/wiki/Serif/SessionLogger.
SessionLoggerUnsetter::~SessionLoggerUnsetter() {
SessionLogger::unsetGlobalLogger();
}
SessionLoggerDeleter::~SessionLoggerDeleter() {
SessionLogger::deleteGlobalLogger();
}
SessionLoggerRestorer::~SessionLoggerRestorer() {
SessionLogger::deleteGlobalLogger();
SessionLogger::restorePrevGlobalLogger();
}
// STATIC MEMBERS
// pointer to a logger that writes to console, file, both, or neither
SessionLogger * SessionLogger::_globalLoggerPtr = 0;
// pointer to a previously stored logger
SessionLogger * SessionLogger::_prevGlobalLoggerPtr = 0;
// default logger that takes anything written to it and swallows it without a trace (so to speak)
//NullSessionLogger SessionLogger::_defaultLogger;
// default logger that causes everything to be written to the console
ConsoleSessionLogger SessionLogger::_defaultLogger;
// This is for backwards compatibility only. Do not use it in new code.
SessionLogger * SessionLogger::logger = &SessionLogger::_defaultLogger;
// STATIC METHODS - SETTING UP/DELETING LOGGER
void SessionLogger::setGlobalLogger(SessionLogger * logger_in, bool store_old_logger/*=false*/) {
if (store_old_logger) {
_prevGlobalLoggerPtr = _globalLoggerPtr; // could be null
}
_globalLoggerPtr = logger_in;
logger = logger_in; // for backwards compatibility only
}
bool SessionLogger::globalLoggerExists() {
return _globalLoggerPtr != 0;}
void SessionLogger::deleteGlobalLogger() {
delete _globalLoggerPtr;
_globalLoggerPtr = 0;
}
/**
* This method does NOT delete any logger objects. To do that, the caller should call deleteGlobalLogger()
* before calling this method.
* @return True if a nonzero global logger was stored previously, false otherwise.
**/
bool SessionLogger::restorePrevGlobalLogger() {
_globalLoggerPtr = _prevGlobalLoggerPtr;
logger = _globalLoggerPtr; // for backwards compatibility only
return (_globalLoggerPtr != 0);
}
// Used by a client to unregister the logger; client must then take care of deleting logger object separately.
void SessionLogger::unsetGlobalLogger() {
_globalLoggerPtr = 0; }
// END OF METHODS FOR SETTING UP/DELETING LOGGER
// private method called internally by methods that write the individual messages
SessionLogger * SessionLogger::getLogger() {
if (_globalLoggerPtr) {
return _globalLoggerPtr;
} else {
return &_defaultLogger;
}
}
/** This private structure is used to hold all of SessionLogger's internal variables.
* This allows us to modify the implementation of the logger without requiring that
* everything that imports SessionLogger.h be recompiled. */
struct SessionLogger::Internals {
// The ordered list of context levels.
std::vector<std::wstring> context_level_names;
// Our current context.
std::vector<std::wstring> context_info;
// We will display any message at or above this threshold.
LogLevel logLevelThreshold;
// Determines whether IDs (in parentheses) are displayed before messages.
bool show_ids;
// Don't re-display the same context if we have multiple messages
// in the same context;
bool context_displayed;
// If false, then log messages that contain newlines will have
// those newlines replaced by "\\n" before they are printed.
bool multiline_messages;
// A string prepended to each line of the logger's output.
std::wstring prefix;
// The number of messages of each type we have displayed.
std::map<LogLevel, int> n_messages;
// A list of message identifiers for messages that we should not report.
std::vector<std::string> suppressedMessages;
// A list of message identifiers for messages that we always report, even if its log level
// falls below the log threshold.
std::vector<std::string> forcedMessages;
// A cache used to quickly check if a message should be suppressed. Note
// that the cache keys are compared based on identity; this is because we
// assume that message identifiers will be inline string constants.
struct MessageIdHash {size_t operator()(const char* s) const {return boost::hash_value((void*)s);}};
struct MessageIdEq {bool operator()(const char* s1, const char* s2) const {return s1 == s2;}};
typedef serif::hash_map<const char*, bool, MessageIdHash, MessageIdEq> MessageIdBoolMap;
MessageIdBoolMap suppressedMessageCache;
MessageIdBoolMap forcedMessageCache;
// This form is easier to use (and slightly more efficient) than the one that follows.
Internals(const std::vector<std::wstring> & context_level_name_vec, const wchar_t* output_prefix):
context_level_names(context_level_name_vec)
{
init(output_prefix);
}
// This form is provided for the older (backwards compatible) SessionLogger constructor.
Internals(size_t n_context_levels, const wchar_t **context_level_name_array, const wchar_t* output_prefix):
context_level_names(context_level_name_array, context_level_name_array + n_context_levels)
{
if (context_level_name_array == 0)
throw InternalInconsistencyException("SessionLogger::Internals",
"context_level_name_array may not be NULL!");
init(output_prefix);
}
void init(const wchar_t* output_prefix) {
show_ids = false;
context_displayed = false;
multiline_messages = true;
prefix = (output_prefix == NULL ? L"" : output_prefix);
logLevelThreshold = INFO_LEVEL;
context_info.resize(context_level_names.size()); // fills vector with blank strings
}
void init_from_params() {
// Setting "log_show_ids: true" makes the display more verbose but allows you to immediately see the ID associated
// with each message (useful for either debugging or determining which IDs to suppress).
show_ids = ParamReader::getOptionalTrueFalseParamWithDefaultVal("log_show_ids", false);
// For example, "log_ignore: rm_set_file_0,rm_set_dir_0" will cause messages with IDs "rm_set_file_0"
// and "rm_set_dir_0" to be suppressed.
// There is no mechanism that forces these IDs to be unique, but the consequence of using duplicate IDs
// is that you may unintentionally suppress more messages than you want, or when searching the code, you
// may come across the wrong instance.
suppressedMessages = ParamReader::getStringVectorParam("log_ignore");
// For example, "log_force: dbg_xyz_0" will cause messages with ID "dbg_xyz_0" to be logged, even if it is
// a debug message and the log threshold is set to ignore debug messages in general.
forcedMessages = ParamReader::getStringVectorParam("log_force");
std::string logThreshold = ParamReader::getParam("log_threshold");
if (!logThreshold.empty()) {
if (boost::iequals(logThreshold, "DEBUG"))
logLevelThreshold = DEBUG_LEVEL;
else if (boost::iequals(logThreshold, "INFO"))
logLevelThreshold = INFO_LEVEL;
else if (boost::iequals(logThreshold, "USER_WARNING"))
logLevelThreshold = USER_WARNING_LEVEL;
else if (boost::iequals(logThreshold, "WARNING"))
logLevelThreshold = WARNING_LEVEL;
else if (boost::iequals(logThreshold, "UNEXPECTED_INPUT_ERROR_LEVEL"))
logLevelThreshold = UNEXPECTED_INPUT_ERROR_LEVEL;
else if (boost::iequals(logThreshold, "INTERNAL_INCONSISTENCY_ERROR_LEVEL"))
logLevelThreshold = INTERNAL_INCONSISTENCY_ERROR_LEVEL;
else if (boost::iequals(logThreshold, "ERROR"))
logLevelThreshold = ERROR_LEVEL;
else
throw UnexpectedInputException("SessionLogger::SessionLogger",
"Parameter 'log_threshold' must be set to 'DEBUG', 'INFO', 'WARNING' or ERROR");
}
multiline_messages = ParamReader::getOptionalTrueFalseParamWithDefaultVal("multiline_log_messages", true);
}
};
void SessionLogger::printGlobalLoggerInfo() {
if (_globalLoggerPtr) {
std::cerr << "Global logger is a " << _globalLoggerPtr->getType() << ", with log level " << _globalLoggerPtr->_impl->logLevelThreshold << "\n";
} else {
std::cerr << "Global logger pointer is null\n";;
}
}
// This default constructor does not call ParamReader, so it is particularly useful when initializing
// a SessionLogger object before ParamReader has been initialized.
SessionLogger::SessionLogger():
_impl(new SessionLogger::Internals(std::vector<std::wstring>(), L"")) {
}
// This form of the constructor is easier to use (and slightly more efficient).
SessionLogger::SessionLogger(const std::vector<std::wstring> & context_level_name_vec, const wchar_t* output_prefix):
_impl(new SessionLogger::Internals(context_level_name_vec, output_prefix)) {
_impl->init_from_params();
}
// This form of the constructor is provided for backwards compatibility.
SessionLogger::SessionLogger(size_t n_context_levels, const wchar_t **context_level_names, const wchar_t* output_prefix):
_impl(new SessionLogger::Internals(n_context_levels, context_level_names, output_prefix)) {
_impl->init_from_params();
}
SessionLogger::~SessionLogger() {
delete _impl;
}
void SessionLogger::updateContext(size_t context_level, const wchar_t *context_info) {
getLogger()->updateLocalContext(context_level, context_info);
}
// This is the version that does the work for updateContext() and the other version of updateLocalContext().
void SessionLogger::updateLocalContext(size_t context_level, const wchar_t *context_info) {
size_t levels_defined = (_impl->context_level_names).size();
if (levels_defined == 0) {
return; // for NullSessionLogger
} else if (context_level >= levels_defined) {
warn("bad_context_level_0") << "Ignoring attempt to update context level " << context_level
<< "; only " << levels_defined << " levels are defined.";
} else {
_impl->context_info[context_level] = context_info;
// erase information about all lower context levels
for (size_t i = context_level + 1; i < _impl->context_info.size(); i++)
_impl->context_info[i].clear();
_impl->context_displayed = false;
}
}
void SessionLogger::updateContext(size_t context_level, const char *context_info) {
getLogger()->updateLocalContext(context_level, context_info);
}
void SessionLogger::updateLocalContext(size_t context_level, const char *context_info) {
updateLocalContext(context_level, UnicodeUtil::toUTF16StdString(context_info, UnicodeUtil::REPLACE_ON_ERROR).c_str());
}
void SessionLogger::beginMessage() {
_impl->n_messages[INFO_LEVEL]++;
*this << L"\n";
}
void SessionLogger::beginEvent() {
_impl->n_messages[DEBUG_LEVEL]++;
*this << L"\nEvent:\n";
writeContext();
*this << L"(--) ";
}
void SessionLogger::beginWarning() {
_impl->n_messages[WARNING_LEVEL]++;
*this << L"\nWarning:\n";
writeContext();
*this << L"(**) ";
}
void SessionLogger::beginError() {
_impl->n_messages[ERROR_LEVEL]++;
*this << L"\nError:\n";
writeContext();
*this << L"(!!) ";
}
void SessionLogger::displaySummary() {
int _n_warnings = _impl->n_messages[WARNING_LEVEL] + _impl->n_messages[USER_WARNING_LEVEL];
int _n_errors = _impl->n_messages[ERROR_LEVEL] + _impl->n_messages[UNEXPECTED_INPUT_ERROR_LEVEL] + _impl->n_messages[INTERNAL_INCONSISTENCY_ERROR_LEVEL];
if (_n_warnings == 0 && _n_errors == 0) {
cout << "Session completed with no warnings.\n";
}
else {
cout << "Session completed with " << _n_errors << " error(s) and "
<< _n_warnings << " warning(s).\n";
cout << "Check session log for warning messages.\n";
}
}
void SessionLogger::writeContext() {
for (size_t i = 0;
i < _impl->context_info.size() && _impl->context_info[i][0] != L'\0';
i++)
{
for (size_t j = 0; j <= i; j++)
*this << L" ";
*this << L"- " << _impl->context_level_names[i] << L": "
<< _impl->context_info[i] << "\n";
}
}
SessionLogger &SessionLogger::operator<<(const char *s) {
displayString(UnicodeUtil::toUTF16StdString(s, UnicodeUtil::REPLACE_ON_ERROR), /*stream_id=*/NULL);
if (strchr(s, L'\n')) flush();
return *this;
}
SessionLogger &SessionLogger::operator<<(const wchar_t *s) {
displayString(s, /*stream_id=*/NULL);
if (wcschr(s, L'\n')) flush();
return *this;
}
SessionLogger &SessionLogger::operator<<(const string &s) {
displayString(UnicodeUtil::toUTF16StdString(s, UnicodeUtil::REPLACE_ON_ERROR), /*stream_id=*/NULL);
if (strchr(s.c_str(), L'\n')) flush();
return *this;
}
SessionLogger &SessionLogger::operator<<(const wstring &s) {
displayString(s, /*stream_id=*/NULL);
if (wcschr(s.c_str(), L'\n')) flush();
return *this;
}
SessionLogger &SessionLogger::operator<<(bool b) {
displayString(boost::lexical_cast<std::wstring>(b), /*stream_id=*/NULL);
return *this;
}
SessionLogger &SessionLogger::operator<<(char c) {
displayString(boost::lexical_cast<std::wstring>(c), /*stream_id=*/NULL);
return *this;
}
SessionLogger &SessionLogger::operator<<(short i) {
displayString(boost::lexical_cast<std::wstring>(i), /*stream_id=*/NULL);
return *this;
}
SessionLogger &SessionLogger::operator<<(int i) {
displayString(boost::lexical_cast<std::wstring>(i), /*stream_id=*/NULL);
return *this;
}
SessionLogger &SessionLogger::operator<<(long i) {
displayString(boost::lexical_cast<std::wstring>(i), /*stream_id=*/NULL);
return *this;
}
SessionLogger &SessionLogger::operator<<(unsigned short i) {
displayString(boost::lexical_cast<std::wstring>(i), /*stream_id=*/NULL);
return *this;
}
SessionLogger &SessionLogger::operator<<(unsigned int i) {
displayString(boost::lexical_cast<std::wstring>(i), /*stream_id=*/NULL);
return *this;
}
SessionLogger &SessionLogger::operator<<(unsigned long i) {
displayString(boost::lexical_cast<std::wstring>(i), /*stream_id=*/NULL);
return *this;
}
SessionLogger &SessionLogger::operator<<(unsigned long long i) {
displayString(boost::lexical_cast<std::wstring>(i), /*stream_id=*/NULL);
return *this;
}
SessionLogger &SessionLogger::operator<<(double r) {
displayString(boost::lexical_cast<std::wstring>(r), /*stream_id=*/NULL);
return *this;
}
SessionLogger &SessionLogger::operator<<(const UnrecoverableException& exc) {
this->reportError() << exc.getSource() << ": " << exc.getMessage();
return *this;
}
// Use this to accept, but ignore, std::endl. Note that it will cause other
// stream manipulators (e.g., left, right, hex) to be ignored as well, but
// they're never used in the codebase (except on ostringstream objects).
SessionLogger &SessionLogger::operator<<(std::ostream&(*f)(std::ostream&)) {
return *this;
}
SessionLogger &SessionLogger::operator<<(const std::ostringstream & ostr) {
displayString(ostr.str(), /*stream_id=*/NULL);
return *this;
}
SessionLogger &SessionLogger::operator<<(const std::wostringstream & ostr) {
displayString(ostr.str(), /*stream_id=*/NULL);
return *this;
}
SessionLogger &SessionLogger::operator<<(const Symbol &s) {
displayString(s.to_string(), /*stream_id=*/NULL);
return *this;
}
SessionLogger &SessionLogger::operator<<(const boost::gregorian::date &d) {
displayString(boost::gregorian::to_iso_extended_string(d), /*stream_id=*/NULL);
return *this;
}
namespace {
const wchar_t *LOG_LEVEL_PREFIXES[SessionLogger::NUM_LEVELS] = {
L"Debug: ", // DEBUG_LEVEL,
L"", // INFO_LEVEL,
L"(!!) Warning: ", // WARNING_LEVEL,
L"(!!) Warning: ", // USER_WARNING_LEVEL,
L"(**) Error: ", // UNEXPECTED_INPUT_ERROR_LEVEL,
L"(**) Error: ", // INTERNAL_INCONSISTENCY_ERROR_LEVEL,
L"(**) Error: ", // ERROR_LEVEL
};
void indent(std::wostringstream& out, size_t levels) {
for (size_t j=0; j<levels; ++j) out << L" ";
}
}
// Static methods.
bool SessionLogger::dbg_or_msg_enabled(const char* identifier) {
return enabledAtLevel(identifier, DEBUG_LEVEL);}
bool SessionLogger::info_or_msg_enabled(const char* identifier) {
return enabledAtLevel(identifier, INFO_LEVEL);}
bool SessionLogger::warn_or_msg_enabled(const char* identifier) {
return enabledAtLevel(identifier, WARNING_LEVEL);}
// We expect this to be used infrequently, if at all, as the user will probably always want to see error messages.
bool SessionLogger::err_or_msg_enabled(const char* identifier) {
return enabledAtLevel(identifier, ERROR_LEVEL);}
// We expect these to be used very infrequently, if at all.
bool SessionLogger::user_warning_or_msg_enabled(const char* identifier) {
return enabledAtLevel(identifier, USER_WARNING_LEVEL);}
bool SessionLogger::err_unexpected_input_or_msg_enabled(const char* identifier) {
return enabledAtLevel(identifier, UNEXPECTED_INPUT_ERROR_LEVEL);}
bool SessionLogger::err_internal_inconsistency_or_msg_enabled(const char* identifier) {
return enabledAtLevel(identifier, INTERNAL_INCONSISTENCY_ERROR_LEVEL);}
// Static. See enabledAtLocalLevel() (which is nonstatic) for details.
bool SessionLogger::enabledAtLevel(const char* identifier, SessionLogger::LogLevel level) {
return getLogger()->enabledAtLocalLevel(identifier, level);
}
// Returns false if the message with the specified identifier is suppressed, or if the specified level
// is below the log threshold (and the message is not in the list of messages that are forced to be displayed,
// specified via a param read by ParamReader). Returns true otherwise.
// It's a bad idea to include a message in both the suppressed (log_ignore) and forced (log_force) lists,
// but if you do, it will be suppressed.
bool SessionLogger::enabledAtLocalLevel(const char* identifier, SessionLogger::LogLevel level) const {
if (isMessageSuppressed(identifier)) {
return false;
} else if ((level < _impl->logLevelThreshold) && !isMessageForced(identifier)) {
return false;
} else {
return true;
}
}
void SessionLogger::reportMessage(SessionLogger::LogLevel level, const char* identifier, const char * stream_id,
const std::wstring &msg, bool add_newline)
{
if (!enabledAtLevel(identifier, level) || msg.empty()) {
return;
}
// increment count of messages at level
_impl->n_messages[level]++;
wostringstream out;
// Display the current context, starting at the last level we displayed.
if (_impl->multiline_messages) {
if (!_impl->context_displayed) {
for (size_t i=0; i<_impl->context_info.size() && !_impl->context_info[i].empty(); ++i) {
out << _impl->prefix;
indent(out, i*2);
out << _impl->context_level_names[i] << L": "
<< _impl->context_info[i] << L"\n";
}
_impl->context_displayed = true;
}
} else {
out << LOG_LEVEL_PREFIXES[level];
out << _impl->prefix << L"[";
for (size_t i=0; i<_impl->context_info.size() && !_impl->context_info[i].empty(); ++i) {
if (i>0) out << L"; ";
out << _impl->context_level_names[i] << L"=" << _impl->context_info[i];
}
out << L"] ";
}
// Display the message. Indent individual lines.
std::vector<std::wstring> lines;
boost::algorithm::split(lines, msg, boost::is_any_of(L"\n"), boost::token_compress_on);
size_t first_nonempty_index=0;
// Find the index of the first nonempty line. If all lines are empty, no problem: we'll increment
// first_nonempty_index past lines.size() and the loop that prints output will be skipped.
while (first_nonempty_index<lines.size() && lines[first_nonempty_index].empty()) {
++first_nonempty_index;
}
for (size_t i=first_nonempty_index; i<lines.size(); ++i) {
if (lines[i].empty())
continue;
if (i>first_nonempty_index) {
if (add_newline) {
out << (_impl->multiline_messages?L"\n":L"\\n");
} else {
out << L" ";
}
}
out << _impl->prefix;
if (_impl->multiline_messages) {
for (size_t j=0; j<_impl->context_info.size() && !_impl->context_info[j].empty(); ++j)
out << L" ";
if (i == first_nonempty_index)
out << LOG_LEVEL_PREFIXES[level];
else
indent(out, wcslen(LOG_LEVEL_PREFIXES[level]));
if (_impl->show_ids && identifier != NULL)
out << L"(" << UnicodeUtil::toUTF16StdString(identifier, UnicodeUtil::REPLACE_ON_ERROR) << L") ";
}
out << lines[i];
}
if (add_newline) {
out << L'\n';
}
displayString(out.str(), stream_id);
flush();
}
bool SessionLogger::isMessageSuppressed(const char* identifier) const {
if (identifier == NULL) return false;
if (_impl->suppressedMessageCache.find(identifier) != _impl->suppressedMessageCache.end())
return _impl->suppressedMessageCache[identifier];
bool suppressed = false;
for (size_t i = 0; i < _impl->suppressedMessages.size(); i++) {
std::string msg_id = _impl->suppressedMessages[i];
if (boost::iequals(msg_id, identifier)) {
suppressed = true;
break;
}
}
if (_impl->suppressedMessageCache.size() < 1000)
_impl->suppressedMessageCache[identifier] = suppressed;
return suppressed;
}
bool SessionLogger::isMessageForced(const char* identifier) const {
if (identifier == NULL) return false;
if (_impl->forcedMessageCache.find(identifier) != _impl->forcedMessageCache.end())
return _impl->forcedMessageCache[identifier];
bool forced = false;
for (size_t i = 0; i < _impl->forcedMessages.size(); i++) {
std::string msg_id = _impl->forcedMessages[i];
if (boost::iequals(msg_id, identifier)) {
forced = true;
break;
}
}
if (_impl->forcedMessageCache.size() < 1000)
_impl->forcedMessageCache[identifier] = forced;
return forced;
}
SessionLogger::LogMessageMaker& SessionLogger::LogMessageMaker::operator<<(const char *s) {
if (_msg) *_msg << UnicodeUtil::toUTF16StdString(s, UnicodeUtil::REPLACE_ON_ERROR); return *this; }
SessionLogger::LogMessageMaker& SessionLogger::LogMessageMaker::operator<<(const wchar_t *s) {
if (_msg) *_msg << s; return *this; }
SessionLogger::LogMessageMaker& SessionLogger::LogMessageMaker::operator<<(const std::string &s) {
if (_msg) *_msg << UnicodeUtil::toUTF16StdString(s, UnicodeUtil::REPLACE_ON_ERROR); return *this; }
SessionLogger::LogMessageMaker& SessionLogger::LogMessageMaker::operator<<(const std::wstring &s) {
if (_msg) *_msg << s; return *this; }
SessionLogger::LogMessageMaker& SessionLogger::LogMessageMaker::operator<<(bool b) {
if (_msg) *_msg << b; return *this; }
SessionLogger::LogMessageMaker& SessionLogger::LogMessageMaker::operator<<(char c) {
if (_msg) *_msg << c; return *this; }
SessionLogger::LogMessageMaker& SessionLogger::LogMessageMaker::operator<<(short i) {
if (_msg) *_msg << i; return *this; }
SessionLogger::LogMessageMaker& SessionLogger::LogMessageMaker::operator<<(int i) {
if (_msg) *_msg << i; return *this; }
SessionLogger::LogMessageMaker& SessionLogger::LogMessageMaker::operator<<(long i) {
if (_msg) *_msg << i; return *this; }
SessionLogger::LogMessageMaker& SessionLogger::LogMessageMaker::operator<<(unsigned short i) {
if (_msg) *_msg << i; return *this; }
SessionLogger::LogMessageMaker& SessionLogger::LogMessageMaker::operator<<(unsigned int i) {
if (_msg) *_msg << i; return *this; }
SessionLogger::LogMessageMaker& SessionLogger::LogMessageMaker::operator<<(unsigned long i) {
if (_msg) *_msg << i; return *this; }
SessionLogger::LogMessageMaker& SessionLogger::LogMessageMaker::operator<<(unsigned long long i) {
if (_msg) *_msg << i; return *this; }
SessionLogger::LogMessageMaker& SessionLogger::LogMessageMaker::operator<<(double r) {
if (_msg) *_msg << r; return *this; }
SessionLogger::LogMessageMaker& SessionLogger::LogMessageMaker::operator<<(const UnrecoverableException &exc) {
if (dynamic_cast<const InternalInconsistencyException*>(&exc))
_level = INTERNAL_INCONSISTENCY_ERROR_LEVEL;
else if (dynamic_cast<const UnexpectedInputException*>(&exc))
_level = UNEXPECTED_INPUT_ERROR_LEVEL;
if (_msg) *_msg << exc.getSource() << ": " << exc.getMessage(); return *this;
}
// for ignoring std::endl
SessionLogger::LogMessageMaker& SessionLogger::LogMessageMaker::operator<<(std::ostream&(*f)(std::ostream&)) {
return *this; }
SessionLogger::LogMessageMaker& SessionLogger::LogMessageMaker::operator<<(const std::ostringstream & ostr) {
return operator<<(ostr.str()); }
SessionLogger::LogMessageMaker& SessionLogger::LogMessageMaker::operator<<(const std::wostringstream & ostr) {
if (_msg) *_msg << ostr.str(); return *this; }
SessionLogger::LogMessageMaker& SessionLogger::LogMessageMaker::operator<<(const Symbol &s) {
if (_msg) *_msg << s.to_debug_string(); return *this; }
SessionLogger::LogMessageMaker& SessionLogger::LogMessageMaker::operator<<(const boost::gregorian::date &d) {
if (_msg) *_msg << UnicodeUtil::toUTF16StdString(boost::gregorian::to_iso_extended_string(d), UnicodeUtil::REPLACE_ON_ERROR); return *this; }
SessionLogger::LogMessageMaker::LogMessageMaker(LogLevel level):
_level(level), _msg(), _identifier(),
_stream_id(0), _add_trail_newline(true)
{
if (enabledAtLevel(NULL, level))
_msg.reset(_new std::wostringstream());
}
SessionLogger::LogMessageMaker::LogMessageMaker(const LogMessageMaker &other):
_level(other._level), _msg(other._msg), _identifier(other._identifier), _stream_id(other._stream_id),
_add_trail_newline(other._add_trail_newline) {}
SessionLogger::LogMessageMaker& SessionLogger::LogMessageMaker::with_id(const char* log_message_id) {
_identifier = log_message_id;
if ((!_msg) && enabledAtLevel(log_message_id, _level))
_msg.reset(_new std::wostringstream());
return *this;
}
SessionLogger::LogMessageMaker& SessionLogger::LogMessageMaker::with_stream_id(const char* stream_id) {
_stream_id = stream_id;
return *this;
}
SessionLogger::LogMessageMaker& SessionLogger::LogMessageMaker::add_trail_newline(bool add) {
_add_trail_newline = add;
return *this;
}
SessionLogger::LogMessageMaker::~LogMessageMaker() {
// If we're the last one with a pointer to this message, then report it
// before we destroy ourselves.
if (_msg.unique())
getLogger()->reportMessage(_level, _identifier, _stream_id, _msg->str(), _add_trail_newline);
}
// CONVENIENCE METHODS
// Example of usage:
// SessionLogger::info("patt_file_0") << "Reading pattern file " << filename << "\n";
// Note that they expect an ID to be defined. We want to encourage the use of IDs
// everywhere so that the corresponding messages can be suppressed, or the relevant code
// can be found via a search (which is easier than looking for the text of the message).
// We expect these four (which have short names) to be called frequently.
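// Added illustrative sketch (not from the original source): the same call
// pattern with an explicit stream id. Here "missing_value_0" and "my_stream"
// are hypothetical identifiers, and `key` stands for any local variable.
//   SessionLogger::warn("missing_value_0", "my_stream")
//       << "No value found for key " << key << "\n";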
SessionLogger::LogMessageMaker SessionLogger::dbg(const char* log_message_id, const char* stream_id/*=NULL*/) {
return getLogger()->reportDebugMessage().with_id(log_message_id).with_stream_id(stream_id);}
SessionLogger::LogMessageMaker SessionLogger::info(const char* log_message_id, const char* stream_id/*=NULL*/) {
return getLogger()->reportInfoMessage().with_id(log_message_id).with_stream_id(stream_id);}
SessionLogger::LogMessageMaker SessionLogger::warn(const char* log_message_id, const char* stream_id/*=NULL*/) {
return getLogger()->reportWarning().with_id(log_message_id).with_stream_id(stream_id);}
SessionLogger::LogMessageMaker SessionLogger::err(const char* log_message_id, const char* stream_id/*=NULL*/) {
return getLogger()->reportError().with_id(log_message_id).with_stream_id(stream_id);}
// We expect these four (which have longer names) to be called less frequently.
SessionLogger::LogMessageMaker SessionLogger::warn_user(const char* log_message_id, const char* stream_id/*=NULL*/) {
return getLogger()->reportUserWarning().with_id(log_message_id).with_stream_id(stream_id); }
SessionLogger::LogMessageMaker SessionLogger::err_unexpected_input(const char* log_message_id, const char* stream_id/*=NULL*/) {
return getLogger()->reportUnexpectedInputError().with_id(log_message_id).with_stream_id(stream_id); }
SessionLogger::LogMessageMaker SessionLogger::err_internal_inconsistency(const char* log_message_id, const char* stream_id/*=NULL*/) {
return getLogger()->reportInternalInconsistencyError().with_id(log_message_id).with_stream_id(stream_id); }
bool SessionLogger::is_wcerr_synonym(const char * stream_id) {
if (stream_id == NULL) {
return false;
} else if (strcmp(stream_id, "cerr") == 0 ||
strcmp(stream_id, "wcerr") == 0 ||
strcmp(stream_id, "std::cerr") == 0 ||
strcmp(stream_id, "std::wcerr") == 0) {
return true;
} else {
return false;
}
}
bool SessionLogger::is_wcout_synonym(const char * stream_id) {
if (stream_id == NULL) {
return false;
} else if (strcmp(stream_id, "cout") == 0 ||
strcmp(stream_id, "wcout") == 0 ||
strcmp(stream_id, "std::cout") == 0 ||
strcmp(stream_id, "std::wcout") == 0) {
return true;
} else {
return false;
}
}
void SessionLogger::displayString(const std::string &msg, const char *stream_id) {
displayString(UnicodeUtil::toUTF16StdString(msg, UnicodeUtil::REPLACE_ON_ERROR), stream_id);
}
std::string SessionLogger::getType() {
return "SessionLogger";
}
|
{"hexsha": "9316c51f2dbb10065bf9db6a5a933af086781d53", "size": 29921, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/Generic/common/SessionLogger.cpp", "max_stars_repo_name": "BBN-E/serif", "max_stars_repo_head_hexsha": "1e2662d82fb1c377ec3c79355a5a9b0644606cb4", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2022-03-24T19:57:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T19:57:00.000Z", "max_issues_repo_path": "src/Generic/common/SessionLogger.cpp", "max_issues_repo_name": "BBN-E/serif", "max_issues_repo_head_hexsha": "1e2662d82fb1c377ec3c79355a5a9b0644606cb4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Generic/common/SessionLogger.cpp", "max_forks_repo_name": "BBN-E/serif", "max_forks_repo_head_hexsha": "1e2662d82fb1c377ec3c79355a5a9b0644606cb4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.2134986226, "max_line_length": 155, "alphanum_fraction": 0.7207646803, "num_tokens": 7298}
|
#!/usr/bin/python2.7
# _*_ coding: utf-8 _*_
"""
@Author: MarkLiu
"""
import matplotlib.pyplot as plt
import numpy as np
def plotSigmoidTest():
"""
Plot the Sigmoid function.
:return:
"""
figure = plt.figure(figsize=(10, 10), facecolor="white")
figure.clear()
pltaxes = plt.subplot(111)
num = np.linspace(-40, 40, 256, endpoint=True)
y = 1.0 / (1 + np.exp(-1 * num)) # compute the Sigmoid function y(z) = 1 / (1 + exp(-z))
pltaxes.plot(num, y, color="blue", linewidth=2.0, linestyle="-")
plt.xlim(-40, 40)
plt.savefig('plotSigmoidFunction.jpg')
plt.show()
if __name__ == '__main__':
plotSigmoidTest()
|
{"hexsha": "8a03b0a905311b08bfa08a95df8936db16c11e85", "size": 634, "ext": "py", "lang": "Python", "max_stars_repo_path": "LogisticRegression/plotSigmoid.py", "max_stars_repo_name": "SunnyLQ/Machine_Learning_In_Action_Algorithms", "max_stars_repo_head_hexsha": "8753e0798dd47a210f77306558185a54888e532d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2016-01-24T14:56:42.000Z", "max_stars_repo_stars_event_max_datetime": "2016-12-13T10:21:11.000Z", "max_issues_repo_path": "LogisticRegression/plotSigmoid.py", "max_issues_repo_name": "SunnyLQ/Machine_Learning_In_Action_Algorithms", "max_issues_repo_head_hexsha": "8753e0798dd47a210f77306558185a54888e532d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "LogisticRegression/plotSigmoid.py", "max_forks_repo_name": "SunnyLQ/Machine_Learning_In_Action_Algorithms", "max_forks_repo_head_hexsha": "8753e0798dd47a210f77306558185a54888e532d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2016-03-06T06:57:15.000Z", "max_forks_repo_forks_event_max_datetime": "2016-03-06T06:57:15.000Z", "avg_line_length": 19.8125, "max_line_length": 76, "alphanum_fraction": 0.5993690852, "include": true, "reason": "import numpy", "num_tokens": 206}
|
import unittest
from pyalink.alink import *
import numpy as np
import pandas as pd
class TestVectorBiFunctionStreamOp(unittest.TestCase):
def test_vectorbifunctionstreamop(self):
df = pd.DataFrame([
["1 2 3", "2 3 4"]
])
data = StreamOperator.fromDataframe(df, schemaStr="vec1 string, vec2 string")
VectorBiFunctionStreamOp() \
.setSelectedCols(["vec1", "vec2"]) \
.setBiFuncName("minus").setOutputCol("vec_minus").linkFrom(data).print();
StreamOperator.execute()
pass
|
{"hexsha": "7871115abccb4d410281a979c9859336fff20818", "size": 553, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/src/main/python/pyalink/alink/tests/examples/from_docs/test_vectorbifunctionstreamop.py", "max_stars_repo_name": "wenwei8268/Alink", "max_stars_repo_head_hexsha": "c00702538c95a32403985ebd344eb6aeb81749a7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/src/main/python/pyalink/alink/tests/examples/from_docs/test_vectorbifunctionstreamop.py", "max_issues_repo_name": "wenwei8268/Alink", "max_issues_repo_head_hexsha": "c00702538c95a32403985ebd344eb6aeb81749a7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/src/main/python/pyalink/alink/tests/examples/from_docs/test_vectorbifunctionstreamop.py", "max_forks_repo_name": "wenwei8268/Alink", "max_forks_repo_head_hexsha": "c00702538c95a32403985ebd344eb6aeb81749a7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.5625, "max_line_length": 85, "alphanum_fraction": 0.6528028933, "include": true, "reason": "import numpy", "num_tokens": 135}
|
% !TEX root = Main.tex
\chapter{Change Log}
Changes in bold were required by the Graduate School.
\section{Changes in v1.15}
\section{Changes in v1.14}
\begin{itemize}
\item{\bfseries Ensured double spacing in chapter titles.}
\item{\bfseries Removed extra space above chapter titles.}
\item{{\bfseries Removed extra space between chapters in list of figures and list of tables.} This is implemented as an option, you can add the space back (for unofficial copies) with the loftspacing option.}
\item{\bfseries Changed ``The University of Maine'' to ``the University of Maine'' in the auto-sentence of the author biography.}
\item{\bfseries Reduce space between title and author name on abstract pages.}
\item{\bfseries Remove ``Chapter'' heading from TOC.}
\item{Fixed problem with link target (when using hyperref) and page number in TOC for reference section.}
\item{Improved capitalization enforcement for chapter headings and TOC entries. This should make redefining section names (as when using babel) much easier.}
\item{Dropped 2-volume support. (Graduate School no longer needs a printed copy of the thesis.)}
\item{Page number placement now controlled by a class option. It's also more consistent in its application as a result.}
\item{\bfseries Extra space before ``Chapter'' label in TOC has been removed.}
\item{``Chapter'' and ``Appendix'' in chapter headings is now printed in all uppercase.}
\item{\bfseries Tweaks to make 5-dot minimum in TOC leaders better respected. Thanks to pmbean6 for this fix.}
\item{\bfseries Use a pronoun instead of author name for the last sentence of author biography. Users now need to define their preferred pronoun with the \verb=\authorpronoun= command. Do not forget to capitalize the first letter of the pronoun. If no pronoun is provided, then the full author name will appear in the last sentence of the biography.}
\item{\bfseries When figure/table captions are too long to go into the table of contents, the graduate school wants the entry in the table of contents to match the first sentence of caption exactly. To facilitate this the \verb=\caption= command has been redefined so that the optional argument, if given, is automatically prepended to the caption text. Older theses, for which this new behavior would be undesirable, can turn it off with the legacycaptions option.}
\item{\bfseries On title page, when the name and title of an advisor or committee member is long enough to wrap to a second line, that second line will be indented 1.5em (the same as the indentation of a paragraph in the body of the thesis).}
\end{itemize}
\section{Changes in v1.13}
\begin{itemize}
\item{Short form of advisor's name can now be entered as an optional argument of \verb=\principaladvisor=.}
\item{Bugfix: idecimal and jdecimal heading styles were suppressing the section numbers. Thanks to pmbean6 for this fix.}
\item{Margin widths have been tweaked a little so that they more closely conform to the guidelines. Thanks to pmbean6 for this fix.}
\item{If you edited the class file to get justified text back, then subsection headings were being indented in jdecimal style. This has been fixed in preparation for later changes. Thanks to pmbean6 for this fix.}
\item{Package conflict with float package has been resolved. Thanks to pmbean6 for this fix. Those updating a thesis should change listof environments to thesislist.}
\item{Bugfix: The default setting of \verb=\parindent= was being forced to 0, which was not as intended.}
\item{Indentation for the headings has been decoupled from \verb=\parindent= and is now tied to \verb=\headindent=.}
\item{Added some basic metadata (title and author) handling when hyperref is loaded. Thanks to pmbean6 for this enhancement}
\item{\bfseries Adjusted page numbering to account for removal of Dissertation Acceptance page.}
\end{itemize}
\section{Changes in v1.12}
\begin{itemize}
\item{\bfseries Eliminated Dissertation Acceptance and Library Rights Statement pages.}
\end{itemize}
\section{Changes in v1.11}
\begin{itemize}
\item{\bfseries Replaced ``thesis'' with \verb=\@type= on Library Rights page.}
\item{\textbf{Labels for signature lines now use the same size font as the rest of the thesis (they were formerly reduced).}}
\item{\textbf{Gap between the title and the text on Dissertation Acceptance and Library Rights page has been reduced.}}
\item{\textbf{Mandatory sentence at the end of the Author Biography (which the class file produces automatically) is no longer its own paragraph.}}
\item{\textbf{The default headings system has been modified to make it match more closely with the justified decimal example in the Guidelines.}}
\item{Two additional headings systems (headings and idecimal) have been added. These are based on the headings and indented decimal examples in the Guidelines.}
\item{\bfseries Improved Widow/Orphan protection in the TOC.}
\item{\bfseries Improved Widow/Orphan protection in bibliography.}
\end{itemize}
\section{Changes in v1.10}
\begin{itemize}
\item{\textbf{Alignment of multi-line table of contents entries for Appendices altered}}
\item{5-dot leader minimum code reworked to be more robust}
\end{itemize}
\section{Changes in v1.9}
\begin{itemize}
\item{\textbf{Acceptance Page title consolidated to a single line.}}
\item{\textbf{Removed ``Submitted for graduation\ldots'' from Acceptance Page.}}
\end{itemize}
\section{Changes in v1.8}
\begin{itemize}
\item{\textbf{Hyphenation disabled.}}
\item{\textbf{Full justification disabled.}}
\end{itemize}
\section{Changes in v1.7}
\begin{itemize}
\item{Added \verb=\highlight= command.}
\item{Modifications to \verb=\pocket= to make its ToC entries match other chapter-level entries.}
\item{Added two-volume support.}
\item{Made some modifications to help with widow/orphan control in the ToC.}
\end{itemize}
\section{Changes in v1.6}
\begin{itemize}
\item{\textbf{Changed line length for multiple line entries in the ToC.}}
\item{\textbf{Removed the multiple appendices ``Appendices'' header from the ToC.}}
\item{Added twoside option.}
\item{Added unbound option.}
\item{Added hooks to alter heading styles.}
\item{Added chicago and apa option to switch headings automatically to the appropriate style.}
\end{itemize}
\section{Changes in v1.5}
\begin{itemize}
\item{License Changed to LPPL v1.3c.}
\item{Generalized Dissertation Acceptance Page.}
\item{Changed to signature line on Library Rights Page.}
\item{Fixed delimiter in figure and table captions.}
\item{Unified \verb=\copyrightyear{...}= and \verb=\copyrightpage= into single command.}
\item{Refined support for two advisors and number of committee members.}
\item{Removed support for External Reader on title page.}
\item{Created patch code to fix list of tables and list of figures when hyperref is used.}
\item{Added layabstract environment.}
\item{Added listof environment.}
\item{Changed font for verbatim environment and \verb=\verb= command.}
\item{Fixed typesetting of dedication.}
\item{General file maintenance.}
\item{Added insertion of ``Appendices'' to ToC when there are multiple appendices.}
\item{Modified biography environment to auto-generate the last sentence.}
\item{Made identification of number of advisors and committee members automatic.}
\item{Removed \verb=\appsection{...}= as it is redundant with \verb=\section*{...}=.}
\item{Changed way ``Chapters'' and ``Appendices'' are added to the TOC.}
\item{Added tocvsec2 dependance to make the change in TOC depth for the front matter and appendices automatic.}
\item{Modified preface environment to make the non-numbering of its sections, subsections, etc automatic.}
\item{Reserved \verb=\part= for multiple volume support.}
\item{Added \verb=\pocket=.}
\item{Defined a pseudo \verb=\texorpdfstring= command for use in chapter titles. When hyperref is loaded (and defines the command properly) this has the effect of hiding \verb=\MakeUppercase= commands from hyperref.}
\item{Made Preface, Dedication, and Acknowledgements double spaced.}
\item{Created type variables and commands that allow switching to ``thesis'' or ``project'' instead of ``dissertation.''}
\item{Removed footnote rule.}
\item{Renamed \verb=\labelchaptersintoc= to \verb=\toclabel=, generalized its function, and made it compatible with hyperref.}
\item{Added commands to compress title page when needed.}
\end{itemize}
\section{Changes prior to v1.5}
This list is not entirely complete but is the best reconstruction I can manage. Changes were not logged prior to v1.5.
\begin{itemize}
\item{Added Dissertation Acceptance Page}
\item{Added support for 6 member committees}
\item{Removed Boldface from TOC entries}
\item{Reduced size of chapter and section headers to match text font, both in place and TOC entries}
\item{Added support for two advisors}
\end{itemize}
\endinput
|
{"hexsha": "f9f5a602be8a4ec1481e192d1b662f566415ab02", "size": 8976, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "Documentation/AppB.tex", "max_stars_repo_name": "rpspringuel/maine-thesis", "max_stars_repo_head_hexsha": "c0a4c2b600f25e9c33578fe07de1b271bef4a4b0", "max_stars_repo_licenses": ["LPPL-1.3c"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2017-08-01T15:49:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-26T22:10:27.000Z", "max_issues_repo_path": "Documentation/AppB.tex", "max_issues_repo_name": "rpspringuel/maine-thesis", "max_issues_repo_head_hexsha": "c0a4c2b600f25e9c33578fe07de1b271bef4a4b0", "max_issues_repo_licenses": ["LPPL-1.3c"], "max_issues_count": 21, "max_issues_repo_issues_event_min_datetime": "2016-10-14T02:25:49.000Z", "max_issues_repo_issues_event_max_datetime": "2018-10-24T17:20:47.000Z", "max_forks_repo_path": "Documentation/AppB.tex", "max_forks_repo_name": "rpspringuel/maine-thesis", "max_forks_repo_head_hexsha": "c0a4c2b600f25e9c33578fe07de1b271bef4a4b0", "max_forks_repo_licenses": ["LPPL-1.3c"], "max_forks_count": 16, "max_forks_repo_forks_event_min_datetime": "2016-10-08T03:37:37.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-27T17:06:28.000Z", "avg_line_length": 64.5755395683, "max_line_length": 468, "alphanum_fraction": 0.7803030303, "num_tokens": 2242}
|
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
\copyright Copyright (c) 2022 Visual Computing group of Ulm University,
Germany. See the LICENSE file at the top-level directory of
this distribution.
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
import numpy as np
"""
These are all numpy functions.
"""
# speed of light in meters per nanosecond
constants = {'lightspeed': 0.299792458}
# in GHz
constants['frequencies'] = np.array([20, 50, 70]) / 1e3
constants['frequencies_str'] = ['20MHz', '50MHz', '70MHz']
# in radians
constants['phase_offsets'] = [0, np.pi / 2, np.pi, 3 / 2 * np.pi]
# in nanoseconds
constants['exposure_time'] = 0.01 / constants['lightspeed']
def tof_depth_from_single_frequency(correlations, frequency, phase_offsets=[0, 120, 240]):
""" Computes depth from single frequency measurements. Contains phase wrapping.
Args:
correlations: `float` of shape `[B, H, W, P]`.
frequency: `float` in GHz.
phase_offsets: `float` of shape `[P]` in degrees.
Returns:
`float` of shape `[B, H, W]`.
"""
phase_offsets = (np.array(phase_offsets) / 180 * np.pi).reshape([1, 1, 1, -1])
I = np.sum(-np.sin(phase_offsets) * correlations, axis=-1)
Q = np.sum(np.cos(phase_offsets) * correlations, axis=-1)
delta_phi = np.arctan2(I, Q)
# resolve to positive domain
delta_phi[delta_phi < 0] += 2 * np.pi
#print('phase ', delta_phi)
depth = constants['lightspeed'] / (4 * np.pi * frequency) * delta_phi
return depth
def correlation2depth(correlations, frequency):
""" Computes ToF depth from intensity images. (in meter [m])
Depth wraps around at `lightspeed / (2 * f)`.
Optimized version for four phase measurements at 90° step offsets.
Args:
correlations: `floats` of shape `[B, H, W, 4]`.
ordered with offsets `[0°, 90°, 180°, 270°]`
frequency: `float` in GHz
Returns:
`floats` of shape `[B, H, W, 1]`
"""
# phase offset on light path
delta_phi = np.arctan2(correlations[:, :, :, 3] - correlations[:, :, :, 1], correlations[:, :, :, 0] - correlations[:, :, :, 2])
# resolve to positive domain
delta_phi[delta_phi < 0] += 2 * np.pi
#print('phase ', delta_phi)
tof_depth = constants['lightspeed'] / (4 * np.pi * frequency) * delta_phi
return np.expand_dims(tof_depth, axis=-1)
def compute_points_from_depth(depth, fov=[65, 65], return_rays=False):
""" Projects 2.5D depths from camera coordinates to global coordinates.
Args:
depth: `float` shape `[B, H, W, 1]`
fov: `float`, shape `[2]`, field of view of the camera in degrees for height and width.
Returns:
points: shape `[B, H, W, 3]`
rays: shape `[1, H, W, 3]`
"""
fov_u = fov[0]
fov_quot = fov[1] / fov[0]
fov_u = fov_u / 180 * np.pi
# B = depth.shape[0]
H = depth.shape[1]
W = depth.shape[2]
u, v = np.meshgrid(
np.linspace(-1, 1, H),
np.linspace(-fov_quot, fov_quot, W),
indexing='ij')
w = np.ones(u.shape) * 1 / np.tan(fov_u / 2)
p = np.stack((u, v, w), axis=-1)
p = np.expand_dims(p, axis=0)
norm_p = np.linalg.norm(p, axis=-1, keepdims=True)
points = p * depth / norm_p
if return_rays:
rays = p / norm_p
return points, rays
return points
def global_depth_to_camera_depth(points):
""" Projects 3D depths from global coordinates to camera coordinates in 2.5D.
Args:
points: `float` shape `[B, H, W, 3]`
Returns:
depths: `float` shape `[B, H, W, 1]`
"""
return np.linalg.norm(points, axis=-1, keepdims=True)
def camera_rays(shape, fov=[65, 65]):
""" normalized camera ray directions
Args:
shape: `ints` of shape `[B, H, W]`.
fov: `float`, field of view of the camera in degrees.
Returns:
A `float` `np.array`of shape `[1, H, W, 3]`.
"""
B, H, W = shape
fov_u = fov[0]
fov_quot = fov[1] / fov[0]
fov_u = fov_u / 180 * np.pi
u, v = np.meshgrid(
np.linspace(-1, 1, H),
np.linspace(-fov_quot, fov_quot, W),
indexing='ij')
w = np.ones(u.shape) * 1 / np.tan(fov_u / 2)
p = np.stack((u, v, w), axis=-1)
norm_p = np.linalg.norm(p, axis=-1, keepdims=True)
rays = p / norm_p
return rays
def depth_on_pixels(depth):
""" Turns 2.5D into points using pixel ids as coordinates.
For 3D Plotting
"""
H = depth.shape[0]
W = depth.shape[1]
x, y = np.meshgrid(np.arange(0, H), np.arange(0, W), indexing='ij')
z = depth
# x = x.reshape(-1)
# y = y.reshape(-1)
# z = z.reshape(-1)
return np.stack((x, y, z), axis=-1)
def reconstruct_correlations(amplitudes, intensities, tof_depths, frequency, phase_offsets=[0, 90, 180, 270]):
"""
Args:
amplitudes: `float` of shape `[B, H, W]`.
intensities: `float` of shape `[B, H, W]`.
tof_depths: `float` of shape `[B, H, W]`.
frequency: `float` in GHz.
phase_offsets: `floats` of shape `[4]`.
Returns:
`float` array of shape `[B, H, W, 4]`
"""
amplitudes = np.expand_dims(amplitudes, axis=-1)
intensities = np.expand_dims(intensities, axis=-1)
tof_depths = np.expand_dims(tof_depths, axis=-1)
phase_offsets = (np.array(phase_offsets, dtype=np.float32) * np.pi / 180).reshape([1, 1, 1, -1])
delta_phi = (4 * np.pi * frequency) / constants['lightspeed'] * tof_depths
delta_phi[delta_phi < 0] += 2 * np.pi
return intensities + amplitudes * np.cos(delta_phi + phase_offsets)
def amplitude_and_intensity_from_correlation(corr):
""" Computes amplitude and intensities for correlations measured at [0, 90, 180, 270] degrees.
Args:
corr: leading dimension is `4`.
Returns:
amp: shape of corr, except for first dimension.
int: shape of corr, except for first dimension.
"""
amp = 0.5 * np.sqrt((corr[0] - corr[2])**2 + (corr[1] - corr[3])**2)
int = np.sum(corr, axis=0) / 4
return amp, int
def amplitude_and_intensity_from_correlationv2(correlations, phase_offsets):
"""
Args:
correlations: shape `[B, H, W, P]`
Returns:
amplitudes: shape `[B, H, W]`
intensities: shape `[B, H, W]`
"""
phase_offsets = np.reshape(phase_offsets, [1, 1, 1, -1])
I = np.sum(np.sin(phase_offsets) * correlations, axis=3)
Q = np.sum(np.cos(phase_offsets) * correlations, axis=3)
amplitudes = 0.5 * np.sqrt(I**2 + Q**2)
intensities = np.sum(correlations, axis=3) / len(phase_offsets)
return amplitudes, intensities
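# ---------------------------------------------------------------------------
# Hedged usage sketch (added example, not part of the original module).
# It shows how `reconstruct_correlations` and `correlation2depth` round-trip:
# four-phase correlations synthesized for a known ToF depth give that depth
# back. The frequency, depth and image size below are illustrative
# assumptions only; the function is defined here but never called.
def _demo_correlation_roundtrip():
    frequency = constants['frequencies'][1]   # 0.05 GHz (= 50 MHz)
    tof_depths = np.full([1, 2, 2], 1.0)      # 1 m, well below the wrap-around range c / (2 f)
    amplitudes = np.ones([1, 2, 2])
    intensities = np.full([1, 2, 2], 2.0)     # keeps all four correlations positive
    corr = reconstruct_correlations(amplitudes, intensities, tof_depths, frequency)
    depth = correlation2depth(corr, frequency)  # shape [1, 2, 2, 1]
    assert np.allclose(depth[..., 0], tof_depths, atol=1e-6)
    return depth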
if __name__ == '__main__':
import imageio
from mpl_toolkits import mplot3d
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--f')
args = parser.parse_args()
if not args.f.endswith('hdr'):
print(args.f)
raise ValueError('invalid file format')
im = imageio.imread(args.f, format='HDR-FI')
i = 1
stride = 5
fig = plt.figure(figsize=plt.figaspect(0.5))
ax = fig.add_subplot(1, 2, 1, projection='3d')
points = depth_on_pixels(im[:, :, i])
points = np.reshape(points[::stride, ::stride], [-1, 3])
ax.scatter(points[:, 0], points[:, 2], -points[:, 1], c=points[:, 2])
ax.set_title('camera coordinates')
ax = fig.add_subplot(1, 2, 2, projection='3d')
depth = im[:, :, i].reshape([1, 600, 600, 1])
points = compute_points_from_depth(depth)[0]
points_subset = np.reshape(points[::stride, ::stride], [-1, 3])
ax.scatter(points_subset[:, 0], points_subset[:, 2], -points_subset[:, 1], c=points_subset[:, 2])
ax.set_title('global coordinates')
plt.show()
|
{"hexsha": "a33ef6817df7facc4b4fbc0f1896881e1ba700bd", "size": 7371, "ext": "py", "lang": "Python", "max_stars_repo_path": "code_dl/data_ops/geom_ops_numpy.py", "max_stars_repo_name": "schellmi42/RADU", "max_stars_repo_head_hexsha": "60b86743bb1e6896d1ae7729a19bb263256fd28f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code_dl/data_ops/geom_ops_numpy.py", "max_issues_repo_name": "schellmi42/RADU", "max_issues_repo_head_hexsha": "60b86743bb1e6896d1ae7729a19bb263256fd28f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code_dl/data_ops/geom_ops_numpy.py", "max_forks_repo_name": "schellmi42/RADU", "max_forks_repo_head_hexsha": "60b86743bb1e6896d1ae7729a19bb263256fd28f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.0538116592, "max_line_length": 130, "alphanum_fraction": 0.6251526252, "include": true, "reason": "import numpy", "num_tokens": 2381}
|
(** * Wand: The Magic Wand Operator *)
Set Implicit Arguments.
From SLF Require Import LibSepReference.
From SLF Require Repr.
Close Scope trm_scope.
Implicit Types h : heap.
Implicit Types P : Prop.
Implicit Types H : hprop.
Implicit Types Q : val->hprop.
(* ################################################################# *)
(** * First Pass *)
(** This chapter introduces an additional Separation Logic operator,
called the "magic wand", and written [H1 \-* H2].
This operator has multiple uses:
- it helps reformulate the consequence-frame rule in an improved manner,
through a rule called the "ramified frame rule",
- it helps state the weakest preconditions of a number of
language constructs in a concise manner,
- it can be useful to state specifications for certain data structures.
This chapter is organized as follows:
- definition and properties of the magic wand operator,
- generalization of the magic wand to postconditions,
- statement and benefits of the ramified frame rule,
- statement of the ramified frame rule in weakest-precondition style,
- generalized definition of [wpgen] that recurses in local functions.
The additional and bonus sections include further discussion, covering:
- presentation of alternative, equivalent definitions of the magic wand,
- statement and proofs of additional properties of the magic wand,
- a revised definition of [mkstruct] using the magic wand.
- "Texan triples", which express function specifications using the
magic wand instead of using triples,
- two other operators, disjunction and non-separating conjunction,
so as to complete the presentation of all Separation Logic operators. *)
(* ================================================================= *)
(** ** Intuition for the Magic Wand *)
(** The magic wand operation [H1 \-* H2], to be read "H1 wand H2",
defines a heap predicate such that, if we extend it with [H1],
we obtain [H2]. Formally, the following entailment holds:
H1 \* (H1 \-* H2) ==> H2.
Intuitively, if one can think of the star [H1 \* H2] as the addition
[H1 + H2], then one can think of [H1 \-* H2] as the subtraction
[-H1 + H2]. The entailment stated above essentially captures the idea
that [(-H1 + H2) + H1] simplifies to [H2].
Note, however, that the operation [H1 \-* H2] only makes sense if [H1]
describes a piece of heap that "can" be subtracted from [H2]. Otherwise,
the predicate [H1 \-* H2] characterizes a heap that cannot possibly exist.
Informally speaking, [H1] must somehow be a subset of [H2] for the
subtraction [-H1 + H2] to make sense. *)
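(** As a concrete illustration (an added example, not part of the original
    chapter): take two distinct locations [l] and [l'], and let
    [H2 := (l ~~> 3) \* (l' ~~> 4)] and [H1 := (l ~~> 3)].
    A heap made of the single cell [l' ~~> 4] satisfies [H1 \-* H2]:
    extending such a heap with a disjoint heap satisfying [H1] yields a
    heap satisfying [H2], which is exactly what the entailment
    [H1 \* (H1 \-* H2) ==> H2] demands. *)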
(* ================================================================= *)
(** ** Definition of the Magic Wand *)
Module WandDef.
(** Technically, [H1 \-* H2] holds of a heap [h] if, for any heap
[h'] disjoint from [h] and that satisfies [H1], the union
of [h] and [h'] satisfies [H2].
The operator [hwand], which implements the notation [H1 \-* H2],
may thus be defined as follows. *)
Definition hwand' (H1 H2:hprop) : hprop :=
fun h => forall h', Fmap.disjoint h h' -> H1 h' -> H2 (h \u h').
(** The definition above is perfectly fine, however it is more practical
to use an alternative, equivalent definition of [hwand], expressed
in terms of previously introduced Separation Logic operators.
The alternative definition asserts that [H1 \-* H2] corresponds to
some heap predicate, called [H0], such that [H0] starred with [H1]
yields [H2]. In other words, [H0] is such that [(H1 \* H0) ==> H2].
In the definition of [hwand] shown below, observe how [H0] is
existentially quantified. *)
Definition hwand (H1 H2:hprop) : hprop :=
\exists H0, H0 \* \[ H1 \* H0 ==> H2 ].
Notation "H1 \-* H2" := (hwand H1 H2) (at level 43, right associativity).
(** As we establish further in this file, one can prove that [hwand]
and [hwand'] define the same operator.
The reason we prefer taking [hwand] as definition rather than [hwand']
is that it enables us to establish all the properties of the magic wand
by exploiting the tactic [xsimpl], conducting all the reasoning at the
level of [hprop] rather than having to work with explicit heaps of type
[heap]. *)
(* ================================================================= *)
(** ** Characteristic Property of the Magic Wand *)
(** The magic wand is not so easy to make sense of, at first. Reading
its introduction and elimination rules may help further appreciate
its meaning.
The operator [H1 \-* H2] satisfies the following equivalence.
Informally speaking, think of [H0 = -H1+H2] and [H1+H0 = H2]
being equivalent. *)
Lemma hwand_equiv : forall H0 H1 H2,
(H0 ==> H1 \-* H2) <-> (H1 \* H0 ==> H2).
Proof using.
unfold hwand. iff M.
{ xchange M. intros H N. xchange N. }
{ xsimpl H0. xchange M. }
Qed.
(** It turns out that the magic wand operator is uniquely defined by the
equivalence [(H0 ==> H1 \-* H2) <-> (H1 \* H0 ==> H2)].
In other words, as we establish further on, any operator that satisfies
the above equivalence for all arguments is provably equal to [hwand]. *)
(** The right-to-left direction of the equivalence is an introduction rule:
it tells what needs to be proved for constructing a magic wand [H1 \-* H2]
from a state [H0]. What needs to be proved to establish [H0 ==> (H1 \-* H2)]
is that [H0], when starred with [H1], yields [H2]. *)
Lemma himpl_hwand_r : forall H0 H1 H2,
(H1 \* H0) ==> H2 ->
H0 ==> (H1 \-* H2).
Proof using. introv M. applys hwand_equiv. applys M. Qed.
(** The left-to-right direction of the equivalence is an elimination rule:
it tells what can be deduced from an entailment [H0 ==> (H1 \-* H2)].
What can be deduced from this entailment is that if [H0] is starred
with [H1], then [H2] can be recovered. *)
Lemma himpl_hwand_r_inv : forall H0 H1 H2,
H0 ==> (H1 \-* H2) ->
(H1 \* H0) ==> H2.
Proof using. introv M. applys hwand_equiv. applys M. Qed.
(** This elimination rule can be equivalently reformulated in the following
form, which makes clearer that [H1 \-* H2], when starred with [H1],
yields [H2]. *)
Lemma hwand_cancel : forall H1 H2,
H1 \* (H1 \-* H2) ==> H2.
Proof using. intros. applys himpl_hwand_r_inv. applys himpl_refl. Qed.
Arguments hwand_cancel : clear implicits.
(** **** Exercise: 3 stars, standard, especially useful (hwand_inv)
Prove the following inversion lemma for [hwand]. This lemma
essentially captures the fact that [hwand] entails its alternative
definition [hwand']. *)
Lemma hwand_inv : forall h1 h2 H1 H2,
(H1 \-* H2) h2 ->
H1 h1 ->
Fmap.disjoint h1 h2 ->
H2 (h1 \u h2).
Proof using. (* FILL IN HERE *) Admitted.
(** [] *)
(* ================================================================= *)
(** ** Magic Wand for Postconditions *)
(** In what follows, we generalize the magic wand to operate on postconditions,
introducing a heap predicate of the form [Q1 \--* Q2], of type [hprop].
Note that the magic wand between two postconditions produces a heap
predicate, and not a postcondition.
The definition follows exactly the same pattern as [hwand]: it quantifies
some heap predicate [H0] such that [H0] starred with [Q1] yields [Q2]. *)
Definition qwand (Q1 Q2:val->hprop) : hprop :=
\exists H0, H0 \* \[ Q1 \*+ H0 ===> Q2 ].
Notation "Q1 \--* Q2" := (qwand Q1 Q2) (at level 43).
(** The operator [qwand] satisfies essentially the same properties as [hwand].
Let us begin with the associated equivalence rule, which captures both
the introduction and the elimination rule. *)
Lemma qwand_equiv : forall H Q1 Q2,
H ==> (Q1 \--* Q2) <-> (Q1 \*+ H) ===> Q2.
Proof using.
unfold qwand. iff M.
{ intros v. xchange M. intros H4 N. xchange N. }
{ xsimpl H. xchange M. }
Qed.
(** The cancellation rule follows. *)
Lemma qwand_cancel : forall Q1 Q2,
Q1 \*+ (Q1 \--* Q2) ===> Q2.
Proof using. intros. rewrite <- qwand_equiv. applys qimpl_refl. Qed.
(** An interesting property of [qwand] is the fact that we can specialize
[Q1 \--* Q2] to [(Q1 v) \-* (Q2 v)], for any value [v]. *)
Lemma qwand_specialize : forall (v:val) (Q1 Q2:val->hprop),
(Q1 \--* Q2) ==> (Q1 v \-* Q2 v).
Proof using.
intros. unfold qwand, hwand. xpull. intros H0 M.
xsimpl H0. xchange M.
Qed.
(* ================================================================= *)
(** ** Frame Expressed with [hwand]: the Ramified Frame Rule *)
(** Recall the consequence-frame rule, which is pervasively used
for example by the tactic [xapp] for reasoning about applications. *)
Parameter triple_conseq_frame : forall H2 H1 Q1 t H Q,
triple t H1 Q1 ->
H ==> H1 \* H2 ->
Q1 \*+ H2 ===> Q ->
triple t H Q.
(** This rule suffers from a practical issue, which we illustrate in
detail on a concrete example further on. For now, let us just
attempt to describe the issue at a high level.
In short, the problem stems from the fact that we need to instantiate
[H2] for applying the rule. Providing [H2] by hand is not practical, thus
we need to infer it. The value of [H2] can be computed as the subtraction
of [H] minus [H1]. The resulting value may then be exploited in the last
premise for constructing [Q1 \*+ H2]. This transfer of information via [H2]
from one subgoal to another can be obtained by introducing an "evar" (Coq
unification variable) for [H2]. However, this approach does not work
well in the cases where [H] contains existential quantifiers. Indeed,
such existential quantifiers are typically first extracted out of the
entailment [H ==> H1 \* H2] by the tactic [xsimpl]. However, these
existentially quantified variables are not in the scope of [H2], hence
the instantiation of the evar associated with [H2] typically fails. *)
(** The "ramified frame rule" exploits the magic wand operator to circumvent
the problem, by merging the two premises [H ==> H1 \* H2] and
[Q1 \*+ H2 ===> Q] into a single premise that no longer mentions [H2].
This replacement premise is [H ==> H1 \* (Q1 \--* Q)]. To understand where
it comes from, observe first that the second premise [Q1 \*+ H2 ===> Q]
is equivalent to [H2 ==> (Q1 \--* Q)]. By replacing [H2] with [Q1 \--* Q]
inside the first premise [H ==> H1 \* H2], we obtain the new premise
[H ==> H1 \* (Q1 \--* Q)].
This merging of the two entailments leads us to the statement of the
"ramified frame rule" shown below. *)
Lemma triple_ramified_frame : forall H1 Q1 t H Q,
triple t H1 Q1 ->
H ==> H1 \* (Q1 \--* Q) ->
triple t H Q.
Proof using.
introv M W. applys triple_conseq_frame (Q1 \--* Q) M.
{ applys W. } { applys qwand_cancel. }
Qed.
(** Reciprocally, we can prove that the ramified frame rule entails
the consequence-frame rule. Hence, the ramified frame rule has
the same expressive power as the consequence-frame rule. *)
Lemma triple_conseq_frame_of_ramified_frame : forall H2 H1 Q1 t H Q,
triple t H1 Q1 ->
H ==> H1 \* H2 ->
Q1 \*+ H2 ===> Q ->
triple t H Q.
Proof using.
introv M WH WQ. applys triple_ramified_frame M.
xchange WH. xsimpl. rewrite qwand_equiv. applys WQ.
Qed.
(* ================================================================= *)
(** ** Ramified Frame Rule in Weakest-Precondition Style *)
(** The ramified frame rule, which we have just stated for triples,
features a counterpart expressed in weakest-precondition style ([wp]).
In what follows, we present the "wp ramified rule", named [wp_ramified].
This rule admits a concise statement and subsumes all other
structural rules of Separation Logic. Its statement is as follows.
(wp t Q1) \* (Q1 \--* Q2) ==> (wp t Q2).
*)
(** To see where this statement comes from, recall from the chapter
[WPsem] the rule named [wp_conseq_frame], which combines
the consequence rule and the frame rule for [wp]. *)
Parameter wp_conseq_frame : forall t H Q1 Q2,
Q1 \*+ H ===> Q2 ->
(wp t Q1) \* H ==> (wp t Q2).
(** Let us reformulate this rule using a magic wand. The premise
[Q1 \*+ H ===> Q2] can be rewritten as [H ==> (Q1 \--* Q2)].
By replacing [H] with [Q1 \--* Q2] in the conclusion of
[wp_conseq_frame], we obtain the ramified rule for [wp]. *)
Lemma wp_ramified : forall t Q1 Q2,
(wp t Q1) \* (Q1 \--* Q2) ==> (wp t Q2).
Proof using. intros. applys wp_conseq_frame. applys qwand_cancel. Qed.
(** **** Exercise: 3 stars, standard, especially useful (wp_conseq_frame_of_wp_ramified)
Prove that [wp_conseq_frame] is derivable from [wp_ramified].
To that end, prove the statement of [wp_conseq_frame] by using
only [wp_ramified], the characteristic property of the magic
wand [qwand_equiv], and properties of the entailment relation. *)
Lemma wp_conseq_frame_of_wp_ramified : forall t H Q1 Q2,
Q1 \*+ H ===> Q2 ->
(wp t Q1) \* H ==> (wp t Q2).
Proof using. (* FILL IN HERE *) Admitted.
(** [] *)
(** The following reformulation of [wp_ramified] can be more practical
to exploit in practice, because it applies to any goal of the form
[H ==> wp t Q]. *)
Lemma wp_ramified_trans : forall t H Q1 Q2,
H ==> (wp t Q1) \* (Q1 \--* Q2) ->
H ==> (wp t Q2).
Proof using. introv M. xchange M. applys wp_ramified. Qed.
End WandDef.
(* ================================================================= *)
(** ** Automation with [xsimpl] for [hwand] Expressions *)
(** One can extend the tactic [xsimpl] to recognize the magic wand,
and automatically perform a number of obvious simplifications.
This extension is implemented in the file [LibSepSimpl], which
exports the tactic [xsimpl] illustrated in this section. *)
Module XsimplDemo.
(** [xsimpl] is able to spot a magic wand that cancels out.
For example, if an iterated separating conjunction includes
both [H2 \-* H3] and [H2], then these two heap predicates can
be merged, leaving just [H3]. *)
Lemma xsimpl_demo_hwand_cancel : forall H1 H2 H3 H4 H5,
H1 \* (H2 \-* H3) \* H4 \* H2 ==> H5.
Proof using. intros. xsimpl. Abort.
(** [xsimpl] is able to simplify uncurried magic wands.
For example, if an iterated separating conjunction includes
[(H1 \* H2 \* H3) \-* H4] and [H2], the two predicates can be
merged, leaving [(H1 \* H3) \-* H4]. *)
Lemma xsimpl_demo_hwand_cancel_partial : forall H1 H2 H3 H4 H5 H6,
((H1 \* H2 \* H3) \-* H4) \* H5 \* H2 ==> H6.
Proof using. intros. xsimpl. Abort.
(** [xsimpl] automatically applies the introduction rule [himpl_hwand_r]
when the right-hand-side, after prior simplification, reduces to
just a magic wand. In the example below, [H1] is first cancelled out
from both sides, then [H3] is moved from the RHS to the LHS. *)
Lemma xsimpl_demo_himpl_hwand_r : forall H1 H2 H3 H4 H5,
H1 \* H2 ==> H1 \* (H3 \-* (H4 \* H5)).
Proof using. intros. xsimpl. Abort.
(** [xsimpl] can iterate a number of simplifications involving
different magic wands. *)
Lemma xsimpl_demo_hwand_iter : forall H1 H2 H3 H4 H5,
H1 \* H2 \* ((H1 \* H3) \-* (H4 \-* H5)) \* H4 ==> ((H2 \-* H3) \-* H5).
Proof using. intros. xsimpl. Qed.
(** [xsimpl] is also able to deal with the magic wand for postconditions.
In particular, it is able to merge [Q1 \--* Q2] with [Q1 v],
leaving [Q2 v]. *)
Lemma xsimpl_demo_qwand_cancel : forall v (Q1 Q2:val->hprop) H1 H2,
(Q1 \--* Q2) \* H1 \* (Q1 v) ==> H2.
Proof using. intros. xsimpl. Abort.
End XsimplDemo.
(* ================================================================= *)
(** ** Evaluation of [wpgen] Recursively in Locally Defined Functions *)
Module WPgenRec.
Implicit Types vx vf : val.
(** Recall from chapter [WPgen] the original definition of [wpgen],
that is, before the numerous refactorings. It admitted the following shape.
Fixpoint wpgen (t:trm) (Q:val->hprop) : hprop :=
match t with
| trm_val v => Q v
| trm_fun x t1 => Q (val_fun x t1)
| trm_fix f x t1 => Q (val_fix f x t1)
...
end.
This definition of [wpgen t Q] does not recurse inside the body
of functions that occur in the argument [t]. Instead, it treats
such locally defined functions just like values. *)
(** Not processing local functions does not limit expressiveness, because
it is always possible for the user to manually invoke [wpgen]
for each locally defined function, during the verification proof.
Nevertheless, it is much more satisfying and much more practical
to set up [wpgen] so that it recursively computes the weakest
precondition of all the local functions that it encounters during
its evaluation.
In what follows, we show how such a version of [wpgen] can be set up.
We begin with the case of non-recursive functions of the form
[trm_fun x t1], then generalize the definition to the slightly more complex
case of recursive functions of the form [trm_fix f x t1]. In both cases,
the function [wpgen] will get recursively invoked on the body [t1]
of the function, in a context extended with the appropriate bindings.
The new definition of [wpgen] will take the shape shown below, for
well-suited definitions of [wpgen_fun] and [wpgen_fix] yet to be
introduced. In the code snippet below, [vx] denotes a value to
which the function may be applied, and [vf] denotes the value
associated with the function itself, this value being used in particular
in the case of recursive calls.
Fixpoint wpgen (E:ctx) (t:trm) : formula :=
mkstruct match t with
| trm_val v => wpgen_val v
| trm_fun x t1 => wpgen_fun (fun vx => wpgen ((x,vx)::E) t1)
| trm_fix f x t1 => wpgen_fix (fun vf vx => wpgen ((f,vf)::(x,vx)::E) t1)
...
end.
*)
(* ----------------------------------------------------------------- *)
(** *** 1. Treatment of Non-Recursive Functions *)
(** For simplicity, let us assume for now the substitution context [E] to be
empty, and let us ignore the presence of the predicate [mkstruct].
Our task is to provide a definition for [wpgen (trm_fun x t1) Q],
expressed in terms of [wpgen t1].
Let [vf] denote the function [val_fun x t1], which the term [trm_fun x t1]
evaluates to. The heap predicate [wpgen (trm_fun x t1) Q] should
assert that the postcondition [Q] holds of the result value [vf],
i.e., that [Q vf] should hold.
Rather than specifying that [vf] is equal to [val_fun x t1] as we were
doing previously, we would like to specify that [vf] denotes a function
whose body admits [wpgen t1] as weakest precondition. This information
no longer exposes the syntax of the term [t1], and is nevertheless
sufficient for the user to reason about the behavior of the function [vf].
To that end, we define the heap predicate [wpgen (trm_fun x t1) Q] to
be of the form [\forall vf, \[P vf] \-* Q vf] for a carefully crafted
predicate [P] that describes the behavior of the function by means of its
weakest precondition. [P] is defined further on.
The universal quantification on [vf] is intended to hide away from the
user the fact that [vf] actually denotes [val_fun x t1]. It would be
correct to replace [\forall vf, ...] with [let vf := val_fun x t1 in ...],
yet we are purposely trying to abstract away from the syntax of [t1], hence
the universal quantification of [vf].
In the heap predicate [\forall vf, \[P vf] \-* Q vf], the left-hand side
[\[P vf]] of the magic wand is intended to provide the user with the
knowledge of the weakest precondition of the body [t1] of the function,
in such a way that the user is able to derive the intended specification
of the local function [vf].
Concretely, the proposition [P vf] should enable the user to establish
properties of applications of the form [trm_app vf vx], where [vf] denotes
the function and [vx] denotes an argument to which the function is applied.
To figure out the details of the statement of [P], it is useful to recall
from chapter [WPgen] the statement of the lemma
[triple_app_fun_from_wpgen], which we used for reasoning about top-level
functions. Its statement appears below---variables were renamed to better
match the current context. *)
Parameter triple_app_fun_from_wpgen : forall vf vx x t1 H' Q',
vf = val_fun x t1 ->
H' ==> wpgen ((x,vx)::nil) t1 Q' ->
triple (trm_app vf vx) H' Q'.
(** The lemma above enables establishing a triple for an application
[trm_app vf vx] with precondition [H'] and postcondition [Q'],
from the premise [H' ==> wpgen ((x,vx)::nil) t1 Q'].
It therefore makes sense, in our definition of the predicate
[wpgen (trm_fun x t1) Q], which we said would take the form
[\forall vf, \[P vf] \-* Q vf], to define [P vf] as:
forall vx H' Q', (H' ==> wpgen ((x,vx)::nil) t1 Q') ->
triple (trm_app vf vx) H' Q'
This proposition can be slightly simplified, by using [wp] instead
of [triple], allowing us to eliminate [H']. We thus define [P vf] as:
forall vx Q', wpgen ((x,vx)::nil) t1 Q' ==> wp (trm_app vf vx) Q'
*)
(** Overall, the definition of [wpgen E t] is as follows. Note that
the occurrence of [nil] is replaced with [E] to account for the
case of a nonempty context.
Fixpoint wpgen (E:ctx) (t:trm) : formula :=
mkstruct match t with
...
| trm_fun x t1 => fun Q =>
let P vf :=
(forall vx Q', wpgen ((x,vx)::E) t1 Q' ==> wp (trm_app vf vx) Q') in
\forall vf, \[P vf] \-* Q vf
...
end.
*)
(** The actual definition of [wpgen] exploits an intermediate definition
called [wpgen_fun], as shown below:
Fixpoint wpgen (E:ctx) (t:trm) : formula :=
mkstruct match t with
...
| trm_fun x t1 => wpgen_fun (fun vx => wpgen ((x,vx)::E) t1)
...
end.
where [wpgen_fun] is defined as follows:
*)
Definition wpgen_fun (Fof:val->formula) : formula := fun Q =>
\forall vf, \[forall vx Q', Fof vx Q' ==> wp (trm_app vf vx) Q'] \-* Q vf.
(** The soundness lemma for this construct follows from the wp-style
reasoning rule for applications, called [wp_app_fun], introduced in
chapter [WPsem]. It is not necessary at this stage to follow the
details of the proof. (The proof involves lemmas about [\forall]
and about [\-*] that are stated and proved only further on in this
chapter.) *)
Lemma wpgen_fun_sound : forall x t1 Fof,
(forall vx, formula_sound (subst x vx t1) (Fof vx)) ->
formula_sound (trm_fun x t1) (wpgen_fun Fof).
Proof using.
introv M. intros Q. unfolds wpgen_fun. applys himpl_hforall_l (val_fun x t1).
xchange hwand_hpure_l.
{ intros. applys himpl_trans_r. { applys* wp_app_fun. } { applys* M. } }
{ applys wp_fun. }
Qed.
(** When we carry out the proof of soundness for the new version of [wpgen]
that features [wpgen_fun], we obtain the following new proof obligation.
(To see it, play the proof of lemma [wpgen_sound],
in file [LibSepDirect.v].) *)
Lemma wpgen_fun_proof_obligation : forall E x t1,
(forall E, formula_sound (isubst E t1) (wpgen E t1)) ->
formula_sound (trm_fun x (isubst (rem x E) t1))
(wpgen_fun (fun v => wpgen ((x,v)::E) t1)).
(** The proof exploits the lemma [wpgen_fun_sound] that we just established,
as well as a substitution lemma, the same as the one used to justify the
soundness of the [wpgen] of a let-binding. *)
Proof using.
introv IH. applys wpgen_fun_sound.
{ intros vx. rewrite <- isubst_rem. applys IH. }
Qed.
(* ----------------------------------------------------------------- *)
(** *** 2. Treatment of Recursive Functions *)
(** The formula produced by [wpgen] for a recursive function [trm_fix f x t1]
is almost the same as for a non-recursive function, the main difference
being that we need to add a binding in the context to associate the name [f]
of the recursive function with the value [vf] denoting that function.
Here again, the heap predicate [wpgen (trm_fix f x t1) Q] will be of the
form [\forall vf, \[P vf] \-* Q vf].
To figure out the details of the statement of [P], recall from [WPgen]
the statement of [triple_app_fix_from_wpgen], which is useful for reasoning
about top-level recursive functions. *)
Parameter triple_app_fix_from_wpgen : forall vf vx f x t1 H' Q',
vf = val_fix f x t1 ->
H' ==> wpgen ((f,vf)::(x,vx)::nil) t1 Q' ->
triple (trm_app vf vx) H' Q'.
(** It therefore makes sense to define [P vf] as:
forall vx H' Q', (H' ==> wpgen ((f,vf)::(x,vx)::nil) t1 Q') ->
triple (trm_app vf vx) H' Q'
which can be rewritten as:
forall vx Q', wpgen ((f,vf)::(x,vx)::nil) t1 Q' ==> wp (trm_app vf vx) Q'
We thus consider:
Fixpoint wpgen (E:ctx) (t:trm) : formula :=
mkstruct match t with
| ..
| trm_fix f x t1 => wpgen_fix (fun vf v => wpgen ((f,vf)::(x,v)::E) t1)
| ..
end
with the following definition for [wpgen_fix]. *)
Definition wpgen_fix (Fof:val->val->formula) : formula := fun Q =>
\forall vf, \[forall vx Q', Fof vf vx Q' ==> wp (trm_app vf vx) Q'] \-* Q vf.
(** The soundness lemma for [wpgen_fix] is very similar to that of [wpgen_fun].
Again, it is not necessary to follow the details of this proof. *)
Lemma wpgen_fix_sound : forall f x t1 Fof,
(forall vf vx, formula_sound (subst x vx (subst f vf t1)) (Fof vf vx)) ->
formula_sound (trm_fix f x t1) (wpgen_fix Fof).
Proof using.
introv M. intros Q. unfolds wpgen_fix.
applys himpl_hforall_l (val_fix f x t1). xchange hwand_hpure_l.
{ intros. applys himpl_trans_r. { applys* wp_app_fix. } { applys* M. } }
{ applys wp_fix. }
Qed.
(** The proof of soundness of [wpgen] involves the following proof obligation
to handle the case of recursive functions. (To see it, play the proof of
lemma [wpgen_sound], in file [LibSepDirect.v].) *)
Lemma wpgen_fix_proof_obligation : forall E f x t1,
(forall E, formula_sound (isubst E t1) (wpgen E t1)) ->
formula_sound (trm_fix f x (isubst (rem x (rem f E)) t1))
(wpgen_fix (fun vf vx => wpgen ((f,vf)::(x,vx)::E) t1)).
Proof using.
introv IH. applys wpgen_fix_sound.
{ intros vf vx. rewrite <- isubst_rem_2. applys IH. }
Qed.
(** Here again, we introduce a piece of notation for [wpgen_fix]. We let
[Fix' f x := F] stand for [wpgen_fix (fun f x => F)]. *)
Notation "'Fix'' f x ':=' F1" :=
((wpgen_fix (fun f x => F1)))
(at level 69, f ident, x ident, right associativity,
format "'[v' '[' 'Fix'' f x ':=' F1 ']' ']'").
(** Remark: similarly to [xfun], one could devise a [xfix] tactic.
We omit the details. *)
(* ----------------------------------------------------------------- *)
(** *** 3. Final Definition of [wpgen], with Processing of Local Functions *)
(** The final definition of [wpgen] appears below. *)
Fixpoint wpgen (E:ctx) (t:trm) : formula :=
mkstruct match t with
| trm_val v => wpgen_val v
| trm_var x => wpgen_var E x
| trm_fun x t1 => wpgen_fun (fun v => wpgen ((x,v)::E) t1)
| trm_fix f x t1 => wpgen_fix (fun vf v => wpgen ((f,vf)::(x,v)::E) t1)
| trm_if t0 t1 t2 => wpgen_if t0 (wpgen E t1) (wpgen E t2)
| trm_seq t1 t2 => wpgen_seq (wpgen E t1) (wpgen E t2)
| trm_let x t1 t2 => wpgen_let (wpgen E t1) (fun v => wpgen ((x,v)::E) t2)
| trm_app t1 t2 => wp (isubst E t)
end.
(** The full soundness proof appears in file [LibSepDirect], lemma
[wpgen_sound]. *)
(* ----------------------------------------------------------------- *)
(** *** 4. Notation and Tactic to Handle [wpgen_fun] and [wpgen_fix] *)
(** Like for other auxiliary functions associated with [wpgen], we introduce
a custom notation for [wpgen_fun]. Here, we let [Fun' x := F] stand for
[wpgen_fun (fun x => F)]. *)
Notation "'Fun'' x ':=' F1" :=
((wpgen_fun (fun x => F1)))
(at level 69, x ident, right associativity,
format "'[v' '[' 'Fun'' x ':=' F1 ']' ']'").
(** Also, like for other language constructs, we introduce a custom tactic
for [wpgen_fun]. It is called [xfun], and helps the user to process a
local function definition in the course of a verification script.
The tactic [xfun] can be invoked either with or without providing a
specification for the local function. *)
(** First, we describe the tactic [xfun S], where [S] describes the
specification of the local function. A typical call is of the form
[xfun (fun (f:val) => forall ..., triple (f ..) .. ..)].
The tactic [xfun S] generates two subgoals. The first one requires the
user to establish the specification [S] for the function whose body admits
the weakest precondition [Fof]. The second one requires the user to prove
that the rest of the program is correct, in a context where the local
function can be assumed to satisfy the specification [S].
The definition of [xfun S] appears next. It is not required to understand
the details. An example use case appears further on. *)
Lemma xfun_spec_lemma : forall (S:val->Prop) H Q Fof,
(forall vf,
(forall vx H' Q', (H' ==> Fof vx Q') -> triple (trm_app vf vx) H' Q') ->
S vf) ->
(forall vf, S vf -> (H ==> Q vf)) ->
H ==> wpgen_fun Fof Q.
Proof using.
introv M1 M2. unfold wpgen_fun. xsimpl. intros vf N.
applys M2. applys M1. introv K. rewrite <- wp_equiv. xchange K. applys N.
Qed.
Tactic Notation "xfun" constr(S) :=
xseq_xlet_if_needed; xstruct_if_needed; applys xfun_spec_lemma S.
(** Second, we describe the tactic [xfun] without argument. It applies to a goal
of the form [H ==> wpgen_fun Fof Q]. The tactic [xfun] simply makes
available a hypothesis about the local function. The user may subsequently
exploit this hypothesis for reasoning about a call to that function, just
as if the code of the function were inlined at its call site. The use of
[xfun] without argument is usually relevant only for local functions that
are invoked exactly once (as is often the case for functions passed to
higher-order iterators). Again, an example use case appears further on. *)
Lemma xfun_nospec_lemma : forall H Q Fof,
(forall vf,
(forall vx H' Q', (H' ==> Fof vx Q') -> triple (trm_app vf vx) H' Q') ->
(H ==> Q vf)) ->
H ==> wpgen_fun Fof Q.
Proof using.
introv M. unfold wpgen_fun. xsimpl. intros vf N. applys M.
introv K. rewrite <- wp_equiv. xchange K. applys N.
Qed.
Tactic Notation "xfun" :=
xseq_xlet_if_needed; xstruct_if_needed; applys xfun_nospec_lemma.
(** A generalization of [xfun] that handles recursive functions may be defined
following exactly the same pattern. *)
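(** For the sake of illustration, here is a sketch of what the corresponding
lemma for recursive functions could look like, stated by analogy with
[xfun_nospec_lemma]. This statement is only a sketch, not part of the
course files, and its proof, which would mimic that of [xfun_nospec_lemma],
is omitted.
Lemma xfix_nospec_lemma : forall H Q Fof,
(forall vf,
(forall vx H' Q', (H' ==> Fof vf vx Q') -> triple (trm_app vf vx) H' Q') ->
(H ==> Q vf)) ->
H ==> wpgen_fix Fof Q.
A tactic, say [xfix], would then apply this lemma in the same way that
[xfun] applies [xfun_nospec_lemma]. *)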
(** This completes our presentation of a version of [wpgen] that recursively
processes local function definitions, both non-recursive and recursive.
A practical example is presented next. *)
(* ----------------------------------------------------------------- *)
(** *** 5. Example Computation of [wpgen] in Presence of a Local Function *)
(** In the example that follows, we assume all the setup from [WPgen] to
be reproduced with the definition of [wpgen] that leverages [wpgen_fun]
and [wpgen_fix]. This setup is formalized in full in the file
[LibSepDirect]. *)
Import DemoPrograms.
(** Consider the following example program, which involves a local function
definition, then two successive calls to that local function. *)
Definition myfun : val :=
<{ fun 'p =>
let 'f = (fun_ 'u => incr 'p) in
'f ();
'f () }>.
(** We first illustrate a call to [xfun] with an explicit specification.
Here, the function [f] is specified as incrementing the reference [p].
Observe that the body of the function [f] is verified only
once. The reasoning about the two calls to the function [f] that appear
in the code is carried out with respect to the specification that we provide
as argument to [xfun] at the moment of the definition of [f]. *)
Lemma triple_myfun : forall (p:loc) (n:int),
triple (trm_app myfun p)
(p ~~> n)
(fun _ => p ~~> (n+2)).
Proof using.
xwp.
xfun (fun (f:val) => forall (m:int),
triple (f())
(p ~~> m)
(fun _ => p ~~> (m+1))); intros f Hf.
{ intros. applys Hf. clear Hf. xapp. (* exploits [triple_incr] *) xsimpl. }
xapp. (* exploits [Hf]. *)
xapp. (* exploits [Hf]. *)
replace (n+1+1) with (n+2); [|math]. xsimpl.
Qed.
(** We next illustrate a call to [xfun] without argument. The "generic
specification" that comes as hypothesis about the local function
is a proposition that describes the behavior of the function in terms
of the weakest-precondition of its body.
When reasoning about a call to the function, one can invoke this
generic specification. The effect is equivalent to that of inlining
the code of the function at its call site.
Here, there are two calls to the function. We will thus have to exploit
twice the generic specification of [f], which corresponds to its body
[incr p]. We will therefore have to reason twice about the increment
function. *)
Lemma triple_myfun' : forall (p:loc) (n:int),
triple (trm_app myfun p)
(p ~~> n)
(fun _ => p ~~> (n+2)).
Proof using.
xwp.
xfun; intros f Hf.
xapp. (* exploits [Hf] *)
xapp. (* exploits [triple_incr] *)
xapp. (* exploits [Hf] *)
xapp. (* exploits [triple_incr] *)
replace (n+1+1) with (n+2); [|math]. xsimpl.
Qed.
End WPgenRec.
(* ################################################################# *)
(** * More Details *)
(* ================================================================= *)
(** ** Benefits of the Ramified Rule over the Consequence-Frame Rule *)
(** Earlier on, we sketched an argument claiming that the consequence-
frame rule is not very well suited for carrying out proofs in
practice, due to issues with working with evars for instantiating
the heap predicate [H2] in the rule. Let us come back to this point
and describe the issue in depth on a concrete example, and show
how the ramified frame rule smoothly handles that same example. *)
Module WandBenefits.
Import WandDef.
(** Recall the consequence-frame rule. *)
Parameter triple_conseq_frame : forall H2 H1 Q1 t H Q,
triple t H1 Q1 ->
H ==> H1 \* H2 ->
Q1 \*+ H2 ===> Q ->
triple t H Q.
(** One practical caveat with this rule is that we must resolve [H2],
which corresponds to the difference between [H] and [H1].
In practice, providing [H2] explicitly is extremely tedious.
The alternative is to leave [H2] as an evar, and count on the
fact that the tactic [xsimpl], when applied to [H ==> H1 \* H2],
will correctly instantiate [H2].
This approach works in simple cases, but fails in particular in
the case where [H] contains an existential quantifier.
For a concrete example, consider the specification of the
function [ref], which allocates a reference. *)
Parameter triple_ref : forall (v:val),
triple (val_ref v)
\[]
(funloc p => p ~~> v).
(** Assume that we wish to derive the following triple, which extends
both the precondition and the postcondition of the above specification
[triple_ref] with the heap predicate [\exists l' v', l' ~~> v'].
This predicate describes the existence of some, totally unspecified,
reference cell. It is a bit artificial but illustrates well the issue. *)
Lemma triple_ref_extended : forall (v:val),
triple (val_ref v)
(\exists l' v', l' ~~> v')
(funloc p => p ~~> v \* \exists l' v', l' ~~> v').
(** Let us prove that this specification is derivable from the
original one, namely [triple_ref]. *)
Proof using.
intros. applys triple_conseq_frame.
(* observe the evar [?H2] that appears in the second and third subgoals *)
{ applys triple_ref. }
{ (* here, [?H2] should in theory be instantiated with the RHS,
but the strategy of [xsimpl] is to first extract the quantifiers
from the LHS. After that, the instantiation of [?H2] fails,
because the LHS contains variables that are not defined in
the scope of the evar [?H2] at the time it was introduced. *)
xsimpl.
Abort.
(** Now, let us apply the ramified frame rule to carry out the same
proof, and observe how the problem does not show up. *)
Lemma triple_ref_extended' : forall (v:val),
triple (val_ref v)
(\exists l' v', l' ~~> v')
(funloc p => p ~~> v \* \exists l' v', l' ~~> v').
Proof using.
intros. applys triple_ramified_frame.
{ applys triple_ref. }
{ xsimpl.
(* Here again, the strategy of [xsimpl] works on the LHS, and pulls out
the existentially quantified variables. But this time it works,
because the remainder of the reasoning takes place in the
same subgoal, in the scope of the extended Coq context. *)
intros l' v'. rewrite qwand_equiv. xsimpl. auto. }
Qed.
End WandBenefits.
(* ================================================================= *)
(** ** Properties of [hwand] *)
Module WandProperties.
Import WandDef.
(** We next present the most important properties of [H1 \-* H2].
In what follows, the tactic [xsimpl] is available, but in a form
that does not provide support for the magic wand.
The actual tactic would trivially solve many of these lemmas,
but using it would be cheating because the implementation of
[xsimpl] relies on several of these lemmas. *)
(* ----------------------------------------------------------------- *)
(** *** Structural Properties of [hwand] *)
(** The operator [H1 \-* H2] is contravariant in [H1] and covariant
in [H2], similarly to the implication operator [->]. *)
Lemma hwand_himpl : forall H1 H1' H2 H2',
H1' ==> H1 ->
H2 ==> H2' ->
(H1 \-* H2) ==> (H1' \-* H2').
Proof using.
introv M1 M2. applys himpl_hwand_r. xchange M1.
xchange (hwand_cancel H1 H2). applys M2.
Qed.
(** Two predicates [H1 \-* H2] and [H2 \-* H3] may simplify
to [H1 \-* H3]. This simplification is reminiscent of the
arithmetic operation [(-H1 + H2) + (-H2 + H3) = (-H1 + H3)]. *)
Lemma hwand_trans_elim : forall H1 H2 H3,
(H1 \-* H2) \* (H2 \-* H3) ==> (H1 \-* H3).
Proof using.
intros. applys himpl_hwand_r. xchange (hwand_cancel H1 H2).
Qed.
(** The predicate [H \-* H] holds of the empty heap.
Intuitively, one can rewrite [0] as [-H + H]. *)
Lemma himpl_hempty_hwand_same : forall H,
\[] ==> (H \-* H).
Proof using. intros. apply himpl_hwand_r. xsimpl. Qed.
(* ----------------------------------------------------------------- *)
(** *** Tempting Yet False Properties for [hwand] *)
(** The reciprocal entailment [(H \-* H) ==> \[]] is false, however.
For a counterexample, instantiate [H] as [fun h => True], or,
equivalently, [\exists H', H']. A singleton heap does satisfy
[H \-* H], although it clearly does not satisfy the empty
predicate [\[]]. *)
Lemma himpl_hwand_same_hempty_counterexample : forall p v,
let H := (\exists H', H') in
(p ~~> v) ==> (H \-* H).
Proof using. intros. subst H. rewrite hwand_equiv. xsimpl. Qed.
(** In technical terms, [H \-* H] characterizes the empty heap only
in the case where [H] is "precise", that is, when it describes
a heap of a specific shape. In the above counterexample, [H] is
clearly not precise, because it is satisfied by heaps of any shape.
The notion of "preciseness" can be defined formally, yet it is
out of the scope of this course. *)
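(** For example, a singleton predicate such as [p ~~> v] is precise: any heap
satisfying it consists of exactly one cell, at location [p], storing [v].
By contrast, the predicate [\exists H', H'] used in the counterexample above
is as imprecise as possible, since every heap satisfies it. *)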
(** As another tempting yet false property of the magic wand,
consider the reciprocal entailment to the cancellation lemma,
that is, [H2 ==> H1 \* (H1 \-* H2)]. It does not hold in general.
As a counterexample, consider [H2 = \[]] and [H1 = \[False]].
We can prove that the empty heap does not satisfy
[\[False] \* (\[False] \-* \[])]. *)
Lemma not_himpl_hwand_r_inv_reciprocal : exists H1 H2,
~ (H2 ==> H1 \* (H1 \-* H2)).
Proof using.
exists \[False] \[]. intros N. forwards K: N (Fmap.empty:heap).
applys hempty_intro. rewrite hstar_hpure_l in K. destruct K. auto.
Qed.
(** More generally, one has to be suspicious of any entailment
that introduces wands "out of nowhere".
The entailment [hwand_trans_elim]:
[(H1 \-* H2) \* (H2 \-* H3) ==> (H1 \-* H3)]
is correct because, intuitively, the left-hand-side captures
that [H1 <= H2] and that [H2 <= H3] for some vaguely defined
notion of [<=] as "being a subset of". From that, we can derive
[H1 <= H3], and justify that the right-hand-side makes sense.
On the contrary, the reciprocal entailment
[(H1 \-* H3) ==> (H1 \-* H2) \* (H2 \-* H3)]
is certainly false, because from [H1 <= H3] there is no way
to justify [H1 <= H2] nor [H2 <= H3]. *)
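(** As a sanity check, here is one possible counterexample, presented as a
sketch rather than as a formal proof. Take [H1 := \[False]], [H2 := \[]]
and [H3 := \[False]]. Every heap satisfies [\[False] \-* \[False]], because
the hypothesis [\[False]] can never be provided. Yet no heap satisfies
[(\[False] \-* \[]) \* (\[] \-* \[False])]: by the lemma [hwand_hempty_l]
proved just below, the second factor of the star is equal to [\[False]],
which is unsatisfiable. Hence the reciprocal entailment fails. *)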
(* ----------------------------------------------------------------- *)
(** *** Interaction of [hwand] with [hempty] and [hpure] *)
(** The heap predicate [\[] \-* H] is equivalent to [H]. *)
Lemma hwand_hempty_l : forall H,
(\[] \-* H) = H.
Proof using.
intros. unfold hwand. xsimpl.
{ intros H0 M. xchange M. }
{ xsimpl. }
Qed.
(** The lemma above shows that the empty predicate [\[]] can
be removed from the LHS of a magic wand.
More generally, a pure predicate [\[P]] can be removed from
the LHS of a magic wand, as long as [P] is true. Formally: *)
Lemma hwand_hpure_l : forall P H,
P ->
(\[P] \-* H) = H.
Proof using.
introv HP. unfold hwand. xsimpl.
{ intros H0 M. xchange M. applys HP. }
{ xpull. auto. }
Qed.
(** Reciprocally, to prove that a heap satisfies [\[P] \-* H],
it suffices to prove that this heap satisfies [H] under the
assumption that [P] is true. Formally: *)
Lemma himpl_hwand_hpure_r : forall H1 H2 P,
(P -> H1 ==> H2) ->
H1 ==> (\[P] \-* H2).
Proof using. introv M. applys himpl_hwand_r. xsimpl. applys M. Qed.
(** **** Exercise: 2 stars, standard, optional (himpl_hwand_hpure_lr)
Prove that [\[P1 -> P2]] entails [\[P1] \-* \[P2]]. *)
Lemma himpl_hwand_hpure_lr : forall (P1 P2:Prop),
\[P1 -> P2] ==> (\[P1] \-* \[P2]).
Proof using. (* FILL IN HERE *) Admitted.
(** [] *)
(* ----------------------------------------------------------------- *)
(** *** Interaction of [hwand] with [hstar] *)
(** An interesting property is that arguments on the LHS of a magic
wand can equivalently be "curried" or "uncurried", just like a
function of type "(A * B) -> C" is equivalent to a function of
type "A -> B -> C".
The heap predicates [(H1 \* H2) \-* H3] and [H1 \-* (H2 \-* H3)]
and [H2 \-* (H1 \-* H3)] are all equivalent. Intuitively, they all
describe the predicate [H3] with the missing pieces [H1] and [H2].
The equivalence between the uncurried form [(H1 \* H2) \-* H3]
and the curried form [H1 \-* (H2 \-* H3)] is formalized by the
lemma shown below. The third form, [H2 \-* (H1 \-* H3)], follows
from the commutativity property [H1 \* H2 = H2 \* H1]. *)
Lemma hwand_curry_eq : forall H1 H2 H3,
(H1 \* H2) \-* H3 = H1 \-* (H2 \-* H3).
Proof using.
intros. applys himpl_antisym.
{ apply himpl_hwand_r. apply himpl_hwand_r.
xchange (hwand_cancel (H1 \* H2) H3). }
{ apply himpl_hwand_r. xchange (hwand_cancel H1 (H2 \-* H3)).
xchange (hwand_cancel H2 H3). }
Qed.
(** Another interesting property is that the RHS of a magic wand
can absorb resources that the magic wand is starred with.
Concretely, from [(H1 \-* H2) \* H3], one can get the predicate
[H3] to be absorbed by the [H2] in the magic wand, yielding
[H1 \-* (H2 \* H3)].
One way to read this: "if you own [H3] and, when given [H1]
you own [H2], then, when given [H1], you own both [H2] and [H3]." *)
Lemma hstar_hwand : forall H1 H2 H3,
(H1 \-* H2) \* H3 ==> H1 \-* (H2 \* H3).
Proof using.
intros. applys himpl_hwand_r. xsimpl. xchange (hwand_cancel H1 H2).
Qed.
(** Remark: the reciprocal entailment is false: it is not possible, in
general, to extract a heap predicate out of the right-hand side of a
magic wand. Indeed, the heap fragment described by that predicate need
not exist until the left-hand side of the magic wand has been provided. *)
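(** A concrete counterexample, again as a sketch: take [H1 := p ~~> v],
[H2 := \[]] and [H3 := p ~~> v]. The empty heap satisfies the LHS
[(p ~~> v) \-* (\[] \* (p ~~> v))], since extending it with a cell at [p]
yields a heap satisfying [\[] \* (p ~~> v)]. However, the empty heap does
not satisfy the RHS [((p ~~> v) \-* \[]) \* (p ~~> v)], because the right
factor of the star requires an actual cell at location [p]. *)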
(** **** Exercise: 1 star, standard, especially useful (himpl_hwand_hstar_same_r)
Prove that [H1] entails [H2 \-* (H2 \* H1)]. *)
Lemma himpl_hwand_hstar_same_r : forall H1 H2,
H1 ==> (H2 \-* (H2 \* H1)).
Proof using. (* FILL IN HERE *) Admitted.
(** [] *)
(** **** Exercise: 2 stars, standard, especially useful (hwand_cancel_part)
Prove that [H1 \* ((H1 \* H2) \-* H3)] simplifies to [H2 \-* H3]. *)
Lemma hwand_cancel_part : forall H1 H2 H3,
H1 \* ((H1 \* H2) \-* H3) ==> (H2 \-* H3).
Proof using. (* FILL IN HERE *) Admitted.
(** [] *)
End WandProperties.
(* ################################################################# *)
(** * Optional Material *)
(* ================================================================= *)
(** ** Equivalence Between Alternative Definitions of the Magic Wand *)
Module HwandEquiv.
Implicit Type op : hprop->hprop->hprop.
(** In what follows we prove the equivalence between the four
characterizations of [hwand H1 H2] that we have presented:
1. The definition [hwand'], expressed directly in terms of heaps:
[fun h => forall h', Fmap.disjoint h h' -> H1 h' -> H2 (h' \u h)]
2. The definition [hwand], expressed in terms of existing operators:
[\exists H0, H0 \* \[ (H1 \* H0) ==> H2]]
3. The characterization via the equivalence [hwand_equiv]:
[forall H0 H1 H2, (H0 ==> H1 \-* H2) <-> (H1 \* H0 ==> H2)].
4. The characterization via the pair of the introduction rule
[himpl_hwand_r] and the elimination rule [hwand_cancel].
To prove the 4-way equivalence, we first prove the equivalence
between (1) and (2), then prove the equivalence between (2) and (3),
and finally the equivalence between (3) and (4).
*)
Definition hwand_characterization_1 (op:hprop->hprop->hprop) :=
op = (fun H1 H2 =>
(fun h => forall h', Fmap.disjoint h h' -> H1 h' -> H2 (h' \u h))).
Definition hwand_characterization_2 (op:hprop->hprop->hprop) :=
op = (fun H1 H2 => \exists H0, H0 \* \[ H1 \* H0 ==> H2 ]).
Definition hwand_characterization_3 (op:hprop->hprop->hprop) :=
forall H0 H1 H2, (H0 ==> op H1 H2) <-> (H1 \* H0 ==> H2).
Definition hwand_characterization_4 (op:hprop->hprop->hprop) :=
(forall H0 H1 H2, (H1 \* H0 ==> H2) -> (H0 ==> op H1 H2))
/\ (forall H1 H2, (H1 \* (op H1 H2) ==> H2)).
Lemma hwand_characterization_1_eq_2 :
hwand_characterization_1 = hwand_characterization_2.
Proof using.
applys pred_ext_1. intros op.
unfold hwand_characterization_1, hwand_characterization_2.
asserts K: (forall A B, A = B -> (op = A <-> op = B)).
{ intros. iff; subst*. } apply K; clear K.
apply pred_ext_3. intros H1 H2 h. iff M.
{ exists (=h). rewrite hstar_hpure_r. split.
{ auto. }
{ intros h3 K3. rewrite hstar_comm in K3.
destruct K3 as (h1&h2&K1&K2&D&U). subst h1 h3.
rewrites (>> union_comm_of_disjoint D). applys M D K2. } }
{ (* This direction reproduces the proof of [hwand_inv]. *)
intros h1 D K1. destruct M as (H0&M).
destruct M as (h0&h2&K0&K2&D'&U).
lets (N&E): hpure_inv (rm K2). subst h h2.
rewrite Fmap.union_empty_r in *.
applys N. applys hstar_intro K1 K0. applys disjoint_sym D. }
Qed.
Lemma hwand_characterization_2_eq_3 :
hwand_characterization_2 = hwand_characterization_3.
Proof using.
applys pred_ext_1. intros op.
unfold hwand_characterization_2, hwand_characterization_3. iff K.
{ subst. intros. (* apply hwand_equiv. *) iff M.
{ xchange M. intros H3 N. xchange N. }
{ xsimpl H0. xchange M. } }
{ apply fun_ext_2. intros H1 H2. apply himpl_antisym.
{ lets (M&_): (K (op H1 H2) H1 H2). xsimpl (op H1 H2).
applys M. applys himpl_refl. }
{ xsimpl. intros H0 M. rewrite K. applys M. } }
Qed.
Lemma hwand_characterization_3_eq_4 :
hwand_characterization_3 = hwand_characterization_4.
Proof using.
applys pred_ext_1. intros op.
unfold hwand_characterization_3, hwand_characterization_4. iff K.
{ split.
{ introv M. apply <- K. apply M. }
{ intros. apply K. auto. } }
{ destruct K as (K1&K2). intros. split.
{ introv M. xchange M. xchange (K2 H1 H2). }
{ introv M. applys K1. applys M. } }
Qed.
End HwandEquiv.
(* ================================================================= *)
(** ** Operator [hforall] *)
Module NewQwand.
Export WandDef.
(** In the beginning of this chapter, we defined [qwand] following the pattern
of [hwand], as [ \exists H0, H0 \* \[ Q1 \*+ H0 ===> Q2 ] ].
An alternative approach consists of defining [qwand] in terms of [hwand].
This alternative definition involves the universal quantifier for heap
predicates, written [\forall x, H]. The universal quantifier is the
counterpart of the existential quantifier [\exists x, H].
Using the [\forall] quantifier, we may define [Q1 \--* Q2] as the heap
predicate [\forall v, (Q1 v) \-* (Q2 v)]. *)
(** Let us first formalize the definition of the universal quantifier on
[hprop]. Technically, a heap predicate of the form [\forall x, H] stands for
[hforall (fun x => H)], where the definition of [hforall] follows the
exact same pattern as for [hexists]. The definition shown below is somewhat
technical---details may be safely skipped over. *)
Definition hforall (A : Type) (J : A -> hprop) : hprop :=
fun h => forall x, J x h.
Notation "'\forall' x1 .. xn , H" :=
(hforall (fun x1 => .. (hforall (fun xn => H)) ..))
(at level 39, x1 binder, H at level 50, right associativity,
format "'[' '\forall' '/ ' x1 .. xn , '/ ' H ']'").
(** The introduction and elimination rules for [hforall] are as follows. *)
Lemma hforall_intro : forall A (J:A->hprop) h,
(forall x, J x h) ->
(hforall J) h.
Proof using. introv M. applys* M. Qed.
Lemma hforall_inv : forall A (J:A->hprop) h,
(hforall J) h ->
forall x, J x h.
Proof using. introv M. applys* M. Qed.
(** The introduction rule in an entailment for [\forall] appears below.
To prove that a heap satisfies [\forall x, J x], one must show that,
for any [x], this heap satisfies [J x]. *)
Lemma himpl_hforall_r : forall A (J:A->hprop) H,
(forall x, H ==> J x) ->
H ==> (\forall x, J x).
Proof using. introv M. intros h K x. apply~ M. Qed.
(** The elimination rule in an entailment for [\forall] appears below.
Assuming a heap satisfies [\forall x, J x], one can derive that the
same heap satisfies [J v] for any desired value [v]. *)
Lemma hforall_specialize : forall A (v:A) (J:A->hprop),
(\forall x, J x) ==> (J v).
Proof using. intros. intros h K. apply* K. Qed.
(** This last lemma can equivalently be formulated in the following way,
which makes it easier to apply in some cases. *)
Lemma himpl_hforall_l : forall A (v:A) (J:A->hprop) H,
J v ==> H ->
(\forall x, J x) ==> H.
Proof using. introv M. applys himpl_trans M. applys hforall_specialize. Qed.
(** Universal quantifiers that appear in the precondition of a triple
may be specialized, just like those in the left-hand side of an
entailment. *)
Lemma triple_hforall : forall A (v:A) t (J:A->hprop) Q,
triple t (J v) Q ->
triple t (\forall x, J x) Q.
Proof.
introv M. applys triple_conseq M.
{ applys hforall_specialize. }
{ applys qimpl_refl. }
Qed.
(* ================================================================= *)
(** ** Alternative Definition of [qwand] *)
Declare Scope qwand_scope.
Open Scope qwand_scope.
(** We are now ready to state the alternative definition of [Q1 \--* Q2],
as the heap predicate [\forall v, (Q1 v) \-* (Q2 v)]. *)
Definition qwand (Q1 Q2:val->hprop) : hprop :=
\forall v, (Q1 v) \-* (Q2 v).
Notation "Q1 \--* Q2" := (qwand Q1 Q2) (at level 43) : qwand_scope.
(** Let us establish the properties of the new definition of [qwand].
We begin with the specialization lemma, which asserts that [Q1 \--* Q2]
can be specialized to [(Q1 v) \-* (Q2 v)] for any value [v]. This
result is a direct consequence of the corresponding specialization
property of [\forall]. *)
Lemma qwand_specialize : forall (v:val) (Q1 Q2:val->hprop),
(Q1 \--* Q2) ==> (Q1 v \-* Q2 v).
Proof using.
intros. unfold qwand. applys himpl_hforall_l v. xsimpl.
Qed.
(** We next prove the equivalence rule. *)
Lemma qwand_equiv : forall H Q1 Q2,
H ==> (Q1 \--* Q2) <-> (Q1 \*+ H) ===> Q2.
Proof using.
intros. iff M.
{ intros x. xchange M. xchange (qwand_specialize x).
xchange (hwand_cancel (Q1 x)). }
{ applys himpl_hforall_r. intros x. applys himpl_hwand_r.
xchange (M x). }
Qed.
(** The cancellation rule follows. *)
Lemma qwand_cancel : forall Q1 Q2,
Q1 \*+ (Q1 \--* Q2) ===> Q2.
Proof using. intros. rewrite <- qwand_equiv. applys qimpl_refl. Qed.
(** Like [H1 \-* H2], the operation [Q1 \--* Q2] is contravariant in [Q1]
and covariant in [Q2]. *)
Lemma qwand_himpl : forall Q1 Q1' Q2 Q2',
Q1' ===> Q1 ->
Q2 ===> Q2' ->
(Q1 \--* Q2) ==> (Q1' \--* Q2').
Proof using.
introv M1 M2. rewrite qwand_equiv. intros x.
xchange (qwand_specialize x). xchange M1.
xchange (hwand_cancel (Q1 x)). xchange M2.
Qed.
(** Like [H1 \-* H2], the operation [Q1 \--* Q2] can absorb in its RHS
resources with which it is starred. *)
Lemma hstar_qwand : forall Q1 Q2 H,
(Q1 \--* Q2) \* H ==> Q1 \--* (Q2 \*+ H).
Proof using.
intros. rewrite qwand_equiv. xchange (@qwand_cancel Q1).
Qed.
(** **** Exercise: 1 star, standard, especially useful (himpl_qwand_hstar_same_r)
Prove that [H] entails [Q \--* (Q \*+ H)]. *)
Lemma himpl_qwand_hstar_same_r : forall H Q,
H ==> Q \--* (Q \*+ H).
Proof using. (* FILL IN HERE *) Admitted.
(** [] *)
(** **** Exercise: 2 stars, standard, optional (qwand_cancel_part)
Prove that [H \* ((Q1 \*+ H) \--* Q2)] simplifies to [Q1 \--* Q2].
Hint: use [xchange]. *)
Lemma qwand_cancel_part : forall H Q1 Q2,
H \* ((Q1 \*+ H) \--* Q2) ==> (Q1 \--* Q2).
Proof using. (* FILL IN HERE *) Admitted.
(** [] *)
(** ** Equivalence between Alternative Definitions of the Magic Wand
for Postconditions *)
Module QwandEquiv.
Implicit Type op : (val->hprop)->(val->hprop)->hprop.
(** In what follows we prove the equivalence between five
characterizations of [qwand Q1 Q2]:
1. The definition expressed directly in terms of heaps:
[fun h => forall v h', Fmap.disjoint h h' -> Q1 v h' -> Q2 v (h \u h')]
2. The definition [qwand], expressed in terms of existing operators:
[\exists H0, H0 \* \[ (Q1 \*+ H0) ===> Q2]]
3. The definition expressed using the universal quantifier:
[\forall v, (Q1 v) \-* (Q2 v)]
4. The characterization via the equivalence [qwand_equiv]:
[forall H0 Q1 Q2, (H0 ==> Q1 \--* Q2) <-> (Q1 \*+ H0 ===> Q2)].
5. The characterization via the pair of the introduction rule
[himpl_qwand_r] and the elimination rule [qwand_cancel].
The proofs are essentially identical to the equivalence proofs for [hwand],
except for definition (3), which is specific to [qwand].
*)
Definition qwand_characterization_1 op :=
op = (fun Q1 Q2 => (fun h => forall v h', Fmap.disjoint h h' ->
Q1 v h' -> Q2 v (h \u h'))).
Definition qwand_characterization_2 op :=
op = (fun Q1 Q2 => \exists H0, H0 \* \[ Q1 \*+ H0 ===> Q2 ]).
Definition qwand_characterization_3 op :=
op = (fun Q1 Q2 => \forall v, (Q1 v) \-* (Q2 v)).
Definition qwand_characterization_4 op :=
forall H0 Q1 Q2, (H0 ==> op Q1 Q2) <-> (Q1 \*+ H0 ===> Q2).
Definition qwand_characterization_5 op :=
(forall H0 Q1 Q2, (Q1 \*+ H0 ===> Q2) -> (H0 ==> op Q1 Q2))
/\ (forall Q1 Q2, (Q1 \*+ (op Q1 Q2) ===> Q2)).
Lemma qwand_characterization_1_eq_2 :
qwand_characterization_1 = qwand_characterization_2.
Proof using.
applys pred_ext_1. intros op.
unfold qwand_characterization_1, qwand_characterization_2.
asserts K: (forall A B, A = B -> (op = A <-> op = B)).
{ intros. iff; subst*. } apply K; clear K.
apply pred_ext_3. intros Q1 Q2 h. iff M.
{ exists (=h). rewrite hstar_hpure_r. split.
{ auto. }
{ intros v h3 K3. rewrite hstar_comm in K3.
destruct K3 as (h1&h2&K1&K2&D&U). subst h1 h3. applys M D K2. } }
{ intros v h1 D K1. destruct M as (H0&M).
destruct M as (h0&h2&K0&K2&D'&U).
lets (N&E): hpure_inv (rm K2). subst h h2.
rewrite Fmap.union_empty_r in *.
applys N. rewrite hstar_comm. applys hstar_intro K0 K1 D. }
Qed.
Lemma qwand_characterization_2_eq_3 :
qwand_characterization_2 = qwand_characterization_3.
Proof using.
applys pred_ext_1. intros op.
unfold qwand_characterization_2, qwand_characterization_3.
asserts K: (forall A B, A = B -> (op = A <-> op = B)).
{ intros. iff; subst*. } apply K; clear K.
apply fun_ext_2. intros Q1 Q2. apply himpl_antisym.
{ xpull. intros H0 M. applys himpl_hforall_r. intros v.
rewrite hwand_equiv. xchange M. }
{ xsimpl (qwand Q1 Q2). applys qwand_cancel. }
Qed.
Lemma qwand_characterization_2_eq_4 :
qwand_characterization_2 = qwand_characterization_4.
Proof using.
applys pred_ext_1. intros op.
unfold qwand_characterization_2, qwand_characterization_4. iff K.
{ subst. intros. iff M.
{ xchange M. intros v H3 N. xchange N. }
{ xsimpl H0. xchange M. } }
{ apply fun_ext_2. intros H1 H2. apply himpl_antisym.
{ lets (M&_): (K (op H1 H2) H1 H2). xsimpl (op H1 H2).
applys M. applys himpl_refl. }
{ xsimpl. intros H0 M. rewrite K. applys M. } }
Qed.
Lemma qwand_characterization_4_eq_5 :
qwand_characterization_4 = qwand_characterization_5.
Proof using.
applys pred_ext_1. intros op.
unfold qwand_characterization_4, qwand_characterization_5. iff K.
{ split.
{ introv M. apply <- K. apply M. }
{ intros. apply K. auto. } }
{ destruct K as (K1&K2). intros. split.
{ introv M. xchange M. xchange (K2 Q1 Q2). }
{ introv M. applys K1. applys M. } }
Qed.
End QwandEquiv.
(* ================================================================= *)
(** ** Simplified Definition of [mkstruct] *)
(** The definition of [mkstruct] can be simplified using the magic
wand operator for postconditions.
Recall the definition of [mkstruct] from chapter [WPgen]. *)
Definition mkstruct' (F:formula) : formula :=
fun (Q:val->hprop) => \exists Q1 H, F Q1 \* H \* \[Q1 \*+ H ===> Q].
(** Observe that the fragment [\exists H, H \* \[Q1 \*+ H ===> Q]]
is equivalent to [Q1 \--* Q]. This observation leads to the following
more concise reformulation of the definition of [mkstruct]. *)
Definition mkstruct (F:formula) : formula :=
fun Q => \exists Q1, F Q1 \* (Q1 \--* Q).
(** Let us prove, for this revised definition, that [mkstruct] satisfies
the three required properties (recall [WPgen]): [mkstruct_erase],
[mkstruct_frame], and [mkstruct_conseq]. In these proofs, we assume
the revised definition of [qwand] expressed in terms of [hwand]
and [hforall]. *)
Lemma mkstruct_erase : forall F Q,
F Q ==> mkstruct F Q.
Proof using.
intros. unfold mkstruct. xsimpl Q. rewrite qwand_equiv. xsimpl.
Qed.
Lemma mkstruct_conseq : forall F Q1 Q2,
Q1 ===> Q2 ->
mkstruct F Q1 ==> mkstruct F Q2.
Proof using.
introv WQ. unfold mkstruct. xpull. intros Q'. xsimpl Q'.
rewrite qwand_equiv. xchange qwand_cancel. xchange WQ.
Qed.
Lemma mkstruct_frame : forall F H Q,
(mkstruct F Q) \* H ==> mkstruct F (Q \*+ H).
Proof using.
intros. unfold mkstruct. xpull. intros Q'. xsimpl Q'.
rewrite qwand_equiv. xchange qwand_cancel.
Qed.
End NewQwand.
(* ================================================================= *)
(** ** Texan Triples *)
Module TexanTriples.
(** In this section, we assume a version of [xsimpl] that handles
the magic wand. Note that [hforall] and other operators are
set "Opaque", their definitions cannot be unfolded. *)
Implicit Types v w : val.
Implicit Types p : loc.
(* ----------------------------------------------------------------- *)
(** *** 1. Example of Texan Triples *)
(** In this section, we show that specification triples can be presented
in a different style using weakest preconditions. *)
(** Consider for example the specification triple for allocation. *)
Parameter triple_ref : forall v,
triple (val_ref v)
\[]
(funloc p => p ~~> v).
(** This specification can be equivalently reformulated in the following
form. *)
Parameter wp_ref : forall Q v,
\[] \* (\forall p, p ~~> v \-* Q (val_loc p)) ==> wp (val_ref v) Q.
(** Above, we purposely left the empty heap predicate at the front to
indicate where the precondition, if it were not empty, would go in
the reformulation. *)
(** In what follows, we describe the chain of transformation that can take us
from the triple form to the [wp] form, and establish the reciprocal.
We then formalize the general pattern for translating a triple
into a "texan triple" (i.e., the wp-based specification). *)
(** By replacing [triple t H Q] with [H ==> wp t Q], the specification
[triple_ref] can be reformulated as follows. *)
Lemma wp_ref_0 : forall v,
\[] ==> wp (val_ref v) (funloc p => p ~~> v).
Proof using. intros. rewrite wp_equiv. applys triple_ref. Qed.
(** We wish to cast the RHS in the form [wp (val_ref v) Q] for an abstract
variable [Q]. To that end, we reformulate the above statement by including
a magic wand relating the current postcondition, which is
[(funloc p => p ~~> v)], and [Q]. *)
Lemma wp_ref_1 : forall Q v,
((funloc p => p ~~> v) \--* Q) ==> wp (val_ref v) Q.
Proof using. intros. xchange (wp_ref_0 v). applys wp_ramified. Qed.
(** This statement can be made slightly more readable by unfolding the
definition of the magic wand for postconditions, as shown next. *)
Lemma wp_ref_2 : forall Q v,
(\forall r, (\exists p, \[r = val_loc p] \* p ~~> v) \-* Q r)
==> wp (val_ref v) Q.
Proof using. intros. applys himpl_trans wp_ref_1. xsimpl. Qed.
(** Interestingly, the variable [r], which is equal to [val_loc p],
can now be substituted away, further increasing readability.
We obtain the specification of [val_ref] in "Texan triple style". *)
Lemma wp_ref_3 : forall Q v,
(\forall p, (p ~~> v) \-* Q (val_loc p)) ==> wp (val_ref v) Q.
Proof using.
intros. applys himpl_trans wp_ref_2. xsimpl. intros ? p ->.
xchange (hforall_specialize p).
Qed.
(* ----------------------------------------------------------------- *)
(** *** 2. The General Pattern *)
(** In practice, specification triples can (pretty much) all be cast
in the form: [triple t H (fun r => \exists x1 x2, \[r = v] \* H')].
In such a specification:
- the value [v] may depend on the [xi],
- the heap predicate [H'] may depend on [r] and the [xi],
- the number of existentials [xi] may vary, possibly be zero,
- the equality \[r = v] may be removed if no pure fact is needed about [r].
Such a specification triple of the form
[triple t H (fun r => \exists x1 x2, \[r = v] \* H')]
can be reformulated as the Texan triple:
[H \* (\forall x1 x2, H' \-* Q v) ==> wp t Q].
We next formalize the equivalence between the two presentations, for
the specific case where the specification involves a single auxiliary
variable, called [x]. The statement below makes it explicit that
[v] may depend on [x], and that [H'] may depend on [r] and [x]. *)
Lemma texan_triple_equiv : forall t H A (Hof:val->A->hprop) (vof:A->val),
(triple t H (fun r => \exists x, \[r = vof x] \* Hof r x))
<-> (forall Q, H \* (\forall x, Hof (vof x) x \-* Q (vof x)) ==> wp t Q).
Proof using.
intros. rewrite <- wp_equiv. iff M.
{ intros Q. xchange M. applys wp_ramified_trans.
xsimpl. intros r x ->.
xchange (hforall_specialize x). }
{ applys himpl_trans M. xsimpl~. }
Qed.
(* ----------------------------------------------------------------- *)
(** *** 3. Other Examples *)
Section WpSpecRef.
(** The wp-style specifications of [get], [set], and [free] follow. *)
Lemma wp_get : forall v p Q,
(p ~~> v) \* (p ~~> v \-* Q v) ==> wp (val_get p) Q.
Proof using.
intros. rewrite wp_equiv. applys triple_conseq_frame.
{ applys triple_get. } { applys himpl_refl. } { xsimpl. intros ? ->. auto. }
Qed.
Lemma wp_set : forall v w p Q,
(p ~~> v) \* (\forall r, p ~~> w \-* Q r) ==> wp (val_set p w) Q.
Proof using.
intros. rewrite wp_equiv. applys triple_conseq_frame.
{ applys triple_set. } { applys himpl_refl. }
{ intros r. xchange (hforall_specialize r). }
Qed.
Lemma wp_free : forall v p Q,
(p ~~> v) \* (\forall r, Q r) ==> wp (val_free p) Q.
Proof using.
intros. rewrite wp_equiv. applys triple_conseq_frame.
{ applys triple_free. } { applys himpl_refl. }
{ intros r. xchange (hforall_specialize r). }
Qed.
(** Alternatively, we can advertise that [set] and [free] output the unit
value. *)
Parameter triple_set' : forall w p v,
triple (val_set p w)
(p ~~> v)
(fun r => \[r = val_unit] \* p ~~> w).
Parameter triple_free' : forall p v,
triple (val_free p)
(p ~~> v)
(fun r => \[r = val_unit]).
Lemma wp_set' : forall v w p Q,
(p ~~> v) \* (p ~~> w \-* Q val_unit) ==> wp (val_set p w) Q.
Proof using.
intros. rewrite wp_equiv. applys triple_conseq_frame.
{ applys triple_set'. }
{ applys himpl_refl. }
{ xsimpl. intros ? ->. auto. }
Qed.
Lemma wp_free' : forall v w p Q,
(p ~~> v) \* (Q val_unit) ==> wp (val_free p) Q.
Proof using.
intros. rewrite wp_equiv. applys triple_conseq_frame.
{ applys triple_free'. }
{ applys himpl_refl. }
{ xsimpl. intros ? ->. auto. }
Qed.
End WpSpecRef.
(* ----------------------------------------------------------------- *)
(** *** 4. Equivalent expressiveness *)
(** Let's show that the specification [wp_ref_3] is
as strong as the original specification [triple_ref]. *)
Lemma triple_ref_of_wp_ref_3 : forall v,
triple (val_ref v)
\[]
(funloc p => p ~~> v).
Proof using.
intros. rewrite <- wp_equiv.
applys himpl_trans; [| applys wp_ref_3 ].
xsimpl*.
Qed.
(** Likewise for the other three operations: the triple-based specification
is derivable from the wp-based specification. *)
Lemma triple_get_of_wp_get : forall v p,
triple (val_get p)
(p ~~> v)
(fun r => \[r = v] \* (p ~~> v)).
Proof using.
intros. rewrite <- wp_equiv.
applys himpl_trans; [| applys wp_get ].
xsimpl*.
Qed.
Lemma triple_set : forall w p v,
triple (val_set (val_loc p) v)
(p ~~> w)
(fun _ => p ~~> v).
Proof using.
intros. rewrite <- wp_equiv.
applys himpl_trans; [| applys wp_set ].
xsimpl*.
Qed.
Lemma triple_free : forall p v,
triple (val_free (val_loc p))
(p ~~> v)
(fun _ => \[]).
Proof using.
intros. rewrite <- wp_equiv.
applys himpl_trans; [| applys wp_free ].
xsimpl*.
Qed.
(* ----------------------------------------------------------------- *)
(** *** 5. Exercise *)
(** Let us put to practice the use of a Texan triple on a different example.
Recall the function [incr] and its specification (from [Hprop.v]). *)
Parameter incr : val.
Parameter triple_incr : forall (p:loc) (n:int),
triple (incr p)
(p ~~> n)
(fun v => \[v = val_unit] \* (p ~~> (n+1))).
(** **** Exercise: 3 stars, standard, especially useful (wp_incr)
State a Texan triple for [incr] as a lemma called [wp_incr],
then prove this lemma from [triple_incr].
Hint: the proof is a bit easier by first turning the [wp] into a [triple]
and then reasoning about triples, compared to working on the [wp] form. *)
(* FILL IN HERE *)
(** [] *)
End TexanTriples.
(* ================================================================= *)
(** ** Direct Proof of [wp_ramified] Directly from Hoare Triples *)
Module WpFromHoare.
Import NewQwand.
(** Recall from the last section of the chapter [WPsem] that it can
be interesting to define [wp]-style rules directly from the [hoare]
rules, so as to bypass the statements and proofs of rules for triples.
In the first part of this chapter, we proved that the rule
[wp_ramified] is derivable from the consequence-frame rule for triples.
Let us now show how to prove the rule [wp_ramified] directly from
the rules of Hoare logic. *)
Lemma wp_ramified : forall t Q1 Q2,
(wp t Q1) \* (Q1 \--* Q2) ==> (wp t Q2).
Proof using.
intros. unfold wp. xpull. intros H M.
xsimpl (H \* (Q1 \--* Q2)). intros H'.
applys hoare_conseq M. { xsimpl. }
intros r. xchange (qwand_specialize r). xsimpl.
rewrite hstar_comm. applys hwand_cancel.
Qed.
End WpFromHoare.
(* ================================================================= *)
(** ** Conjunction and Disjunction Operators on [hprop] *)
(** The disjunction and the (non-separating) conjunction are two
    other Separation Logic operators. They are not so useful in
practice, because they can be trivially encoded using Coq
conditional construct, or using Coq pattern matching.
Nevertheless, these two operators can prove useful in specific
contexts. We present them also for the sake of completeness. *)
Module ConjDisj.
Import NewQwand.
(* ----------------------------------------------------------------- *)
(** *** Definition of [hor] *)
(** The heap predicate [hor H1 H2] lifts the disjunction operator
[P1 \/ P2] from [Prop] to [hprop].
Concretely, the heap predicate [hor H1 H2] describes a heap
    that satisfies [H1] or satisfies [H2] (possibly both).
The heap predicate [hor] admits a direct definition as a
function over heaps. *)
Definition hor' (H1 H2 : hprop) : hprop :=
fun h => H1 h \/ H2 h.
(** An alternative definition leverages the [\exists] quantifier.
The definition, shown below, reads as follows: "there exists
an unspecified boolean value [b] such that if [b] is true
then [H1] holds, else if [b] is false then [H2] holds".
    The benefit of this definition is that the proofs of its properties
can be established without manipulating heaps explicitly. *)
Definition hor (H1 H2 : hprop) : hprop :=
\exists (b:bool), if b then H1 else H2.
(** **** Exercise: 3 stars, standard, optional (hor_eq_hor')
Prove the equivalence of the definitions [hor] and [hor']. *)
Lemma hor_eq_hor' :
hor = hor'.
Proof using. (* FILL IN HERE *) Admitted.
(** [] *)
(** The introduction and elimination rules for [hor] are as follows.
- If [H1] holds, then "[H1] or [H2]" holds.
- Symmetrically, if [H2] holds, then "[H1] or [H2]" holds.
- Reciprocally, if "[H1] or [H2]" holds, then one can perform a case
analysis on whether it is [H1] or [H2] that holds. Concretely, to
show that "[H1] or [H2]" entails [H3], one must show both that
[H1] entails [H3] and that [H2] entails [H3]. *)
Lemma himpl_hor_r_r : forall H1 H2,
H1 ==> hor H1 H2.
Proof using. intros. unfolds hor. exists* true. Qed.
Lemma himpl_hor_r_l : forall H1 H2,
H2 ==> hor H1 H2.
Proof using. intros. unfolds hor. exists* false. Qed.
(** In practice, these two rules are easier to exploit when combined with a
transitivity step. *)
Lemma himpl_hor_r_r_trans : forall H1 H2 H3,
H3 ==> H1 ->
H3 ==> hor H1 H2.
Proof using. introv W. applys himpl_trans W. applys himpl_hor_r_r. Qed.
Lemma himpl_hor_r_l_trans : forall H1 H2 H3,
H3 ==> H2 ->
H3 ==> hor H1 H2.
Proof using. introv W. applys himpl_trans W. applys himpl_hor_r_l. Qed.
(** The elimination rule is stated as follows. *)
Lemma himpl_hor_l : forall H1 H2 H3,
H1 ==> H3 ->
H2 ==> H3 ->
hor H1 H2 ==> H3.
Proof using.
introv M1 M2. unfolds hor. applys himpl_hexists_l. intros b. case_if*.
Qed.
(** The operator [hor] is commutative. To establish this property, it is
handy to exploit the following lemma, called [if_neg], which swaps the
two branches of a conditional by negating the boolean condition. *)
Lemma if_neg : forall (b:bool) A (X Y:A),
(if b then X else Y) = (if neg b then Y else X).
Proof using. intros. case_if*. Qed.
(** **** Exercise: 2 stars, standard, especially useful (hor_comm)
Prove that [hor] is a symmetric operator.
Hint: exploit [if_neg] and [hprop_op_comm] (from chapter [Himpl]). *)
Lemma hor_comm : forall H1 H2,
hor H1 H2 = hor H2 H1.
Proof using. (* FILL IN HERE *) Admitted.
(** [] *)
Module HorExample.
Import Repr.
Implicit Types q : loc.
(** Recall from chapter [Repr] the definition of [MList], and the two
lemmas [MList_nil] and [MList_cons] that reformulates that definition. *)
(** **** Exercise: 4 stars, standard, especially useful (MList_using_hor)
Prove that [MList] can be characterized by a disjunction expressed using
[hor] as shown below. *)
Lemma MList_using_hor : forall L p,
MList L p =
hor (\[L = nil /\ p = null])
(\exists x q L', \[L = x::L']
\* (p ~~~>`{ head := x; tail := q})
\* (MList L' q)).
Proof using. (* FILL IN HERE *) Admitted.
(** [] *)
End HorExample.
(* ----------------------------------------------------------------- *)
(** *** Definition of [hand] *)
(** The heap predicate [hand H1 H2] lifts the conjunction operator
[P1 /\ P2] from [Prop] to [hprop].
Concretely, the heap predicate [hand H1 H2] describes a heap
    that satisfies [H1] and at the same time satisfies [H2].
The heap predicate [hand] admits a direct definition as a
function over heaps. *)
Definition hand' (H1 H2 : hprop) : hprop :=
fun h => H1 h /\ H2 h.
(** An alternative definition leverages the [\forall] quantifier.
The definition, shown below, reads as follows: "for any
boolean value [b], if [b] is true then [H1] should hold, and
if [b] is false then [H2] should hold". *)
Definition hand (H1 H2 : hprop) : hprop :=
\forall (b:bool), if b then H1 else H2.
(** **** Exercise: 2 stars, standard, especially useful (hand_eq_hand')
Prove the equivalence of the definitions [hand] and [hand']. *)
Lemma hand_eq_hand' :
hand = hand'.
Proof using. (* FILL IN HERE *) Admitted.
(** [] *)
(** The introduction and elimination rules for [hand] are as follows.
- If "[H1] and [H2]" holds, then in particular [H1] holds.
- Symmetrically, if "[H1] and [H2]" holds, then in particular [H2] holds.
- Reciprocally, to prove that a heap predicate [H3] entails
"[H1] and [H2]", one must prove that [H3] entails [H1], and that
      [H3] entails [H2].
*)
Lemma himpl_hand_l_r : forall H1 H2,
hand H1 H2 ==> H1.
Proof using. intros. unfolds hand. applys* himpl_hforall_l true. Qed.
Lemma himpl_hand_l_l : forall H1 H2,
hand H1 H2 ==> H2.
Proof using. intros. unfolds hand. applys* himpl_hforall_l false. Qed.
Lemma himpl_hand_r : forall H1 H2 H3,
H3 ==> H1 ->
H3 ==> H2 ->
H3 ==> hand H1 H2.
Proof using. introv M1 M2 Hh. intros b. case_if*. Qed.
(** **** Exercise: 1 star, standard, especially useful (hand_comm)
Prove that [hand] is a symmetric operator.
Hint: use [if_neg] and [hprop_op_comm]. *)
Lemma hand_comm : forall H1 H2,
hand H1 H2 = hand H2 H1.
Proof using. (* FILL IN HERE *) Admitted.
(** [] *)
End ConjDisj.
(* ================================================================= *)
(** ** Summary of All Separation Logic Operators *)
Module SummaryHprop.
(** The core operators are defined as functions over heaps. *)
Definition hempty : hprop :=
fun h => (h = Fmap.empty).
Definition hsingle (p:loc) (v:val) : hprop :=
fun h => (h = Fmap.single p v).
Definition hstar (H1 H2 : hprop) : hprop :=
fun h => exists h1 h2, H1 h1
/\ H2 h2
/\ Fmap.disjoint h1 h2
/\ h = Fmap.union h1 h2.
Definition hexists A (J:A->hprop) : hprop :=
fun h => exists x, J x h.
Definition hforall (A : Type) (J : A -> hprop) : hprop :=
fun h => forall x, J x h.
(** The remaining operators can be defined either as functions over
heaps, or as derived definitions expressed in terms of the core
operators defined above. *)
(** Direct definition for the remaining operators. *)
Module ReaminingOperatorsDirect.
Definition hpure (P:Prop) : hprop :=
fun h => (h = Fmap.empty) /\ P.
Definition hwand (H1 H2:hprop) : hprop :=
fun h => forall h', Fmap.disjoint h h' -> H1 h' -> H2 (h \u h').
Definition qwand (Q1 Q2:val->hprop) : hprop :=
fun h => forall v h', Fmap.disjoint h h' -> Q1 v h' -> Q2 v (h \u h').
Definition hor (H1 H2 : hprop) : hprop :=
fun h => H1 h \/ H2 h.
Definition hand (H1 H2 : hprop) : hprop :=
fun h => H1 h /\ H2 h.
End ReaminingOperatorsDirect.
(** Alternative definitions for the same operators, expressed in terms
of the core operators. *)
Module ReaminingOperatorsDerived.
Definition hpure (P:Prop) : hprop :=
\exists (p:P), \[].
Definition hwand (H1 H2 : hprop) : hprop :=
\exists H0, H0 \* \[ (H1 \* H0) ==> H2 ].
Definition qwand (Q1 Q2 : val->hprop) : hprop :=
\forall v, (Q1 v) \-* (Q2 v).
Definition qwand' (Q1 Q2 : val->hprop) : hprop := (* alternative *)
\exists H0, H0 \* \[ (Q1 \*+ H0) ===> Q2].
Definition hand (H1 H2 : hprop) : hprop :=
\forall (b:bool), if b then H1 else H2.
Definition hor (H1 H2 : hprop) : hprop :=
\exists (b:bool), if b then H1 else H2.
End ReaminingOperatorsDerived.
(** In practice, it saves a lot of effort to use the derived definitions,
    because their properties can then be established with the help of the
    [xsimpl] tactic, through reasoning that takes place exclusively at the
    level of [hprop]. *)
End SummaryHprop.
(* ================================================================= *)
(** ** Historical Notes *)
(** The magic wand is an operator that was introduced in the very first days
of Separation Logic. From a logical perspective, it makes total sense to
have it. From a practical perspective, however, it was not always entirely
obvious how the magic wand could simplify specifications and proofs.
Experience with CFML 1.0 shows that it is possible to develop an entire
    verification framework and verify thousands of lines of advanced data
structures and algorithms without ever involving the magic wand operator.
The magic wand, however, reveals its interest when exploited (1) in the
ramified frame rule, and (2) in weakest-precondition style reasoning rules.
The idea of the ramified frame rule was introduced by
[Krishnaswami, Birkedal, and Aldrich 2010] (in Bib.v). Its general statement,
as formulated in the present chapter, was proposed by
[Hobor and Villard 2013] (in Bib.v). Developers of the tools VST and Iris
    have advocated the use of this rule. The ramified frame
rule was integrated in CFML 2.0 in 2018. *)
(* 2021-08-11 15:25 *)
|
{"author": "blainehansen", "repo": "coq-playground", "sha": "93619e132a7fabf9d3b796465e253469815a0266", "save_path": "github-repos/coq/blainehansen-coq-playground", "path": "github-repos/coq/blainehansen-coq-playground/coq-playground-93619e132a7fabf9d3b796465e253469815a0266/SLF/Wand.v"}
|
"""
Adapted from https://github.com/john-hewitt/structural-probes
"""
import numpy as np
from collections import namedtuple, defaultdict
from argparse import ArgumentParser
import os
import torch
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
from transformers import AutoTokenizer
import math
argp = ArgumentParser()
argp.add_argument("--output_dir", default="./out/eval_results")
argp.add_argument("--raw_data_dir", default="./data/")
argp.add_argument("--model_name", default="bert-base-cased")
argp.add_argument("--split", default="test")
argp.add_argument("--print_tikz", action="store_true",default=False)
argp.add_argument("--pmi_clamp_zero", action="store_true",default=False)
argp.add_argument("--word_piece_agg_space", type=str, default="log", choices=["log", "exp"])
argp.add_argument("--word_piece_agg_type", type=str, default="avg", choices=["avg", "min", "max"])
argp.add_argument("--no_prefix_space", action="store_true")
argp.add_argument("--word_piece_pmi_path", type=str, default="", required=True)
def unique_pairs(n):
"""Produce pairs of indexes in range(n)"""
for i in range(n):
for j in range(i + 1, n):
yield i, j
class Task:
"""Abstract class representing a linguistic task mapping texts to labels."""
@staticmethod
def labels(observation):
"""Maps an observation to a matrix of labels.
        Should be overridden in implementing classes.
"""
raise NotImplementedError
class ParseDistanceTask(Task):
"""Maps observations to dependency parse distances between words."""
@staticmethod
def labels(observation):
"""Computes the distances between all pairs of words; returns them as a torch tensor.
Args:
observation: a single Observation class for a sentence:
Returns:
A torch tensor of shape (sentence_length, sentence_length) of distances
in the parse tree as specified by the observation annotation.
"""
sentence_length = len(observation[0]) # All observation fields must be of same length
distances = torch.zeros((sentence_length, sentence_length))
for i in range(sentence_length):
for j in range(i, sentence_length):
i_j_distance = ParseDistanceTask.distance_between_pairs(observation, i, j)
distances[i][j] = i_j_distance
distances[j][i] = i_j_distance
return distances
@staticmethod
def distance_between_pairs(observation, i, j, head_indices=None):
"""Computes path distance between a pair of words
TODO: It would be (much) more efficient to compute all pairs' distances at once;
this pair-by-pair method is an artefact of an older design, but
was unit-tested for correctness...
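        Example (hypothetical heads): with head_indices ["2", "0", "2"], word 2
        (1-indexed) is the root and words 1 and 3 attach to it, so the returned
        distance between word indices i=0 and j=2 is 2.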
Args:
observation: an Observation namedtuple, with a head_indices field.
            or None, if head_indices != None
i: one of the two words to compute the distance between.
j: one of the two words to compute the distance between.
head_indices: the head indices (according to a dependency parse) of all
words, or None, if observation != None.
Returns:
The integer distance d_path(i,j)
"""
if i == j:
return 0
if observation:
head_indices = []
number_of_underscores = 0
for elt in observation.head_indices:
if elt == "_":
head_indices.append(0)
number_of_underscores += 1
else:
head_indices.append(int(elt) + number_of_underscores)
i_path = [i + 1]
j_path = [j + 1]
i_head = i + 1
j_head = j + 1
while True:
if not (i_head == 0 and (i_path == [i + 1] or i_path[-1] == 0)):
i_head = head_indices[i_head - 1]
i_path.append(i_head)
if not (j_head == 0 and (j_path == [j + 1] or j_path[-1] == 0)):
j_head = head_indices[j_head - 1]
j_path.append(j_head)
if i_head in j_path:
j_path_length = j_path.index(i_head)
i_path_length = len(i_path) - 1
break
elif j_head in i_path:
i_path_length = i_path.index(j_head)
j_path_length = len(j_path) - 1
break
elif i_head == j_head:
i_path_length = len(i_path) - 1
j_path_length = len(j_path) - 1
break
total_length = j_path_length + i_path_length
return total_length
def load_conll_dataset(filepath):
"""Reads in a conllx file; generates Observation objects
For each sentence in a conllx file, generates a single Observation
object.
Args:
filepath: the filesystem path to the conll dataset
Returns:
A list of Observations
"""
observations = []
lines = (x for x in open(filepath))
for buf in generate_lines_for_sent(lines):
conllx_lines = []
for line in buf:
conllx_lines.append(line.strip().split("\t"))
embeddings = [None for x in range(len(conllx_lines))]
observation = Observations(*zip(*conllx_lines), embeddings)
observations.append(observation)
return observations
def generate_lines_for_sent(lines):
"""Yields batches of lines describing a sentence in conllx.
Args:
lines: Each line of a conllx file.
Yields:
a list of lines describing a single sentence in conllx.
"""
buf = []
for line in lines:
if line.startswith("#"):
continue
if not line.strip():
if buf:
yield buf
buf = []
else:
continue
else:
buf.append(line.strip())
if buf:
yield buf
class ObservationIterator(Dataset):
"""List Container for lists of Observations and labels for them.
Used as the iterator for a PyTorch dataloader.
"""
def __init__(self, observations, task):
self.observations = observations
self.set_labels(observations, task)
def set_labels(self, observations, task):
"""Constructs aand stores label for each observation.
Args:
observations: A list of observations describing a dataset
task: a Task object which takes Observations and constructs labels.
"""
self.labels = []
for observation in tqdm(observations, desc="[computing labels]"):
self.labels.append(task.labels(observation))
def __len__(self):
return len(self.observations)
def __getitem__(self, idx):
return self.observations[idx], self.labels[idx]
def prims_matrix_to_edges(matrix, words, poses):
"""
Constructs a minimum spanning tree from the pairwise weights in matrix;
returns the edges.
Never lets punctuation-tagged words be part of the tree.
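    A hypothetical example: for a 3-word sentence with pairwise distances
    [[0, 1, 2], [1, 0, 1], [2, 1, 0]] and no punctuation tags, the two cheapest
    edges that do not form a cycle are kept, giving the tree edges {(0, 1), (1, 2)}.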
"""
pairs_to_distances = {}
uf = UnionFind(len(matrix))
for i_index, line in enumerate(matrix):
for j_index, dist in enumerate(line):
if poses[i_index] in ["''", ",", ".", ":", "``", "-LRB-", "-RRB-"]:
continue
if poses[j_index] in ["''", ",", ".", ":", "``", "-LRB-", "-RRB-"]:
continue
pairs_to_distances[(i_index, j_index)] = dist
edges = []
for (i_index, j_index), distance in sorted(pairs_to_distances.items(), key=lambda x: (x[1], -x[0][0])):
if uf.find(i_index) != uf.find(j_index):
uf.union(i_index, j_index)
edges.append((i_index, j_index))
return edges
def report_uuas_and_tikz(args, prediction_batches, dataset, split_name):
"""Computes the UUAS score for a dataset and writes tikz dependency latex.
From the true and predicted distances, computes a minimum spanning tree
of each, and computes the percentage overlap between edges in all
predicted and gold trees.
For the first 20 examples (if not the test set) also writes LaTeX to disk
for visualizing the gold and predicted minimum spanning trees.
All tokens with punctuation part-of-speech are excluded from the minimum
spanning trees.
Args:
prediction_batches: A sequence of batches of predictions for a data split
dataset: A sequence of batches of Observations
        split_name: the string naming the data split: {train,dev,test}
"""
uspan_total = 0
uspan_correct = 0
adjacent_correct = 0
rand_correct = 0
total_sents = 0
per_relation_stats = defaultdict(lambda: [0, 0])
per_relation_stats_adjacent = defaultdict(lambda: [0, 0])
uuas_per_sen = []
for prediction_batch, (data_batch, label_batch, length_batch, observation_batch) in tqdm(
zip(prediction_batches, dataset), desc="computing uuas"
):
for prediction, label, length, (observation, _) in zip(
prediction_batch, label_batch, length_batch, observation_batch
):
words = observation.sentence
poses = observation.xpos_sentence
length = int(length)
assert length == len(observation.sentence)
prediction = prediction[:length, :length]
label = label[:length, :length].cpu()
temp_gold_edges = list(
zip([int(x) - 1 for x in observation.index], [int(x) - 1 for x in observation.head_indices])
)
edge_to_relation = dict(zip([tuple(sorted(e)) for e in temp_gold_edges], observation.governance_relations))
gold_edges = prims_matrix_to_edges(label, words, poses)
pred_edges = prims_matrix_to_edges(prediction, words, poses)
rand_edges = prims_matrix_to_edges(np.random.rand(*prediction.shape), words, poses)
non_punct = (np.array(observation.upos_sentence) != "PUNCT").nonzero()[0]
adjacent_edges = [(non_punct[i], non_punct[i + 1]) for i in range(len(non_punct) - 1)]
pred_edges = set([tuple(sorted(e)) for e in pred_edges])
gold_edges = set([tuple(sorted(e)) for e in gold_edges])
adjacent_edges = set([tuple(sorted(e)) for e in adjacent_edges])
            if args.print_tikz and total_sents < 100:
print_tikz(args.output_dir, pred_edges, gold_edges, edge_to_relation, words, split_name)
num_correct = 0
for edge in gold_edges:
per_relation_stats[edge_to_relation[edge]][0] += 1
if edge in pred_edges:
num_correct += 1
per_relation_stats[edge_to_relation[edge]][1] += 1
num_correct_adjacent = 0
for edge in gold_edges:
per_relation_stats_adjacent[edge_to_relation[edge]][0] += 1
if edge in adjacent_edges:
num_correct_adjacent += 1
per_relation_stats_adjacent[edge_to_relation[edge]][1] += 1
num_correct_rand = len(set([tuple(sorted(e)) for e in rand_edges]).intersection(gold_edges))
            # compute error sets after filtering out adjacent edges
pred_in_adjacent = pred_edges.intersection(adjacent_edges)
pred_out_adjacent = pred_edges - adjacent_edges
gold_in_adjacent = gold_edges.intersection(adjacent_edges)
gold_out_adjacent = gold_edges - adjacent_edges
uspan_correct += num_correct
adjacent_correct += num_correct_adjacent
rand_correct += num_correct_rand
uspan_total += len(gold_edges)
total_sents += 1
uuas_per_sen.append(uspan_correct / uspan_total)
uuas = uspan_correct / float(uspan_total)
uuas_adjacent = adjacent_correct / float(uspan_total)
uuas_rand = rand_correct / float(uspan_total)
return (
uuas,
uuas_per_sen,
uuas_adjacent,
uuas_rand,
per_relation_stats,
per_relation_stats_adjacent,
)
class UnionFind:
"""
Naive UnionFind implementation for (slow) Prim's MST algorithm
Used to compute minimum spanning trees for distance matrices
"""
def __init__(self, n):
self.parents = list(range(n))
def union(self, i, j):
if self.find(i) != self.find(j):
i_parent = self.find(i)
self.parents[i_parent] = j
def find(self, i):
i_parent = i
while True:
if i_parent != self.parents[i_parent]:
i_parent = self.parents[i_parent]
else:
break
return i_parent
def custom_pad(batch_observations):
"""Pads sequences with 0 and labels with -1; used as collate_fn of DataLoader.
Loss functions will ignore -1 labels.
If labels are 1D, pads to the maximum sequence length.
If labels are 2D, pads all to (maxlen,maxlen).
Args:
batch_observations: A list of observations composing a batch
Return:
A tuple of:
input batch, padded
label batch, padded
lengths-of-inputs batch, padded
Observation batch (not padded)
"""
seqs = [x[0].sentence for x in batch_observations]
lengths = torch.tensor([len(x) for x in seqs], device="cpu")
label_shape = batch_observations[0][1].shape
maxlen = int(max(lengths))
label_maxshape = [maxlen for x in label_shape]
labels = [-torch.ones(*label_maxshape, device="cpu") for x in seqs]
for index, x in enumerate(batch_observations):
length = x[1].shape[0]
if len(label_shape) == 1:
labels[index][:length] = x[1]
elif len(label_shape) == 2:
labels[index][:length, :length] = x[1]
else:
raise ValueError("Labels must be either 1D or 2D right now; got either 0D or >3D")
labels = torch.stack(labels)
return seqs, labels, lengths, batch_observations
def has_numbers(inputString):
return any(char.isdigit() for char in inputString)
def print_tikz(reporting_root, prediction_edges, gold_edges, edge_to_relation, words, split_name):
""" Turns edge sets on word (nodes) into tikz dependency LaTeX. """
words = list(words)
for i, word in enumerate(words):
        word = word.replace("$", "\\$").replace("&", "+").replace("%", "\\%")
if has_numbers(word):
word = f"${word}$"
words[i] = word
with open(os.path.join(reporting_root, "visualize.tikz"), "a") as fout:
string = "\\begin{figure}"
string += "\\resizebox{\\textwidth}{!}{" + "\n"
string += """\\begin{dependency}[edge unit distance=5ex]
\\begin{deptext}[column sep=2cm]
"""
string += "\\& ".join([x for x in words]) + " \\\\" + "\n"
string += "\\end{deptext}" + "\n"
for i_index, j_index in gold_edges:
string += "\\depedge{{{}}}{{{}}}{{{}}}\n".format(
i_index + 1, j_index + 1, edge_to_relation.get((i_index, j_index), ".")
)
for i_index, j_index in prediction_edges:
string += "\\depedge[edge style={{red!60!}}, edge below]{{{}}}{{{}}}{{{}}}\n".format(
i_index + 1, j_index + 1, edge_to_relation.get((i_index, j_index), "wrong")
)
string += "\\end{dependency}\n"
string += "}\n"
string += "\\end{figure}"
string += "\\clearpage"
fout.write("\n\n")
fout.write(string)
def convert_to_prediction(
tokenizer,
word_piece_pmi,
raw_sen,
pmi_clamp_zero,
word_piece_agg_space,
word_piece_agg_type,
no_prefix_space=False,
):
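    """
    Aggregates a word-piece-level PMI matrix into a word-level matrix by aligning
    the tokenizer's word pieces with whitespace-separated words (averaging or
    taking the min/max over pieces, in log or exp space), then negates it to get a
    distance-like matrix; the diagonal is set to -inf and each right-adjacent pair
    receives a tiny negative offset so that ties default to right branching.
    """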
if pmi_clamp_zero:
word_piece_pmi.clamp_(min=0)
raw_words = raw_sen.split()
if word_piece_pmi.size(0) == len(raw_words):
word_pmi = word_piece_pmi
else:
if word_piece_agg_space == "exp":
word_piece_pmi = word_piece_pmi.exp()
temp_word_pmi = torch.zeros(word_piece_pmi.size(0), len(raw_words))
word_piece_pt = 0
word_to_num_pieces = []
for word_pt, word in enumerate(raw_sen.split()):
if "roberta" in tokenizer.__class__.__name__.lower():
tokens = tokenizer.tokenize(word, add_prefix_space=not (no_prefix_space and word_pt == 0))
else:
tokens = tokenizer.tokenize(word)
word_to_num_pieces.append(len(tokens))
if len(tokens) > 1:
if word_piece_agg_type == "avg":
temp_word_pmi[:, word_pt] = word_piece_pmi[:, word_piece_pt : word_piece_pt + len(tokens)].mean(
dim=1
)
elif word_piece_agg_type == "max":
temp_word_pmi[:, word_pt] = word_piece_pmi[:, word_piece_pt : word_piece_pt + len(tokens)].max(
dim=1
)[0]
elif word_piece_agg_type == "min":
temp_word_pmi[:, word_pt] = word_piece_pmi[:, word_piece_pt : word_piece_pt + len(tokens)].min(
dim=1
)[0]
else:
temp_word_pmi[:, word_pt] = word_piece_pmi[:, word_piece_pt]
word_piece_pt += len(tokens)
word_pmi = torch.zeros(len(raw_words), len(raw_words))
word_piece_pt = 0
for word_pt, num_pieces in enumerate(word_to_num_pieces):
if num_pieces > 1:
word_pmi[word_pt] = temp_word_pmi[word_piece_pt : word_piece_pt + num_pieces].mean(dim=0)
else:
word_pmi[word_pt] = temp_word_pmi[word_piece_pt]
word_piece_pt += num_pieces
if word_piece_agg_space == "exp":
word_piece_pmi = word_piece_pmi.log()
word_pmi = word_pmi.log()
assert word_pmi.size(0) == len(raw_words), "Doesn't type check after alignment"
prediction = -word_pmi # convert to distance-like metric
prediction = prediction.numpy().astype("double")
for i in range(len(raw_words)):
prediction[i][i] = float("-inf")
for i in range(len(raw_words) - 1):
prediction[i][i + 1] -= 1e-4 # default to right branching
return prediction
def convert_word_piece_pmi_to_predictions(args, word_piece_pmis):
with open(os.path.join(args.raw_data_dir, f"raw.{args.split}.txt"), "r") as f:
raw_sens = f.readlines()
raw_sens = [s.strip() for s in raw_sens]
tokenizer = AutoTokenizer.from_pretrained(args.model_name)
predictions = []
for word_piece_pmi, raw_sen in tqdm(zip(word_piece_pmis, raw_sens), total=len(raw_sens), desc="aligning word pmi"):
prediction = convert_to_prediction(
tokenizer,
word_piece_pmi,
raw_sen,
args.pmi_clamp_zero,
args.word_piece_agg_space,
args.word_piece_agg_type,
args.no_prefix_space,
)
prediction = np.expand_dims(prediction, axis=0) # stupid hack for the pipeline
predictions.append(prediction)
return predictions
Observations = namedtuple(
"Observations",
[
"index",
"sentence",
"lemma_sentence",
"upos_sentence",
"xpos_sentence",
"morph",
"head_indices",
"governance_relations",
"secondary_relations",
"extra_info",
"embeddings",
],
)
if __name__ == "__main__":
args = argp.parse_args()
# Preparing output path
out_path = os.path.join(args.output_dir)
os.makedirs(out_path, exist_ok=True)
if os.path.isfile(os.path.join(out_path, f"visualize.tikz")):
os.remove(os.path.join(out_path, f"visualize.tikz"))
args.output_dir = out_path
dataset_cache_path = os.path.join(args.raw_data_dir, f"{args.split}.observations.dataset")
if os.path.isfile(dataset_cache_path):
dataset = torch.load(dataset_cache_path)
else:
observations_path = os.path.join(args.raw_data_dir, f"ptb3-wsj-{args.split}.conllx")
observations = load_conll_dataset(observations_path)
task = ParseDistanceTask()
dataset = ObservationIterator(observations, task)
torch.save(dataset, dataset_cache_path)
# Loading test set data
dataloader = DataLoader(dataset, batch_size=1, collate_fn=custom_pad, shuffle=False)
word_piece_pmi_path = args.word_piece_pmi_path
word_piece_pmis = torch.load(word_piece_pmi_path)
predictions = convert_word_piece_pmi_to_predictions(args, word_piece_pmis)
torch.save(predictions, os.path.join(args.output_dir, "converted_predictions.pt"))
# Heavy-lifting for the evaluation is done in this function
(
uuas,
uuas_per_sen,
uuas_adjacent,
uuas_rand,
per_relation_stats,
per_relation_stats_adjacent,
) = report_uuas_and_tikz(args, predictions, dataloader, args.split)
print("UUAS per relation")
per_relation_analysis = sorted(per_relation_stats.items(), key=lambda x: x[1][0], reverse=True)
per_relation_analysis_adjacent = sorted(per_relation_stats_adjacent.items(), key=lambda x: x[1][0], reverse=True)
data = []
for i in range(len(per_relation_analysis)):
acc_mi = per_relation_analysis[i][1][1] / per_relation_analysis[i][1][0]
acc_adjacent = per_relation_analysis_adjacent[i][1][1] / per_relation_analysis_adjacent[i][1][0]
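        # Log odds ratio of our accuracy vs. the adjacent-word baseline, with a Wald
        # standard error sqrt(1/a + 1/b + 1/c + 1/d) over the four correct/incorrect counts.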
try:
odds_ratio = math.log(acc_mi / (1 - acc_mi)) - math.log(acc_adjacent / (1 - acc_adjacent))
se = math.sqrt(
1 / per_relation_analysis[i][1][1]
+ 1 / per_relation_analysis_adjacent[i][1][1]
+ 1 / (per_relation_analysis[i][1][0] - per_relation_analysis[i][1][1])
+ 1 / (per_relation_analysis_adjacent[i][1][0] - per_relation_analysis_adjacent[i][1][1])
)
except:
odds_ratio = 0
se = 0
data.append(
(
per_relation_analysis[i][0],
per_relation_analysis[i][1][0],
per_relation_analysis[i][1][1] / per_relation_analysis[i][1][0],
per_relation_analysis_adjacent[i][1][1] / per_relation_analysis_adjacent[i][1][0],
odds_ratio,
1.96 * se,
)
)
data = sorted(data, key=lambda x: x[2] - x[3], reverse=True)
print("Relation, Count, Our, Adjacent, Odds Ratio, CI")
for relation, count, num1, num2, log_odds_ratio, ci in data:
print(
f"{relation:10},",
f"{count:4},",
f"{num1:6.2%},",
f"{num2:6.2%},",
f"{log_odds_ratio:6.2f},",
f"{ci:6.2f}",
)
# bootstrap confidence interval
all_bootstraps = []
for i in range(int(1e4)):
bootstrap_uuas = np.mean(np.random.choice(uuas_per_sen, size=(len(uuas_per_sen), ), replace=True))
all_bootstraps.append(bootstrap_uuas)
# Printing final report
print(f"Bootsrap mean {np.mean(all_bootstraps)}, std +- {np.std(all_bootstraps)}")
print(f"UUAS: {uuas}")
print(f"Right Branching UUAS: {uuas_adjacent}")
print(f"Random Tree UUAS: {uuas_rand}")
with open(os.path.join(args.output_dir, "uuas.txt"), "w") as f:
f.write(f"Right Branching UUAS: {uuas_adjacent:.6f}" + "\n")
f.write(f"UUAS: {uuas:.6f}" + "\n")
torch.save(args, os.path.join(out_path, "args.pt"))
|
{"hexsha": "c88899ed04bc994c64cb7f07141f0a9f3092df2b", "size": 23464, "ext": "py", "lang": "Python", "max_stars_repo_path": "unsupervised_parsing/mutual_info_eval.py", "max_stars_repo_name": "tatsu-lab/mlm_inductive_bias", "max_stars_repo_head_hexsha": "2d99e2477293036949ba356c88513729244dc1f9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2021-04-14T22:06:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-12T19:41:12.000Z", "max_issues_repo_path": "unsupervised_parsing/mutual_info_eval.py", "max_issues_repo_name": "tatsu-lab/mlm_inductive_bias", "max_issues_repo_head_hexsha": "2d99e2477293036949ba356c88513729244dc1f9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "unsupervised_parsing/mutual_info_eval.py", "max_forks_repo_name": "tatsu-lab/mlm_inductive_bias", "max_forks_repo_head_hexsha": "2d99e2477293036949ba356c88513729244dc1f9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-06-06T09:43:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-20T00:40:42.000Z", "avg_line_length": 37.0679304897, "max_line_length": 119, "alphanum_fraction": 0.6137913399, "include": true, "reason": "import numpy", "num_tokens": 5527}
|
subroutine GetTime2(net,n,t_start,t_stop)
c
c read time interval to estimate a RATE change
c
c net is input for the time format
c n is an index of the rate change
c t_start and _stop are output: time span, in days since 1960, to analyze data
c
character*3 net
character*80 string1,string2
double precision fjul1, fjul2, t_start, t_stop,day1,day2
if (net .eq. 'otr') then
      print*,'rate change interval',n,' year day_of_yr year day_of_yr'
print*,' "day_of_yr" may be decimal day'
read(5,*)yr1,fjul1,yr2,fjul2
write(25,*)yr1,fjul1,yr2,fjul2
call jul2day(int(yr1),int(fjul1),iday)
c days since 1960
t_start=dble(iday)+fjul1-int(fjul1)
call jul2day(int(yr2),int(fjul2),iday)
c days since 1960
t_stop=dble(iday)+fjul2-int(fjul2)
end if
if (net .eq. 'otx' ) then
      print*,' rate change interval',n,' year mn da year mn da'
print*,' "da" may be a decimal day'
read(5,*) yr1,mn1,day1,yr2,mn2,day2
write(25,*) yr1,mn1,day1,yr2,mn2,day2
call date2day(int(yr1),int(mn1),int(day1),iday)
t_start=dble(iday)+day1-int(day1)
call date2day(int(yr2),int(mn2),int(day2),iday)
t_stop=dble(iday)+day2-int(day2)
end if
if (net .eq. 'otd' ) then
      print*,' rate change interval',n,' YearMmnDa YearMmnDa'
print*,' "da" may be a decimal day'
read(5,*) day1, day2
write(25,*) day1, day2
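c     e.g. 20140307.5 decodes to 2014 Mar 07 at 12:00 (decimal day)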
yr1=float(int(day1/10000.))
mn1=int((day1-10000*yr1)/100.)
day1=day1-10000.*yr1- 100.0*mn1
call date2day(int(yr1),int(mn1),int(day1),iday)
t_start=dble(iday)+day1-int(day1)
yr2=float(int(day2/10000.))
mn2=int((day2-10000*yr2)/100.)
day2=day2-10000.*yr2- 100.0*mn2
call date2day(int(yr2),int(mn2),int(day2),iday)
t_stop=dble(iday)+day2-int(day2)
end if
if (net .eq. 'mjd' ) then
      print*,' rate change interval',n,' MJD MJD'
print*,' May be decimal days'
read(5,*) day1, day2
write(25,*) day1, day2
t_start=day1-36933.0
t_stop=day2-36933.0
end if
if (net .eq. 'gmt' ) then
      print*,' rate change interval',n,' in GMT time format'
print*,' two entries required'
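c     e.g. 2014-03-07T12:30:15.00 matches format 100 below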
read(5,*) string1, string2
write(25,*) string1, string2
read(string1,100)iyr,imn,ida,ihr,mn,sec
100 format(i4,1x,i2,1x,i2,1x,i2,1x,i2,1x,f5.2)
call date2day(iyr,imn,ida,iday)
t_start=dble(iday)+dble(ihr)/24.0 + dble(mn)/(24.0*60.0) +
& dble(sec)/(24.0*3600.0)
read(string2,100)iyr,imn,ida,ihr,mn,sec
call date2day(iyr,imn,ida,iday)
t_stop=dble(iday)+dble(ihr)/24.0 + dble(mn)/(24.0*60.0) +
& dble(sec)/(24.0*3600.0)
end if
return
end
subroutine GetTime3(net,n,t_start)
c
c read time to estimate an OFFSET
c
c net is input for the time format
c n is an index of the rate change
c t_start day since 1960
c
character*3 net
character*80 string1
double precision fjul1, t_start,day1
if (net .eq. 'otr') then
print*,' Offset time',n,' year day_of_yr '
print*,' "day_of_yr" may be decimal day'
read(5,*)yr1,fjul1
write(25,*)yr1,fjul1
call jul2day(int(yr1),int(fjul1),iday)
c days since 1960
t_start=dble(iday)+fjul1-int(fjul1)
end if
if (net .eq. 'otx' ) then
print*,' Offset time',n,' year mn da '
print*,' "da" may be a decimal day'
read(5,*) yr1,mn1,day1
write(25,*) yr1,mn1,day1
call date2day(int(yr1),int(mn1),int(day1),iday)
t_start=dble(iday)+day1-int(day1)
end if
if (net .eq. 'otd' ) then
print*,' Offset Time',n,' YearMmnDa '
print*,' "da" may be a decimal day'
read(5,*) day1
write(25,*) day1
yr1=float(int(day1/10000.))
mn1=int((day1-10000*yr1)/100.)
day1=day1-10000.*yr1- 100.0*mn1
call date2day(int(yr1),int(mn1),int(day1),iday)
t_start=dble(iday)+day1-int(day1)
end if
if (net .eq. 'mjd' ) then
print*,' Offset time',n,' MJD '
print*,' May be decimal days'
read(5,*) day1
write(25,*) day1
t_start=day1-36933.0
end if
if (net .eq. 'gmt' ) then
print*,' Offset time',n,' in GMT time format'
read(5,*) string1
write(25,*) string1
read(string1,100)iyr,imn,ida,ihr,mn,sec
100 format(i4,1x,i2,1x,i2,1x,i2,1x,i2,1x,f5.2)
call date2day(iyr,imn,ida,iday)
t_start=dble(iday)+dble(ihr)/24.0 + dble(mn)/(24.0*60.0) +
& dble(sec)/(24.0*3600.0)
end if
return
end
subroutine GetTime4(net,n,t_start)
c
c read time to estimate an exponential/Omori trend
c
c net is input for the time format
c n is an index of the rate change
c t_start day since 1960
c
character*3 net
character*80 string1
double precision fjul1, t_start,day1
if (net .eq. 'otr') then
print*,' Exponential time',n,' year day_of_yr '
print*,' "day_of_yr" may be decimal day'
read(5,*)yr1,fjul1
write(25,*)yr1,fjul1
call jul2day(int(yr1),int(fjul1),iday)
c days since 1960
t_start=dble(iday)+fjul1-int(fjul1)
end if
if (net .eq. 'otx' ) then
print*,' Exponential time',n,' year mn da '
print*,' "da" may be a decimal day'
read(5,*) yr1,mn1,day1
write(25,*) yr1,mn1,day1
call date2day(int(yr1),int(mn1),int(day1),iday)
t_start=dble(iday)+day1-int(day1)
end if
if (net .eq. 'otd' ) then
print*,' Exponential Time',n,' YearMmnDa '
print*,' "da" may be a decimal day'
read(5,*) day1
write(25,*) day1
yr1=float(int(day1/10000.))
mn1=int((day1-10000*yr1)/100.)
day1=day1-10000.*yr1- 100.0*mn1
call date2day(int(yr1),int(mn1),int(day1),iday)
t_start=dble(iday)+day1-int(day1)
end if
if (net .eq. 'mjd' ) then
print*,' Exponential time',n,' MJD '
print*,' May be decimal days'
read(5,*) day1
write(25,*) day1
t_start=day1-36933.0
end if
if (net .eq. 'gmt' ) then
print*,' Exponential time',n,' in GMT time format'
read(5,*) string1
write(25,*) string1
read(string1,100)iyr,imn,ida,ihr,mn,sec
100 format(i4,1x,i2,1x,i2,1x,i2,1x,i2,1x,f5.2)
call date2day(iyr,imn,ida,iday)
t_start=dble(iday)+dble(ihr)/24.0 + dble(mn)/(24.0*60.0) +
& dble(sec)/(24.0*3600.0)
end if
return
end
|
{"hexsha": "6247f705ce756cccfa1d9f50f34e1872a3aca20e", "size": 6704, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/GetTime.f", "max_stars_repo_name": "langbein-usgs/est_noise", "max_stars_repo_head_hexsha": "c29e13c3f900310f20cbc808ba30f8b1daade7b3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-12-18T01:04:47.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-13T08:22:37.000Z", "max_issues_repo_path": "src/GetTime.f", "max_issues_repo_name": "langbein-usgs/est_noise", "max_issues_repo_head_hexsha": "c29e13c3f900310f20cbc808ba30f8b1daade7b3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/GetTime.f", "max_forks_repo_name": "langbein-usgs/est_noise", "max_forks_repo_head_hexsha": "c29e13c3f900310f20cbc808ba30f8b1daade7b3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.8585858586, "max_line_length": 79, "alphanum_fraction": 0.5730906921, "num_tokens": 2481}
|
\chapter{Acknowledgements}
This work concludes a long and at times difficult journey.
I express my deep gratitude to my supervisors, Nicolas Couffin, for his guidance and knowledge, and Stefano Calzavara, for his patience and valuable comments. My thanks extend to Quarkslab for a challenging internship in an office full of brilliant and pleasant colleagues.
To the old and new friends I met at Ca' Foscari, I am truly grateful for the many \enquote{intelligent} and meaningful conversations, countless moments of plain fun, and unforgettable days and nights of hacking (and pizza) with \emph{c00kies@venice}.
For their love, understanding, and financial support, I give my heartfelt thanks to my parents, Michele and Emilia.
|
{"hexsha": "918f380896b898455a3614a52861b9a966c7ceee", "size": 730, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "content/ack.tex", "max_stars_repo_name": "integeruser/yet-another-latex-thesis-template", "max_stars_repo_head_hexsha": "76ada9c9196a47269f22d1043e5d51c61e9ac4b5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-03-14T15:02:12.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-08T00:49:33.000Z", "max_issues_repo_path": "content/ack.tex", "max_issues_repo_name": "integeruser/yet-another-latex-thesis-template", "max_issues_repo_head_hexsha": "76ada9c9196a47269f22d1043e5d51c61e9ac4b5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "content/ack.tex", "max_forks_repo_name": "integeruser/yet-another-latex-thesis-template", "max_forks_repo_head_hexsha": "76ada9c9196a47269f22d1043e5d51c61e9ac4b5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 81.1111111111, "max_line_length": 273, "alphanum_fraction": 0.8068493151, "num_tokens": 162}
|
using Graphs
using AtomicLevels
type LevelVertex
config::Config
term::Term
calc_props::Dict
exp_eng
wfn
end
import Graphs.attributes
function attributes(l::LevelVertex, g::AbstractGraph)
Dict{UTF8String,Any}("label" => "$(l.config) $(l.term)")
end
LevelVertex(config::Config, term::Term,
exp_eng = Inf;
kwargs...) = LevelVertex(config, term, Dict(kwargs), exp_eng, nothing)
level_graph_init = () -> graph(Vector{LevelVertex}(),Vector{Edge{LevelVertex}}())
level_graph = level_graph_init()
import Base.print, Base.show
function print(io::IO, lg::typeof(level_graph_init()))
map(reverse(topological_sort_by_dfs(level_graph))) do l
println(l.config, " ", l.term)
end
end
function add_level!(dep::Union{LevelVertex,Void}, args...; kwargs...)
s = LevelVertex(args...; kwargs...)
global level_graph
add_vertex!(level_graph, s)
dep != nothing && add_edge!(level_graph, s, dep)
s
end
add_level!(args...; kwargs...) = add_level!(nothing, args...; kwargs...)
function add_levels!(dep::Union{LevelVertex,Void}, config::Config, args...; kwargs...)
ts = terms(config)
for i in eachindex(ts)
a = copy(args)
if length(a) > 0 && length(a[1]) > 1
a = (a[1][i],a[2:end]...)
end
dep = add_level!(dep, config, ts[i], a...; kwargs...)
end
dep
end
add_levels!(args...; kwargs...) = add_levels!(nothing, args...; kwargs...)
function clear_levels!()
global level_graph
level_graph = level_graph_init()
end
# Used to perform calculation using MPI codes if that is
# requested. However, certain level calculations may be explicitly
# marked as unsuitable for MPI codes (e.g. gst of He II), so these are
# masked.
function maybe_mpi_run(g::Function, d::Dict)
if get(d, :nompi, false)
delete!(d, :nompi)
delete!(d, :mpi_np)
g(d)
elseif (np = get(d, :mpi_np, 1)) > 1
delete!(d, :mpi_np)
init_mpi(np, joinpath(homedir(), "tmp_mpi")) do
g(d)
end
else
g(d)
end
end
function get_and_delete!(d::Dict, sym)
v = d[sym]
delete!(d, sym)
v
end
function get_and_delete!(d::Dict, args...)
map(args) do a
get_and_delete!(d, a)
end
end
get_wfn(wfn::Union{AbstractString,Void}) = wfn
get_wfn(wfn::Vector) = first(wfn)
function calc_level(f::Function, level::LevelVertex, wfn; kwargs...)
wfn = maybe_mpi_run((d) -> f(level, d, get_wfn(wfn)),
merge(Dict(kwargs), level.calc_props))
println(wfn)
assert(isfile(wfn))
wfn
end
# Loop over all levels in topological order, so that the output of one
# calculation can be used as input for the next calculation that requests it.
function calc_levels(f::Function; kwargs...)
global level_graph
map(reverse(topological_sort_by_dfs(level_graph))) do l
e = out_edges(l, level_graph)
if length(e) > 0
wfn = first(e).target.wfn
else
wfn = nothing
end
l.wfn = calc_level(f, l, wfn; kwargs...)
end
end
function calc_levels_atsp(Z; kwargs...)
calc_levels(;kwargs...) do l,d,wfn0
ncorr = get_and_delete!(d, :ncorr)
hf_mchf_bp(l.config, l.term, Z, ncorr, wfn0; overwrite = false, d...)[2]
end
end
function plot_level_graph(filename)
stdin, proc = open(`neato -T$(splitext(filename)[2][2:end]) -o "$filename"`, "w")
to_dot(AST.level_graph, stdin)
close(stdin)
filename
end
export add_level!, add_levels!, clear_levels!, calc_levels, calc_levels_atsp, plot_level_graph
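# Hypothetical usage sketch (not part of the original file; `ground_cfg` and
# `excited_cfg` stand for AtomicLevels Config values, Z for the nuclear charge):
#
#   clear_levels!()
#   gst = add_levels!(nothing, ground_cfg)   # levels of the ground configuration
#   add_levels!(gst, excited_cfg)            # excited levels reuse gst's wavefunction
#   calc_levels_atsp(Z; ncorr=0)             # run hf_mchf_bp for every level in order
#   plot_level_graph("levels.pdf")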
|
{"hexsha": "02202ae560d03ee7bfb6980055afcac393271697", "size": 3579, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/level_graph.jl", "max_stars_repo_name": "mansour2014/AST", "max_stars_repo_head_hexsha": "fcd0026958d0006dba3bde2173e416d683a16201", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-07-21T14:01:42.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-19T12:03:35.000Z", "max_issues_repo_path": "src/level_graph.jl", "max_issues_repo_name": "mansour2014/AST", "max_issues_repo_head_hexsha": "fcd0026958d0006dba3bde2173e416d683a16201", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2016-03-03T21:11:31.000Z", "max_issues_repo_issues_event_max_datetime": "2016-03-20T21:33:51.000Z", "max_forks_repo_path": "src/level_graph.jl", "max_forks_repo_name": "mansour2014/AST", "max_forks_repo_head_hexsha": "fcd0026958d0006dba3bde2173e416d683a16201", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-07T09:59:55.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-07T09:59:55.000Z", "avg_line_length": 26.9097744361, "max_line_length": 94, "alphanum_fraction": 0.6362112322, "num_tokens": 1007}
|
[STATEMENT]
lemma (in merkle_interface) merkle_interfaceD: "merge_on UNIV h bo m"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. merge_on UNIV h bo m
[PROOF STEP]
using merkle_interface_aux[of h bo m, symmetric]
[PROOF STATE]
proof (prove)
using this:
merge_on UNIV h bo m = merkle_interface h bo m
goal (1 subgoal):
1. merge_on UNIV h bo m
[PROOF STEP]
by simp unfold_locales
|
{"llama_tokens": 158, "file": "ADS_Functor_Merkle_Interface", "length": 2}
|
import numpy as np
import pytest
from py3nj import clebsch_gordan
rng = np.random.RandomState(0)
# reference values from
# https://en.wikipedia.org/wiki/Table_of_Clebsch%E2%80%93Gordan_coefficients
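# Note: the entries below use doubled angular momenta so that they can be passed
# as integers, i.e. (two_j1, two_j2, two_j3, two_m1, two_m2, two_m3); for example
# (1, 1, 2, 1, 1, 2) stands for <1/2 1/2; 1/2 1/2 | 1 1> = 1.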
CG = (
# j1 = 1/2, j2 = 1/2
((1, 1, 2, 1, 1, 2), 1),
((1, 1, 2, -1, -1, -2), 1),
((1, 1, 2, 1, -1, 0), np.sqrt(0.5)),
((1, 1, 2, -1, 1, 0), np.sqrt(0.5)),
((1, 1, 0, 1, -1, 0), np.sqrt(0.5)),
((1, 1, 0, -1, 1, 0), -np.sqrt(0.5)),
# j1 = 1, j2 = 1/2
((2, 2, 4, 2, 0, 2), np.sqrt(1.0 / 2)),
)
def test_wigner3j_value():
# scalar test
for three_j, expected in CG:
print(three_j)
three_j = np.array(three_j)
actual = clebsch_gordan(*three_j)
assert np.allclose(actual, expected)
# test vector
three_j = np.array([thr for thr, _ in CG]).T
expected = np.array([value for _, value in CG]).T
actual = clebsch_gordan(*three_j)
assert np.allclose(actual, expected)
|
{"hexsha": "9dc7cf5a2afd529caeed7a7d877a4694ad635b26", "size": 956, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_cg.py", "max_stars_repo_name": "fujiisoup/py3nj", "max_stars_repo_head_hexsha": "15f179ecc21033022b05e27c681a1512e2b0e604", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2019-07-08T19:58:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-22T09:22:11.000Z", "max_issues_repo_path": "tests/test_cg.py", "max_issues_repo_name": "fujiisoup/py3nj", "max_issues_repo_head_hexsha": "15f179ecc21033022b05e27c681a1512e2b0e604", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2019-05-24T09:31:55.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-22T00:26:01.000Z", "max_forks_repo_path": "tests/test_cg.py", "max_forks_repo_name": "fujiisoup/py3nj", "max_forks_repo_head_hexsha": "15f179ecc21033022b05e27c681a1512e2b0e604", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-11-08T16:44:07.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-21T10:13:34.000Z", "avg_line_length": 25.8378378378, "max_line_length": 76, "alphanum_fraction": 0.5627615063, "include": true, "reason": "import numpy", "num_tokens": 390}
|
import numpy as np
def half_hour(t):
"""
Computes the half-hour of timestamp t as integer between 0 and 47.
"""
return 2 * t.hour + t.minute // 30
def standardize(x, mean, std):
"""
Standardizes the input x by subtracting the mean and dividing by the standard deviation.
"""
return (x - mean) / np.maximum(1e-6, std)
def min_max_norm(x, min_x, max_x):
"""
Computes the min-max-norm of input x with minimum min_x and maximum max_x.
"""
return (x - min_x) / np.maximum(1e-6, (max_x - min_x))
def inv_min_max_norm(x, min_x, max_x):
"""
Computes the inverse min-max-norm of input x with minimum min_x and maximum max_x.
"""
return x * (max_x - min_x) + min_x
def running_nanmean(x, window_size=48):
"""
Computes a running mean over the time series x with a window size of window_size.
Nans are ignored for the calculation of the mean.
"""
half_ws = window_size // 2
N = len(x)
x_hat = np.zeros_like(x)
for i in range(N):
x_hat[i] = np.nanmean(x[max(0, i - half_ws):min(N, i + half_ws + 1)])
return x_hat
def interpolate_nans(x):
"""
Linearly interpolates nans in the time series x and returns a time series without nans.
"""
if x.ndim == 1:
nans = np.isnan(x)
x[nans] = np.interp(np.where(nans)[0], np.where(~nans)[0], x[~nans])
return x
else:
for i in range(x.shape[1]):
nans = np.isnan(x[:, i])
x[nans, i] = np.interp(np.where(nans)[0], np.where(~nans)[0], x[~nans, i])
return x
def weighted_nanmean(x, weights, axis=None):
"""
Computes a weighted mean over the time series x with each entry weighted by weights.
Nans are ignored for the calculation of the mean.
"""
weighted_mean = np.nansum(weights * x, axis=axis)
weighted_mean /= np.sum(weights * ~np.isnan(x), axis=axis)
return weighted_mean
def weighted_nanvar(x, weights):
"""
Computes a weighted variance over the time series x with each entry weighted by weights.
Nans are ignored for the calculation of the variance.
"""
weighted_var = np.nansum(weights * (x - weighted_nanmean(x, weights))**2)
weighted_var /= np.sum(weights * ~np.isnan(x))
return weighted_var
def round_floats(o):
"""
    Rounds the float o (recursing into dicts, lists and tuples) to 6 decimal places.
"""
if isinstance(o, float):
return round(o, 6)
if isinstance(o, dict):
return {k: round_floats(v) for k, v in o.items()}
if isinstance(o, (list, tuple)):
return [round_floats(x) for x in o]
return o
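if __name__ == "__main__":
    # Minimal sanity-check sketch (not part of the original module): exercises a
    # few of the helpers above on tiny hand-made inputs.
    x = np.array([1.0, np.nan, 3.0])
    assert np.allclose(interpolate_nans(x.copy()), [1.0, 2.0, 3.0])
    assert abs(weighted_nanmean(x, np.ones_like(x)) - 2.0) < 1e-9
    print(round_floats({"mean": 2.0000004}))  # -> {'mean': 2.0}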
|
{"hexsha": "e1a0e349281b453f5fd0a8f42259a2eddabe90b5", "size": 2612, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils.py", "max_stars_repo_name": "kitteltom/probabilistic-energy-forecasting", "max_stars_repo_head_hexsha": "6ebd4130e42d1b0808e5e9499acf3fb401b47315", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-11-24T15:52:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-16T18:26:15.000Z", "max_issues_repo_path": "utils.py", "max_issues_repo_name": "kitteltom/probabilistic-energy-forecasting", "max_issues_repo_head_hexsha": "6ebd4130e42d1b0808e5e9499acf3fb401b47315", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils.py", "max_forks_repo_name": "kitteltom/probabilistic-energy-forecasting", "max_forks_repo_head_hexsha": "6ebd4130e42d1b0808e5e9499acf3fb401b47315", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.7032967033, "max_line_length": 92, "alphanum_fraction": 0.6248085758, "include": true, "reason": "import numpy", "num_tokens": 712}
|
# This file is based on https://github.com/onnx/onnx-tensorflow
import json
import warnings
import sys
import onnx
from onnx import TensorProto
from onnx import numpy_helper
from onnx import shape_inference
from onnx import ModelProto
from onnx import GraphProto
from onnx import helper
# from onnx_tf.common import data_type
from pb_wrapper import OnnxNode
from pb_wrapper import OnnxGraph
from node_converter import new_opname
from node_converter import new_create_op
from node_converter import tensor_proto_to_tensor
from node_converter import onnx_node_to_ln_op
from type_converter import dtype_onnx2tl
def add_value_info_for_constants(model : onnx.ModelProto):
"""
Currently onnx.shape_inference doesn't use the shape of initializers, so add
that info explicitly as ValueInfoProtos.
Mutates the model.
Args:
model: The ModelProto to update.
"""
# All (top-level) constants will have ValueInfos before IRv4 as they are all inputs
if model.ir_version < 4:
return
def add_const_value_infos_to_graph(graph : onnx.GraphProto):
inputs = {i.name for i in graph.input}
existing_info = {vi.name: vi for vi in graph.value_info}
for init in graph.initializer:
# Check it really is a constant, not an input
if init.name in inputs:
continue
# The details we want to add
elem_type = init.data_type
shape = init.dims
# Get existing or create new value info for this constant
vi = existing_info.get(init.name)
if vi is None:
vi = graph.value_info.add()
vi.name = init.name
# Even though it would be weird, we will not overwrite info even if it doesn't match
tt = vi.type.tensor_type
if tt.elem_type == onnx.TensorProto.UNDEFINED:
tt.elem_type = elem_type
if not tt.HasField("shape"):
# Ensure we set an empty list if the const is scalar (zero dims)
tt.shape.dim.extend([])
for dim in shape:
tt.shape.dim.add().dim_value = dim
# Handle subgraphs
for node in graph.node:
for attr in node.attribute:
# Ref attrs refer to other attrs, so we don't need to do anything
if attr.ref_attr_name != "":
continue
if attr.type == onnx.AttributeProto.GRAPH:
add_const_value_infos_to_graph(attr.g)
if attr.type == onnx.AttributeProto.GRAPHS:
for g in attr.graphs:
add_const_value_infos_to_graph(g)
return add_const_value_infos_to_graph(model.graph)
def get_model(onnx_model):
if not isinstance(onnx_model, ModelProto) and not isinstance(onnx_model, GraphProto):
raise TypeError('get_model() only accepts ModelProto or GraphProto '
'incorrect type: {}'.format(type(onnx_model)))
if isinstance(onnx_model, GraphProto):
onnx_model = helper.make_model(onnx_model)
add_value_info_for_constants(onnx_model)
onnx_model = shape_inference.infer_shapes(onnx_model)
# onnx.checker.check_model(onnx_model)
onnx_graph = onnx_model.graph
# print(onnx_graph)
# exit()
if onnx_graph.initializer:
input_tensors = onnx_initializer_to_data_tensors(
            onnx_graph.initializer)
initialized = {init.name for init in onnx_graph.initializer}
else:
input_tensors = []
initialized = set()
# creating empty tensors for currently unknown inputs
for value_info in onnx_graph.input:
if value_info.name in initialized:
continue
shape = list(
d.dim_value if (d.dim_value > 0 and d.dim_param == "") else None
for d in value_info.type.tensor_type.shape.dim)
tensor = {'name': value_info.name,
'dtype': dtype_onnx2tl(value_info.name, value_info.type.tensor_type.elem_type),
'dims': shape,
'data': None}
input_tensors.append((value_info.name, tensor))
value_infos = {vi.name: vi for vi in onnx_graph.value_info}
for vi in onnx_graph.output:
value_infos[vi.name] = vi
tensor_dict = dict(input_tensors)
tensor_dict['__value_infos'] = value_infos
model = {'ops': []}
for tensor in input_tensors:
model['ops'].append(new_create_op(tensor[1]))
for node in onnx_graph.node:
onnx_node = OnnxNode(node)
ops = onnx_node_to_ln_op(onnx_node, tensor_dict)
for op in ops:
model['ops'].append(op)
return model
def onnx_initializer_to_data_tensors(initializer):
return [tensor_proto_to_tensor(init) for init in initializer]
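if __name__ == "__main__":
    # Hypothetical usage sketch (not part of the original script): convert the
    # ONNX model whose path is given on the command line and report how many
    # LightNet ops were produced.
    model_proto = onnx.load(sys.argv[1])
    ln_model = get_model(model_proto)
    print("converted {} ops".format(len(ln_model["ops"])))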
|
{"hexsha": "9e9acbd4c53b1b7137eb3ace4e3beb6939c0df5d", "size": 4837, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/onnx2ln/backend.py", "max_stars_repo_name": "zhaozhixu/LightNet", "max_stars_repo_head_hexsha": "d21f056b6fbc1a241364e7c6797dbdc57e61866b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2018-11-23T01:59:53.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-21T05:56:09.000Z", "max_issues_repo_path": "tools/onnx2ln/backend.py", "max_issues_repo_name": "zhaozhixu/LightNet", "max_issues_repo_head_hexsha": "d21f056b6fbc1a241364e7c6797dbdc57e61866b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tools/onnx2ln/backend.py", "max_forks_repo_name": "zhaozhixu/LightNet", "max_forks_repo_head_hexsha": "d21f056b6fbc1a241364e7c6797dbdc57e61866b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2018-05-11T07:43:59.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T04:48:53.000Z", "avg_line_length": 36.6439393939, "max_line_length": 97, "alphanum_fraction": 0.649989663, "include": true, "reason": "import numpy", "num_tokens": 1068}
|
import tactic -- hide
open function nat -- hide
/-
## The `assumption` tactic
The first tactic that we'll learn is the `assumption` tactic. This can be used
when your goal is exactly one of your hypotheses. In the following example,
there are three hypotheses, namely the fact that $a$ is $3$ (hypothesis `ha`), the
fact that $b$ is $4$ (hypothesis `hb`) and the fact that $c$ is $5$ (hypothesis `hc`).
Since we want to prove that $b=4$, which is one of our hypotheses, we should be able to
win by typing `assumption,` (**don't forget the comma**). Delete the `sorry` and try it.
**Pro tip:** If the hypothesis to be used is called, say `hb`, you can also close the goal
by using `exact hb,` instead. Sometimes it is more efficient to do so, especially when we believe
that `assumption` should work but it fails and we don't know why: the `exact` tactic will give us
more informative feedback about why the hypothesis does not close the goal.
-/
/- Symbol:
ℕ : \N
-/
/- Lemma : no-side-bar
If $a$ is $3$ and $b$ is $4$ and $c$ is $5$, then $b$ is $4$.
-/
lemma l0 (a b c : ℕ) (ha : a = 3) (hb : b = 4) (hc : c = 5) : b = 4 :=
begin
assumption,
end
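/- For illustration only: the same goal can also be closed with the `exact` tactic
mentioned in the pro tip above. The extra lemma below (named `l0'` here) is a minimal
sketch added for this purpose and is not part of the original level. -/
lemma l0' (a b c : ℕ) (ha : a = 3) (hb : b = 4) (hc : c = 5) : b = 4 :=
begin
  exact hb,
end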
|
{"author": "mmasdeu", "repo": "fundamental", "sha": "ef60218d34c089beda66b39a85a4604b3604651f", "save_path": "github-repos/lean/mmasdeu-fundamental", "path": "github-repos/lean/mmasdeu-fundamental/fundamental-ef60218d34c089beda66b39a85a4604b3604651f/src/tutorial_world/00_assumption.lean"}
|
"""
@test_reference filename expr [by] [kw...]
Tests the expression `expr` against the reference stored in `filename`, using the
equality test strategy `by`.
The pipeline of `test_reference` is:
1. preprocess `expr`
2. read and preprocess `filename`
3. compare the results using `by`
4. if the test fails in an interactive session (e.g., `include(test/runtests.jl)`), an interactive dialog will be triggered.
Arguments:
* `filename::String`: _relative_ path to the file that contains the macro invocation.
* `expr`: the actual content used to compare.
* `by`: the equality test function. By default it is `isequal` if not explicitly stated.
# Types
The file-extension of `filename`, as well as the type of the
result of evaluating `expr`, determine how contents are processed and
compared. The contents are treated as:
* Images when `expr` is an image type, i.e., `AbstractArray{<:Colorant}`;
* SHA256 when `filename` ends with `*.sha256`;
* Text as a fallback.
## Images
Images are compared _approximately_ using a different `by` to ignore most encoding
and decoding errors. The default function is generated from [`psnr_equality`](@ref).
The reference can be either a common image file (e.g., `*.png`), which is handled by
`FileIO`, or a text-coded `*.txt` file, which is handled by `ImageInTerminal`.
Text-coded images have lower storage requirements and also allow viewing the
reference file in a simple terminal using `cat`.
## SHA256
The hash of the `expr` and content of `filename` are compared.
!!! tip
This is useful for a convenient low-storage way of making
sure that the return value doesn't change for selected test
cases.
## Fallback
Simply test the equality of `expr` and the contents of `filename` without any
preprocessing.
# Examples
```julia
# store as string using ImageInTerminal with encoding size (5,10)
@test_reference "camera.txt" testimage("cameraman") size=(5,10)
# using folders in the relative path is allowed
@test_reference "references/camera.png" testimage("cameraman")
# Images can also be stored as hash. Note however that this
# can only check for equality (no tolerance possible)
@test_reference "references/camera.sha256" testimage("cameraman")
# test images with custom psnr threshold
@test_reference "references/camera.png" testimage("cameraman") by=psnr_equality(20)
# test number with absolute tolerance 10
@test_reference "references/string3.txt" 1338 by=(ref, x)->isapprox(ref, x; atol=10)
```
"""
macro test_reference(reference, actual, kws...)
dir = Base.source_dir()
expr = :(test_reference(abspath(joinpath($dir, $(esc(reference)))), $(esc(actual))))
for kw in kws
(kw isa Expr && kw.head == :(=)) || error("invalid signature for @test_reference")
k, v = kw.args
push!(expr.args, Expr(:kw, k, esc(v)))
end
expr
end
function test_reference(
filename::AbstractString, raw_actual;
by = nothing, render = nothing, kw...)
test_reference(query_extended(filename), raw_actual, by, render; kw...)
end
function test_reference(
file::File{F},
raw_actual::T,
equiv=nothing,
rendermode=nothing;
kw...) where {F <: DataFormat, T}
path = file.filename
dir, filename = splitdir(path)
# infer the default rendermode here
# since `nothing` is always passed to this method from
# test_reference(filename::AbstractString, raw_actual; kw...)
if rendermode === nothing
rendermode = default_rendermode(F, raw_actual)
end
actual = _convert(F, raw_actual; kw...)
# preprocessing when reference file doesn't exists
if !isfile(path)
@info("Reference file for \"$filename\" does not exist. It will be created")
# TODO: move encoding out from render
render(rendermode, raw_actual)
mkpath(dir)
savefile(file, actual)
@info("Please run the tests again for any changes to take effect")
return nothing # skip current test case
end
# file exists
reference = loadfile(T, file)
if equiv === nothing
# generally, `reference` and `actual` are of the same type after preprocessing
equiv = default_equality(reference, actual)
end
if equiv(reference, actual)
@test true # to increase test counter if reached
else
# post-processing when test fails
println("Test for \"$filename\" failed.")
render(rendermode, reference, actual)
if !isinteractive()
error("You need to run the tests interactively with 'include(\"test/runtests.jl\")' to update reference images")
end
if !input_bool("Replace reference with actual result (path: $path)?")
@test false
else
savefile(file, actual)
@info("Please run the tests again for any changes to take effect")
end
end
end
|
{"hexsha": "211f510e5e0cd2b91f1354d4bbabf1ef469cbb8f", "size": 4827, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/test_reference.jl", "max_stars_repo_name": "timholy/ReferenceTests.jl", "max_stars_repo_head_hexsha": "cb4125e14656a12ea9660c22e545c5a620f85502", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/test_reference.jl", "max_issues_repo_name": "timholy/ReferenceTests.jl", "max_issues_repo_head_hexsha": "cb4125e14656a12ea9660c22e545c5a620f85502", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/test_reference.jl", "max_forks_repo_name": "timholy/ReferenceTests.jl", "max_forks_repo_head_hexsha": "cb4125e14656a12ea9660c22e545c5a620f85502", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.9668874172, "max_line_length": 124, "alphanum_fraction": 0.6917339963, "num_tokens": 1151}
|
\section{Packages: a few favourites}
\begin{frame}{\href{https://ctan.org/pkg/cleveref?lang=en}%
{\text{\color{white} \tb usepackage\{cleveref\}}}}
cleveref formats cross-references automatically.
See \cref{fig:lion}.
\begin{figure}
\centering
\includegraphics[width=0.35\textwidth]{lion.png}
\caption{\TeX\ the Lion.}
\label{fig:lion}
\end{figure}
\end{frame}
\begin{frame}[fragile]{\href{https://ctan.org/pkg/cleveref?lang=en}%
{\text{\color{white} \tb usepackage\{cleveref\}}}}
\begin{lstlisting}
% Reference as Figure 1, instead of fig. 1
\usepackage[capitalise,noabbrev]{cleveref}
\end{lstlisting}
\vspace{-0.1cm}\hspace{0.25cm}\vdots
\begin{lstlisting}
See \cref{fig:lion}.
\begin{figure}
\centering
\includegraphics[width=0.4\textwidth]{Lion.png}
\caption{\TeX\ the Lion.}
\label{fig:lion}
\end{figure}
\end{lstlisting}
\end{frame}
\begin{frame}{\href{https://ctan.org/pkg/hyperref?lang=en}%
{\text{\color{white} \tb usepackage\{hyperref\}}}}
\begin{itemize}
\item Adds hypertext links to cross-references.
\item See e.g. \hyperlink{toc}%
{\color{link_purple}{this link to the Table of Contents}}, the links
in the table of contents and the external hyperlinks throughout.
\item \texttt{hyperref} takes many options to alter how links are displayed
\end{itemize}
\end{frame}
\begin{frame}{\href{https://ctan.org/pkg/booktabs?lang=en}%
{\text{\color{white} \tb usepackage\{booktabs\}}}}
Booktabs can be used to enhance the default tabular environment.
\begin{table}
\begin{tabular}{|l|l|r|}
\hline
\multicolumn{2}{|c|}{Item} & \\ \cline{1-2}
Animal & Sold & Price (\$) \\ \hline
Gnat & per gram & 13.65 \\
& each & 0.01 \\
Gnu & stuffed & 92.50 \\
Emu & stuffed & 33.33 \\
\hline
\end{tabular}
\caption{Default \LaTeX\ table.}
\end{table}
\end{frame}
\begin{frame}[fragile]{\href{https://ctan.org/pkg/booktabs?lang=en}%
{\text{\color{white} \tb usepackage\{booktabs\}}}}
\begin{lstlisting}
\begin{tabular}{|l|l|r|}
\hline
\multicolumn{2}{|c|}{Item} & \\\cline{1-2}
Animal & Sold & Price (\$) \\\hline
Gnat & per gram & 13.65 \\
& each & 0.01 \\
Gnu & stuffed & 92.50 \\
Emu & stuffed & 33.33 \\\hline
\end{tabular}
\caption{Default \LaTeX\ table.}
\end{lstlisting}
\end{frame}
\begin{frame}[fragile]{\href{https://ctan.org/pkg/booktabs?lang=en}%
{\text{\color{white} \tb usepackage\{booktabs\}}}}
\begin{table}
\begin{tabular}{llr}
\toprule
\multicolumn{2}{c}{Item} & \\ \cmidrule{1-2}
Animal & Sold & Price (\$) \\ \midrule
Gnat & per gram & 13.65 \\
& each & 0.01 \\
Gnu & stuffed & 92.50 \\
Emu & stuffed & 33.33 \\
\bottomrule
\end{tabular}
\caption{Booktabs table and styling.}
\end{table}
\end{frame}
\begin{frame}[fragile]{\href{https://ctan.org/pkg/booktabs?lang=en}%
{\text{\color{white} \tb usepackage\{booktabs\}}}}
\begin{lstlisting}
\begin{tabular}{llr}
\toprule
\multicolumn{2}{c}{Item} & \\\cmidrule{1-2}
Animal & Sold & Price (\$) \\\midrule
Gnat & per gram & 13.65 \\
& each & 0.01 \\
Gnu & stuffed & 92.50 \\
Emu & stuffed & 33.33 \\
\bottomrule
\end{tabular}
\caption{Booktabs improves table spacing.}
\end{lstlisting}
\end{frame}
\begin{frame}[fragile]{\href{https://ctan.org/pkg/pgfplotstable?lang=en}%
{\text{\color{white} \tb usepackage\{pgfplotstable\}}}}
pgfplotstable can read data in from a file (e.g. a \texttt{.csv} file) and
automatically format the data as a table.
Suppose I have the following \texttt{.csv} file:
\begin{lstlisting}
Element, Number, Mass
H, 1, 1.00794
He, 2, 4.00260
Li, 3, 6.94100
Be, 4, 9.01218
\end{lstlisting}
\end{frame}
\begin{frame}[fragile]{\href{https://ctan.org/pkg/pgfplotstable?lang=en}%
{\text{\color{white} \tb usepackage\{pgfplotstable\}}}}
\begin{table}
\pgfplotstabletypeset[
col sep=comma,
string type,
every head row/.style={%
before row={%
\toprule
& \multicolumn{2}{c}{Atomic} \\
\cmidrule{2-3}
},
after row={\midrule}
},
every last row/.style={after row=\bottomrule}
]%
{assets/elements.csv}
\caption{pgfplotstable can read input files.}
\end{table}
\end{frame}
\begin{frame}[fragile]{\href{https://ctan.org/pkg/pgfplotstable?lang=en}%
{\text{\color{white} \tb usepackage\{pgfplotstable\}}}}
\begin{lstlisting}
\pgfplotstabletypeset[
col sep=comma,
string type,
every head row/.style={%
before row={%
\toprule
& \multicolumn{2}{c}{Atomic} \\
\cmidrule{2-3}
},
after row={\midrule}
},
every last row/.style={after row=\bottomrule}
]%
{assets/elements.csv}
\end{lstlisting}
\end{frame}
\begin{frame}[fragile]{\href{https://ctan.org/pkg/pgfplotstable?lang=en}%
{\text{\color{white} \tb usepackage\{pgfplotstable\}}}}
pgfplotstable can round numbers as desired:
\begin{table}
\pgfplotstabletypeset[%
col sep=comma,
columns/Number/.style={string type},
columns/Element/.style={string type},
columns/Mass/.style={fixed zerofill, precision=3},
every head row/.style={%
before row={%
\toprule
& \multicolumn{2}{c}{Atomic} \\
\cmidrule{2-3}
},
after row={\midrule}
},
every last row/.style={after row=\bottomrule}
]%
{assets/elements.csv}
\caption{pgfplotstable understands precision and rounding.}
\end{table}
\end{frame}
\begin{frame}[fragile]{\href{https://ctan.org/pkg/pgfplotstable?lang=en}%
{\text{\color{white} \tb usepackage\{pgfplotstable\}}}}
\begin{lstlisting}
\pgfplotstabletypeset[
col sep=comma,
columns/Number/.style={string type},
columns/Element/.style={string type},
columns/Mass/.style={fixed zerofill,
precision=3},
\end{lstlisting}
\vspace{-0.5cm}\hspace{0.25cm}\vdots \\
\hspace{0.25cm}\vdots\quad(As in earlier example) \\
\hspace{0.25cm}\vdots
\begin{lstlisting}
\caption{pgfplotstable understands precision
and rounding.}
\end{lstlisting}
\end{frame}
\begin{frame}{Even \textit{more} table generators}
In addition to pgfplotstable there are various other table generators:
\begin{itemize}
\item \href{https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_latex.html}%
{pandas.DataFrame.to\_latex} (Python users)
\item \href{https://www.rdocumentation.org/packages/xtable/versions/1.8-3/topics/xtable}%
{xtable} (R users)
\item \href{https://ctan.org/pkg/excel2latex?lang=en}{Excel2latex}
(Excel users)
\item \href{https://uk.mathworks.com/matlabcentral/fileexchange/4894-matrix2latex?s_tid=FX_rc2_behav}%
{matrix2latex} (Matlab users)
\end{itemize}
\end{frame}
\begin{frame}{A few more packages\ldots}
\begin{columns}
\begin{column}{0.5\textwidth}
\begin{itemize}
\item \href{http://www.texample.net/tikz/}{tikz}
\item \href{https://ctan.org/pkg/standalone?lang=en}{standalone}
\item \href{https://ctan.org/pkg/fancyhdr?lang=en}{fancyhdr}
\item \href{https://ctan.org/pkg/multirow?lang=en}{multirow}
\item \href{https://ctan.org/pkg/ifdraft?lang=en}{ifdraft}
\end{itemize}
\end{column}
\begin{column}{0.5\textwidth}
\begin{itemize}
\item \href{https://ctan.org/pkg/titlesec?lang=en}{titlesec}
\item \href{https://ctan.org/pkg/microtype?lang=en}{microtype}
\item \href{https://ctan.org/pkg/natbib?lang=en}{natbib}
\item \href{https://ctan.org/pkg/geometry}{geometry}
\item \href{https://ctan.org/pkg/todonotes?lang=en}{todonotes}
\end{itemize}
\end{column}
\end{columns}
\end{frame}
\begin{frame}[standout]
\href{https://jwalton.info/assets/teaching/latex/exercise_3.pdf}%
{\color{white}Exercise 3}
\end{frame}
|
{"hexsha": "9a44e7eaac1ca8513d6171841a4b9dcebee81ab5", "size": 8611, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "slides/sections/packages.tex", "max_stars_repo_name": "jwalton3141/sage_latex", "max_stars_repo_head_hexsha": "6aed684ad18d4d7b7ae19bb60f99f03b5d95006d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-10-28T18:58:17.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-28T18:58:17.000Z", "max_issues_repo_path": "slides/sections/packages.tex", "max_issues_repo_name": "jwalton3141/latex_course", "max_issues_repo_head_hexsha": "6aed684ad18d4d7b7ae19bb60f99f03b5d95006d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-10-13T20:42:56.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-14T13:52:58.000Z", "max_forks_repo_path": "slides/sections/packages.tex", "max_forks_repo_name": "jwalton3141/sage_latex", "max_forks_repo_head_hexsha": "6aed684ad18d4d7b7ae19bb60f99f03b5d95006d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.9015748031, "max_line_length": 106, "alphanum_fraction": 0.5924979677, "num_tokens": 2700}
|
#!/usr/bin/env python3
import sys
import math
import numpy as np
class CountTwitterPartitions(object):
def __init__(self, filename, identifier, bits_per_vertex):
self.meta_info = {
'twitter_rv': {
'count': 40103281,
'seperator' : '\t'
},
'twitter_small': {
'count': 11316811,
'seperator': ','
},
'buzznet': {
'count': 101168,
'seperator': ','
}
}
self.filename = filename
self.identifier = identifier
self.vertex_per_partition = 2 ** bits_per_vertex
self.sep = self.meta_info[self.identifier]["seperator"]
self.count = self.meta_info[self.identifier]["count"]
self.edge_blocks = []
self.partition_count = int(2**math.ceil(math.log(self.count // self.vertex_per_partition) / math.log(2)))
print(self.partition_count)
self.partitions = [[[] for x in range(self.partition_count)] for x in range(self.partition_count)]
def generateIndex(self, i, j):
return (hash(i) % self.count , hash(j) % self.count)
def getPartition(self, x, y):
return (x % self.partition_count, y % self.partition_count)
def readTwitterSetIntoPartitions(self):
with open(self.filename) as f:
for line in iter(f):
line = line.strip()
vertices = line.split(self.sep)
(src, tgt) = self.generateIndex(vertices[0], vertices[1])
(src_part, tgt_part) = self.getPartition(src, tgt)
#print(vertices[0], vertices[1])
#print(src, tgt)
#print(src_part, tgt_part)
self.partitions[src_part][tgt_part].append((src, tgt))
def generateEdgeSets(self):
# Hilbert-order the tiles
# then build edge-set until reaching (len(src) = 4096 || len(tgt) == 4096)
edge_block = ({
"src":set(),
"tgt":set(),
"edges":[]
})
for i in range(0, self.partition_count**2):
(x, y) = d2xy(self.partition_count, i)
for edge in self.partitions[x][y]:
if len(edge_block["src"]) == self.vertex_per_partition or len(edge_block["tgt"]) == self.vertex_per_partition:
self.edge_blocks.append(edge_block)
edge_block = ({
"src":set(),
"tgt":set(),
"edges":[]
})
edge_block["src"].add(edge[0])
edge_block["tgt"].add(edge[1])
edge_block["edges"].append(edge)
self.edge_blocks.append(edge_block)
def print_statistics_partitions(self):
        min_count = np.min(self.partitions)
        max_count = np.max(self.partitions)
        mean = np.mean(self.partitions)
        median = np.median(self.partitions)
        stddev = np.std(self.partitions)
        print(min_count, max_count, mean, median, stddev)
def print_statistics_edges(self):
l = np.array(range(len(self.edge_blocks)))
i = 0
for eb in self.edge_blocks:
l[i] = len(eb["edges"])
i += 1
min = np.min(l)
max = np.max(l)
mean = np.mean(l)
median = np.median(l)
stddev = np.std(l)
print(min, max, mean, median, stddev)
# now print percentiles:
for i in range(10, 100, 10):
print('%s: %s' % (i, np.percentile(l, i)))
# code taken from https://en.wikipedia.org/wiki/Hilbert_curve
# rotate/flip a quadrant appropriately
def rot(n, x, y, rx, ry):
if ry == 0:
if rx == 1:
x = n - 1 - x
y = n - 1 - y
return y, x
return x, y
def d2xy(n, d):
assert(d <= n**2 - 1)
t = d
x, y = 0, 0
s = 1
while s < n:
rx = 1 & (t // 2)
ry = 1 & (t ^ rx)
x, y = rot(s, x, y, rx, ry)
x += s * rx
y += s * ry
t //= 4
s *= 2
return x, y
# convert (x,y) to d
def xy2d (n, x, y):
rx, ry, d = 0, 0, 0
s = n // 2
while s > 0:
rx = (x & s) > 0
ry = (y & s) > 0
d += s * s * ((3 * rx) ^ ry)
x, y = rot(s, x, y, rx, ry)
s = s // 2
return d
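# Example (illustrative): d2xy(4, 3) == (0, 1) and xy2d(4, 0, 1) == 3, i.e. the two
# functions are inverses of each other on a 4x4 Hilbert curve.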
if __name__ == "__main__":
ctp = CountTwitterPartitions(sys.argv[1], sys.argv[2], int(sys.argv[3]))
ctp.readTwitterSetIntoPartitions()
ctp.generateEdgeSets()
ctp.print_statistics_edges()
|
{"hexsha": "0b1d122c7674bf50bedb73637b920579e9b5c50b", "size": 4606, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/tools/scripts/twitter_hilbertiled_partitions.py", "max_stars_repo_name": "rekords-uw/mosaic", "max_stars_repo_head_hexsha": "6484cc7f5e7918a363d578a89283e94771987d8a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 75, "max_stars_repo_stars_event_min_datetime": "2017-06-04T19:38:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-19T10:37:43.000Z", "max_issues_repo_path": "src/tools/scripts/twitter_hilbertiled_partitions.py", "max_issues_repo_name": "rekords-uw/mosaic", "max_issues_repo_head_hexsha": "6484cc7f5e7918a363d578a89283e94771987d8a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2017-11-29T03:29:42.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-15T12:26:52.000Z", "max_forks_repo_path": "src/tools/scripts/twitter_hilbertiled_partitions.py", "max_forks_repo_name": "rekords-uw/mosaic", "max_forks_repo_head_hexsha": "6484cc7f5e7918a363d578a89283e94771987d8a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 16, "max_forks_repo_forks_event_min_datetime": "2017-06-13T21:59:02.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T09:32:45.000Z", "avg_line_length": 31.5479452055, "max_line_length": 126, "alphanum_fraction": 0.5080330004, "include": true, "reason": "import numpy", "num_tokens": 1182}
|
import os
from os.path import join
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm
from vae_model import BimodalVAE, CancerSamplesDataset
from mmd_vae_model import Bimodal_MMD_VAE
if __name__ == "__main__":
if not os.path.exists("embeddings"):
os.mkdir("embeddings")
# Embedding full dataset
full_ds = CancerSamplesDataset(join("data", "sample_subtype_encodings.csv"),
join("data", "sorted_mutations.json"),
join("data", "mutations_mapping_split.json"),
train=None)
checkpoint = torch.load(join("models", "mmd_vae.ckpt"))["state_dict"]
model = Bimodal_MMD_VAE() # BimodalVAE()
model.load_state_dict(checkpoint)
embeddings = []
with torch.no_grad():
model.eval()
for X_del, X_nd, subtypes in tqdm(full_ds):
z = model.encode(X_del, X_nd)
# mu, logvar = model.encode(X_del, X_nd)
embeddings.append(z)
# embeddings.append(mu)
embeddings = torch.stack(embeddings).detach().numpy()
embeddings_df = pd.DataFrame(embeddings, columns=["x{}".format(i) for i in range(embeddings.shape[-1])])
embeddings_df.insert(0, "ID_sample", full_ds.sample_ids)
embeddings_df.to_csv(join("embeddings", "mmd_vae_embeddings.csv"), index=False)
|
{"hexsha": "679cf6ca35da4fc1ae2d3918507107a610aaa4b2", "size": 1372, "ext": "py", "lang": "Python", "max_stars_repo_path": "embed_cancer_samples.py", "max_stars_repo_name": "gvisona/COSMIC_Embedding", "max_stars_repo_head_hexsha": "ee9d5f50f7ada832a83abf40b9f9ba30fc4a5220", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "embed_cancer_samples.py", "max_issues_repo_name": "gvisona/COSMIC_Embedding", "max_issues_repo_head_hexsha": "ee9d5f50f7ada832a83abf40b9f9ba30fc4a5220", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "embed_cancer_samples.py", "max_forks_repo_name": "gvisona/COSMIC_Embedding", "max_forks_repo_head_hexsha": "ee9d5f50f7ada832a83abf40b9f9ba30fc4a5220", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.2, "max_line_length": 108, "alphanum_fraction": 0.6413994169, "include": true, "reason": "import numpy", "num_tokens": 322}
|
import numpy as np
import cv2
import math
import sys
if len(sys.argv) < 2:
print("Usage: compress.py [video]")
print("Note: video must have a resolution lower than or equal to 384x216 px.")
exit()
totalBytes = 0
video = cv2.VideoCapture(sys.argv[1])
frameCount = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
w = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(video.get(cv2.CAP_PROP_FPS))
scale = int(216 / h)
if w > 384 or h > 216:
print("Video must have a resolution lower than or equal to 384x216 px.")
exit()
print(f"H:{h}, W:{w}, S:{scale}, FPS:{fps}")
frameCounter = 0
continuing = True
f = open("data.bin", "wb")
f.write(frameCount.to_bytes(2, 'big'))
f.write(w.to_bytes(2, 'big'))
f.write(h.to_bytes(2, 'big'))
f.write(fps.to_bytes(2, 'big'))
f.write(scale.to_bytes(1, 'big'))
while (frameCounter < frameCount and continuing):
continuing, frame = video.read()
frameCounter += 1
frameArray = []
for y in range(h):
for x in range(w):
brightness = int(np.sum(frame[y, x])/3)
white = True if brightness > 127 else False
frameArray.append(white)
frameArray.append(None)
rep = []
previous = None
numRep = 0
for i in frameArray:
if i != previous and previous != None:
rep.append(numRep)
numRep = 0
previous = i
if i == previous:
numRep += 1
if previous == None:
numRep = 0
previous = i
rep[len(rep) - 1] = rep[len(rep) - 1] + 1
f.write(len(rep).to_bytes(2, 'big'))
for i in range(len(rep)):
f.write(rep[i].to_bytes(2, 'big'))
if frameArray[0] == True:
f.write(int(1).to_bytes(1, 'big'))
else:
f.write(int(0).to_bytes(1, 'big'))
print(f"Frame #{frameCounter} -> {len(rep) * 2} + 3 bytes")
totalBytes += (len(rep) * 2) + 3
print(f"TOTAL BYTES: {totalBytes}. Output at data.bin.")
video.release()
f.close()
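# A minimal decoder sketch (illustrative, not part of the original script; it simply
# mirrors the format written above):
#   with open("data.bin", "rb") as f:
#       frames = int.from_bytes(f.read(2), 'big')
#       w = int.from_bytes(f.read(2), 'big')
#       h = int.from_bytes(f.read(2), 'big')
#       fps = int.from_bytes(f.read(2), 'big')
#       scale = int.from_bytes(f.read(1), 'big')
#       for _ in range(frames):
#           n = int.from_bytes(f.read(2), 'big')
#           runs = [int.from_bytes(f.read(2), 'big') for _ in range(n)]
#           first_run_is_white = bool(f.read(1)[0])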
|
{"hexsha": "075b9065e560c1ac4dcdeb3a533d04b795bc888e", "size": 2014, "ext": "py", "lang": "Python", "max_stars_repo_path": "compression/compress.py", "max_stars_repo_name": "oxixes/bad-apple-cg50", "max_stars_repo_head_hexsha": "9ef92d25fb723b122f8d7c65fc213e616c850fd2", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-04T01:47:38.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-04T01:47:38.000Z", "max_issues_repo_path": "compression/compress.py", "max_issues_repo_name": "oxixes/bad-apple-cg50", "max_issues_repo_head_hexsha": "9ef92d25fb723b122f8d7c65fc213e616c850fd2", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "compression/compress.py", "max_forks_repo_name": "oxixes/bad-apple-cg50", "max_forks_repo_head_hexsha": "9ef92d25fb723b122f8d7c65fc213e616c850fd2", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.175, "max_line_length": 82, "alphanum_fraction": 0.59632572, "include": true, "reason": "import numpy", "num_tokens": 590}
|
import numpy as np
from holoviews.element import BoxWhisker
from .test_plot import TestMPLPlot, mpl_renderer
class TestMPLBoxWhiskerPlot(TestMPLPlot):
def test_boxwhisker_simple(self):
values = np.random.rand(100)
boxwhisker = BoxWhisker(values)
plot = mpl_renderer.get_plot(boxwhisker)
data, style, axis_opts = plot.get_data(boxwhisker, {}, {})
self.assertEqual(data[0][0], values)
self.assertEqual(style['labels'], [''])
def test_boxwhisker_simple_overlay(self):
values = np.random.rand(100)
boxwhisker = BoxWhisker(values) * BoxWhisker(values)
plot = mpl_renderer.get_plot(boxwhisker)
p1, p2 = plot.subplots.values()
self.assertEqual(p1.handles['boxes'][0].get_path().vertices,
p2.handles['boxes'][0].get_path().vertices)
def test_box_whisker_padding_square(self):
curve = BoxWhisker([1, 2, 3]).options(padding=0.1)
plot = mpl_renderer.get_plot(curve)
y_range = plot.handles['axis'].get_ylim()
self.assertEqual(y_range[0], 0.8)
self.assertEqual(y_range[1], 3.2)
|
{"hexsha": "1da9dd2410eaaa544fffde88820eec2c834b4624", "size": 1137, "ext": "py", "lang": "Python", "max_stars_repo_path": "holoviews/tests/plotting/matplotlib/test_boxwhisker.py", "max_stars_repo_name": "TheoMathurin/holoviews", "max_stars_repo_head_hexsha": "0defcef994d6dd6d2054f75a0e332d02d121f8b0", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 864, "max_stars_repo_stars_event_min_datetime": "2019-11-13T08:18:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T13:36:13.000Z", "max_issues_repo_path": "holoviews/tests/plotting/matplotlib/test_boxwhisker.py", "max_issues_repo_name": "chrinide/holoviews", "max_issues_repo_head_hexsha": "e1234a60ae0809ac561c204b1998dff0452b2bf0", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1117, "max_issues_repo_issues_event_min_datetime": "2019-11-12T16:15:59.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T22:57:59.000Z", "max_forks_repo_path": "holoviews/tests/plotting/matplotlib/test_boxwhisker.py", "max_forks_repo_name": "chrinide/holoviews", "max_forks_repo_head_hexsha": "e1234a60ae0809ac561c204b1998dff0452b2bf0", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 180, "max_forks_repo_forks_event_min_datetime": "2019-11-19T16:44:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T22:49:18.000Z", "avg_line_length": 35.53125, "max_line_length": 68, "alphanum_fraction": 0.6569920844, "include": true, "reason": "import numpy", "num_tokens": 302}
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
##################################################################################################################
# ### Miscellaneous Functions
# ### Module responsible for storing extra data processing functions, accuracy measures and others.
##################################################################################################################
# Required modules
import gc
import scipy
import warnings
import math
import pandas as pd
import numpy as np
# Machine Learning
from sklearn import metrics
# Disable warnings
warnings.filterwarnings('ignore')
# Statistics methods
def concordance_measures(cm, y_true, y_pred):
# initial attributes
total = float(len(y_true))
m = 1.0 if len(cm) == 0 else len(cm[0])
marg = 0.0
# measures
acc = metrics.accuracy_score(y_true, y_pred)
f1score = metrics.f1_score(y_true, y_pred, average='weighted')
kappa = metrics.cohen_kappa_score(y_true, y_pred)
mcc = metrics.matthews_corrcoef(y_true, y_pred)
tau, p_value = scipy.stats.kendalltau(y_true, y_pred)
# marg
vtau = 0.0
vkappa = 0.0
if m > 1:
for i in range(0,m):
marg += sum(cm[i,])*sum(cm[:,i])
# Others
vtau = (1/total)*((acc*(1-acc))/((1-(1/m))**2)) if (1 - (1/m)) > 0 else 0
t1 = acc
t2 = marg/((total)**2)
t3 = 0.0
t4 = 0.0
for i in range(0,m):
t3 += cm[i,i]*(sum(cm[i,])+sum(cm[:,i]))
for i in range(0,m):
for j in range(0,m):
t4 += cm[i,j]*((sum(cm[j,])+sum(cm[:,i]))**2)
t3 = t3/((total)**2)
t4 = t4/((total)**3)
# Kappa Variance
vkappa = (1/total)*( ((t1*(1-t1))/((1-t2)**2)) + ((2*(1-t1)*(2*t1*t2-t3))/((1-t2)**3)) + ((((1-t1)**2)*(t4-4*(t2**2)))/((1-t2)**4)) )
# TP, FP, TN, FN
try:
tn, fp, fn, tp = cm.ravel()
except:
tn, fp, fn, tp = [0,0,0,0]
# fix values
acc = float(acc if not math.isnan(acc) else 0.0)
f1score = float(f1score if not math.isnan(f1score) else 0.0)
kappa, vkappa = float(kappa if not math.isnan(kappa) else 0.0), float(vkappa if not math.isnan(vkappa) else 0.0)
mcc = float(mcc if not math.isnan(mcc) else 0.0)
tau, vtau = float(tau if not math.isnan(tau) else 0.0), float(vtau if not math.isnan(vtau) else 0.0)
p_value = float(p_value if not math.isnan(p_value) else 0.0)
# String
string = 'Acc:'+str(round(acc,4))+', F1-Score:'+str(round(f1score,4))+', Kappa:'+str(round(kappa,4))+', vKappa:'+str(round(vkappa,4))+', Tau:'+str(round(tau,4))+', vTau:'+str(round(vtau,4))+', p-value:'+str(round(p_value,4))+', Mcc:'+str(round(mcc,4))+', TP:'+str(tp)+', FP:'+str(fp)+', TN:'+str(tn)+', FN:'+str(fn)
# Response
return {'total':total,'acc':acc,'f1score':f1score,'tau':tau,'vtau':vtau,'p_value':p_value,'kappa':kappa,'vkappa':vkappa,'mcc':mcc,'tp':tp,'fp':fp,'tn':tn,'fn':fn,'string':string}
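# Example usage (illustrative sketch, not part of the original module):
#   y_true, y_pred = [0, 1, 1, 0], [0, 1, 0, 0]
#   cm = metrics.confusion_matrix(y_true, y_pred)
#   print(concordance_measures(cm, y_true, y_pred)['string'])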
# Remove duplicated dates
def remove_duplicated_dates(dates: list):
    # keep only the first occurrence of each calendar date; building a new list
    # avoids skipping items when deleting from the list while iterating over it
    visited = []
    unique_dates = []
    for date in dates:
        key = date.strftime("%Y-%m-%d")
        if key not in visited:
            visited.append(key)
            unique_dates.append(date)
    return unique_dates
# Frame a time series as a supervised learning dataset.
def series_to_supervised(df, n_in=1, n_out=1, dropnan=True):
# configuration
n_vars = 1 if type(df) is list else df.shape[1]
df = pd.DataFrame(df)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# put it all together
df = pd.DataFrame(np.concatenate(cols, axis=1),columns=names)
# clear memory
del cols, names
gc.collect()
# drop rows with NaN values
if dropnan:
df.dropna(inplace=True)
# return value
return df
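# Example usage (illustrative sketch, not part of the original module):
#   df = pd.DataFrame({'value': [1, 2, 3, 4]})
#   supervised = series_to_supervised(df, n_in=1, n_out=1)
#   # -> columns ['var1(t-1)', 'var1(t)'] with the leading NaN row dropped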
|
{"hexsha": "d211602357bb9f80881a08ae535b7821ded4b9e4", "size": 4099, "ext": "py", "lang": "Python", "max_stars_repo_path": "modules/misc.py", "max_stars_repo_name": "pedroananias/abd", "max_stars_repo_head_hexsha": "e5781a05704b3061e382030e8ec65c6030ccd98d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "modules/misc.py", "max_issues_repo_name": "pedroananias/abd", "max_issues_repo_head_hexsha": "e5781a05704b3061e382030e8ec65c6030ccd98d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "modules/misc.py", "max_forks_repo_name": "pedroananias/abd", "max_forks_repo_head_hexsha": "e5781a05704b3061e382030e8ec65c6030ccd98d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.0234375, "max_line_length": 317, "alphanum_fraction": 0.5584288851, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1310}
|
[STATEMENT]
lemma list_assn_simps[simp]:
"hn_ctxt (list_assn P) [] l' = (\<up>(l'=[]))"
"hn_ctxt (list_assn P) l [] = (\<up>(l=[]))"
"hn_ctxt (list_assn P) [] [] = emp"
"hn_ctxt (list_assn P) (a#as) (c#cs) = hn_ctxt P a c * hn_ctxt (list_assn P) as cs"
"hn_ctxt (list_assn P) (a#as) [] = false"
"hn_ctxt (list_assn P) [] (c#cs) = false"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (hn_ctxt (list_assn P) [] l' = \<up> (l' = []) &&& hn_ctxt (list_assn P) l [] = \<up> (l = []) &&& hn_ctxt (list_assn P) [] [] = emp) &&& hn_ctxt (list_assn P) (a # as) (c # cs) = hn_ctxt P a c * hn_ctxt (list_assn P) as cs &&& hn_ctxt (list_assn P) (a # as) [] = false &&& hn_ctxt (list_assn P) [] (c # cs) = false
[PROOF STEP]
unfolding hn_ctxt_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (list_assn P [] l' = \<up> (l' = []) &&& list_assn P l [] = \<up> (l = []) &&& list_assn P [] [] = emp) &&& list_assn P (a # as) (c # cs) = P a c * list_assn P as cs &&& list_assn P (a # as) [] = false &&& list_assn P [] (c # cs) = false
[PROOF STEP]
apply (cases l')
[PROOF STATE]
proof (prove)
goal (7 subgoals):
1. l' = [] \<Longrightarrow> list_assn P [] l' = \<up> (l' = [])
2. \<And>a list. l' = a # list \<Longrightarrow> list_assn P [] l' = \<up> (l' = [])
3. list_assn P l [] = \<up> (l = [])
4. list_assn P [] [] = emp
5. list_assn P (a # as) (c # cs) = P a c * list_assn P as cs
6. list_assn P (a # as) [] = false
7. list_assn P [] (c # cs) = false
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal (6 subgoals):
1. \<And>a list. l' = a # list \<Longrightarrow> list_assn P [] l' = \<up> (l' = [])
2. list_assn P l [] = \<up> (l = [])
3. list_assn P [] [] = emp
4. list_assn P (a # as) (c # cs) = P a c * list_assn P as cs
5. list_assn P (a # as) [] = false
6. list_assn P [] (c # cs) = false
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal (5 subgoals):
1. list_assn P l [] = \<up> (l = [])
2. list_assn P [] [] = emp
3. list_assn P (a # as) (c # cs) = P a c * list_assn P as cs
4. list_assn P (a # as) [] = false
5. list_assn P [] (c # cs) = false
[PROOF STEP]
apply (cases l)
[PROOF STATE]
proof (prove)
goal (6 subgoals):
1. l = [] \<Longrightarrow> list_assn P l [] = \<up> (l = [])
2. \<And>a list. l = a # list \<Longrightarrow> list_assn P l [] = \<up> (l = [])
3. list_assn P [] [] = emp
4. list_assn P (a # as) (c # cs) = P a c * list_assn P as cs
5. list_assn P (a # as) [] = false
6. list_assn P [] (c # cs) = false
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal (5 subgoals):
1. \<And>a list. l = a # list \<Longrightarrow> list_assn P l [] = \<up> (l = [])
2. list_assn P [] [] = emp
3. list_assn P (a # as) (c # cs) = P a c * list_assn P as cs
4. list_assn P (a # as) [] = false
5. list_assn P [] (c # cs) = false
[PROOF STEP]
apply simp
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. list_assn P [] [] = emp
2. list_assn P (a # as) (c # cs) = P a c * list_assn P as cs
3. list_assn P (a # as) [] = false
4. list_assn P [] (c # cs) = false
[PROOF STEP]
apply simp_all
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done
|
{"llama_tokens": 1486, "file": "Refine_Imperative_HOL_Sepref_HOL_Bindings", "length": 9}
|
# Quick and dirty XOR 2-3-1 example network
# for an example on /r/MLQuestions
# Vanilla gradient descent with no momentum
# Adam Smith 1.26.2017
import numpy as np
# X are the inputs, Y are the outputs (the XOR truth table)
X = np.array([[[1],[1]], [[0],[1]], [[1],[0]], [[0],[0]]])
Y = np.array([0, 1, 1, 0])
# Learning rate (divided by the batch size at update time)
learn = 0.5
batchSize = Y.shape[0] #4
# Weights from the inputs to the hidden layer
W0 = 0.2 * ( 2.0 * np.random.random((3,2)) - 1.0 )
# Hidden layer biases
B0 = np.zeros((3,1))
# Weights from the hidden layer to the output node
W1 = 0.2 * ( 2.0 * np.random.random((3,1)) - 1.0 )
# Output node bias
B1 = np.zeros((1,1))
# Training loop
for epoch in range(10000):
dW0 = np.zeros((3,2))
dW1 = np.zeros((3,1))
dB0 = np.zeros((3,1))
dB1 = np.zeros((1,1))
# Batch loop
for example in range(4):
# Hidden Layer forward prop
S0 = W0.dot(X[example]) + B0
A0 = np.tanh(S0)
# Output Layer forward prop
S1 = W1.T.dot(A0) + B1
# This is an unnecessary assignment I'm including for illustration
# purposes only. I'm using a linear output node which makes the
# activation function a no-op
A1 = S1
# Hidden to Output weights backprop
# The variables called 'delta' are the error signals, which are
# usually represented in the literature by Greek lowercase delta
delta1 = A1 - Y[example]
dW1 += -delta1 * A0
dB1 += -delta1
# Input to Hidden weights backprop
# Because the hidden layer has tanh activation, the error term
# has a derivative component ( 1 - A0 * A0 ) is the derivative
# of A0 w.r.t. S0, i.e. ( 1 - tanh^2(S0) ) and tanh(S0) is just A0
delta0 = -delta1 * ( 1 - A0 * A0 ) * W1
dW0 += delta0.dot(X[example].T)
dB0 += delta0
# Weight updates after each batch
W1 += learn / batchSize * dW1
B1 += learn / batchSize * dB1
W0 += learn / batchSize * dW0
B0 += learn / batchSize * dB0
# Print the results for each example case
for example in range(4):
S0 = W0.dot(X[example]) + B0
A0 = np.tanh(S0)
S1 = W1.T.dot(A0) + B1
A1 = S1
print (X[example].T[0],A1[0][0])
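# Expected behaviour (illustrative): after training, the printed outputs should
# typically approximate the XOR truth table, i.e. close to 0 for [1 1] and [0 0]
# and close to 1 for [0 1] and [1 0]; exact values depend on the random
# initialisation.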
|
{"hexsha": "148c7d3ee17575267f7d94e9f8dfaddb953ed24b", "size": 2094, "ext": "py", "lang": "Python", "max_stars_repo_path": "xor.py", "max_stars_repo_name": "tamlovertt/DenseInL29", "max_stars_repo_head_hexsha": "d6546e3c2f4fa107bd0d34a352257972237b7cfe", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "xor.py", "max_issues_repo_name": "tamlovertt/DenseInL29", "max_issues_repo_head_hexsha": "d6546e3c2f4fa107bd0d34a352257972237b7cfe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "xor.py", "max_forks_repo_name": "tamlovertt/DenseInL29", "max_forks_repo_head_hexsha": "d6546e3c2f4fa107bd0d34a352257972237b7cfe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.175, "max_line_length": 68, "alphanum_fraction": 0.6394460363, "include": true, "reason": "import numpy", "num_tokens": 746}
|
import inspect
import os
import sys
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
import numpy as np
from keras.models import Model
from keras.layers import Dense, CuDNNLSTM, Bidirectional, Input, Dropout, concatenate, Conv1D, \
BatchNormalization
from keras.layers.wrappers import TimeDistributed
from layers.utility import slice_tensor
from keras import optimizers
from keras import regularizers
np.random.seed(0)
def model_a_cnn_bilstm(n_classes, convs=[3, 5, 7], dense_size=200, lstm_size=400, dropout_rate=0.5,
features_to_use=['onehot', 'pssm'], filter_size=256, lr=0.001):
'''
    :param n_classes: number of output classes per position
    :param convs: list of 1D convolution kernel sizes
    :param dense_size: number of units in the dense layers
    :param lstm_size: number of units per direction of the bidirectional LSTM
    :param dropout_rate: dropout rate applied before and after the LSTM
    :param features_to_use: subset of ['onehot', 'pssm', 'embedding', 'elmo', 'biophysical']
    :param filter_size: number of filters per convolutional layer
    :param lr: learning rate of the Adam optimizer
    :return: the compiled Keras model and a descriptive run name
'''
visible = Input(shape=(None, 408))
# slice different feature types
biophysical = slice_tensor(2, 0, 16, name='biophysicalfeatures')(visible)
embedding = slice_tensor(2, 16, 66, name='skipgramembd')(visible)
onehot = slice_tensor(2, 66, 87, name='onehot')(visible)
pssm = slice_tensor(2, 87, 108, name='pssm')(visible)
elmo = slice_tensor(2, 108, 408, name='elmo')(visible)
# create input based-on selected features
input_dict = {'pssm': pssm, 'onehot': onehot, 'embedding': embedding, 'elmo': elmo,
'biophysical': biophysical}
features = []
for feature in features_to_use:
features.append(input_dict[feature])
## batch normalization on the input features
if len(features_to_use) == 1:
conclayers = features
input = BatchNormalization(name='batchnorm_input')(features[0])
else:
input = BatchNormalization(name='batchnorm_input')(concatenate(features))
conclayers = [input]
    # performing the convolutions
for idx, conv in enumerate(convs):
idx = str(idx + 1)
conclayers.append(BatchNormalization(name='batch_norm_conv' + idx)(
Conv1D(filter_size, conv, activation="relu", padding="same", name='conv' + idx,
kernel_regularizer=regularizers.l2(0.001))(input)))
conc = concatenate(conclayers)
# Dropout and Dense Layer before LSTM
if dropout_rate > 0:
drop_before = Dropout(dropout_rate, name='dropoutonconvs')(conc)
dense_convinp = Dense(dense_size, activation='relu', name='denseonconvs')(drop_before)
else:
dense_convinp = Dense(dense_size, activation='relu', name='denseonconvs')(conc)
# Batch normalize the results of dropout
dense_convinpn = BatchNormalization(name='batch_norm_dense')(dense_convinp)
# LSTM
lstm = Bidirectional(CuDNNLSTM(lstm_size, return_sequences=True, name='bilstm'))(dense_convinpn)
drop_after_lstm = Dropout(dropout_rate)(lstm)
dense_out = Dense(dense_size, activation='relu')(drop_after_lstm)
    # Labeling layer
timedist = TimeDistributed(Dense(n_classes, activation='softmax'))(dense_out)
model = Model(inputs=visible, outputs=timedist)
adam = optimizers.Adam(lr=lr)
model.compile(loss='categorical_crossentropy', optimizer=adam, weighted_metrics=['accuracy'],
sample_weight_mode='temporal')
# print model
print(model.summary())
return model, 'model_a_cnn_bilstm#' + '#'.join(features_to_use) + '@conv' + '_'.join(
[str(c) for c in convs]) + '@dense_' + str(dense_size) + '@lstm' + str(lstm_size) + '@drop_rate' + str(
dropout_rate) + '@filtersize_' + str(filter_size) + '@lr_' + str(lr)
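# Example usage (illustrative sketch; CuDNNLSTM requires a CUDA-enabled GPU, and the
# argument values below are assumptions, not taken from the original code):
#   model, run_name = model_a_cnn_bilstm(n_classes=8, features_to_use=['onehot', 'pssm'])
#   # the model expects inputs of shape (batch, sequence_length, 408)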
|
{"hexsha": "c48d825ac04cae098a999350bb316110c8826816", "size": 3640, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/a_cnn_bilstm.py", "max_stars_repo_name": "etfrenchvcu/DeepPrime2Sec", "max_stars_repo_head_hexsha": "a61146c7c7d8a0f8821717b9f7aed8703cfdb2a1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 18, "max_stars_repo_stars_event_min_datetime": "2019-07-17T01:53:03.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-10T13:08:51.000Z", "max_issues_repo_path": "models/a_cnn_bilstm.py", "max_issues_repo_name": "etfrenchvcu/DeepPrime2Sec", "max_issues_repo_head_hexsha": "a61146c7c7d8a0f8821717b9f7aed8703cfdb2a1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 18, "max_issues_repo_issues_event_min_datetime": "2019-10-13T02:45:50.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T00:17:06.000Z", "max_forks_repo_path": "models/a_cnn_bilstm.py", "max_forks_repo_name": "etfrenchvcu/DeepPrime2Sec", "max_forks_repo_head_hexsha": "a61146c7c7d8a0f8821717b9f7aed8703cfdb2a1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2019-07-26T01:16:26.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-14T01:57:24.000Z", "avg_line_length": 39.5652173913, "max_line_length": 111, "alphanum_fraction": 0.6876373626, "include": true, "reason": "import numpy", "num_tokens": 950}
|
"""
Implements the Multimedia Self-Supervised Grid-based (proposal-free) CNN framework
"""
import numpy as np
import torch
from torch import nn
from maskrcnn_benchmark.structures.image_list import to_image_list
from ..backbone import build_backbone
from ..language_backbone import build_language_backbone
from ..mmss_heads import build_mmss_heads
class MMSSGridModel(nn.Module):
"""
    Main class for the Multimedia Self-Supervised Grid-based (proposal-free) model.
    It consists of three main parts:
    - visual backbone: extracts grid features from the image
    - language backbone: encodes the ground-truth captions
    - mmss heads: take the grid features and the caption encodings and compute
      the self-supervised losses.
"""
def __init__(self, cfg):
super(MMSSGridModel, self).__init__()
self.backbone = build_backbone(cfg)
self.language_backbone = build_language_backbone(cfg)
self.mmss_heads = build_mmss_heads(cfg,
v_dim=self.backbone.out_channels,
l_dim=self.language_backbone.out_channels,
loc_dim=2,
backbone=self.language_backbone.body,
)
self.mvm = cfg.MODEL.MMSS_HEAD.TRANSFORMER.MASKED_VISUAL_MODELING
self.spatial_dropout = cfg.MODEL.MMSS_HEAD.SPATIAL_DROPOUT
def forward(self, images, targets):
"""
Arguments:
images (list[Tensor] or ImageList): images to be processed
targets (list[str]): ground-truth captions for images (optional)
Returns:
result tuple: (dict[Tensor], dict[Tensor]): losses and other information.
"""
images = to_image_list(images)
visual_grid_features = self.backbone(images.tensors)[0]
_, _, image_h, image_w = images.tensors.shape
batch_size, dim, grid_h, grid_w = visual_grid_features.shape
max_num_regions = grid_h * grid_w
flattened_features = visual_grid_features.reshape(
[batch_size, dim, max_num_regions]).permute(0, 2, 1)
image_sizes = np.asarray(images.image_sizes, dtype=np.float32)
grid_sizes = np.zeros(image_sizes.shape, dtype=np.int32)
grid_sizes[:, 0] = np.ceil(image_sizes[:, 0] * grid_h / image_h)
grid_sizes[:, 1] = np.ceil(image_sizes[:, 1] * grid_w / image_w)
grid_mask = np.zeros([batch_size, grid_h, grid_w], dtype=np.uint8)
for i in range(batch_size):
grid_mask[i, :grid_sizes[i, 0], :grid_sizes[i, 1]] = 1
flattened_mask = grid_mask.reshape([batch_size, max_num_regions])
loc_x = np.zeros([batch_size, grid_h, grid_w], dtype=np.float32)
loc_y = np.zeros([batch_size, grid_h, grid_w], dtype=np.float32)
for i in range(batch_size):
y = (np.arange(grid_sizes[i, 0], dtype=np.float32) + 0.5) / grid_sizes[i, 0]
x = (np.arange(grid_sizes[i, 1], dtype=np.float32) + 0.5) / grid_sizes[i, 1]
loc_x[i, :grid_sizes[i, 0], :grid_sizes[i, 1]] = x[None, :]
loc_y[i, :grid_sizes[i, 0], :grid_sizes[i, 1]] = y[:, None]
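        # Illustrative note: for a 2x2 grid fully covered by the image, loc_x/loc_y
        # take the cell-centre values 0.25 and 0.75, so each region is tagged with
        # its normalised (x, y) centre in [0, 1].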
flattened_loc = np.stack([loc_x, loc_y], axis=-1).reshape(
[batch_size, max_num_regions, 2])
flattened_loc = torch.tensor(flattened_loc).cuda()
if self.spatial_dropout > 0 and self.training:
subsampled_features = []
subsampled_loc = []
new_mask = np.zeros([batch_size, self.spatial_dropout], dtype=np.uint8)
for i in range(batch_size):
idx = np.where(flattened_mask[i])[0]
np.random.shuffle(idx)
n = min(self.spatial_dropout, idx.shape[0])
idx = idx[:n]
subsampled_features.append(flattened_features[i, idx])
subsampled_loc.append(flattened_loc[i, idx])
new_mask[i, :n] = 1
flattened_features = torch.nn.utils.rnn.pad_sequence(
subsampled_features, batch_first=True)
flattened_loc = torch.nn.utils.rnn.pad_sequence(
subsampled_loc, batch_first=True)
flattened_mask = new_mask
input_image = {
'region_features': flattened_features,
'region_mask': torch.tensor(flattened_mask).cuda(),
'region_loc': flattened_loc,
'mvm_mask': torch.zeros(batch_size, max_num_regions).cuda(),
'target_region_features': flattened_features,
}
if self.mvm:
raise NotImplementedError
input_caption = self.language_backbone(targets)
mmss_outputs = {}
mmss_losses = {}
for head in self.mmss_heads:
o, l = self.mmss_heads[head](input_image, input_caption)
mmss_outputs.update(o)
mmss_losses.update(l)
for v in mmss_losses.values():
if torch.isnan(v):
print(self.mmss_heads['GroundingHead'].log_info)
print(image_sizes, grid_sizes)
raise ValueError()
return mmss_outputs, mmss_losses
|
{"hexsha": "67064f763ed642f7cc55bdfc2773efbecfc6ed85", "size": 4946, "ext": "py", "lang": "Python", "max_stars_repo_path": "maskrcnn_benchmark/modeling/detector/mmss_gcnn.py", "max_stars_repo_name": "mmaaz60/ovr-cnn", "max_stars_repo_head_hexsha": "28d125ae51f0216575e12b5d5ded878874131b22", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 72, "max_stars_repo_stars_event_min_datetime": "2021-03-18T22:06:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T03:34:04.000Z", "max_issues_repo_path": "maskrcnn_benchmark/modeling/detector/mmss_gcnn.py", "max_issues_repo_name": "mmaaz60/ovr-cnn", "max_issues_repo_head_hexsha": "28d125ae51f0216575e12b5d5ded878874131b22", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2021-03-27T01:23:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-28T08:06:09.000Z", "max_forks_repo_path": "maskrcnn_benchmark/modeling/detector/mmss_gcnn.py", "max_forks_repo_name": "mmaaz60/ovr-cnn", "max_forks_repo_head_hexsha": "28d125ae51f0216575e12b5d5ded878874131b22", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11, "max_forks_repo_forks_event_min_datetime": "2021-04-13T04:57:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T19:55:43.000Z", "avg_line_length": 40.2113821138, "max_line_length": 88, "alphanum_fraction": 0.6249494541, "include": true, "reason": "import numpy", "num_tokens": 1158}
|
import numpy as np
from sklearn.linear_model import LinearRegression
import os
import pandas as pd
import statsmodels.api as sm
# Define working folder
out_folder = 'results'
if not os.path.exists(out_folder):
os.makedirs(out_folder)
os.chdir(out_folder)
# Define countries and states to loop over
file_list = ['Average_Temperature_Country_Level.csv','Average_Temperature_State_Level.csv','Average_Temperature_Global_Level.csv']
#Define years for linear regression
years = np.arange(1950,2021)
years_1970 = np.arange(1970,2021)
for file_name in file_list:
country_df = pd.read_csv(file_name)
if 'Global' in file_name:
country_df['Region'] = 'Global'
#Separate attribute columns and data
attribute_columns = [x for x in list(country_df) if x not in map(str, years)]
attribute_columns = [x for x in attribute_columns if x not in ['system:index','.geo']]
out_df = country_df.copy()[attribute_columns]
#Get temperature columns and data
temperature_columns = [x for x in list(country_df) if x in map(str, years)]
temperature_columns.sort()
temperature_data = country_df[temperature_columns]
#Reorder columns and resave CSV
select_columns = attribute_columns+temperature_columns
select_columns = [x for x in select_columns if x not in ['system:index','.geo']]
country_df = country_df[select_columns]
country_df.to_csv(file_name,index=False)
for i,row in temperature_data.iterrows():
#Check if data is available for all years
if ~row.isnull().any():
#Calculate change in average temperature over different time period lengths
change = np.mean(row[[str(x) for x in np.arange(2009,2021)]])-np.mean(row[[str(x) for x in np.arange(1950,1971)]])
out_df.at[i,'Change_2009_2020-1950_1970'] = change
#Get list of values for linear regression
row_temp_data = row.values
#Fit linear regression
model = LinearRegression().fit(years.reshape((-1, 1)), row_temp_data)
#Save values
out_df.at[i,'LinReg_1950_2020_Intercept'] = model.intercept_
out_df.at[i,'LinReg_1950_2020_Slope'] = model.coef_[0]
out_df.at[i,'LinReg_1950_2020_R_Squared'] = model.score(years.reshape((-1, 1)), row_temp_data)
#Get p-value
statsmodel = sm.OLS(row_temp_data,years)
fii = statsmodel.fit()
p_values = fii.summary2().tables[1]['P>|t|']
out_df.at[i,'LinReg_1950_2020_P_Value'] = p_values.values[0]
#Get list of values for linear regression
row_temp_data = row[map(str, years_1970)].values
#Fit linear regression
model = LinearRegression().fit(years_1970.reshape((-1, 1)), row_temp_data)
#Save values
out_df.at[i,'LinReg_1970_2020_Intercept'] = model.intercept_
out_df.at[i,'LinReg_1970_2020_Slope'] = model.coef_[0]
out_df.at[i,'LinReg_1970_2020_R_Squared'] = model.score(years_1970.reshape((-1, 1)), row_temp_data)
#Get p-value
statsmodel = sm.OLS(row_temp_data,years_1970)
fii = statsmodel.fit()
p_values = fii.summary2().tables[1]['P>|t|']
out_df.at[i,'LinReg_1970_2020_P_Value'] = p_values.values[0]
#if data is not available for all years, save values as Null
else:
out_df.at[i,'Change_2009_2020-1950_1970'] = None
out_df.at[i,'LinReg_1950_2020_Intercept'] = None
out_df.at[i,'LinReg_1950_2020_Slope'] = None
out_df.at[i,'LinReg_1950_2020_R_Squared'] = None
out_df.at[i,'LinReg_1950_2020_P_Value'] = None
out_df.at[i,'LinReg_1970_2020_Intercept'] = None
out_df.at[i,'LinReg_1970_2020_Slope'] = None
out_df.at[i,'LinReg_1970_2020_R_Squared'] = None
out_df.at[i,'LinReg_1970_2020_P_Value'] = None
#Save results
out_df.to_csv(file_name.replace('.csv','_regression_and_change.csv'),index=False)
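# Illustrative note: 'LinReg_1950_2020_Slope' is the fitted temperature change per
# year, so a slope of 0.02 would correspond to roughly 0.02 * 70 = 1.4 degrees of
# warming over the 1950-2020 period.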
|
{"hexsha": "42e05c5c91881b919c47aa26f040fd15c15b96f7", "size": 4148, "ext": "py", "lang": "Python", "max_stars_repo_path": "req_016_facebook_average_surface_temperature/regression_and_change.py", "max_stars_repo_name": "resource-watch/blog-analysis", "max_stars_repo_head_hexsha": "b1047f0d5bb9eb7ad037f9cb7a41f1bc8d6f3d28", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-09-15T03:52:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-22T02:44:45.000Z", "max_issues_repo_path": "req_016_facebook_average_surface_temperature/regression_and_change.py", "max_issues_repo_name": "resource-watch/blog-analysis", "max_issues_repo_head_hexsha": "b1047f0d5bb9eb7ad037f9cb7a41f1bc8d6f3d28", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "req_016_facebook_average_surface_temperature/regression_and_change.py", "max_forks_repo_name": "resource-watch/blog-analysis", "max_forks_repo_head_hexsha": "b1047f0d5bb9eb7ad037f9cb7a41f1bc8d6f3d28", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2019-05-29T19:02:29.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-12T13:31:12.000Z", "avg_line_length": 42.3265306122, "max_line_length": 130, "alphanum_fraction": 0.6494696239, "include": true, "reason": "import numpy,import statsmodels", "num_tokens": 1093}
|
[STATEMENT]
lemma 7: "{} \<turnstile> P IFF
(Q\<^sub>1 AND Neg (PfP \<guillemotleft>P IMP Q\<^sub>1\<guillemotright>) XOR
Q\<^sub>2 AND Neg (PfP \<guillemotleft>P AND Neg Q\<^sub>1 IMP Q\<^sub>2\<guillemotright>))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {} \<turnstile> local.P IFF (Q\<^sub>1 AND Neg (PfP \<guillemotleft>local.P IMP Q\<^sub>1\<guillemotright>) XOR Q\<^sub>2 AND Neg (PfP \<guillemotleft>local.P AND Neg Q\<^sub>1 IMP Q\<^sub>2\<guillemotright>))
[PROOF STEP]
using P'
[PROOF STATE]
proof (prove)
using this:
{} \<turnstile> local.P IFF local.H(X0::=\<guillemotleft>local.P\<guillemotright>)
goal (1 subgoal):
1. {} \<turnstile> local.P IFF (Q\<^sub>1 AND Neg (PfP \<guillemotleft>local.P IMP Q\<^sub>1\<guillemotright>) XOR Q\<^sub>2 AND Neg (PfP \<guillemotleft>local.P AND Neg Q\<^sub>1 IMP Q\<^sub>2\<guillemotright>))
[PROOF STEP]
unfolding H_def
[PROOF STATE]
proof (prove)
using this:
{} \<turnstile> local.P IFF (Q\<^sub>1 AND Neg (PfP \<guillemotleft>HVar X0 HIMP HFm Q\<^sub>1\<guillemotright>) XOR Q\<^sub>2 AND Neg (PfP \<guillemotleft>HVar X0 HAND HNeg (HFm Q\<^sub>1) HIMP HFm Q\<^sub>2\<guillemotright>))(X0::=\<guillemotleft>local.P\<guillemotright>)
goal (1 subgoal):
1. {} \<turnstile> local.P IFF (Q\<^sub>1 AND Neg (PfP \<guillemotleft>local.P IMP Q\<^sub>1\<guillemotright>) XOR Q\<^sub>2 AND Neg (PfP \<guillemotleft>local.P AND Neg Q\<^sub>1 IMP Q\<^sub>2\<guillemotright>))
[PROOF STEP]
by (simp add: Q_closed forget_subst_fm[unfolded fresh_def])
|
{"llama_tokens": 662, "file": "Surprise_Paradox_Surprise_Paradox", "length": 3}
|
import requests
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
from datetime import date
import time
def add():
try:
df = pd.read_csv('data pool/raw_android.csv', index_col=0)
except:
df = pd.DataFrame()
to_add = dict()
month = input('Month: ')
if month in df.columns:
command = input(f'{month} is already included in the file, do you want to replace it? (y/other keys to leave) ')
if command != 'y':
return None
df.drop(month, axis=1, inplace=True)
with open('names_urls.txt', 'r', encoding='UTF-8') as f:
names_urls = f.read().splitlines()
names_urls = [ele for ele in names_urls if ele != '' and ele != "\ufeff"]
for i, name_url in enumerate(names_urls):
if (i % 10 == 0 and i > 0):
print('Taking a 10-second break to pretend to be human...')
time.sleep(10)
app_name = name_url.split(',')[0]
url = name_url.split(',')[-1]
response = requests.get(url)
soup = BeautifulSoup(response.content, 'html.parser')
print(i+1, response, app_name)
target = [element for element in list(soup.body) if 'downloads_and_revenue' in element][0]
target = '{' + target.replace(' ', '') + '}'
kocomponent = 'kocomponent'
name = 'name'
params = 'params'
null = float('NaN')
true = True
false = False
target = eval(target)
downloads = target['kocomponent']['params']['downloads_and_revenue']['downloads']
try:
if '<' in downloads:
downloads = downloads.replace('<', '')
if 'k' in downloads:
downloads = int(downloads[:-1])*1000
elif 'm' in downloads:
downloads = int(downloads[:-1])*1000000
except:
pass
revenue = target['kocomponent']['params']['downloads_and_revenue']['revenue']
try:
if '<' in revenue:
revenue = revenue.replace('<', '')
if 'k' in revenue:
revenue = int(revenue[1:-1])*1000
elif 'm' in revenue:
revenue = int(revenue[1:-1])*1000000
except:
pass
rating = target['kocomponent']['params']['current_rating']
rating_count = target['kocomponent']['params']['current_rating_count']
to_add[app_name] = str([downloads, revenue, rating, rating_count])
d = {month:to_add}
to_add_df = pd.DataFrame(d)
df = pd.concat([df, to_add_df], axis=1)
df.to_csv('data pool/raw_android.csv')
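# Quick sketch of the size-suffix normalisation performed in add() above (values are made up
# for illustration): a downloads string of '5k' becomes 5 * 1000 = 5000, '<1m' has the '<'
# stripped and becomes 1 * 1000000, and a plain count such as '750' is left unchanged.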
def e0(self):
nan = float('NaN')
try:
return eval(self)[0]
except:
return float('NaN')
def e1(self):
nan = float('NaN')
try:
return eval(self)[1]
except:
return float('NaN')
def e2(self):
nan = float('NaN')
try:
return eval(self)[2]
except:
return float('NaN')
def e3(self):
nan = float('NaN')
try:
return eval(self)[3]
except:
return float('NaN')
def extract():
df = pd.read_csv('data pool/raw_android.csv', index_col=0)
df1 = pd.DataFrame()
for col in df:
df1[col] = df[col].apply(e0)
df1.to_excel('lab/downloads.xlsx')
df = pd.read_csv('data pool/raw_android.csv', index_col=0)
df1 = pd.DataFrame()
for col in df:
df1[col] = df[col].apply(e1)
df1.to_excel('lab/revenue.xlsx')
df1 = pd.DataFrame()
for col in df:
df1[col] = df[col].apply(e2)
df1.to_excel('lab/rating.xlsx')
df1 = pd.DataFrame()
for col in df:
df1[col] = df[col].apply(e3)
df1.to_excel('lab/rating_count.xlsx')
if __name__ == '__main__':
command = input("What can I help you? \'menu\' to check all the commands ")
while command != 'exit':
if command == 'add':
add()
elif command == 'extract':
extract()
elif command == 'menu':
print("add -- add new app data to data pool")
print('extract -- extract the excel file you want')
print('exit -- leave the service')
command = input("What can I help you? \'menu\' to check all the commands ")
|
{"hexsha": "7757c9f7a51d3ef3389c73cf08a0797884498ffd", "size": 3665, "ext": "py", "lang": "Python", "max_stars_repo_path": "android/sensortower_android.py", "max_stars_repo_name": "zachkongoyu/Sensor-Tower", "max_stars_repo_head_hexsha": "4d4ebebde206b11316d8814a89659e1e79a8550a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "android/sensortower_android.py", "max_issues_repo_name": "zachkongoyu/Sensor-Tower", "max_issues_repo_head_hexsha": "4d4ebebde206b11316d8814a89659e1e79a8550a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "android/sensortower_android.py", "max_forks_repo_name": "zachkongoyu/Sensor-Tower", "max_forks_repo_head_hexsha": "4d4ebebde206b11316d8814a89659e1e79a8550a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.7043010753, "max_line_length": 114, "alphanum_fraction": 0.6436562074, "include": true, "reason": "import numpy", "num_tokens": 1046}
|
"""Unit tests for custom dot operation"""
import numpy as np
import torch
from absl.testing import absltest
from dlrm.cuda_ext import dotBasedInteract
DECIMAL_MATRIX = 0
DECIMAL_LINEAR = 0
MAX_INT_VALUE = 1024 # wrap integers at `MAX_INT_VALUE` via modulo (used in debugging only).
SEED = 12345
SCALE = 1 # Scale the random numbers to check different ranges.
PADDING_SIZE = 1
VERBOSE = False
np.random.seed(seed=SEED)
def log(verbose, *args, **kwargs):
if verbose:
print(*args, **kwargs)
def elems_almost_equal(achieved, reference, error_ratio):
"""Checks whether the relative error between two float values is below error_ratio."""
ratio = abs(achieved - reference)
if reference != 0:
ratio = ratio / reference
return ratio < error_ratio
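# Worked example (numbers are illustrative, not from a real run):
# >>> elems_almost_equal(10.1, 10.0, 0.02)
# True # relative error is 0.1 / 10.0 = 0.01, which is below the 0.02 threshold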
def print_differences(arr, ref, error_ratio, verbose=False):
"""Prints values whose relative difference is larger than error_ratio."""
if not verbose:
return
arr = arr.astype(float)
ref = ref.astype(float)
assert arr.shape == ref.shape
if len(arr.shape) == 3:
batch_size = arr.shape[0]
num_rows = arr.shape[1]
num_cols = arr.shape[2]
for i in range(batch_size):
for j in range(num_rows):
for k in range(num_cols):
if not elems_almost_equal(arr[i][j][k], ref[i][j][k], error_ratio):
print(i, j, k, arr[i][j][k], ref[i][j][k])
elif len(arr.shape) == 2:
batch_size = arr.shape[0]
num_cols = arr.shape[1]
for i in range(batch_size):
for k in range(num_cols):
if not elems_almost_equal(arr[i][k], ref[i][k], error_ratio):
print(i, k, arr[i][k], ref[i][k])
else:
raise NotImplementedError
def dot_based_interact_test(num_rows,
num_cols,
batch_size,
synthesize_mode,
upstream_grad_synthesize_mode,
direction,
linear_output,
decimal,
max_value=MAX_INT_VALUE,
verbose=VERBOSE):
"""Computes the forward and backward for custom dot and checks the result."""
# Input tensor configuration and initialization
if synthesize_mode == 'seq':
bottom_mlp_output_np = np.arange(batch_size * num_cols).reshape(batch_size, num_cols)
bottom_mlp_output_np = bottom_mlp_output_np % max_value
embedding_outputs_np = []
for i in range(num_rows - 1): # `num_rows` embedding and one MLP
tmp = np.arange(batch_size * num_cols).reshape(batch_size, num_cols)
tmp = tmp % max_value
embedding_outputs_np.append(tmp)
elif synthesize_mode == 'rand':
bottom_mlp_output_np = np.random.randn(batch_size, num_cols)
bottom_mlp_output_np = bottom_mlp_output_np * SCALE
embedding_outputs_np = []
for i in range(num_rows - 1):
tmp = np.random.randn(batch_size, num_cols)
tmp = tmp * SCALE
embedding_outputs_np.append(tmp)
elif synthesize_mode == 'ones':
bottom_mlp_output_np = np.ones((batch_size, num_cols))
embedding_outputs_np = []
for i in range(num_rows - 1):
tmp = np.ones((batch_size, num_cols))
embedding_outputs_np.append(tmp)
else:
print('Invalid synthesize_mode {}'.format(synthesize_mode))
raise NotImplementedError
# Identical inputs for reference and test
ref_bottom_mlp_output = torch.Tensor(bottom_mlp_output_np).half().cuda().requires_grad_()
test_bottom_mlp_output = torch.Tensor(bottom_mlp_output_np).half().cuda().requires_grad_()
ref_embedding_outputs = []
test_embedding_outputs = []
for elem in embedding_outputs_np:
ref_embedding_outputs.append(torch.Tensor(elem).half().cuda().requires_grad_())
test_embedding_outputs.append(torch.Tensor(elem).half().cuda().requires_grad_())
assert ref_bottom_mlp_output.shape == test_bottom_mlp_output.shape
assert ref_bottom_mlp_output.shape[0] == batch_size
assert ref_bottom_mlp_output.shape[1] == num_cols
assert ref_embedding_outputs[0].shape == test_embedding_outputs[0].shape
assert len(ref_embedding_outputs) == len(test_embedding_outputs)
assert len(ref_embedding_outputs) == num_rows - 1
assert ref_embedding_outputs[0].shape[0] == batch_size
assert ref_embedding_outputs[0].shape[1] == num_cols
reference_input = torch.cat([ref_bottom_mlp_output] + ref_embedding_outputs, dim=1)
test_input = torch.cat([test_bottom_mlp_output] + test_embedding_outputs, dim=1)
reference_input = reference_input.view((batch_size, -1, num_cols))
test_input = test_input.view((batch_size, -1, num_cols))
assert reference_input.shape == test_input.shape
assert reference_input.shape[0] == batch_size
assert reference_input.shape[1] == num_rows
assert reference_input.shape[2] == num_cols
ref_pad = torch.zeros(batch_size, 1, dtype=ref_bottom_mlp_output.dtype, device=ref_bottom_mlp_output.device)
# FWD path in reference
interaction = torch.bmm(reference_input, torch.transpose(reference_input, 1, 2))
tril_indices_row = [i for i in range(interaction.shape[1]) for j in range(i)]
tril_indices_col = [j for i in range(interaction.shape[2]) for j in range(i)]
interaction_flat = interaction[:, tril_indices_row, tril_indices_col]
reference_output = torch.cat((ref_bottom_mlp_output, interaction_flat, ref_pad), dim=1)
num_output_elems = (num_rows * (num_rows - 1) >> 1) + num_cols + PADDING_SIZE
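# Worked example for the DLRM-specific shape exercised in the tests below (num_rows=27, num_cols=128):
# 27 * 26 / 2 = 351 pairwise interaction terms + 128 bottom-MLP values + 1 padding element = 480 columns.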
assert reference_output.shape[0] == batch_size
assert reference_output.shape[1] == num_output_elems
if linear_output:
reference_output = torch.sum(reference_output, dim=1)
reference_output = torch.sum(reference_output, dim=0)
# New FWD path
test_output = dotBasedInteract(test_input, test_bottom_mlp_output)
if linear_output:
test_output = torch.sum(test_output, dim=1)
test_output = torch.sum(test_output, dim=0)
assert test_output.shape == reference_output.shape
# FWD path test
if direction in ['fwd', "both"]:
log(verbose, 'Starting FWD Test ...')
print_differences(test_output.detach().cpu().numpy(), reference_output.detach().cpu().numpy(), decimal)
np.testing.assert_almost_equal(test_output.detach().cpu().numpy(),
desired=reference_output.detach().cpu().numpy(),
decimal=decimal)
log(verbose, 'FWD test ended successfully.')
if direction == 'fwd':
return
# BWD path
test_input.retain_grad()
reference_input.retain_grad()
if linear_output:
reference_output.backward()
test_output.backward()
else:
# Synthesize upstream gradient
if upstream_grad_synthesize_mode == 'ones':
upstream_grad = np.ones(reference_output.shape)
elif upstream_grad_synthesize_mode == 'seq':
upstream_grad = np.arange(reference_output.numel()).reshape(reference_output.shape)
upstream_grad = upstream_grad % max_value
elif upstream_grad_synthesize_mode == 'rand':
upstream_grad = np.random.randn(reference_output.numel()).reshape(reference_output.shape)
upstream_grad = upstream_grad * SCALE
else:
print('Invalid upstream_grad_synthesize_mode {}'.format(synthesize_mode))
raise NotImplementedError
reference_upstream_grad = torch.Tensor(upstream_grad).half().cuda()
test_upstream_grad = torch.Tensor(upstream_grad).half().cuda()
reference_output.backward(reference_upstream_grad)
test_output.backward(test_upstream_grad)
log(verbose, 'Starting BWD Test ...')
print_differences(test_input.grad.detach().cpu().numpy(), reference_input.grad.detach().cpu().numpy(), decimal)
print_differences(test_bottom_mlp_output.grad.detach().cpu().numpy(),
ref_bottom_mlp_output.grad.detach().cpu().numpy(), decimal)
np.testing.assert_almost_equal(test_input.grad.detach().cpu().numpy(),
desired=reference_input.grad.detach().cpu().numpy(),
decimal=decimal)
np.testing.assert_almost_equal(test_bottom_mlp_output.grad.detach().cpu().numpy(),
desired=ref_bottom_mlp_output.grad.detach().cpu().numpy(),
decimal=decimal)
log(verbose, 'BWD test ended successfully.')
class AccuracyWithoutSelfInteraction(absltest.TestCase):
"""Unit tests for testing forward and backward precision of custom dot-based interact"""
def test_dlrm_specific_matrix_output(self):
"Test for matrix output."
for num_rows in [27]:
for num_cols in [128]:
for batch_size in [1024, 2048, 8192, 16384]:
dot_based_interact_test(num_rows=num_rows,
num_cols=num_cols,
batch_size=batch_size,
synthesize_mode='rand',
upstream_grad_synthesize_mode='rand',
direction='both',
linear_output=False,
decimal=DECIMAL_MATRIX)
def test_dlrm_agnostic_matrix_output(self):
"Test for matrix output."
for num_rows in [6, 11, 22, 32]:
for num_cols in [16, 17, 64, 120]:
for batch_size in [2048]:
dot_based_interact_test(num_rows=num_rows,
num_cols=num_cols,
batch_size=batch_size,
synthesize_mode='rand',
upstream_grad_synthesize_mode='rand',
direction='both',
linear_output=False,
decimal=DECIMAL_MATRIX)
def test_dlrm_specific_linear_output(self):
"Test for linear output."
for num_rows in [27]:
for num_cols in [128]:
for batch_size in [2048]:
dot_based_interact_test(num_rows=num_rows,
num_cols=num_cols,
batch_size=batch_size,
synthesize_mode='rand',
upstream_grad_synthesize_mode='rand',
direction='both',
linear_output=True,
decimal=DECIMAL_LINEAR)
if __name__ == '__main__':
absltest.main()
|
{"hexsha": "913d36081051eaba589aec1df7e853e6643cd453", "size": 11175, "ext": "py", "lang": "Python", "max_stars_repo_path": "NVIDIA/benchmarks/dlrm/implementations/pytorch/tests/dot_based_interact.py", "max_stars_repo_name": "goswamig/training_results_v0.7", "max_stars_repo_head_hexsha": "4278ce8a0f3d4db6b5e6054277724ca36278d7a3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 48, "max_stars_repo_stars_event_min_datetime": "2020-07-29T18:09:23.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-09T01:53:33.000Z", "max_issues_repo_path": "NVIDIA/benchmarks/dlrm/implementations/pytorch/tests/dot_based_interact.py", "max_issues_repo_name": "goswamig/training_results_v0.7", "max_issues_repo_head_hexsha": "4278ce8a0f3d4db6b5e6054277724ca36278d7a3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2021-04-02T02:28:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-26T18:23:59.000Z", "max_forks_repo_path": "NVIDIA/benchmarks/dlrm/implementations/pytorch/tests/dot_based_interact.py", "max_forks_repo_name": "lablup/training_results_v0.7", "max_forks_repo_head_hexsha": "f5bb59aa0f8b18b602763abe47d1d24d0d54b197", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 42, "max_forks_repo_forks_event_min_datetime": "2020-08-01T06:41:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-20T10:33:08.000Z", "avg_line_length": 44.5219123506, "max_line_length": 119, "alphanum_fraction": 0.6020581655, "include": true, "reason": "import numpy", "num_tokens": 2268}
|
from scipy import misc
import numpy as np
from training import train_net
import inception
from inference import infere
def train(location='./train/'):
"""
The training procedure is triggered here. OPTIONAL to run; everything that is required for testing the model
must be saved to file (e.g., pickle) so that the test procedure can load, execute and report
:param location: The location of the training data folder hierarchy
:return: nothing
"""
train_net(train_dir=location)
def test(queries=list(), location='./test'):
"""
Test your system with the input. For each input, generate a list of IDs that is returned
:param queries: list of image-IDs. Each element is assumed to be an entry in the test set. Hence, the image
with id <id> is located at './test/pics/<id>.jpg'. Make sure this is the file you work with...
:param location: The location of the test data folder hierarchy
:return: a dictionary whose keys are the image-IDs in the queries list, and whose values are the lists of
image-IDs retrieved for that input
"""
# # ##### The following is an example implementation -- that would lead to 0 points in the evaluation :-)
# my_return_dict = {}
#
# # Load the dictionary with all training files. This is just to get a hold of which
# # IDs are there; will choose randomly among them
# training_labels = pickle.load(open('./train/pickle/combined.pickle', 'rb'))
# training_labels = list(training_labels.keys())
#
# for query in queries:
#
# # This is the image. Just opening if here for the fun of it; not used later
# image = Image.open(location + '/pics/' + query + '.jpg')
# image.show()
#
# # Generate a random list of 50 entries
# cluster = [training_labels[random.randint(0, len(training_labels) - 1)] for idx in range(50)]
# my_return_dict[query] = cluster
my_return_dict = {}
# Inception
inception.maybe_download()
model = inception.Inception()
for query in queries:
# Data
image_path = location + '/pics/' + query + '.jpg'
image_data = (misc.imread(image_path)[:, 0:192, :3]).astype(np.float32)
image_tranfer_values = model.transfer_values(image_path, image_data)
# Inference
my_return_dict[query] = infere(image_tranfer_values)
return my_return_dict
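# Hedged usage sketch (the query ID below is a placeholder, not a real test-set entry):
# >>> retrieved = test(queries=['12345'], location='./test')
# >>> retrieved['12345'] # list of image-IDs retrieved for './test/pics/12345.jpg'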
|
{"hexsha": "af06b74404d4a848fe3c856a7f8cf369a621be84", "size": 2411, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "Remeus/simage", "max_stars_repo_head_hexsha": "5a81f43288a594d499a894a2311497efc48b151d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-06-18T10:08:19.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-15T11:38:33.000Z", "max_issues_repo_path": "main.py", "max_issues_repo_name": "Remeus/simage", "max_issues_repo_head_hexsha": "5a81f43288a594d499a894a2311497efc48b151d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-02-18T00:09:45.000Z", "max_issues_repo_issues_event_max_datetime": "2019-02-18T00:09:45.000Z", "max_forks_repo_path": "main.py", "max_forks_repo_name": "Remeus/simage", "max_forks_repo_head_hexsha": "5a81f43288a594d499a894a2311497efc48b151d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-16T17:33:55.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-16T17:33:55.000Z", "avg_line_length": 37.0923076923, "max_line_length": 113, "alphanum_fraction": 0.6715055993, "include": true, "reason": "import numpy,from scipy", "num_tokens": 574}
|
SUBROUTINE reset_params
USE precon2d, ONLY: ictrl_prec2d
USE vmec_main, ONLY: iequi, ivac, ftolv, fsqr, fsqz
USE vsvd, ONLY: pfac, phifac
USE timer_sub, ONLY: timer
IMPLICIT NONE
! 2d preconditioner
ictrl_prec2d = 0
iequi = 0
ivac = -1
fsqr = 1
fsqz = 1
ftolv = fsqr
pfac = 1
phifac = 1
timer = 0
END SUBROUTINE reset_params
|
{"hexsha": "dd51f6529b3757a74a540afee004858ff54974e6", "size": 434, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/VMEC2000/Sources/Initialization_Cleanup/reset_params.f", "max_stars_repo_name": "jonathanschilling/VMEC_8_49", "max_stars_repo_head_hexsha": "9f1954d83b2db13f4f4b58676badda4425caeeee", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/VMEC2000/Sources/Initialization_Cleanup/reset_params.f", "max_issues_repo_name": "jonathanschilling/VMEC_8_49", "max_issues_repo_head_hexsha": "9f1954d83b2db13f4f4b58676badda4425caeeee", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/VMEC2000/Sources/Initialization_Cleanup/reset_params.f", "max_forks_repo_name": "jonathanschilling/VMEC_8_49", "max_forks_repo_head_hexsha": "9f1954d83b2db13f4f4b58676badda4425caeeee", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.8695652174, "max_line_length": 57, "alphanum_fraction": 0.5599078341, "num_tokens": 163}
|
"""
pyrad.io.read_data_mxpol
========================
Functions for reading radar mxpol data files
.. autosummary::
:toctree: generated/
classes - MXPOL:
pyrad_MXPOL
classes - MCH:
pyrad_MCH
utilities - read:
row_stack
findTimes
int2float_radar
readMXPOLRadData
readCHRadData
utilities - config:
load_myconfig
get_mymetadata
get_elevation_metadata
generate_radar_table
generate_polvar_metadata
convert_polvar_name
"""
import time
import imp
import re
import os
import datetime
from copy import deepcopy
import warnings
import numpy as np
import netCDF4
try:
import h5py
_H5PY_AVAILABLE = True
except ImportError:
_H5PY_AVAILABLE = False
class MissingOptionalDependency(Exception):
""" Exception raised when a optional dependency is needed but not found. """
pass
import pyart
# -------------------------- classes - MXPOL ------------------------------ #
class pyrad_MXPOL(pyart.core.Radar):
def __init__(self, filename, field_names=None, max_range=np.Inf,
min_range=10000, pyrad_names=True):
# find information based on filename
all_files = [filename]
fname_basename = os.path.basename(filename)
if 'PPI' in fname_basename:
scan_type = 'ppi'
elif 'RHI' in fname_basename:
scan_type = 'rhi'
strdate = re.findall(r"([0-9]{8}-[0-9]{6})", fname_basename)[0]
date = datetime.datetime.strptime(strdate, '%Y%m%d-%H%M%S')
# if field name is None, take all available fields
if field_names is None:
field_names = ['Zh', 'Zdr', 'Kdp', 'Phidp', 'Rhohv', 'ZhCorr',
'ZdrCorr', 'RVel', 'Rvel', 'Sw', 'SNRh', 'SNRv', 'Psidp']
# convert fieldname if necessary
varnames = []
for fieldname in field_names:
newname = convert_polvar_name('LTE', fieldname)
varnames.append(newname)
# get labels, units etc
long_names = []
standard_names = []
units = []
vmin = []
vmax = []
for varname in varnames:
metadata = generate_polvar_metadata(varname)
standard_names.append(metadata['standard_name'])
long_names.append(metadata['long_name'])
units.append(metadata['units'])
vmin.append(metadata['valid_min'])
vmax.append(metadata['valid_max'])
# initiate empty vectors
N_sweeps = len(all_files)
fields = {}
fixed_angle = {}
fixed_angle['data'] = np.zeros(N_sweeps, )
sweep_start_ray_index = {}
sweep_start_ray_index['data'] = []
sweep_stop_ray_index = {}
sweep_stop_ray_index['data'] = []
for i, k in enumerate(varnames):
fields[k] = {}
fields[k]['data'] = []
fields[k]['long_name'] = long_names[i]
fields[k]['standard_name'] = standard_names[i]
fields[k]['units'] = units[i]
fields[k]['valid_min'] = vmin[i]
fields[k]['valid_max'] = vmax[i]
idx_start = 0
idx_stop = 0
elevations = []
azimuths = []
ranges = []
nyquist = []
# read data and create dictionaries
for i in range(N_sweeps):
metadata, data = readMXPOLRadData(
all_files[i], varnames, max_range)
if scan_type == 'rhi':
fixed_angle['data'][i] = np.round(np.mean(data['azimuth']))
elif scan_type == 'ppi':
fixed_angle['data'][i] = np.round(np.mean(data['elevation']))
[N_az, N_ranges] = data[varnames[0]].shape
idx_stop = idx_start + N_az - 1
sweep_start_ray_index['data'].append(idx_start)
sweep_stop_ray_index['data'].append(idx_stop)
idx_start = idx_stop + 1
elevations.extend(list(data['elevation']))
nyquist.extend([data['nyquist_vel']]*N_az)
azimuths.extend(list(data['azimuth']))
ranges.extend(list(data['range']))
for j, v in enumerate(varnames):
if v in data.keys():
if not(len(fields[v]['data'])):
fields[v]['data'] = data[v]
else:
fields[v]['data'] = row_stack(
fields[v]['data'], data[v])
else:
print('Variable '+v+' was not found in file!')
# mask NaNs
for v in varnames:
if not len(fields[v]['data']):
# Remove variable
fields.pop(v)
else:
fields[v]['data'] = np.ma.masked_equal(
fields[v]['data'], -99900.0)
[a, N_ranges] = fields[varnames[0]]['data'].shape
# create dictionaries according to pyART standard
latitude = {'data': np.asarray([data['latitude']]),
'units': data['lat_units']}
longitude = {'data': np.asarray([data['longitude']]),
'units': data['lon_units']}
altitude = {'data': np.asarray([data['altitude']]),
'units': data['alt_units']}
sweep_number = {'data': np.arange(0, len(all_files))}
sweep_mode = {'data': np.asarray([scan_type]*N_sweeps)}
instrument_parameters = {
'nyquist_velocity': {'data': np.asarray(nyquist)}}
azimuth = {'data': np.asarray(azimuths), 'units': data['azim_units']}
rrange = {'data': np.asarray(ranges),
'units': data['range_units']}
elevation = {'data': np.asarray(elevations),
'units': data['elev_units']}
sweep_start_ray_index['data'] = np.asarray(
sweep_start_ray_index['data'])
sweep_stop_ray_index['data'] = np.asarray(
sweep_stop_ray_index['data'])
time_units = 'seconds since ' + str(date)
time_data = {'data': data['time'], 'units': time_units}
# change keys to match pyART metranet keys
if pyrad_names:
fields_copy = deepcopy(fields)
for keys in fields_copy:
newkey = fields[keys]['standard_name']
fields[newkey] = fields.pop(keys)
# Create PyART instance
pyart.core.Radar.__init__(
self, time_data, rrange, fields, metadata, scan_type, latitude,
longitude, altitude, sweep_number, sweep_mode, fixed_angle,
sweep_start_ray_index, sweep_stop_ray_index, azimuth, elevation,
instrument_parameters=instrument_parameters)
# -------------------------- classes - IDL --------------------------- #
class pyrad_IDL(pyart.core.Radar):
def __init__(self, filename, field_names=None, max_range=np.Inf,
min_range=10000):
# find information based on filename
all_files = [filename]
fname_basename = os.path.basename(filename)
fname = netCDF4.Dataset(filename)
if 'PPI' in fname_basename:
scan_type = 'ppi'
elif 'RHI' in fname_basename:
scan_type = 'rhi'
strdate = re.findall(r"([0-9]{8}-[0-9]{6})", fname_basename)[0]
date = datetime.datetime.strptime(strdate, '%Y%m%d-%H%M%S')
# if field name is None, take all available fields
if field_names is None:
field_names = list(fname.variables.keys())
# convert fieldname if necessary
varnames = []
for fieldname in field_names:
newname = convert_polvar_name('IDL', fieldname)
varnames.append(newname)
# get labels, units etc
long_names = []
standard_names = []
units = []
vmin = []
vmax = []
for varname in varnames:
metadata = generate_polvar_metadata(varname)
standard_names.append(metadata['standard_name'])
long_names.append(metadata['long_name'])
units.append(metadata['units'])
vmin.append(metadata['valid_min'])
vmax.append(metadata['valid_max'])
# initiate empty vectors
N_sweeps = len(all_files)
fields = {}
fixed_angle = {}
fixed_angle['data'] = np.zeros(N_sweeps, )
sweep_start_ray_index = {}
sweep_start_ray_index['data'] = []
sweep_stop_ray_index = {}
sweep_stop_ray_index['data'] = []
for i, k in enumerate(varnames):
fields[k] = {}
fields[k]['data'] = []
fields[k]['long_name'] = long_names[i]
fields[k]['standard_name'] = standard_names[i]
fields[k]['units'] = units[i]
fields[k]['valid_min'] = vmin[i]
fields[k]['valid_max'] = vmax[i]
idx_start = 0
idx_stop = 0
elevations = []
azimuths = []
nyquist = []
# read data and create dictionaries
for i in range(N_sweeps):
metadata, data = readIDLRadData(
all_files[i], varnames, max_range)
if scan_type == 'rhi':
fixed_angle['data'][i] = np.round(np.mean(data['azimuth']))
elif scan_type == 'ppi':
fixed_angle['data'][i] = data['elevation'][0]
[N_az, N_ranges] = data[varnames[0]].shape
idx_stop = idx_start + N_az - 1
sweep_start_ray_index['data'].append(idx_start)
sweep_stop_ray_index['data'].append(idx_stop)
idx_start = idx_stop + 1
elevations.extend([data['elevation'][0]]*N_az)
nyquist.extend([data['nyquist_vel']]*N_az)
azimuths.extend(list(data['azimuth']))
warnings.warn("Warning, sweep rank could not be found, using first rank")
starttime, endtime = findTimes(1)
interval = ((endtime-starttime)/N_az)
#time_lapse = np.arange(starttime+(0.5*interval), endtime, interval)
# because this is a single sweep
time_lapse = np.around(
np.arange(0.+(0.5*interval), endtime-starttime, interval))
for j, v in enumerate(varnames):
if v in data.keys():
if len(fields[v]['data']) == 0:
fields[v]['data'] = data[v]
else:
fields[v]['data'] = row_stack(
fields[v]['data'], data[v])
else:
print('Variable '+v+' was not found in file!')
# mask NaNs
for v in varnames:
fields[v]['data'] = np.ma.masked_equal(
fields[v]['data'], -99900.0)
[a, N_ranges] = fields[varnames[0]]['data'].shape
# create dictionaries according to pyART standard
latitude = {'data': np.asarray([data['latitude']]),
'units': data['lat_units']}
longitude = {'data': np.asarray([data['longitude']]),
'units': data['lon_units']}
altitude = {'data': np.asarray([data['altitude']]),
'units': data['alt_units']}
sweep_number = {'data': np.arange(0, len(all_files))}
sweep_mode = {'data': np.asarray([scan_type]*N_sweeps)}
instrument_parameters = {
'nyquist_velocity': {'data': np.asarray(nyquist)}}
azimuth = {'data': np.asarray(azimuths), 'units': data['azim_units']}
rrange = {'data': np.arange(N_ranges)*data['resolution'],
'units': data['range_units']}
elevation = {'data': np.asarray(elevations),
'units': data['elev_units']}
sweep_start_ray_index['data'] = np.asarray(
sweep_start_ray_index['data'])
sweep_stop_ray_index['data'] = np.asarray(
sweep_stop_ray_index['data'])
time_units = 'seconds since ' + str(date)
time_lapse = np.asarray(time_lapse)
time_data = {'data': time_lapse, 'units': time_units}
# change keys to match pyART metranet keys
fields_copy = deepcopy(fields)
for keys in fields_copy:
newkey = fields[keys]['standard_name']
fields[newkey] = fields.pop(keys)
# Create PyART instance
pyart.core.Radar.__init__(
self, time_data, rrange, fields, metadata, scan_type, latitude,
longitude, altitude, sweep_number, sweep_mode, fixed_angle,
sweep_start_ray_index, sweep_stop_ray_index, azimuth, elevation,
instrument_parameters=instrument_parameters)
# -------------------------- classes - MCH --------------------------- #
class pyrad_MCH(pyart.core.Radar):
def __init__(self, filename, field_names=None, max_range=np.Inf):
# find information based on filename
all_files = [filename]
N_sweeps = len(all_files)
fname_basename = os.path.basename(filename)
# Get name of radar
index_letter = fname_basename[2]
radar_info = generate_radar_table(index_letter)
radar_name = radar_info['radarID']
# Get radar resolution
if fname_basename[1] == 'L':
rres = 500.
else:
rres = 83.3
scan_type = 'ppi'
scandate = datetime.datetime.strptime(
fname_basename[3:12], '%y%j%H%M')
self.scan_date = scandate.timetuple()
# if field name is None, take all available fields
if field_names is None:
field_names = ['Z', 'ZDR', 'ZV', 'V', 'W', 'RHO', 'CLUT', 'PHIDP']
# convert fieldname if necessary
varnames = []
for fieldname in field_names:
newname = convert_polvar_name('MCH', fieldname)
varnames.append(newname)
# get labels, units etc
long_names = []
standard_names = []
units = []
vmin = []
vmax = []
for varname in varnames:
metadata = generate_polvar_metadata(varname)
standard_names.append(metadata['standard_name'])
long_names.append(metadata['long_name'])
units.append(metadata['units'])
vmin.append(metadata['valid_min'])
vmax.append(metadata['valid_max'])
# initiate empty vectors
fields = {}
fixed_angle = {}
fixed_angle['data'] = np.zeros(N_sweeps, )
sweep_start_ray_index = {}
sweep_start_ray_index['data'] = []
sweep_stop_ray_index = {}
sweep_stop_ray_index['data'] = []
for i, k in enumerate(varnames):
fields[k] = {}
fields[k]['data'] = []
fields[k]['long_name'] = long_names[i]
fields[k]['standard_name'] = standard_names[i]
fields[k]['units'] = units[i]
fields[k]['valid_min'] = vmin[i]
fields[k]['valid_max'] = vmax[i]
# Initialize
idx_start = 0
idx_stop = 0
elevations = []
azimuths = []
nyquist = []
time_lapse = []
# read and organise data
for i in range(N_sweeps):
data = readCHRadData(
all_files[i], radar_name, varnames, rres, max_range)
fixed_angle['data'][i] = data['elevation']
[N_ranges, N_az] = data[varnames[0]].shape
idx_stop = idx_start + N_az - 1
sweep_start_ray_index['data'].append(idx_start)
sweep_stop_ray_index['data'].append(idx_stop)
idx_start = idx_stop + 1
elevations.extend([data['elevation']]*N_az)
nyquist.extend([data['nyquist_vel']]*N_az)
azimuths.extend(list(data['azimuth']))
# create list of times at the center of each ray
sweep_rank = 1
starttime, endtime = findTimes(sweep_rank)
interval = ((endtime-starttime)/len(list(data['azimuth'])))
time_lapse.extend(np.arange(
starttime+(0.5*interval), endtime, interval))
for j, v in enumerate(varnames):
if len(fields[v]['data']) == 0:
fields[v]['data'] = data[v].T
else:
fields[v]['data'] = row_stack(
fields[v]['data'], data[v].T)
# mask nans
for v in varnames:
fields[v]['data'] = np.ma.array(
fields[v]['data'], mask=np.isnan(fields[v]['data']))
sweep_start_ray_index['data'] = np.asarray(
sweep_start_ray_index['data'])
sweep_stop_ray_index['data'] = np.asarray(
sweep_stop_ray_index['data'])
metadata = {}
[a, N_ranges] = fields[varnames[0]]['data'].shape
latitude = {'data': np.array([radar_info['coordinates'][0]]),
'units': "DegreesNorth"}
longitude = {'data': np.array([radar_info['coordinates'][1]]),
'units': "DegreesEast"}
altitude = {'data': np.array([radar_info['altitude']]),
'units': "MetersAboveSeaLevel"}
sweep_number = {'data': np.arange(0, len(all_files))}
sweep_mode = {'data': np.asarray(['ppi']*N_sweeps)}
instrument_parameters = {
'nyquist_velocity': {'data': np.array(nyquist)}}
metadata['Source'] = (
"Operational radar data processed at MeteoSwiss Locarno-Monti")
metadata['Institution'] = (
"MeteoSwiss, MDR, Locarno-Monti, Switzerland")
metadata['History'] = [
"created: %s, " % time.ctime(os.path.getctime(filename)) +
"last modified: %s" % time.ctime(os.path.getmtime(filename))]
metadata['ContactInformation'] = "marc.schneebeli@meteosvizzera.ch"
azimuth = {'data': np.array(azimuths), 'units': "Degrees"}
rrange = {'data': np.arange(N_ranges)*data['resolution'],
'units': "Meters"}
elevation = {'data': np.array(elevations), 'units': "Degrees"}
time_units = 'seconds since '+str(scandate)
time_lapse = np.asarray(time_lapse)
scantime = {'data': time_lapse, 'units': time_units}
# change keys to match pyART metranet keys
fields_copy = deepcopy(fields)
for keys in fields_copy:
newkey = fields[keys]['standard_name']
fields[newkey] = fields.pop(keys)
# Create PyART instance
pyart.core.Radar.__init__(
self, scantime, rrange, fields, metadata, scan_type, latitude,
longitude, altitude, sweep_number, sweep_mode, fixed_angle,
sweep_start_ray_index, sweep_stop_ray_index, azimuth, elevation,
instrument_parameters=instrument_parameters)
# ----------------------- utilities - read --------------------- #
def row_stack(a1, a2):
"""
Stacks data from subsequent sweeps, padding the narrower array with "empty"
columns so that both arrays have the same number of range gates.
Inputs
------
a1: np.array
destination array
a2: np.array
array which is added onto the first array
Returns
-------
out: np.array
stacked destination and additional array, with uniform shape
"""
[N1, M1] = a1.shape
[N2, M2] = a2.shape
if M1 > M2:
a2 = np.pad(a2, ((0, 0), (0, M1-M2)), mode='constant',
constant_values=-9999999)
elif M2 > M1:
a1 = np.pad(a1, ((0, 0), (0, M2-M1)), mode='constant',
constant_values=-9999999)
out = np.vstack((a1, a2))
out[out == -9999999] = np.nan
return out
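# Minimal usage sketch (shapes are illustrative only, not taken from a real radar file):
# >>> out = row_stack(np.zeros((2, 4)), np.zeros((3, 6)))
# >>> out.shape
# (5, 6) # the two columns missing from the first array are padded and returned as NaN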
def findTimes(num_sweep):
"""
Finds the times at the beginning and at the end of each sweep. Information
comes from the elapsed time since the beginning of the volume scan, from
the Rad4Alp: Specifications/ Request for Proposal (RFP) document.
Inputs
------
num_sweep: int
rank of the sweep
Returns
-------
elapsed_times[num_sweep][0]: float
the elapsed time since the beginning of the volume scan at the
beginning of the sweep
elapsed_times[num_sweep][1]: float
the elapsed time since the beginning of the volume scan at the end of
the sweep
"""
elapsed_times = {9: [0, 11.4],
7: [11.4, 22.8],
5: [22.8, 39.2],
3: [39.3, 60.5],
1: [60.5, 84.7],
19: [84.7, 97.2],
17: [97.2, 109.6],
15: [109.6, 121.6],
13: [121.6, 133.1],
11: [133.1, 144.4],
10: [144.4, 155.8],
8: [155.8, 172.2],
6: [172.2, 188.6],
4: [188.6, 204.9],
2: [204.9, 229.4],
20: [229.4, 241.9],
18: [241.9, 254.4],
16: [254.4, 266.6],
14: [266.6, 278.3],
12: [278.3, 289.9]}
return elapsed_times[num_sweep][0], elapsed_times[num_sweep][1]
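# Usage sketch, following the table above:
# >>> findTimes(1)
# (60.5, 84.7) # sweep 1 runs from 60.5 s to 84.7 s after the start of the volume scan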
def int2float_radar(data, varname, index_angle):
"""
Converts radar moments from bit to float
Inputs
------
data: np.array
moment data as loaded from h5 file
varname: str
name of the moment (i.e. 'ZH')
index_angle: int
rank of the sweep-1 (converted to base 0)
Returns
-------
output: np.array
moment data converted to float
"""
varname = convert_polvar_name('metranet', varname)
NYQUIST_VEL = get_mymetadata('nyq_vel')
output = np.zeros(data.shape)
if varname in ['ZH', 'ZV', 'Z', 'ZHC']:
output[data != 0] = (data[data != 0]-64)*0.5
output[data == 0] = float('nan')
elif varname == 'VEL':
output[data != 0] = (data[data != 0]-128)/127*NYQUIST_VEL[index_angle]
output[data == 0] = float('nan')
elif varname == 'WID':
output = data/255*NYQUIST_VEL[index_angle]
elif varname in ['ZDR', 'ZDRC']:
output[data != 0] = data[data != 0]*1.0/16.1259842 - 7.9375
output[data == 0] = float('nan')
elif varname == 'RHO':
output[data != 0] = 1.003-10**(-(data[data != 0]-1.0)/100)
output[data == 0] = float('nan')
elif varname == 'PHI':
output[data != 0] = (data[data != 0]-32768)/32767*180
output[data == 0] = float('nan')
elif varname == 'CLUT':
output = data
else:
output = data
warnings.warn(
("Warning, %s was not found and could not be converted")
% (varname))
return output
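# Worked example for the 'ZH' branch above (raw byte value chosen for illustration):
# a stored value of 128 converts to (128 - 64) * 0.5 = 32.0, while a stored 0 is mapped to NaN.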
def readMXPOLRadData(filename, variableList, max_range=np.Inf, min_range=0):
"""
Reads a netcdf containing processed radar data in polar coordinates
Parameters
----------
filename: str
complete path of the file
variableList: list
list of variables to be read
Returns
-------
varPol: dict
dictionary containing the variables, the azimuth and the range
metadata: dict
dictionary containing the metadata of the file
"""
varPol = {}
metadata = {}
ncid = netCDF4.Dataset(filename)
time_data = ncid.variables['Time']
time_data -= time_data[0] # To get time in seconds from beginning of scan
rrange = ncid.variables['Range'][:]
# Get indexes between min_range and max_range
idx2keep = np.where(np.logical_and(
rrange < max_range, rrange > min_range))[0]
rrange = rrange[idx2keep]
# Get variables in polar coordinates
for varname in variableList:
try:
varPol[varname] = ncid.variables[varname][:].T
except:
pass
varPol['resolution'] = ncid.__dict__['RangeResolution-value']
varPol['range'] = rrange
varPol['range_units'] = ncid.__dict__['RangeResolution-unit']
varPol['azimuth'] = ncid.variables['Azimuth'][:]
try:
varPol['azim_units'] = ncid.__dict__['Azimuth-unit']
except KeyError:
varPol['azim_units'] = ncid.variables['Azimuth'].Units
varPol['elevation'] = ncid.variables['Elevation'][:]
try:
varPol['elev_units'] = ncid.__dict__['Elevation-unit']
except KeyError:
varPol['elev_units'] = ncid.variables['Elevation'].Units
varPol['nyquist_vel'] = ncid.__dict__['NyquistVelocity-value']
varPol['longitude'] = ncid.__dict__['Longitude-value']
varPol['lon_units'] = ncid.__dict__['Longitude-unit']
varPol['latitude'] = ncid.__dict__['Latitude-value']
varPol['lat_units'] = ncid.__dict__['Latitude-unit']
varPol['altitude'] = ncid.__dict__['Altitude-value']
varPol['alt_units'] = ncid.__dict__['Altitude-unit']
varPol['time'] = time_data
metadata['Source'] = ncid.__dict__['Source']
metadata['Institution'] = ncid.__dict__['Institution']
metadata['History'] = ncid.__dict__['History']
metadata['ContactInformation'] = ncid.__dict__['ContactInformation']
# Close netcdf
ncid.close()
return metadata, varPol
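# Hedged usage sketch (the file name below is hypothetical):
# >>> meta, pol = readMXPOLRadData('/data/MXPol-polar-20160101-120000-PPI-057_0.nc', ['Zh', 'Zdr'])
# >>> pol['Zh'].shape # (n_azimuth, n_range) array; meta holds the global file attributes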
def readIDLRadData(filename, variableList, max_range=np.Inf, min_range=0):
"""
Reads a netcdf containing IDL processed radar data in polar coordinates
Parameters
----------
filename: str
complete path of the file
variableList: list
list of variables to be read
Returns
-------
varPol: dict
dictionary containing the variables, the azimuth and the range
metadata: dict
dictionary containing the metadata of the file
"""
varPol = {}
metadata = {}
ncid = netCDF4.Dataset(filename)
time_data = ncid.variables['Time']
time_data -= time_data[0] # To get time in seconds from beginning of scan
rrange = ncid.variables['Range'][:]
# Get indexes between min_range and max_range
idx2keep = np.where(np.logical_and(
rrange < max_range, rrange > min_range))[0]
rrange = rrange[idx2keep]
# Get variables in polar coordinates
for varname in variableList:
try:
varPol[varname] = ncid.variables[varname][:].T
except:
pass
varPol['resolution'] = ncid.__dict__['RangeResolution-value']
varPol['range'] = rrange
varPol['range_units'] = ncid.__dict__['RangeResolution-unit']
# because this data seems to be on -180 to 180
#varPol['azimuth'] = (ncid.variables['Azimuth'][:] + 180)%360
varPol['azimuth'] = ncid.variables['Azimuth'][:]
try:
varPol['azim_units'] = ncid.__dict__['Azimuth-unit']
except KeyError:
varPol['azim_units'] = ncid.variables['Azimuth'].Units
varPol['elevation'] = ncid.variables['Elevation'][:]
try:
varPol['elev_units'] = ncid.__dict__['Elevation-unit']
except KeyError:
varPol['elev_units'] = ncid.variables['Elevation'].Units
varPol['nyquist_vel'] = ncid.__dict__['NyquistVelocity-value']
varPol['longitude'] = ncid.__dict__['Longitude-value']
varPol['lon_units'] = ncid.__dict__['Longitude-unit']
varPol['latitude'] = ncid.__dict__['Latitude-value']
varPol['lat_units'] = ncid.__dict__['Latitude-unit']
varPol['altitude'] = ncid.__dict__['Altitude-value']
varPol['alt_units'] = ncid.__dict__['Altitude-unit']
varPol['time'] = time_data
metadata['Source'] = ncid.__dict__['Source']
metadata['Institution'] = ncid.__dict__['Institution']
metadata['History'] = ncid.__dict__['History']
metadata['ContactInformation'] = ncid.__dict__['ContactInformation']
# Close netcdf
ncid.close()
return metadata, varPol
def readCHRadData(filename, radar_name, variableList, radial_resolution,
max_range=np.Inf, min_range=0):
"""
Reads a HDF5 file containing processed radar data in polar coordinates
Parameters
----------
filename: str
complete path of the file
radar_name: str
name of MCH radar
variableList: list
list of variables to be read
radial_resolution: float
resolution of the radar in metres (i.e. high: 83.3, low: 500.)
max_range: float
maximum range upto which to read data
min_range: float
minimum range from which to read data
Returns
-------
varPol: dict
the projected variables, the azimuth and the range
"""
# check that h5py library is available
if not _H5PY_AVAILABLE:
raise MissingOptionalDependency(
"h5py is required to use readCHRadData but is not installed")
varPol = {}
h5id = h5py.File(filename, 'r')
ELEVATION_ANGLES = get_elevation_metadata(radar_name)
radar_info = generate_radar_table(radar_name)
ANG_RES = radar_info['dbbeam']
NYQUIST_VEL = get_mymetadata('nyq_vel')
# Get dimensions
siz = h5id['moments']['Z'].shape
rng = np.arange(0, siz[1])*radial_resolution
idx2keep = np.where(np.logical_and(
rng < max_range, rng > min_range))[0]
rng = rng[idx2keep]
azimuth = np.arange(0, siz[0])*ANG_RES
index_angle = int(re.findall(r"\.([0-9]{3})\.", filename)[0])-1
elevation = ELEVATION_ANGLES[index_angle]
# Get variables in polar coordinates
for varname in variableList:
varname = convert_polvar_name('MCH', varname)
data = []
data = h5id['moments'][varname][:].T
data = np.asarray(data)
data = data.astype(float)
clut = h5id['moments']['CLUT'][:].T
data[clut >= 100] = float('nan') # Remove clutter
data = data[idx2keep, :]
varPol[varname] = int2float_radar(data, varname, index_angle)
varPol['resolution'] = rng[3]-rng[2]
varPol['range'] = rng
varPol['azimuth'] = azimuth
varPol['elevation'] = elevation
varPol['nyquist_vel'] = NYQUIST_VEL[index_angle]
# Close netcdf
h5id.close()
return varPol
# ------------------------ utilities - config ------------------------- #
_dirname = os.path.dirname(__file__)
_DEFAULT_CONFIG_FILE = os.path.join(_dirname, 'mxpol_config.py')
def load_myconfig(filename=None):
"""
Load configuration from a config file.
Parameters
----------
filename: str
Filename of the configuration file. If None the default configuration
file is loaded from the directory.
Returns
-------
_DEFAULT_METADATA: dict
Dictionary with metadata
"""
if filename is None:
filename = _DEFAULT_CONFIG_FILE
# private:
global cfile
global _DEFAULT_POLARNAMES
global _DEFAULT_METADATA
global _DEFAULT_RADAR_INFO
cfile = imp.load_source('metadata_config', filename)
_DEFAULT_METADATA = cfile.MY_METADATA
_DEFAULT_POLARNAMES = cfile.MY_POLARNAMES
_DEFAULT_RADAR_INFO = cfile.RADAR_INFO
return _DEFAULT_METADATA
def get_mymetadata(p, filename=None):
"""
Return a dictionary of metadata for a given parameter, p.
An empty dictionary will be returned if no metadata dictionary exists for
parameter p.
Parameters
----------
p: str
parameter name (i.e. Polvar) for which to return metadata
filename: str
Filename of the configuration file. If None the default configuration
file is loaded from the directory.
Returns
-------
_DEFAULT_METADATA[p].copy(): dict
a copy of the parameter of interest from the metadata dictionary
"""
load_myconfig(filename=filename)
if p in _DEFAULT_METADATA:
return _DEFAULT_METADATA[p].copy()
return {}
def get_elevation_metadata(radarname, filename=None):
"""
Gets the elevation angles for each sweep from the configuration file
Inputs
------
radarname: str
name of the radar for which to retrieve elevation angles
filename: str
name of the configuration file, if None, the default configuration
file is used
Returns
-------
_DEFAULT_RADAR_INFO['elevations'][radarname]: list
list of elevation angles in degrees
or None if not available
"""
load_myconfig(filename=filename)
if radarname in _DEFAULT_RADAR_INFO['elevations']:
return _DEFAULT_RADAR_INFO['elevations'][radarname]
else:
print(("no elevation angles in configfile for radar %s") % (radarname))
def generate_radar_table(radarname, filename=None):
"""
Generates a table with basic radar info, based on the given (or default)
configfile
Parameters
----------
radarname: str
name of the radar (i.e. 'ALB' or 'A', 'MXPOL' etc)
filename: str
path and name of the configfile, if None, the default configfile is
used
Returns
-------
radar_table: dict
table containing basic radar info
"""
load_myconfig(filename=filename)
if radarname in _DEFAULT_RADAR_INFO['radarID']:
radarname = _DEFAULT_RADAR_INFO['radarID'][radarname]
radar_table = get_mymetadata('Radar_info', filename=filename)
for key in radar_table:
if key in _DEFAULT_RADAR_INFO:
radar_table[key] = _DEFAULT_RADAR_INFO[key][radarname]
else:
radar_table[key] = None
return radar_table
return None
def generate_polvar_metadata(polvar, filename=None):
"""
Generates a dictionary with metadata for a polarimetric variable
Parameters
----------
polvar: str
polarimetric variable of interest
filename: str
Filename of the configuration file. If None the default configuration
file is loaded from the directory.
Returns
-------
polvar_metadata: dict
dictionary with metadata for the polarimetric variable of interest
"""
load_myconfig(filename=filename)
polvar = convert_polvar_name('LTE', polvar)
if polvar in _DEFAULT_POLARNAMES:
(standard_name, long_name, units, valid_min, valid_max,
plot_interval) = _DEFAULT_POLARNAMES[polvar]
else:
(standard_name, long_name, units, valid_min, valid_max,
plot_interval) = None, None, None, None, None, None
polvar_metadata = get_mymetadata('Polvar', filename)
polvar_metadata['units'] = units
polvar_metadata['standard_name'] = standard_name
polvar_metadata['short_name'] = convert_polvar_name('MCH', polvar)
polvar_metadata['long_name'] = long_name
polvar_metadata['valid_min'] = valid_min
polvar_metadata['valid_max'] = valid_max
polvar_metadata['plot_interval'] = plot_interval
return polvar_metadata
def convert_polvar_name(convention, polvar):
"""
Finds the correct variable name for a given convention (MXPOL, MCH) and
a given variable name which was spelled with a different case or
according to a different convention. For example, MXPOL convention uses
'Z' for the reflectivity variable, but if a user inserted 'Zh' this
function will convert it to 'Z'.
Parameters
----------
convention : str, destination convention; either MCH or LTE
polvar : str, key of polarimetric variable to be converted
Returns
-------
mykey : str, polarimetric variable key as used within the ProfileLab
toolbox context
"""
# Generate dictionary for the conversion
metranet_list = [
'ZH', 'ZV', 'ZDR', 'PHI', 'VEL', 'VEL', 'WID', 'RHO', 'CLUT', 'MPH',
'STA1', 'STA2', 'WBN', 'ZHC', 'ZDRC', 'ZDRP', 'Kdpc', 'Rhohvc']
MCH_list = [
'Z', 'ZV', 'ZDR', 'PHIDP', 'V', 'V', 'W', 'RHO', 'CLUT', 'MPH', 'STA1',
'STA2', 'WBN', 'Zhc', 'Zdrc', 'Hydrometeor_type_from_Besic1', 'Kdpc', 'RHOC']
# ZhCorr and ZdrCorr have been changed to Zhc and Zdrc!
LTE_list = [
'Zh', 'Zv', 'Zdr', 'Phidp', 'RVel', 'Rvel', 'Sw', 'Rhohv', 'Clut', 'mph',
'sta1', 'sta2', 'wbn', 'Zhc', 'Zdrc', 'Hydroclass', 'Kdpc', 'Rhohvc']
IDL_list = [
'Zh', 'Zv', 'Zdr', 'Phidp_raw', 'V', 'V', 'W', 'uRhohv', 'CLUT', 'MPH', 'STA1',
'STA2', 'WBN', 'Zhc', 'Zdrc', 'TYPECLUS2', 'Kdpc', 'Rhohvc']
pyrad_list = [
'reflectivity', 'reflectivity_vv', 'differential_reflectivity',
'differential_phase', 'velocity', 'velocity', 'spectrum_width',
'uncorrected_cross_correlation_ratio', 'radar_echo_id', 'MPH',
'STA1', 'STA2', 'WBN', 'corrected_reflectivity',
'corrected_differential_reflectivity', 'radar_echo_classification',
'corrected_specific_differential_phase',
'corrected_cross_correlation_ratio']
convertkeys = {}
convertkeys['MCH'] = {}
convertkeys['LTE'] = {}
convertkeys['metranet'] = {}
convertkeys['IDL'] = {}
convertkeys['pyrad'] = {}
for i, MCH in enumerate(MCH_list):
convertkeys['MCH'][MCH] = [
LTE_list[i], metranet_list[i], IDL_list[i], pyrad_list[i]]
convertkeys['LTE'] = {}
for i, LTE in enumerate(LTE_list):
convertkeys['LTE'][LTE] = [
MCH_list[i], metranet_list[i], IDL_list[i], pyrad_list[i]]
for i, metranet in enumerate(metranet_list):
convertkeys['metranet'][metranet] = [
MCH_list[i], LTE_list[i], IDL_list[i], pyrad_list[i]]
for i, IDL in enumerate(IDL_list):
convertkeys['IDL'][IDL] = [
metranet_list[i], LTE_list[i], MCH_list[i], pyrad_list[i]]
for i, pyrad in enumerate(pyrad_list):
convertkeys['pyrad'][pyrad] = [
metranet_list[i], LTE_list[i], MCH_list[i], IDL_list[i]]
# translate between conventions
mykey = polvar
for key, value in convertkeys[convention].items():
if polvar in value:
mykey = key
break
return mykey
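# Usage sketch based on the conversion tables above:
# >>> convert_polvar_name('MCH', 'Zh')
# 'Z'
# >>> convert_polvar_name('LTE', 'Z')
# 'Zh' # names that appear in no table are returned unchanged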
|
{"hexsha": "c8db5e6a5f7700a4bf84b2a63a7247c6274d7867", "size": 37565, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/pyrad_proc/pyrad/io/read_data_mxpol.py", "max_stars_repo_name": "jfigui/pyrad", "max_stars_repo_head_hexsha": "7811d593bb09a7f8a621c0e8ae3f32c2b85a0254", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 41, "max_stars_repo_stars_event_min_datetime": "2016-12-01T08:46:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-24T21:14:33.000Z", "max_issues_repo_path": "src/pyrad_proc/pyrad/io/read_data_mxpol.py", "max_issues_repo_name": "jfigui/pyrad", "max_issues_repo_head_hexsha": "7811d593bb09a7f8a621c0e8ae3f32c2b85a0254", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 42, "max_issues_repo_issues_event_min_datetime": "2017-02-23T14:52:49.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-01T10:43:52.000Z", "max_forks_repo_path": "src/pyrad_proc/pyrad/io/read_data_mxpol.py", "max_forks_repo_name": "jfigui/pyrad", "max_forks_repo_head_hexsha": "7811d593bb09a7f8a621c0e8ae3f32c2b85a0254", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 21, "max_forks_repo_forks_event_min_datetime": "2016-08-25T15:02:12.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-27T04:09:40.000Z", "avg_line_length": 34.6860572484, "max_line_length": 87, "alphanum_fraction": 0.5816052176, "include": true, "reason": "import numpy", "num_tokens": 9397}
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import io
import sys
import logging
import subprocess
import shutil
import jaydebeapi
import jpype
import base64
import re
import json
import ssl
import requests
import pendulum
from itertools import zip_longest
from requests_kerberos import HTTPKerberosAuth
from requests.auth import HTTPBasicAuth
from Crypto.PublicKey import RSA
from Crypto.Cipher import AES, PKCS1_OAEP
from subprocess import Popen, PIPE
from ConfigReader import configuration
import mysql.connector
from mysql.connector import errorcode
from datetime import date, datetime, time, timedelta, timezone
from dateutil import *
from dateutil.tz import *
import pandas as pd
import numpy as np
from sourceSchemaReader import schemaReader
from common.Singleton import Singleton
from common import constants as constant
from DBImportConfig import decryption as decryption
from common.Exceptions import *
import sqlalchemy as sa
from sqlalchemy.ext.automap import automap_base
from sqlalchemy_utils import create_view
from sqlalchemy_views import CreateView, DropView
from sqlalchemy.sql import text, alias, select
from sqlalchemy.orm import aliased, sessionmaker, Query
import pymongo
class config(object, metaclass=Singleton):
def __init__(self, Hive_DB=None, Hive_Table=None):
logging.debug("Executing common_config.__init__()")
self.Hive_DB = None
self.Hive_Table = None
if Hive_DB != None and self.Hive_DB == None:
self.Hive_DB = Hive_DB
if Hive_Table != None and self.Hive_Table == None:
self.Hive_Table = Hive_Table
self.mysql_conn = None
self.mysql_cursor = None
self.tempdir = None
self.startDate = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
self.dbAlias = None
# Variables used in lookupConnectionAlias
self.db_mssql = False
self.db_oracle = False
self.db_mysql = False
self.db_postgresql = False
self.db_progress = False
self.db_db2udb = False
self.db_db2as400 = False
self.db_mongodb = False
self.db_cachedb = False
self.jdbc_url = None
self.jdbc_hostname = None
self.jdbc_port = None
self.jdbc_database = None
self.jdbc_username = None
self.jdbc_password = None
self.jdbc_classpath = None
self.jdbc_driver = None
self.jdbc_driver_for_python = None
self.jdbc_classpath_for_python = None
self.jdbc_ad_domain = None
self.jdbc_encrypt = None
self.jdbc_encrypt_string = None
self.jdbc_trustedservercert = None
self.jdbc_trustedservercert_password = None
self.jdbc_hostincert = None
self.jdbc_logintimeout = None
self.jdbc_servertype = None
self.jdbc_oracle_sid = None
self.jdbc_oracle_servicename = None
self.jdbc_force_column_lowercase = None
self.jdbc_environment = None
self.seedFile = None
self.mongoClient = None
self.mongoDB = None
self.mongoAuthSource = None
self.kerberosInitiated = False
self.sparkPathAppend = None
self.sparkPysparkSubmitArgs = None
self.sparkJarFiles = None
self.sparkPyFiles = None
self.sparkMaster = None
self.sparkDeployMode = None
self.sparkYarnQueue = None
self.sparkDynamicAllocation = None
self.sparkMinExecutors = None
self.sparkMaxExecutors = None
self.sparkExecutorMemory = None
self.sparkHDPversion = None
self.sparkHiveLibrary = None
self.sourceSchema = None
self.tableType = None
self.custom_max_query = None
self.source_columns_df = pd.DataFrame()
self.source_keys_df = pd.DataFrame()
self.JDBCConn = None
self.JDBCCursor = None
self.sourceSchema = schemaReader.source()
# SQLAlchemy connection variables
self.configDB = None
self.configDBSession = None
self.debugLogLevel = False
if logging.root.level == 10: # DEBUG
self.debugLogLevel = True
self.kerberosPrincipal = configuration.get("Kerberos", "principal")
self.kerberosKeytab = configuration.get("Kerberos", "keytab")
self.sparkPathAppend = configuration.get("Spark", "path_append")
self.sparkJarFiles = configuration.get("Spark", "jar_files")
self.sparkPyFiles = configuration.get("Spark", "py_files")
self.sparkMaster = configuration.get("Spark", "master")
self.sparkDeployMode = configuration.get("Spark", "deployMode")
self.sparkYarnQueue = configuration.get("Spark", "yarnqueue")
self.sparkExecutorMemory = configuration.get("Spark", "executor_memory")
self.sparkHDPversion = configuration.get("Spark", "hdp_version")
self.sparkHiveLibrary = configuration.get("Spark", "hive_library")
if configuration.get("Spark", "dynamic_allocation").lower() == "true":
self.sparkDynamicAllocation = True
else:
self.sparkDynamicAllocation = False
self.crypto = decryption.crypto()
# Sets up and creates a temporary directory
self.tempdir = "/tmp/dbimport." + str(os.getpid()) + ".tmp"
# If the temp dir exists, we just remove it as it's a leftover from another DBImport execution
try:
shutil.rmtree(self.tempdir)
except FileNotFoundError:
pass
except PermissionError:
logging.error("The temporary directory (%s) already exists but cant be removed due to permission error"%(self.tempdir))
sys.exit(1)
try:
os.mkdir(self.tempdir)
except OSError:
logging.error("Creation of the temporary directory %s failed" % self.tempdir)
sys.exit(1)
try:
os.chmod(self.tempdir, 0o700)
except OSError:
logging.error("Error while changing permission on %s to 700" % self.tempdir)
self.remove_temporary_files()
sys.exit(1)
# # Fetch configuration about HDFS
# Fetch configuration about MySQL database and how to connect to it
mysql_hostname = configuration.get("Database", "mysql_hostname")
mysql_port = configuration.get("Database", "mysql_port")
mysql_database = configuration.get("Database", "mysql_database")
mysql_username = configuration.get("Database", "mysql_username")
mysql_password = configuration.get("Database", "mysql_password")
		# Establish a connection to the DBImport database in MySQL
try:
self.mysql_conn = mysql.connector.connect(host=mysql_hostname,
port=mysql_port,
database=mysql_database,
user=mysql_username,
password=mysql_password)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
logging.error("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
logging.error("Database does not exist")
else:
logging.error("%s"%err)
logging.error("Error: There was a problem connecting to the MySQL database. Please check configuration and serverstatus and try again")
self.remove_temporary_files()
sys.exit(1)
else:
self.mysql_cursor = self.mysql_conn.cursor(buffered=True)
logging.debug("startDate = %s"%(self.startDate))
logging.debug("Executing common_config.__init__() - Finished")
def disconnectConfigDatabase(self):
""" Closing connection against config database. Only used during debug and development """
logging.debug("Connection against MySQL Config database have been closed")
self.mysql_cursor.close()
self.mysql_conn.close()
def reconnectConfigDatabase(self):
if self.mysql_conn.is_connected() == False:
logging.warn("Connection to MySQL have been lost. Reconnecting...")
# Fetch configuration about MySQL database and how to connect to it
mysql_hostname = configuration.get("Database", "mysql_hostname")
mysql_port = configuration.get("Database", "mysql_port")
mysql_database = configuration.get("Database", "mysql_database")
mysql_username = configuration.get("Database", "mysql_username")
mysql_password = configuration.get("Database", "mysql_password")
			# Establish a connection to the DBImport database in MySQL
try:
self.mysql_conn = mysql.connector.connect(host=mysql_hostname,
port=mysql_port,
database=mysql_database,
user=mysql_username,
password=mysql_password)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
logging.error("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
logging.error("Database does not exist")
else:
logging.error("%s"%err)
logging.error("Error: There was a problem connecting to the MySQL database. Please check configuration and serverstatus and try again")
self.remove_temporary_files()
sys.exit(1)
else:
self.mysql_cursor = self.mysql_conn.cursor(buffered=True)
def getAtlasJdbcConnectionData(self, dbAlias=None):
""" Reads the extended information in jdbc_connections needed by Atlas (contact_info, owner and more) """
logging.debug("Executing common_config.getAtlasJdbcConnectionData()")
if dbAlias == None:
dbAlias = self.dbAlias
# Fetch data from jdbc_connection table
query = "select contact_info, description, owner, atlas_discovery, atlas_include_filter, atlas_exclude_filter, atlas_last_discovery from jdbc_connections where dbalias = %s "
logging.debug("Executing the following SQL: %s" % (query))
self.mysql_cursor.execute(query, (dbAlias, ))
if self.mysql_cursor.rowcount != 1:
raise invalidConfiguration("The requested connection alias cant be found in the 'jdbc_connections' table")
row = self.mysql_cursor.fetchone()
returnDict = {}
returnDict["contact_info"] = row[0]
returnDict["description"] = row[1]
returnDict["owner"] = row[2]
returnDict["atlas_discovery"] = row[3]
returnDict["atlas_include_filter"] = row[4]
returnDict["atlas_exclude_filter"] = row[5]
returnDict["atlas_last_discovery"] = row[6]
if returnDict["contact_info"] == None: returnDict["contact_info"] = ""
if returnDict["description"] == None: returnDict["description"] = ""
if returnDict["owner"] == None: returnDict["owner"] = ""
logging.debug("Executing common_config.getAtlasJdbcConnectionData() - Finished")
return returnDict
def connectSQLAlchemy(self, exitIfFailure=True, logger=""):
		""" Connects to the configuration database with SQLAlchemy """
		log = logging.getLogger(logger)
if self.configDBSession != None:
# If we already have a connection, we just say that it's ok....
return True
self.connectStr = "mysql+pymysql://%s:%s@%s:%s/%s"%(
configuration.get("Database", "mysql_username"),
configuration.get("Database", "mysql_password"),
configuration.get("Database", "mysql_hostname"),
configuration.get("Database", "mysql_port"),
configuration.get("Database", "mysql_database"))
try:
self.configDB = sa.create_engine(self.connectStr, echo = self.debugLogLevel, pool_pre_ping=True)
self.configDB.connect()
self.configDBSession = sessionmaker(bind=self.configDB)
except sa.exc.OperationalError as err:
logging.error("%s"%err)
if exitIfFailure == True:
self.remove_temporary_files()
sys.exit(1)
else:
self.configDBSession = None
self.configDB= None
return False
except:
print("Unexpected error: ")
print(sys.exc_info())
if exitIfFailure == True:
self.remove_temporary_files()
sys.exit(1)
else:
self.configDBSession = None
self.configDB= None
return False
else:
return True
def setHiveTable(self, Hive_DB, Hive_Table):
""" Sets the parameters to work against a new Hive database and table """
self.Hive_DB = Hive_DB.lower()
self.Hive_Table = Hive_Table.lower()
def getMysqlCursor(self):
return self.mysql_cursor
def getMysqlConnector(self):
return self.mysql_conn
def remove_temporary_files(self):
logging.debug("Executing common_config.remove_temporary_files()")
# Remove the kerberos ticket file
if self.kerberosInitiated == True:
klistCommandList = ['kdestroy']
klistProc = subprocess.Popen(klistCommandList , stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdOut, stdErr = klistProc.communicate()
		# We add this check just to make sure that self.tempdir contains a valid path and that no exception forces us to remove, for example, '/'
if len(self.tempdir) > 13 and self.tempdir.startswith( '/tmp/' ):
try:
shutil.rmtree(self.tempdir)
except FileNotFoundError:
				# This can happen as we might call this function multiple times during an error and we don't want an exception because of that
pass
logging.debug("Executing common_config.remove_temporary_files() - Finished")
def checkTimeWindow(self, connection_alias, atlasDiscoveryMode=False):
logging.debug("Executing common_config.checkTimeWindow()")
if atlasDiscoveryMode == False:
logging.info("Checking if we are allowed to use this jdbc connection at this time")
self.timeZone = self.getConfigValue("timezone")
query = "select timewindow_start, timewindow_stop, timewindow_timezone from jdbc_connections where dbalias = %s"
self.mysql_cursor.execute(query, (connection_alias, ))
logging.debug("SQL Statement executed: %s" % (self.mysql_cursor.statement) )
row = self.mysql_cursor.fetchone()
hour = row[0]
minute = row[1]
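		# Note: despite their names, 'hour' holds timewindow_start and 'minute' holds timewindow_stop.
		# MySQL returns TIME columns as timedelta objects; the timedelta(days=1) check below presumably
		# normalizes a stored value of 24:00:00 back to midnight.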
# If there is a local timezone set on the JDBC connection, we use that instead of the configured default timezone
if row[2] is not None and row[2].strip() != "":
self.timeZone = row[2]
currentTime = pendulum.now(self.timeZone) # Get time in configured timeZone
timeWindowStart = None
timeWindowStop = None
if hour != None:
if hour == timedelta(days=1):
hour = timedelta(days=0)
timeWindowStart = currentTime.set(
hour=int(str(hour).split(":")[0]),
minute=int(str(hour).split(":")[1]),
second=int(str(hour).split(":")[2]))
if row[1] != None:
timeWindowStop = currentTime.set(
hour=int(str(minute).split(":")[0]),
minute=int(str(minute).split(":")[1]),
second=int(str(minute).split(":")[2]))
passedMidnight = False
if timeWindowStart != None and timeWindowStop != None and timeWindowStart > timeWindowStop:
# This happens if we pass midnight
timeWindowStop = timeWindowStop.add(days=1)
passedMidnight = True
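			# Illustrative example: a window of 22:00-04:00 gives a stop time earlier than the start time,
			# so one day is added to the stop time and passedMidnight is set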
logging.debug("timeWindowStart: %s"%(timeWindowStart))
logging.debug("timeWindowStop: %s"%(timeWindowStop))
if timeWindowStart == None and timeWindowStop == None:
if atlasDiscoveryMode == False:
logging.info("SUCCESSFUL: This import is allowed to run at any time during the day.")
return True
elif timeWindowStart == None or timeWindowStop == None:
if atlasDiscoveryMode == False:
logging.error("Atleast one of the TimeWindow settings are NULL in the database. Only way to disable the Time Window")
logging.error("function is to put NULL into both columns. Otherwise the configuration is marked as invalid and will exit")
logging.error("as it's not running inside a correct Time Window.")
logging.error("Invalid TimeWindow configuration")
self.remove_temporary_files()
sys.exit(1)
else:
return False
elif timeWindowStart == timeWindowStop:
if atlasDiscoveryMode == False:
logging.error("The value in timewindow_start column is the same as the value in timewindow_stop.")
logging.error("Invalid TimeWindow configuration")
self.remove_temporary_files()
sys.exit(1)
else:
return False
else:
allowedTime = False
if currentTime > timeWindowStart and currentTime < timeWindowStop:
allowedTime = True
if passedMidnight == True:
				# If we passed midnight, it means that we added one day to the stop time. But depending on what time it is, and when the
				# window starts, we might be in the previous day's window and should be allowed to run. So we need to test for that as well
timeWindowStart = timeWindowStart.add(days=-1)
timeWindowStop = timeWindowStop.add(days=-1)
logging.debug("timeWindowStart: %s"%(timeWindowStart))
logging.debug("timeWindowStop: %s"%(timeWindowStop))
if currentTime > timeWindowStart and currentTime < timeWindowStop:
allowedTime = True
if allowedTime == False:
if atlasDiscoveryMode == False:
logging.error("We are not allowed to run this import outside the configured Time Window")
logging.info(" Current time: %s (%s)"%(currentTime.to_time_string(), self.timeZone))
logging.info(" TimeWindow Start: %s"%(timeWindowStart.to_time_string()))
logging.info(" TimeWindow Stop: %s"%(timeWindowStop.to_time_string()))
self.remove_temporary_files()
sys.exit(1)
else:
return False
else:
if atlasDiscoveryMode == False:
logging.info("SUCCESSFUL: There is a configured Time Window for this operation, and we are running inside that window.")
logging.debug(" currentTime = %s"%(currentTime.to_time_string()))
logging.debug(" timeWindowStart = %s"%(timeWindowStart.to_time_string()))
logging.debug(" timeWindowStop = %s"%(timeWindowStop.to_time_string()))
logging.debug("Executing common_config.checkTimeWindow() - Finished")
return True
def checkConnectionAlias(self, connection_alias):
""" Will check if the connection alias exists in the jdbc_connection table """
logging.debug("Executing common_config.checkConnectionAlias()")
RC = False
query = "select count(1) from jdbc_connections where dbalias = %s "
logging.debug("Executing the following SQL: %s" % (query))
self.mysql_cursor.execute(query, (connection_alias, ))
row = self.mysql_cursor.fetchone()
if row[0] == 1:
RC = True
logging.debug("Executing common_config.checkConnectionAlias() - Finished")
return RC
def checkKerberosTicket(self):
""" Checks if there is a valid kerberos ticket or not. Runst 'klist -s' and checks the exitCode. Returns True or False """
logging.debug("Executing common_config.checkKerberosTicket()")
os.environ["KRB5CCNAME"] = "FILE:/tmp/krb5cc_%s_%s"%(os.getuid(), os.getpid())
if self.kerberosPrincipal == "" or self.kerberosKeytab == "":
logging.error("The kerberos information is not correct in configuration file.")
self.remove_temporary_files()
sys.exit(1)
logging.info("Initialize Kerberos ticket")
self.kerberosInitiated = True
kinitCommandList = ['kinit', '-kt', self.kerberosKeytab, self.kerberosPrincipal]
kinitProc = subprocess.Popen(kinitCommandList , stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdOut, stdErr = kinitProc.communicate()
stdOut = stdOut.decode('utf-8').rstrip()
stdErr = stdErr.decode('utf-8').rstrip()
klistCommandList = ['klist', '-s']
klistProc = subprocess.Popen(klistCommandList , stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdOut, stdErr = klistProc.communicate()
stdOut = stdOut.decode('utf-8').rstrip()
stdErr = stdErr.decode('utf-8').rstrip()
		logging.debug("Executing common_config.checkKerberosTicket() - Finished")
		return klistProc.returncode == 0
def encryptString(self, strToEncrypt):
self.crypto.setPrivateKeyFile(configuration.get("Credentials", "private_key"))
self.crypto.setPublicKeyFile(configuration.get("Credentials", "public_key"))
encryptedStr = self.crypto.encrypt(strToEncrypt)
return(encryptedStr)
def encryptUserPassword(self, connection_alias, username, password):
# Fetch public/private key from jdbc_connection table if it exists
query = "select private_key_path, public_key_path from jdbc_connections where dbalias = %s "
logging.debug("Executing the following SQL: %s" % (query))
self.mysql_cursor.execute(query, (connection_alias, ))
if self.mysql_cursor.rowcount != 1:
raise invalidConfiguration("The requested connection alias cant be found in the 'jdbc_connections' table")
row = self.mysql_cursor.fetchone()
privateKeyFile = row[0]
publicKeyFile = row[1]
if privateKeyFile != None and publicKeyFile != None and privateKeyFile.strip() != '' and publicKeyFile.strip() != '':
self.crypto.setPrivateKeyFile(privateKeyFile)
self.crypto.setPublicKeyFile(publicKeyFile)
else:
self.crypto.setPrivateKeyFile(configuration.get("Credentials", "private_key"))
self.crypto.setPublicKeyFile(configuration.get("Credentials", "public_key"))
strToEncrypt = "%s %s\n"%(username, password)
encryptedStr = self.crypto.encrypt(strToEncrypt)
if encryptedStr != None and encryptedStr != "":
query = "update jdbc_connections set credentials = %s where dbalias = %s "
logging.debug("Executing the following SQL: %s" % (query))
self.mysql_cursor.execute(query, (encryptedStr, connection_alias))
self.mysql_conn.commit()
# decryptedStr = self.crypto.decrypt(encryptedStr)
#
# print("DECRYPTED VALUE USING STANDARD DECODING")
# print(decryptedStr)
def getJDBCDriverConfig(self, databaseType, version):
logging.debug("Executing common_config.getJDBCDriverConfig()")
query = "select driver, classpath from jdbc_connections_drivers where database_type = %s and version = %s"
logging.debug("Executing the following SQL: %s" % (query))
self.mysql_cursor.execute(query, (databaseType, version ))
if self.mysql_cursor.rowcount != 1:
raise invalidConfiguration("Error: Cant find JDBC driver with database_type as '%s' and version as '%s'"%(databaseType, version))
row = self.mysql_cursor.fetchone()
driver = row[0]
classPath = row[1]
if classPath == "add path to JAR file" or not classPath.startswith("/"):
raise invalidConfiguration("Error: You need to specify the full path to the JAR files in the table 'jdbc_connections_drivers'")
logging.debug("Executing common_config.getJDBCDriverConfig() - Finished")
return driver, classPath
def printConnectionAliasDetails(self):
logging.debug("Executing common_config.printConnectionAliasDetails()")
query = "select dbalias from jdbc_connections"
logging.debug("Executing the following SQL: %s" % (query))
self.mysql_cursor.execute(query, [])
counter = 0
for row in self.mysql_cursor.fetchall():
counter = counter + 1
try:
self.lookupConnectionAlias(connection_alias = row[0], exceptionIfFailureToDecrypt=False)
except:
pass
# print("%s -- Unable to decrypt password"%(row[0]))
# print("'%s','%s','%s','%s'"%(self.dbAlias, self.jdbc_username, self.jdbc_password, self.jdbc_url))
else:
# print("'%s','%s','%s','%s','%s','%s'"%(self.dbAlias, self.jdbc_username, self.jdbc_password, self.jdbc_hostname, self.jdbc_port, self.jdbc_database))
if self.jdbc_username == None:
self.jdbc_username = ""
if self.jdbc_password == None:
self.jdbc_password = ""
# print("£%s£¤£%s£¤£%s£¤£%s£"%(self.dbAlias, self.jdbc_username, self.jdbc_password, self.jdbc_url))
print("£%s£¤£%s£¤£%s£"%(self.dbAlias, self.jdbc_username, self.jdbc_password))
if 0 == 1:
if "," in self.jdbc_username:
print("self.jdbc_username contains ,")
print("'%s','%s','%s','%s'"%(self.dbAlias, self.jdbc_username, self.jdbc_password, self.jdbc_url))
if "'" in self.jdbc_username:
print("self.jdbc_username contains '")
print("'%s','%s','%s','%s'"%(self.dbAlias, self.jdbc_username, self.jdbc_password, self.jdbc_url))
if "£" in self.jdbc_username:
print("self.jdbc_username contains £")
print("'%s','%s','%s','%s'"%(self.dbAlias, self.jdbc_username, self.jdbc_password, self.jdbc_url))
if "¤" in self.jdbc_username:
print("self.jdbc_username contains ¤")
print("'%s','%s','%s','%s'"%(self.dbAlias, self.jdbc_username, self.jdbc_password, self.jdbc_url))
# if "," in self.jdbc_password:
# print("self.jdbc_password contains ,")
# print("'%s','%s','%s','%s'"%(self.dbAlias, self.jdbc_username, self.jdbc_password, self.jdbc_url))
if "'" in self.jdbc_password:
print("self.jdbc_password contains '")
print("'%s','%s','%s','%s'"%(self.dbAlias, self.jdbc_username, self.jdbc_password, self.jdbc_url))
if "£" in self.jdbc_password:
print("self.jdbc_password contains £")
print("'%s','%s','%s','%s'"%(self.dbAlias, self.jdbc_username, self.jdbc_password, self.jdbc_url))
if "¤" in self.jdbc_username:
print("self.jdbc_username contains ¤")
print("'%s','%s','%s','%s'"%(self.dbAlias, self.jdbc_username, self.jdbc_password, self.jdbc_url))
if "," in self.jdbc_url:
print("self.jdbc_url contains ,")
print("'%s','%s','%s','%s'"%(self.dbAlias, self.jdbc_username, self.jdbc_password, self.jdbc_url))
# if "'" in self.jdbc_url:
# print("self.jdbc_url contains '")
# print("'%s','%s','%s','%s'"%(self.dbAlias, self.jdbc_username, self.jdbc_password, self.jdbc_url))
if "£" in self.jdbc_url:
print("self.jdbc_url contains £")
print("'%s','%s','%s','%s'"%(self.dbAlias, self.jdbc_username, self.jdbc_password, self.jdbc_url))
if "¤" in self.jdbc_url:
print("self.jdbc_url contains ¤")
print("'%s','%s','%s','%s'"%(self.dbAlias, self.jdbc_username, self.jdbc_password, self.jdbc_url))
# print('.', end='', flush=True)
# if counter > 5:
# break
print()
logging.debug("Executing common_config.printConnectionAliasDetails() - Finished")
def lookupConnectionAlias(self, connection_alias, decryptCredentials=True, copySlave=False, exceptionIfFailureToDecrypt=True):
logging.debug("Executing common_config.lookupConnectionAlias()")
exit_after_function = False
self.dbAlias = connection_alias
self.atlasJdbcSourceSupport = False
self.db_mssql = False
self.db_oracle = False
self.db_mysql = False
self.db_postgresql = False
self.db_progress = False
self.db_db2udb = False
self.db_db2as400 = False
self.db_mongodb = False
self.db_cachedb = False
# Fetch data from jdbc_connection table
query = "select jdbc_url, credentials, private_key_path, public_key_path, environment from jdbc_connections where dbalias = %s "
logging.debug("Executing the following SQL: %s" % (query))
self.mysql_cursor.execute(query, (connection_alias, ))
if self.mysql_cursor.rowcount != 1:
raise invalidConfiguration("The requested connection alias cant be found in the 'jdbc_connections' table")
row = self.mysql_cursor.fetchone()
self.jdbc_url = row[0]
privateKeyFile = row[2]
publicKeyFile = row[3]
self.jdbc_environment = row[4]
if decryptCredentials == True and copySlave == False:
if privateKeyFile != None and publicKeyFile != None and privateKeyFile.strip() != '' and publicKeyFile.strip() != '':
self.crypto.setPrivateKeyFile(privateKeyFile)
self.crypto.setPublicKeyFile(publicKeyFile)
else:
self.crypto.setPrivateKeyFile(configuration.get("Credentials", "private_key"))
self.crypto.setPublicKeyFile(configuration.get("Credentials", "public_key"))
credentials = self.crypto.decrypt(row[1])
if credentials == None and exceptionIfFailureToDecrypt == True:
raise invalidConfiguration("Cant decrypt username and password. Check private/public key in config file")
if credentials != None:
self.jdbc_username = credentials.split(" ")[0]
self.jdbc_password = credentials.split(" ")[1]
				# Sets and creates the password file that is used by sqoop and other tools
self.jdbc_password_file = self.tempdir + "/jdbc_passwd"
f = open(self.jdbc_password_file, "w")
f.write(self.jdbc_password)
f.close()
os.chmod(self.jdbc_password_file, 0o600)
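				# The file is consumed by external tools (for example sqoop's --password-file option),
				# so it is restricted to owner read/write only (0600)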
else:
self.jdbc_username = None
self.jdbc_password = None
self.jdbc_password_file = None
else:
self.jdbc_username = None
self.jdbc_password = None
self.jdbc_password_file = None
# Lookup Connection details based on JDBC STRING for all different types we support
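		# Illustrative examples of the URL formats handled below (hostnames, ports and names are placeholders):
		#   jdbc:sqlserver://dbhost:1433;database=mydb
		#   jdbc:oracle:thin:@(DESCRIPTION=(ADDRESS=(HOST=dbhost)(PORT=1521))(CONNECT_DATA=(SERVICE_NAME=mysvc)))
		#   jdbc:mysql://dbhost:3306/mydb
		#   jdbc:postgresql://dbhost:5432/mydb
		#   mongo://dbhost:27017/mydb?authSource=admin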
if self.jdbc_url.startswith( 'jdbc:sqlserver:'):
self.atlasJdbcSourceSupport = True
self.db_mssql = True
self.jdbc_servertype = constant.MSSQL
self.jdbc_driver, self.jdbc_classpath = self.getJDBCDriverConfig("SQL Server", "default")
self.jdbc_classpath_for_python = self.jdbc_classpath
self.jdbc_hostname = self.jdbc_url[17:].split(':')[0].split(';')[0]
try:
self.jdbc_port = self.jdbc_url[17:].split(':')[1].split(';')[0]
except:
self.jdbc_port = "1433"
if self.jdbc_port.isdigit() == False: self.jdbc_port = "1433"
try:
self.jdbc_database = self.jdbc_url.split("database=")[1].split(';')[0]
except:
logging.error("Cant determine database based on jdbc_string")
exit_after_function = True
try:
self.jdbc_encrypt = self.jdbc_url.split("encrypt=")[1].split(';')[0].lower()
if self.jdbc_encrypt == "true": self.jdbc_encrypt = True
except:
self.jdbc_encrypt = False
try:
self.jdbc_trustedservercert = self.jdbc_url.split("trustServerCertificate=")[1].split(';')[0]
except:
self.jdbc_trustedservercert = None
try:
self.jdbc_hostincert = self.jdbc_url.split("hostNameInCertificate=")[1].split(';')[0]
except:
self.jdbc_hostincert = None
try:
self.jdbc_logintimeout = self.jdbc_url.split("loginTimeout=")[1].split(';')[0]
except:
self.jdbc_logintimeout = None
# If all encrypt settings are available, we create an encryption string that will be used by the Schema Python Program
if self.jdbc_encrypt == True and self.jdbc_trustedservercert != None and self.jdbc_hostincert != None and self.jdbc_logintimeout != None:
self.jdbc_encrypt_string = "encrypt=true;trustServerCertificate=" + self.jdbc_trustedservercert + ";hostNameInCertificate=" + self.jdbc_hostincert + ";loginTimeout=" + self.jdbc_logintimeout
if self.jdbc_url.startswith( 'jdbc:jtds:sqlserver:'):
self.atlasJdbcSourceSupport = True
self.db_mssql = True
self.jdbc_servertype = constant.MSSQL
self.jdbc_force_column_lowercase = True
self.jdbc_driver, self.jdbc_classpath = self.getJDBCDriverConfig("SQL Server", "jTDS")
self.jdbc_classpath_for_python = self.jdbc_classpath
self.jdbc_hostname = self.jdbc_url[22:].split(':')[0].split(';')[0]
try:
self.jdbc_port = self.jdbc_url[22:].split(':')[1].split(';')[0]
except:
self.jdbc_port = "1433"
if self.jdbc_port.isdigit() == False: self.jdbc_port = "1433"
try:
self.jdbc_database = self.jdbc_url.split("databaseName=")[1].split(';')[0]
except:
logging.error("Cant determine database based on jdbc_string")
exit_after_function = True
if self.jdbc_url.startswith( 'jdbc:oracle:'):
self.atlasJdbcSourceSupport = True
self.db_oracle = True
self.jdbc_servertype = constant.ORACLE
self.jdbc_force_column_lowercase = False
self.jdbc_driver, self.jdbc_classpath = self.getJDBCDriverConfig("Oracle", "default")
self.jdbc_classpath_for_python = self.jdbc_classpath
self.jdbc_database = "-"
try:
self.jdbc_hostname = self.jdbc_url.split("(HOST=")[1].split(')')[0]
except:
logging.error("Cant determine hostname based on jdbc_string")
exit_after_function = True
try:
self.jdbc_port = self.jdbc_url.split("(PORT=")[1].split(')')[0]
except:
logging.error("Cant determine port based on jdbc_string")
exit_after_function = True
try:
self.jdbc_oracle_sid = self.jdbc_url.split("(SID=")[1].split(')')[0]
except:
self.jdbc_oracle_sid = None
try:
self.jdbc_oracle_servicename = self.jdbc_url.split("(SERVICE_NAME=")[1].split(')')[0]
except:
self.jdbc_oracle_servicename = None
if self.jdbc_oracle_sid == None and self.jdbc_oracle_servicename == None:
logging.error("Cant find either SID or SERVICE_NAME in Oracle URL")
exit_after_function = True
if self.jdbc_url.startswith( 'jdbc:mysql:'):
self.atlasJdbcSourceSupport = True
self.db_mysql = True
self.jdbc_servertype = constant.MYSQL
self.jdbc_force_column_lowercase = True
self.jdbc_driver, self.jdbc_classpath = self.getJDBCDriverConfig("MySQL", "default")
self.jdbc_classpath_for_python = self.jdbc_classpath
self.jdbc_hostname = self.jdbc_url[13:].split(':')[0].split(';')[0].split('/')[0]
try:
self.jdbc_port = self.jdbc_url[13:].split(':')[1].split('/')[0]
except:
self.jdbc_port = "3306"
if self.jdbc_port.isdigit() == False: self.jdbc_port = "3306"
try:
self.jdbc_database = self.jdbc_url[13:].split('/')[1].split(';')[0].split('?')[0]
except:
logging.error("Cant determine database based on jdbc_string")
exit_after_function = True
if self.jdbc_url.startswith( 'jdbc:postgresql:'):
self.atlasJdbcSourceSupport = True
self.db_postgresql = True
self.jdbc_servertype = constant.POSTGRESQL
self.jdbc_force_column_lowercase = True
self.jdbc_driver, self.jdbc_classpath = self.getJDBCDriverConfig("PostgreSQL", "default")
self.jdbc_classpath_for_python = self.jdbc_classpath
self.jdbc_hostname = self.jdbc_url[18:].split(':')[0].split(';')[0].split('/')[0]
try:
self.jdbc_port = self.jdbc_url[18:].split(':')[1].split('/')[0]
except:
self.jdbc_port = "5432"
if self.jdbc_port.isdigit() == False: self.jdbc_port = "5432"
try:
self.jdbc_database = self.jdbc_url[18:].split('/')[1].split(';')[0]
except:
logging.error("Cant determine database based on jdbc_string")
exit_after_function = True
if self.jdbc_url.startswith( 'jdbc:datadirect:openedge:'):
self.atlasJdbcSourceSupport = True
self.db_progress = True
self.jdbc_servertype = constant.PROGRESS
self.jdbc_force_column_lowercase = True
self.jdbc_driver, self.jdbc_classpath = self.getJDBCDriverConfig("Progress DB", "default")
self.jdbc_classpath_for_python = self.jdbc_classpath
self.jdbc_hostname = self.jdbc_url[27:].split(':')[0].split(';')[0]
try:
self.jdbc_port = self.jdbc_url[27:].split(':')[1].split(';')[0]
except:
self.jdbc_port = "9999"
if self.jdbc_port.isdigit() == False: self.jdbc_port = "9999"
try:
self.jdbc_database = self.jdbc_url.split("databaseName=")[1].split(';')[0]
except:
logging.error("Cant determine database based on jdbc_string")
exit_after_function = True
if self.jdbc_url.startswith( 'jdbc:db2://'):
self.atlasJdbcSourceSupport = True
self.db_db2udb = True
self.jdbc_servertype = constant.DB2_UDB
self.jdbc_force_column_lowercase = False
self.jdbc_driver, self.jdbc_classpath = self.getJDBCDriverConfig("DB2 UDB", "default")
self.jdbc_classpath_for_python = self.jdbc_classpath
self.jdbc_hostname = self.jdbc_url[11:].split(':')[0].split(';')[0].split('/')[0]
try:
self.jdbc_port = self.jdbc_url[11:].split(':')[1].split('/')[0]
except:
self.jdbc_port = "50000"
if self.jdbc_port.isdigit() == False: self.jdbc_port = "50000"
try:
self.jdbc_database = self.jdbc_url[11:].split('/')[1].split(';')[0].split(':')[0]
except:
logging.error("Cant determine database based on jdbc_string")
exit_after_function = True
try:
self.jdbc_encrypt = self.jdbc_url.split("sslConnection=")[1].split(';')[0].lower()
if self.jdbc_encrypt == "true": self.jdbc_encrypt = True
except:
self.jdbc_encrypt = False
try:
self.jdbc_trustedservercert = self.jdbc_url.split("sslTrustStoreLocation=")[1].split(';')[0]
except:
self.jdbc_trustedservercert = False
try:
self.jdbc_trustedservercert_password = self.jdbc_url.split("sslTrustStorePassword=")[1].split(';')[0]
except:
self.jdbc_trustedservercert_password = False
if self.jdbc_url.startswith( 'jdbc:as400://'):
self.atlasJdbcSourceSupport = True
self.db_db2as400 = True
self.jdbc_servertype = constant.DB2_AS400
self.jdbc_force_column_lowercase = False
self.jdbc_driver, self.jdbc_classpath = self.getJDBCDriverConfig("DB2 AS400", "default")
self.jdbc_classpath_for_python = self.jdbc_classpath
self.jdbc_hostname = self.jdbc_url[13:].split(':')[0].split(';')[0].split('/')[0]
try:
self.jdbc_port = self.jdbc_url[13:].split(':')[1].split('/')[0]
except:
self.jdbc_port = "446"
if self.jdbc_port.isdigit() == False: self.jdbc_port = "446"
try:
self.jdbc_database = self.jdbc_url[13:].split('/')[1].split(';')[0].split(':')[0]
except:
logging.error("Cant determine database based on jdbc_string")
exit_after_function = True
if self.jdbc_url.startswith( 'mongo://'):
self.atlasJdbcSourceSupport = False
self.db_mongodb = True
self.jdbc_servertype = constant.MONGO
self.jdbc_driver, self.jdbc_classpath = self.getJDBCDriverConfig("MongoDB", "default")
# self.jdbc_driver = "no-jdbc-driver-for-mongodb"
# self.jdbc_classpath = "/usr/share/java/mongo/mongo-spark-connector_2.11-2.3.3.jar:/usr/share/java/mongo/bson-3.11.2.jar:/usr/share/java/mongo/mongodb-driver-core-3.11.2.jar:/usr/share/java/mongo/mongo-java-driver-3.11.2.jar"
self.jdbc_classpath_for_python = self.jdbc_classpath
self.jdbc_hostname = self.jdbc_url[8:].split(':')[0]
self.jdbc_port = self.jdbc_url[8:].split(':')[1].split('/')[0]
self.jdbc_database = self.jdbc_url[8:].split('/')[1].split('?')[0].strip()
# Get all options in order to find authSource
try:
mongoOptions = dict(x.split("=") for x in self.jdbc_url.split('?')[1].split("&"))
for k, v in mongoOptions.items():
if ( k.lower() == "authsource" ):
self.mongoAuthSource = v
except IndexError:
pass
if self.jdbc_url.startswith( 'jdbc:Cache://'):
self.atlasJdbcSourceSupport = True
self.db_cachedb = True
self.jdbc_servertype = constant.CACHEDB
self.jdbc_force_column_lowercase = False
self.jdbc_driver, self.jdbc_classpath = self.getJDBCDriverConfig("CacheDB", "default")
self.jdbc_classpath_for_python = self.jdbc_classpath
self.jdbc_hostname = self.jdbc_url[13:].split(':')[0].split(';')[0].split('/')[0]
try:
self.jdbc_port = self.jdbc_url[13:].split(':')[1].split('/')[0]
except:
self.jdbc_port = "1972"
if self.jdbc_port.isdigit() == False: self.jdbc_port = "1972"
try:
self.jdbc_database = self.jdbc_url[13:].split('/')[1].split(';')[0].split(':')[0]
except:
logging.error("Cant determine database based on jdbc_string")
exit_after_function = True
# Check to make sure that we have a supported JDBC string
if self.jdbc_servertype == "":
logging.error("JDBC Connection '%s' is not supported."%(self.jdbc_url))
exit_after_function = True
if copySlave == True:
self.jdbc_hostname = None
self.jdbc_port = None
self.jdbc_database = None
logging.debug(" db_mssql = %s"%(self.db_mssql))
logging.debug(" db_oracle = %s"%(self.db_oracle))
logging.debug(" db_mysql = %s"%(self.db_mysql))
logging.debug(" db_postgresql = %s"%(self.db_postgresql))
logging.debug(" db_progress = %s"%(self.db_progress))
logging.debug(" db_db2udb = %s"%(self.db_db2udb))
logging.debug(" db_db2as400 = %s"%(self.db_db2as400))
logging.debug(" db_mongodb = %s"%(self.db_mongodb))
logging.debug(" db_cachedb = %s"%(self.db_cachedb))
logging.debug(" jdbc_servertype = %s"%(self.jdbc_servertype))
logging.debug(" jdbc_url = %s"%(self.jdbc_url))
logging.debug(" jdbc_username = %s"%(self.jdbc_username))
logging.debug(" jdbc_password = %s"%(self.jdbc_password))
logging.debug(" jdbc_hostname = %s"%(self.jdbc_hostname))
logging.debug(" jdbc_port = %s"%(self.jdbc_port))
logging.debug(" jdbc_database = %s"%(self.jdbc_database))
logging.debug(" jdbc_classpath = %s"%(self.jdbc_classpath))
logging.debug(" jdbc_classpath_for_python = %s"%(self.jdbc_classpath_for_python))
logging.debug(" jdbc_driver = %s"%(self.jdbc_driver))
logging.debug(" jdbc_driver_for_python = %s"%(self.jdbc_driver_for_python))
logging.debug(" jdbc_ad_domain = %s"%(self.jdbc_ad_domain))
logging.debug(" jdbc_encrypt = %s"%(self.jdbc_encrypt))
logging.debug(" jdbc_encrypt_string = %s"%(self.jdbc_encrypt_string))
logging.debug(" jdbc_trustedservercert = %s"%(self.jdbc_trustedservercert))
logging.debug(" jdbc_trustedservercert_password = %s"%(self.jdbc_trustedservercert_password))
logging.debug(" jdbc_hostincert = %s"%(self.jdbc_hostincert))
logging.debug(" jdbc_logintimeout = %s"%(self.jdbc_logintimeout))
logging.debug(" jdbc_password_file = %s"%(self.jdbc_password_file))
logging.debug(" jdbc_oracle_sid = %s"%(self.jdbc_oracle_sid))
logging.debug(" jdbc_oracle_servicename = %s"%(self.jdbc_oracle_servicename))
logging.debug("Executing common_config.lookupConnectionAlias() - Finished")
if exit_after_function == True:
raise Exception
# def getMongoUri(self):
# mongoUri = "mongodb://%s:%s@%s:%s/"%(
# self.jdbc_username,
# self.jdbc_password,
# self.jdbc_hostname,
# self.jdbc_port)
#
# return mongoUri
def getJDBCTableDefinition(self, source_schema, source_table, printInfo=True):
logging.debug("Executing common_config.getJDBCTableDefinition()")
if printInfo == True:
logging.info("Reading SQL table definitions from source database")
self.source_schema = source_schema
self.source_table = source_table
# Connect to the source database
self.connectToJDBC()
self.source_columns_df = self.sourceSchema.readTableColumns( self.JDBCCursor,
serverType = self.jdbc_servertype,
database = self.jdbc_database,
schema = self.source_schema,
table = self.source_table)
self.source_keys_df = self.sourceSchema.readTableKeys( self.JDBCCursor,
serverType = self.jdbc_servertype,
database = self.jdbc_database,
schema = self.source_schema,
table = self.source_table)
self.source_index_df = self.sourceSchema.readTableIndex( self.JDBCCursor,
serverType = self.jdbc_servertype,
database = self.jdbc_database,
schema = self.source_schema,
table = self.source_table)
try:
self.tableType = self.sourceSchema.getJdbcTableType(self.jdbc_servertype, self.source_columns_df.iloc[0]["TABLE_TYPE"])
except IndexError:
self.tableType = "unknown"
logging.debug("Executing common_config.getSourceTableDefinition() - Finished")
def disconnectFromJDBC(self):
logging.debug("Disconnect from JDBC database")
if self.db_mongodb == True:
			# This is a MongoDB connection. Let's redirect to disconnectFromMongo instead
return self.disconnectFromMongo()
try:
self.JDBCCursor.close()
self.JDBCConn.close()
except AttributeError:
pass
except jpype.JavaException as exception:
logging.info("Disconnection to database over JDBC failed with the following error:")
logging.info(exception.message())
pass
except Exception as exception:
logging.info("Unknown error during disconnection to JDBC database:")
logging.info(exception.message())
pass
self.JDBCCursor = None
def connectToJDBC(self, allJarFiles=False, exitIfFailure=True, logger="", printError=True):
log = logging.getLogger(logger)
if self.db_mongodb == True:
			# This is a MongoDB connection. Let's redirect to connectToMongo instead
return self.connectToMongo(exitIfFailure=exitIfFailure, logger=logger)
if allJarFiles == True:
query = "select classpath from jdbc_connections_drivers"
log.debug("Executing the following SQL: %s" % (query))
self.mysql_cursor.execute(query, )
self.jdbc_classpath_for_python = []
for row in self.mysql_cursor.fetchall():
if row[0] != "add path to JAR file":
self.jdbc_classpath_for_python.append(row[0])
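			# jdbc_classpath_for_python now lists every configured driver JAR. This matters because the JVM
			# started by jaydebeapi/JPype can only get its classpath once per process, so a tool that talks
			# to several database types needs all drivers available up front.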
if self.JDBCCursor == None:
log.debug("Connecting to database over JDBC")
log.debug(" self.jdbc_username = %s"%(self.jdbc_username))
log.debug(" self.jdbc_password = %s"%(self.jdbc_password))
log.debug(" self.jdbc_driver = %s"%(self.jdbc_driver))
log.debug(" self.jdbc_url = %s"%(self.jdbc_url))
log.debug(" self.jdbc_classpath_for_python = %s"%(self.jdbc_classpath_for_python))
JDBCCredentials = [ self.jdbc_username, self.jdbc_password ]
try:
self.JDBCConn = jaydebeapi.connect(self.jdbc_driver, self.jdbc_url, JDBCCredentials , self.jdbc_classpath_for_python)
self.JDBCCursor = self.JDBCConn.cursor()
except jpype.JavaException as exception:
if printError == True:
log.error("Connection to database over JDBC failed with the following error:")
log.error(exception.message())
if exitIfFailure == True:
self.remove_temporary_files()
sys.exit(1)
else:
return False
return True
def getJDBCcolumnMaxValue(self, source_schema, source_table, column):
logging.debug("Executing common_config.getJDBCcolumnMaxValue()")
self.connectToJDBC()
query = None
if self.db_mssql == True:
query = "select max(%s) from [%s].[%s].[%s]"%(column, self.jdbc_database, source_schema, source_table)
if self.db_oracle == True:
query = "select max(%s) from \"%s\".\"%s\""%(column, source_schema.upper(), source_table.upper())
if self.db_mysql == True:
query = "select max(%s) from %s"%(column, source_table)
if self.db_postgresql == True:
query = "select max(%s) from \"%s\".\"%s\""%(column, source_schema.lower(), source_table.lower())
if self.db_progress == True:
query = "select max(%s) from \"%s\".\"%s\""%(column, source_schema, source_table)
if self.db_db2udb == True:
query = "select max(%s) from \"%s\".\"%s\""%(column, source_schema, source_table)
if self.db_db2as400 == True:
query = "select max(%s) from \"%s\".\"%s\""%(column, source_schema, source_table)
if self.custom_max_query != None:
# If a custom query is configured, we just use that instead and ignore the config above
query = self.custom_max_query
logging.info("Using a custom query to get Max value from source table (%s)"%(query))
self.JDBCCursor.execute(query)
logging.debug("SQL Statement executed: %s" % (query) )
row = self.JDBCCursor.fetchone()
logging.info("Max value that will be used in the incremental import is '%s'"%(row[0]))
return row[0]
logging.debug("Executing common_config.getJDBCcolumnMaxValue() - Finished")
def truncateJDBCTable(self, schema, table):
""" Truncates a table on the JDBC connection """
logging.debug("Executing common_config.truncateJDBCTable()")
self.connectToJDBC()
query = None
if self.db_oracle == True:
query = "truncate table \"%s\".\"%s\""%(schema.upper(), table.upper())
if self.db_mssql == True:
query = "truncate table %s.%s"%(schema, table)
if self.db_db2udb == True:
query = "truncate table \"%s\".\"%s\" immediate"%(schema.upper(), table.upper())
if self.db_mysql == True:
query = "truncate table %s"%(table)
if self.db_postgresql == True:
query = "truncate table %s.%s"%(schema.lower(), table.lower())
if query == None:
raise undevelopedFeature("There is no support for this database type in common_config.truncateJDBCTable()")
logging.debug("SQL Statement executed: %s" % (query) )
self.JDBCCursor.execute(query)
logging.debug("Executing common_config.truncateJDBCTable() - Finished")
def checkJDBCTable(self, schema, table):
""" Checks if a table exists on the JDBC connections. Return True or False """
logging.debug("Executing common_config.checkJDBCTable()")
self.connectToJDBC()
query = None
if self.db_oracle == True:
query = "select count(owner) from all_tables where owner = '%s' and table_name = '%s'"%(schema.upper(), table.upper())
if self.db_mssql == True:
query = "select count(table_name) from INFORMATION_SCHEMA.COLUMNS where table_schema = '%s' and table_name = '%s'"%(schema, table)
if self.db_db2udb == True:
query = "select count(name) from SYSIBM.SYSTABLES where upper(creator) = '%s' and upper(name) = '%s'"%(schema.upper(), table.upper())
if self.db_mysql == True:
query = "select count(table_name) from information_schema.tables where table_schema = '%s' and table_name = '%s'"%(self.jdbc_database, table)
if self.db_postgresql == True:
query = "select count(table_name) from information_schema.tables where table_catalog = '%s' and table_schema = '%s' and table_name = '%s'"%(self.jdbc_database, schema.lower(), table.lower())
if query == None:
raise undevelopedFeature("There is no support for this database type in common_config.checkJDBCTable()")
logging.debug("SQL Statement executed: %s" % (query) )
self.JDBCCursor.execute(query)
row = self.JDBCCursor.fetchone()
tableExists = False
if int(row[0]) > 0:
tableExists = True
logging.debug("Executing common_config.checkJDBCTable() - Finished")
return tableExists
def executeJDBCquery(self, query):
""" Executes a query against the JDBC database and return the values in a Pandas DF """
logging.debug("Executing common_config.executeJDBCquery()")
logging.debug("Query to execute: %s"%(query))
try:
self.connectToJDBC()
self.JDBCCursor.execute(query)
except jaydebeapi.DatabaseError as errMsg:
raise SQLerror(errMsg)
result_df = pd.DataFrame()
try:
result_df = pd.DataFrame(self.JDBCCursor.fetchall())
if result_df.empty == False:
result_df_columns = []
for columns in self.JDBCCursor.description:
result_df_columns.append(columns[0]) # Name of the column is in the first position
result_df.columns = result_df_columns
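				# cursor.description follows DB-API 2.0: one 7-item tuple per column, with the column name at index 0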
except jaydebeapi.Error:
logging.debug("An error was raised during JDBCCursor.fetchall(). This happens during SQL operations that dont return any rows like 'create table'")
# Set the correct column namnes in the DataFrame
logging.debug("Executing common_config.executeJDBCquery() - Finished")
return result_df
def getJDBCTableRowCount(self, source_schema, source_table, whereStatement=None):
logging.debug("Executing common_config.getJDBCTableRowCount()")
self.connectToJDBC()
query = None
if self.db_mssql == True:
query = "select count_big(1) from [%s].[%s].[%s]"%(self.jdbc_database, source_schema, source_table)
if self.db_oracle == True:
query = "select count(1) from \"%s\".\"%s\""%(source_schema.upper(), source_table.upper())
if self.db_mysql == True:
query = "select count(1) from %s" % source_table
if self.db_postgresql == True:
query = "select count(1) from \"%s\".\"%s\""%(source_schema.lower(), source_table.lower())
if self.db_progress == True:
query = "select count(1) from \"%s\".\"%s\""%(source_schema, source_table)
if self.db_db2udb == True:
query = "select count(1) from \"%s\".\"%s\""%(source_schema, source_table)
if self.db_db2as400 == True:
query = "select count(1) from \"%s\".\"%s\""%(source_schema, source_table)
if whereStatement != None and whereStatement != "":
query = query + " where " + whereStatement
logging.debug("SQL Statement executed: %s" % (query) )
self.JDBCCursor.execute(query)
row = self.JDBCCursor.fetchone()
		logging.debug("Executing common_config.getJDBCTableRowCount() - Finished")
		return int(row[0])
def dropJDBCTable(self, schema, table):
logging.debug("Executing common_config.dropJDBCTable()")
self.connectToJDBC()
query = "drop table %s"%(self.getJDBCsqlFromTable(schema=schema, table=table))
self.JDBCCursor.execute(query)
logging.debug("Executing common_config.dropJDBCTable() - Finished")
def logImportFailure(self, errorText, severity, importType=None, hiveDB=None, hiveTable=None):
if hiveDB == None: hiveDB = self.Hive_DB
if hiveTable == None: hiveTable = self.Hive_Table
query = "insert into import_failure_log "
query += "( hive_db, hive_table, eventtime, severity, import_type, error_text ) "
query += "values "
query += "( %s, %s, %s, %s, %s, %s )"
logging.debug("SQL Statement executed: %s" % (query))
self.mysql_cursor.execute(query, (hiveDB, hiveTable, datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'), severity, importType, errorText))
self.mysql_conn.commit()
def logHiveColumnAdd(self, column, columnType=None, description=None, hiveDB=None, hiveTable=None):
if description == None:
description = "Column '%s' added to table with type '%s'"%(column, columnType)
if hiveDB == None: hiveDB = self.Hive_DB
if hiveTable == None: hiveTable = self.Hive_Table
# query = "insert into hive_table_change_history "
query = "insert into table_change_history "
query += "( hive_db, hive_table, column_name, eventtime, event, value, description ) "
query += "values "
query += "( %s, %s, %s, %s, 'column_added', %s, %s )"
logging.debug("SQL Statement executed: %s" % (query))
self.mysql_cursor.execute(query, (hiveDB, hiveTable, column, datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'), columnType, description))
self.mysql_conn.commit()
def logHiveColumnTypeChange(self, column, columnType, previous_columnType=None, description=None, hiveDB=None, hiveTable=None):
if description == None:
if previous_columnType == None:
description = "Column '%s' type changed to %s"%(column, columnType)
else:
description = "Column '%s' type changed from %s to %s"%(column, previous_columnType, columnType)
if hiveDB == None: hiveDB = self.Hive_DB
if hiveTable == None: hiveTable = self.Hive_Table
# query = "insert into hive_table_change_history "
query = "insert into table_change_history "
query += "( hive_db, hive_table, column_name, eventtime, event, previous_value, value, description ) "
query += "values "
query += "( %s, %s, %s, %s, 'column_type_change', %s, %s, %s )"
logging.debug("SQL Statement executed: %s" % (query))
self.mysql_cursor.execute(query, (hiveDB, hiveTable, column, datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'), previous_columnType, columnType, description))
self.mysql_conn.commit()
def logHiveColumnRename(self, columnName, previous_columnName, description=None, hiveDB=None, hiveTable=None):
if description == None:
description = "Column '%s' renamed to %s"%(previous_columnName, columnName)
if hiveDB == None: hiveDB = self.Hive_DB
if hiveTable == None: hiveTable = self.Hive_Table
# query = "insert into hive_table_change_history "
query = "insert into table_change_history "
query += "( hive_db, hive_table, column_name, eventtime, event, previous_value, value, description ) "
query += "values "
query += "( %s, %s, %s, %s, 'column_rename', %s, %s, %s )"
logging.debug("SQL Statement executed: %s" % (query))
self.mysql_cursor.execute(query, (hiveDB, hiveTable, columnName, datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'), previous_columnName, columnName, description))
self.mysql_conn.commit()
def logJDBCColumnAdd(self, column, columnType=None, description=None, dbAlias=None, database=None, schema=None, table=None):
if description == None:
description = "Column '%s' added to table with type '%s'"%(column, columnType)
query = "insert into jdbc_table_change_history "
query += "( dbalias, db_name, schema_name, table_name, column_name, eventtime, event, value, description ) "
query += "values "
query += "( %s, %s, %s, %s, %s, %s, 'column_added', %s, %s )"
logging.debug("SQL Statement executed: %s" % (query))
self.mysql_cursor.execute(query, (dbAlias, database, schema, table, column, datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'), columnType, description))
self.mysql_conn.commit()
def logJDBCColumnTypeChange(self, column, columnType, previous_columnType=None, description=None, dbAlias=None, database=None, schema=None, table=None):
if description == None:
if previous_columnType == None:
description = "Column '%s' type changed to %s"%(column, columnType)
else:
description = "Column '%s' type changed from %s to %s"%(column, previous_columnType, columnType)
query = "insert into jdbc_table_change_history "
query += "( dbalias, db_name, schema_name, table_name, column_name, eventtime, event, previous_value, value, description ) "
query += "values "
query += "( %s, %s, %s, %s, %s, %s, 'column_type_change', %s, %s, %s )"
logging.debug("SQL Statement executed: %s" % (query))
self.mysql_cursor.execute(query, (dbAlias, database, schema, table, column, datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'), previous_columnType, columnType, description))
self.mysql_conn.commit()
def logJDBCColumnRename(self, columnName, previous_columnName, description=None, dbAlias=None, database=None, schema=None, table=None):
if description == None:
description = "Column '%s' renamed to %s"%(previous_columnName, columnName)
query = "insert into jdbc_table_change_history "
query += "( dbalias, db_name, schema_name, table_name, column_name, eventtime, event, previous_value, value, description ) "
query += "values "
query += "( %s, %s, %s, %s, %s, %s, 'column_rename', %s, %s, %s )"
logging.debug("SQL Statement executed: %s" % (query))
self.mysql_cursor.execute(query, (dbAlias, database, schema, table, columnName, datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f'), previous_columnName, columnName, description))
self.mysql_conn.commit()
def getQuoteAroundColumn(self):
quoteAroundColumn = ""
if self.jdbc_servertype == constant.MSSQL: quoteAroundColumn = "\""
if self.jdbc_servertype == constant.ORACLE: quoteAroundColumn = "\""
if self.jdbc_servertype == constant.MYSQL: quoteAroundColumn = "`"
if self.jdbc_servertype == constant.POSTGRESQL: quoteAroundColumn = "\""
if self.jdbc_servertype == constant.PROGRESS: quoteAroundColumn = "\""
if self.jdbc_servertype == constant.DB2_UDB: quoteAroundColumn = "\""
if self.jdbc_servertype == constant.DB2_AS400: quoteAroundColumn = "\""
return quoteAroundColumn
def getJDBCUpperCase(self):
upperCase = False
if self.jdbc_servertype == constant.ORACLE: upperCase = True
if self.jdbc_servertype == constant.DB2_UDB: upperCase = True
return upperCase
def getJDBCsqlFromTable(self, schema, table):
logging.debug("Executing common_config.getJDBCsqlFromTable()")
fromTable = ""
if self.jdbc_servertype == constant.MSSQL: fromTable = "[%s].[%s].[%s]"%(self.jdbc_database, schema, table)
if self.jdbc_servertype == constant.ORACLE: fromTable = "\"%s\".\"%s\""%(schema.upper(), table.upper())
if self.jdbc_servertype == constant.MYSQL: fromTable = "%s"%(table)
if self.jdbc_servertype == constant.POSTGRESQL: fromTable = "\"%s\".\"%s\""%(schema.lower(), table.lower())
if self.jdbc_servertype == constant.PROGRESS: fromTable = "\"%s\".\"%s\""%(schema, table)
if self.jdbc_servertype == constant.DB2_UDB: fromTable = "\"%s\".\"%s\""%(schema, table)
if self.jdbc_servertype == constant.DB2_AS400: fromTable = "\"%s\".\"%s\""%(schema, table)
logging.debug("Executing common_config.getJDBCsqlFromTable() - Finished")
return fromTable
def getConfigValue(self, key):
""" Returns a value from the configuration table based on the supplied key. Value returned can be Int, Str or DateTime"""
logging.debug("Executing common_config.getConfigValue()")
returnValue = None
boolValue = False
if key in ("hive_remove_locks_by_force", "airflow_disable", "import_start_disable", "import_stage_disable", "export_start_disable", "export_stage_disable", "hive_validate_before_execution", "hive_print_messages", "import_process_empty", "hive_major_compact_after_merge", "hive_insert_only_tables", "hive_acid_with_clusteredby", "post_data_to_kafka", "post_data_to_kafka_extended", "post_data_to_rest", "post_data_to_rest_extended", "post_airflow_dag_operations", "rest_verifyssl"):
valueColumn = "valueInt"
boolValue = True
elif key in ("sqoop_import_default_mappers", "sqoop_import_max_mappers", "sqoop_export_default_mappers", "sqoop_export_max_mappers", "spark_export_default_executors", "spark_export_max_executors", "spark_import_default_executors", "spark_import_max_executors", "atlas_discovery_interval", "airflow_major_version", "rest_timeout"):
valueColumn = "valueInt"
elif key in ("import_staging_database", "export_staging_database", "hive_validate_table", "airflow_sudo_user", "airflow_dbimport_commandpath", "airflow_dag_directory", "airflow_dag_staging_directory", "timezone", "airflow_dag_file_group", "airflow_dag_file_permission", "airflow_dummy_task_queue", "cluster_name", "hdfs_address", "hdfs_blocksize", "hdfs_basedir", "kafka_brokers", "kafka_saslmechanism", "kafka_securityprotocol", "kafka_topic", "kafka_trustcafile", "rest_url", "rest_trustcafile"):
valueColumn = "valueStr"
else:
logging.error("There is no configuration with the name '%s'"%(key))
self.remove_temporary_files()
sys.exit(1)
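		# Boolean settings are stored as 0/1 in valueInt and converted to True/False below,
		# so e.g. getConfigValue("airflow_disable") returns a bool rather than an int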
query = "select %s from configuration where configkey = '%s'"%(valueColumn, key)
logging.debug("SQL Statement executed: %s" % (query) )
self.mysql_cursor.execute(query)
row = self.mysql_cursor.fetchone()
if valueColumn == "valueInt":
returnValue = int(row[0])
if valueColumn == "valueStr":
returnValue = row[0]
if returnValue == None or returnValue.strip() == "":
logging.error("Configuration Key '%s' must have a value in '%s'"%(key, valueColumn))
self.remove_temporary_files()
sys.exit(1)
if boolValue == True:
if returnValue == 1:
returnValue = True
elif returnValue == 0:
returnValue = False
else:
logging.error("Configuration Key '%s' can only have 0 or 1 in column '%s'"%(key, valueColumn))
self.remove_temporary_files()
sys.exit(1)
logging.debug("Fetched configuration '%s' as '%s'"%(key, returnValue))
logging.debug("Executing common_config.getConfigValue() - Finished")
return returnValue
def connectToMongo(self, exitIfFailure=True, logger=""):
log = logging.getLogger(logger)
log.debug("Executing common_config.connectToMongo()")
if self.mongoClient != None:
return True
try:
self.mongoClient = pymongo.MongoClient("mongodb://%s:%s/"%(self.jdbc_hostname, self.jdbc_port))
self.mongoDB = self.mongoClient[self.jdbc_database.strip()]
if ( self.mongoAuthSource == None ):
self.mongoDB.authenticate(self.jdbc_username, self.jdbc_password )
else:
self.mongoDB.authenticate(self.jdbc_username, self.jdbc_password, source=self.mongoAuthSource )
except pymongo.errors.ServerSelectionTimeoutError:
log.error("Timeout Error when connecting to Mongo at %s.%s"%(self.jdbc_hostname, self.jdbc_port))
if exitIfFailure == True:
self.remove_temporary_files()
sys.exit(1)
else:
return False
		log.debug("Executing common_config.connectToMongo() - Finished")
		return True
def disconnectFromMongo(self):
logging.debug("Executing common_config.disconnectFromMongo()")
self.mongoClient.close()
self.mongoClient = None
self.mongoDB = None
logging.debug("Executing common_config.disconnectFromMongo() - Finished")
def getMongoCollections(self, collectionFilter=None):
logging.debug("Executing common_config.getMongoTables()")
# Connect to MongoDB and get a list of all collections
self.connectToMongo()
mongoCollections = self.mongoDB.list_collection_names()
self.disconnectFromMongo()
if collectionFilter == None:
collectionFilter = ""
collectionFilter = collectionFilter.replace('*', '.*')
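		# A shell-style wildcard filter such as 'user*' is turned into the regex 'user.*' before matching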
collectionList = []
for collection in mongoCollections:
if re.search(collectionFilter, collection):
collectionDict = {"schema": "-"}
collectionDict["table"] = collection
collectionList.append(collectionDict)
result_df = pd.DataFrame(collectionList)
logging.debug("Executing common_config.getMongoTables() - Finished")
return result_df
def getJDBCtablesAndViews(self, schemaFilter=None, tableFilter=None):
logging.debug("Executing common_config.getJDBCtablesAndViews()")
self.connectToJDBC()
result_df = self.sourceSchema.getJDBCtablesAndViews(
JDBCCursor=self.JDBCCursor,
serverType=self.jdbc_servertype,
database=self.jdbc_database,
schemaFilter=schemaFilter,
tableFilter=tableFilter)
logging.debug("Executing common_config.getJDBCtablesAndViews() - Finished")
return result_df
def getAtlasDiscoverConfigObject(self, dbAlias=None, logger=""):
""" Discover all RDBMS objects on the 'dbAlias' and populate Atlas with them """
log = logging.getLogger(logger)
log.debug("Executing common_config.getAtlasDiscoverConfigObject()")
if dbAlias == None:
dbAlias = self.dbAlias
self.lookupConnectionAlias(dbAlias)
jdbcConnectionData = self.getAtlasJdbcConnectionData(dbAlias)
configObject = {}
configObject["dbAlias"] = dbAlias
configObject["contactInfo"] = jdbcConnectionData.get('contact_info')
configObject["description"] = jdbcConnectionData.get('description')
configObject["owner"] = jdbcConnectionData.get('owner')
configObject["atlasIncludeFilter"] = jdbcConnectionData.get('atlas_include_filter')
configObject["atlasExcludeFilter"] = jdbcConnectionData.get('atlas_exclude_filter')
configObject["jdbc_hostname"] = self.jdbc_hostname
configObject["jdbc_port"] = self.jdbc_port
configObject["jdbc_servertype"] = self.jdbc_servertype
configObject["jdbc_database"] = self.jdbc_database
configObject["jdbc_oracle_sid"] = self.jdbc_oracle_sid
configObject["jdbc_oracle_servicename"] = self.jdbc_oracle_servicename
configObject["jdbc_username"] = self.jdbc_username
configObject["jdbc_password"] = self.jdbc_password
configObject["jdbc_driver"] = self.jdbc_driver
configObject["jdbc_url"] = self.jdbc_url
configObject["jdbc_classpath_for_python"] = self.jdbc_classpath_for_python
configObject["jdbc_environment"] = self.jdbc_environment
configObject["hdfs_address"] = self.getConfigValue(key = "hdfs_address")
configObject["cluster_name"] = self.getConfigValue(key = "cluster_name")
log.debug("Executing common_config.getAtlasDiscoverConfigObject() - Finished")
return configObject
def getAnonymizationSeed(self):
logging.debug("Executing common_config.getAnonymizationSeed()")
# Set the default seed from the configuration file
seed = configuration.get("Anonymization", "seed")
query = "select seed_file from jdbc_connections where dbalias = %s "
logging.debug("Executing the following SQL: %s" % (query))
self.mysql_cursor.execute(query, (self.dbAlias, ))
row = self.mysql_cursor.fetchone()
seedFile = row[0]
if seedFile != None:
if os.path.exists(seedFile) == False:
logging.error("The connection have a seed file configured, but the tool is unable to find that file.")
self.remove_temporary_files()
sys.exit(1)
seed = open(seedFile,"r").read()
if len(seed) > 16:
logging.warning("The seed is longer that 16 characters. Will truncate the seed to only include the first 16 characters")
seed = seed[:16]
logging.debug("Executing common_config.getAnonymizationSeed() - Finished")
return seed
def stripUnwantedCharComment(self, work_string):
if work_string == None: return
work_string = work_string.replace('`', '')
work_string = work_string.replace('\'', '')
work_string = work_string.replace(';', '')
work_string = work_string.replace('\n', '')
work_string = work_string.replace('\\', '')
work_string = work_string.replace('’', '')
work_string = work_string.replace('"', '')
return work_string.strip()
def stripUnwantedCharColumnName(self, work_string):
if work_string is None: return
work_string = work_string.replace('`', '')
work_string = work_string.replace('\'', '')
work_string = work_string.replace(';', '')
work_string = work_string.replace('\n', '')
work_string = work_string.replace('\\', '')
work_string = work_string.replace('’', '')
work_string = work_string.replace(':', '')
work_string = work_string.replace(',', '')
work_string = work_string.replace('.', '')
work_string = work_string.replace('"', '')
return work_string.strip()
def saveJsonToDatabase(self, datatype, destination, json):
""" Saves a json that was unable to be sent successfully to the 'json_to_send' table so it can be sent at a later time """
logging.debug("Executing common_config.saveJsonToDatabase()")
query = "insert into json_to_send "
query += "( type, status, destination, jsondata ) "
query += "values "
query += "( %s, 0, %s, %s )"
logging.debug("SQL Statement executed: %s" % (query))
self.mysql_cursor.execute(query, (datatype, destination, json))
self.mysql_conn.commit()
# query = "insert into json_to_rest (type, status, jsondata) values ('import_statistics', 0, %s)"
# self.mysql_cursor.execute(query, (json.dumps(jsonDataREST), ))
# self.mysql_conn.commit()
# logging.debug("SQL Statement executed: %s" % (self.mysql_cursor.statement) )
logging.debug("Executing common_config.saveJsonToDatabase() - Finished")
|
{"hexsha": "b8c383e5ae321b29a52959ea2173826dd02ea466", "size": 68655, "ext": "py", "lang": "Python", "max_stars_repo_path": "bin/DBImportConfig/common_config.py", "max_stars_repo_name": "Middlecon/DBImport", "max_stars_repo_head_hexsha": "8f19776a5f6a6e5d6eebe5c312432b8058a2e579", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2019-05-22T04:17:02.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-05T16:54:08.000Z", "max_issues_repo_path": "bin/DBImportConfig/common_config.py", "max_issues_repo_name": "Middlecon/DBImport", "max_issues_repo_head_hexsha": "8f19776a5f6a6e5d6eebe5c312432b8058a2e579", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 73, "max_issues_repo_issues_event_min_datetime": "2019-05-22T04:19:24.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-18T05:09:26.000Z", "max_forks_repo_path": "bin/DBImportConfig/common_config.py", "max_forks_repo_name": "Middlecon/DBImport", "max_forks_repo_head_hexsha": "8f19776a5f6a6e5d6eebe5c312432b8058a2e579", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-05-19T23:46:56.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-12T12:02:37.000Z", "avg_line_length": 39.9854397204, "max_line_length": 500, "alphanum_fraction": 0.7166411769, "include": true, "reason": "import numpy", "num_tokens": 17234}
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test Two Layer QNN."""
import unittest
from test import QiskitMachineLearningTestCase
import numpy as np
from ddt import ddt, data
from qiskit.providers.aer import StatevectorSimulator
from qiskit.circuit.library import RealAmplitudes, ZZFeatureMap
from qiskit.utils import QuantumInstance
from qiskit_machine_learning.neural_networks import TwoLayerQNN
@ddt
class TestTwoLayerQNN(QiskitMachineLearningTestCase):
"""Two Layer QNN Tests."""
def setUp(self):
super().setUp()
# specify "run configuration"
quantum_instance = QuantumInstance(StatevectorSimulator())
# define QNN
num_qubits = 2
feature_map = ZZFeatureMap(num_qubits)
ansatz = RealAmplitudes(num_qubits, reps=1)
self.qnn = TwoLayerQNN(
num_qubits,
feature_map=feature_map,
ansatz=ansatz,
quantum_instance=quantum_instance,
)
self.qnn_no_qi = TwoLayerQNN(num_qubits, feature_map=feature_map, ansatz=ansatz)
@data(
("qi", True),
("no_qi", True),
("qi", False),
("no_qi", False),
)
def test_qnn_simple_new(self, config):
"""Simple Opflow QNN Test for a specified neural network."""
qnn_type, input_grad_required = config
input_data = np.zeros(self.qnn.num_inputs)
weights = np.zeros(self.qnn.num_weights)
if qnn_type == "qi":
qnn = self.qnn
else:
qnn = self.qnn_no_qi
qnn.input_gradients = input_grad_required
# test forward pass
result = qnn.forward(input_data, weights)
self.assertEqual(result.shape, (1, *qnn.output_shape))
# test backward pass
result = qnn.backward(input_data, weights)
# batch dimension of 1
if qnn.input_gradients:
self.assertEqual(result[0].shape, (1, *qnn.output_shape, qnn.num_inputs))
else:
self.assertIsNone(result[0])
self.assertEqual(result[1].shape, (1, *qnn.output_shape, qnn.num_weights))
@data(
("qi", True),
("no_qi", True),
("qi", False),
("no_qi", False),
)
def _test_qnn_batch(self, config):
"""Batched Opflow QNN Test for the specified network."""
qnn_type, input_grad_required = config
batch_size = 10
input_data = np.arange(batch_size * self.qnn.num_inputs).reshape(
(batch_size, self.qnn.num_inputs)
)
weights = np.zeros(self.qnn.num_weights)
if qnn_type == "qi":
qnn = self.qnn
else:
qnn = self.qnn_no_qi
qnn.input_gradients = input_grad_required
# test forward pass
result = qnn.forward(input_data, weights)
self.assertEqual(result.shape, (batch_size, *qnn.output_shape))
# test backward pass
result = qnn.backward(input_data, weights)
if qnn.input_gradients:
self.assertEqual(result[0].shape, (batch_size, *qnn.output_shape, qnn.num_inputs))
else:
self.assertIsNone(result[0])
self.assertEqual(result[1].shape, (batch_size, *qnn.output_shape, qnn.num_weights))
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "8d5c46dd9880f301b24271e448741200c6e12a58", "size": 3719, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/neural_networks/test_two_layer_qnn.py", "max_stars_repo_name": "FrankFeenix/qiskit-machine-learning", "max_stars_repo_head_hexsha": "ec800bc8a58b32f3052ff399d54f19a74417fc7d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/neural_networks/test_two_layer_qnn.py", "max_issues_repo_name": "FrankFeenix/qiskit-machine-learning", "max_issues_repo_head_hexsha": "ec800bc8a58b32f3052ff399d54f19a74417fc7d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/neural_networks/test_two_layer_qnn.py", "max_forks_repo_name": "FrankFeenix/qiskit-machine-learning", "max_forks_repo_head_hexsha": "ec800bc8a58b32f3052ff399d54f19a74417fc7d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-06-15T16:41:39.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-15T16:41:39.000Z", "avg_line_length": 30.2357723577, "max_line_length": 94, "alphanum_fraction": 0.6431836515, "include": true, "reason": "import numpy", "num_tokens": 911}
|
#!/usr/bin/env python -O
import os
import sys
import bisect
import os.path
import json
import numpy as np
from datetime import date, datetime, timedelta
from netCDF4 import Dataset
def main(elFile, ncFile):
# read all of json in one go
with open(elFile, "r") as fp:
js = json.load(fp)
#time stamp stuff - convert time stamp into CF Convention one
# e.g "timestamp": "2017.07.31-12:10:56" to "seconds since 2017-07-31 12:10:56"
firstTime = js["environment_sensor_readings"][0]["timestamp"]
firstTime=firstTime.replace("."," ").replace("-"," ")
ArrayTime=firstTime.split(" ")
if len(ArrayTime) != 4:
print("Cannot Parse first timestamp {0}".format(js["environment_sensor_readings"][0]["timestamp"]))
exit(1)
sinceTime="seconds since {0}-{1}-{2} {3}".format( ArrayTime[0],ArrayTime[1],ArrayTime[2],ArrayTime[3] )
# create dims and coordinate vars
output_handle=Dataset(ncFile, "w", format="NETCDF4")
# nb time is unlimited
output_handle.createDimension("time", 0)
nTime=output_handle.createVariable("time", "f8", ("time",))
setattr(nTime, "units", sinceTime)
nSunDirection=output_handle.createVariable("sunDirection", "f8", ("time",))
setattr(nSunDirection, "units", "degrees")
# get wavelength from first record only as it is repeated
wavelength=js["environment_sensor_readings"][0]["spectrometer"]["wavelength"]
output_handle.createDimension("wvl_lgr", len(wavelength))
output_handle.createVariable("wvl_lgr", "f8", ("wvl_lgr",))
output_handle.variables["wvl_lgr"][:] = [fd * 1e-9 for fd in wavelength]
setattr(output_handle.variables["wvl_lgr"], "units", "meter")
nSpectrum=output_handle.createVariable("weighted_average_downwelling_irradiance", "f8", ("time","wvl_lgr",))
setattr(nSpectrum, "units", "watt meter-2 meter-1")
sz = len(js["environment_sensor_readings"])
itime=0.0 # assume that time increases in 5 second increments across records
for idx in range(sz):
# output_handle.variables["time"][idx]=itime
nTime[idx] = itime
# sun Direction
nSunDirection[idx]=js["environment_sensor_readings"][idx]["weather_station"]["sunDirection"]["value"]
#
spectrum=js["environment_sensor_readings"][idx]["spectrometer"]["spectrum"]
nSpectrum[idx,:] = spectrum
# assume that time increases in 5 second increments across records
itime+=5.0
output_handle.close()
if __name__ == "__main__":
main(sys.argv[1],sys.argv[2])
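# Example invocation (hypothetical file names, assuming the EnvironmentLogger JSON
# layout handled above):
#   python EnvironmentLogger2NetCDF.py environment_log.json environment_log.nc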
|
{"hexsha": "8ef1e55f0531b033c484645c01f8d07a0ae9067e", "size": 2644, "ext": "py", "lang": "Python", "max_stars_repo_path": "scratch/EnvironmentLogger2NetCDF.py", "max_stars_repo_name": "terraref/extractors-hyperspectral", "max_stars_repo_head_hexsha": "782e3c62088257bb9c8f0f4f1d8943fa02881dc0", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2016-10-11T19:05:23.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-08T03:44:33.000Z", "max_issues_repo_path": "scratch/EnvironmentLogger2NetCDF.py", "max_issues_repo_name": "terraref/extractors-hyperspectral", "max_issues_repo_head_hexsha": "782e3c62088257bb9c8f0f4f1d8943fa02881dc0", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 27, "max_issues_repo_issues_event_min_datetime": "2017-01-17T21:30:17.000Z", "max_issues_repo_issues_event_max_datetime": "2020-03-12T21:10:26.000Z", "max_forks_repo_path": "scratch/EnvironmentLogger2NetCDF.py", "max_forks_repo_name": "terraref/extractors-hyperspectral", "max_forks_repo_head_hexsha": "782e3c62088257bb9c8f0f4f1d8943fa02881dc0", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2017-01-18T18:57:00.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-10T15:12:11.000Z", "avg_line_length": 29.7078651685, "max_line_length": 112, "alphanum_fraction": 0.6728441755, "include": true, "reason": "import numpy", "num_tokens": 708}
|
function export_protocol(iProtocol, iSubject, OutputFile)
% EXPORT_PROTOCOL: Export a protocol into a zip file.
%
% USAGE: export_protocol(iProtocol, iSubject, OutputFile)
% export_protocol(iProtocol, iSubject) : Ask for the output filename
% export_protocol(iProtocol) : Export all the subjects of protocol, ask for the output filename
% export_protocol() : Export current protocol, ask for the output filename
% @=============================================================================
% This function is part of the Brainstorm software:
% https://neuroimage.usc.edu/brainstorm
%
% Copyright (c) University of Southern California & McGill University
% This software is distributed under the terms of the GNU General Public License
% as published by the Free Software Foundation. Further details on the GPLv3
% license can be found at http://www.gnu.org/copyleft/gpl.html.
%
% FOR RESEARCH PURPOSES ONLY. THE SOFTWARE IS PROVIDED "AS IS," AND THE
% UNIVERSITY OF SOUTHERN CALIFORNIA AND ITS COLLABORATORS DO NOT MAKE ANY
% WARRANTY, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO WARRANTIES OF
% MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, NOR DO THEY ASSUME ANY
% LIABILITY OR RESPONSIBILITY FOR THE USE OF THIS SOFTWARE.
%
% For more information type "brainstorm license" at command prompt.
% =============================================================================@
%
% Authors: Francois Tadel, 2012-2015; Martin Cousineau, 2019
global GlobalData;
%% ===== PARSE INPUTS =====
if (nargin < 3)
OutputFile = [];
end
if (nargin < 2)
iSubject = [];
end
if (nargin < 1) || isempty(iProtocol)
iProtocol = bst_get('iProtocol');
else
gui_brainstorm('SetCurrentProtocol', iProtocol);
end
% Check protocol index
if isempty(iProtocol) || (iProtocol == 0)
bst_error('Invalid protocol index.', 'Export protocol', 0);
return
end
% Get protocol description
ProtocolInfo = bst_get('ProtocolInfo');
% Get output filename
if isempty(OutputFile)
% Get default directories
LastUsedDirs = bst_get('LastUsedDirs');
% Default output filename
if isempty(iSubject)
OutputFile = bst_fullfile(LastUsedDirs.ExportProtocol, file_standardize([ProtocolInfo.Comment, '.zip']));
else
sSubject = bst_get('Subject', iSubject);
OutputFile = bst_fullfile(LastUsedDirs.ExportProtocol, file_standardize([ProtocolInfo.Comment, '_', sSubject.Name, '.zip']));
end
% File selection
OutputFile = java_getfile('save', 'Export protocol', OutputFile, 'single', 'files', ...
{{'.zip'}, 'Zip files (*.zip)', 'ZIP'}, 1);
if isempty(OutputFile)
return
end
% Save new default export path
LastUsedDirs.ExportProtocol = bst_fileparts(OutputFile);
bst_set('LastUsedDirs', LastUsedDirs);
end
% Force saving database
db_save(1);
%% ===== ZIP FILES =====
% Progress bar
bst_progress('start', 'Export protocol', 'Creating zip file...');
% Cd to protocol folder
prevFolder = pwd;
cd(bst_fileparts(ProtocolInfo.SUBJECTS, 1));
% Prefixes to add to the folders
[tmp__, anatFolder] = bst_fileparts(ProtocolInfo.SUBJECTS, 1);
[tmp__, dataFolder] = bst_fileparts(ProtocolInfo.STUDIES, 1);
% Build list of files to zip
if isempty(iSubject)
% Add the entire subject folder
ListZip = {anatFolder};
% List files in studies: add all files
allFiles = dir(ProtocolInfo.STUDIES);
for i = 1:length(allFiles)
if (allFiles(i).name(1) ~= '.')
ListZip{end+1} = bst_fullfile(dataFolder, allFiles(i).name);
end
end
else
% Get default study for this subject
sSubject = bst_get('Subject', iSubject, 1);
sStudy = bst_get('AnalysisIntraStudy', iSubject);
% Create a temporary protocol.mat for future database update information
ProtocolMat = struct();
ProtocolMat.ProtocolInfo = GlobalData.DataBase.ProtocolInfo(iProtocol);
ProtocolMat.ProtocolSubjects = GlobalData.DataBase.ProtocolSubjects(iProtocol);
ProtocolMat.ProtocolStudies = GlobalData.DataBase.ProtocolStudies(iProtocol);
ProtocolMat.DbVersion = GlobalData.DataBase.DbVersion;
ProtocolMat.LastAccessDate = datestr(now);
ProtocolMat.LastAccessUserDir = bst_get('UserDir');
% Remove useless fields
StudyDir = ProtocolMat.ProtocolInfo.STUDIES;
ProtocolMat.ProtocolInfo = rmfield(ProtocolMat.ProtocolInfo, 'STUDIES');
ProtocolMat.ProtocolInfo = rmfield(ProtocolMat.ProtocolInfo, 'SUBJECTS');
ProtocolMat.ProtocolSubjects.Subject = ProtocolMat.ProtocolSubjects.Subject(iSubject);
[sStudies, iStudies] = bst_get('StudyWithSubject', ProtocolMat.ProtocolSubjects.Subject.FileName, 'default_study', 'intra_subject');
ProtocolMat.ProtocolStudies.Study = ProtocolMat.ProtocolStudies.Study(iStudies);
ProtocolFile = bst_fullfile(dataFolder, 'protocol.mat');
bst_save(ProtocolFile, ProtocolMat, 'v7');
% List all files that might be useful for this subject
ListZip = {bst_fullfile(anatFolder, bst_fileparts(sSubject.FileName)), ...
bst_fullfile(anatFolder, bst_get('DirDefaultSubject')), ...
bst_fullfile(dataFolder, bst_fileparts(bst_fileparts(sStudy.FileName))), ...
bst_fullfile(dataFolder, bst_get('DirDefaultStudy')), ...
bst_fullfile(dataFolder, bst_get('DirAnalysisInter')), ...
ProtocolFile};
end
% Zip
zip(OutputFile, ListZip);
% Restore protocol file
if ~isempty(iSubject)
% Remove temporary protocol file
file_delete(bst_fullfile(StudyDir, 'protocol.mat'), 1);
% Save again the original one
GlobalData.DataBase.isProtocolModified(iProtocol) = 1;
db_save(1);
end
% Restore initial folder
cd(prevFolder);
% Close progress bar
bst_progress('stop');
|
{"author": "brainstorm-tools", "repo": "brainstorm3", "sha": "a892cfaabde1eaa2f9a3ac015c05b73f3739433a", "save_path": "github-repos/MATLAB/brainstorm-tools-brainstorm3", "path": "github-repos/MATLAB/brainstorm-tools-brainstorm3/brainstorm3-a892cfaabde1eaa2f9a3ac015c05b73f3739433a/toolbox/io/export_protocol.m"}
|
# Utility Functions
# Authors: Adam Li
# Edited by: Adam Li
'''
Defines the hard-coded channel indices, onset electrodes and clinical annotations
used per patient for analysis.
'''
# Imports necessary for this function
import numpy as np
import re
from itertools import combinations
def splitpatient(patient):
stringtest = patient.find('seiz')
if stringtest == -1:
stringtest = patient.find('sz')
if stringtest == -1:
stringtest = patient.find('aw')
if stringtest == -1:
stringtest = patient.find('aslp')
if stringtest == -1:
stringtest = patient.find('_')
if stringtest == -1:
raise ValueError("Not sz, seiz, aslp, or aw! Please add additional naming possibilities, or tell data gatherers to rename datasets.")
pat_id = patient[0:stringtest]
seiz_id = patient[stringtest:]
# remove any underscores
pat_id = re.sub('_', '', pat_id)
seiz_id = re.sub('_', '', seiz_id)
return pat_id, seiz_id
def returnindices(pat_id, seiz_id=None):
included_indices, onsetelecs, clinresult = returnnihindices(
pat_id, seiz_id)
if included_indices.size == 0:
included_indices, onsetelecs, clinresult = returnlaindices(
pat_id, seiz_id)
if included_indices.size == 0:
included_indices, onsetelecs, clinresult = returnummcindices(
pat_id, seiz_id)
if included_indices.size == 0:
included_indices, onsetelecs, clinresult = returnjhuindices(
pat_id, seiz_id)
if included_indices.size == 0:
included_indices, onsetelecs, clinresult = returntngindices(
pat_id, seiz_id)
return included_indices, onsetelecs, clinresult
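# Usage sketch (hypothetical patient label, shown only to illustrate the call pattern):
# pat_id, seiz_id = splitpatient('pt1sz2')                # -> ('pt1', 'sz2')
# inds, onsetelecs, clinresult = returnindices(pat_id, seiz_id)
# 'inds' holds the retained channel indices, 'onsetelecs' the annotated onset
# electrodes (or None), and 'clinresult' the clinical outcome code (-1 if unknown).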
def returntngindices(pat_id, seiz_id):
included_indices = np.array([])
onsetelecs = None
clinresult = -1
if pat_id == 'id001ac':
# included_indices = np.concatenate((np.arange(0,4), np.arange(5,55),
# np.arange(56,77), np.arange(78,80)))
included_indices = np.array([0, 1, 5, 6, 7, 8, 9, 10, 11, 12, 13,
15, 16, 17, 18, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 35, 36, 37, 38, 39, 40, 41, 42, 43, 45, 46, 47, 48,
49, 50, 51, 52, 53, 58, 59, 60, 61, 62, 63, 64, 65, 66, 68,
69, 70, 71, 72, 73, 74, 75, 76, 78, 79])
elif pat_id == 'id002cj':
# included_indices = np.array(np.arange(0,184))
included_indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
30, 31, 32, 33, 34, 35, 36, 37, 38,
45, 46, 47, 48, 49, 50, 51, 52, 53,
60, 61, 62, 63, 64, 65, 66, 67, 70, 71, 72, 73, 74, 75, 76, 85, 86, 87, 88, 89,
90, 91, 92, 93, 100, 101, 102, 103, 104, 105,
106, 107, 108, 115, 116, 117, 118, 119,
120, 121, 122, 123, 129, 130, 131, 132, 133,
134, 135, 136, 137,
# np.arange(143, 156)
143, 144, 145, 146, 147,
148, 149, 150, 151, 157, 158, 159, 160, 161,
162, 163, 164, 165, 171, 172, 173, 174, 175,
176, 177, 178, 179, 180, 181, 182])
elif pat_id == 'id003cm':
included_indices = np.concatenate((np.arange(0,13), np.arange(25,37),
np.arange(40,50), np.arange(55,69), np.arange(70,79)))
elif pat_id == 'id004cv':
# removed OC'10, SC'5, CC'14/15
included_indices = np.concatenate((np.arange(0,23), np.arange(25,39),
np.arange(40,59), np.arange(60,110)))
elif pat_id == 'id005et':
included_indices = np.concatenate((np.arange(0,39), np.arange(39,47),
np.arange(52,62), np.arange(62,87)))
elif pat_id == 'id006fb':
included_indices = np.concatenate((np.arange(10,19), np.arange(40,50),
np.arange(115,123)))
elif pat_id == 'id008gc':
included_indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 48, 49, 50, 51, 52, 53, 54, 56, 57, 58, 61, 62, 63, 64, 65,
71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 83, 84, 85, 86, 87, 88, 89, 90, 92, 93,
94, 95, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 110, 111])
elif pat_id == 'id009il':
included_indices = np.concatenate((np.arange(0,10), np.arange(10,152)))
elif pat_id == 'id010js':
included_indices = np.concatenate((np.arange(0,14),
np.arange(15,29), np.arange(30,42), np.arange(43,52),
np.arange(53,65), np.arange(66,75), np.arange(76,80),
np.arange(81,85), np.arange(86,94), np.arange(95,98),
np.arange(99,111),
np.arange(112,124)))
elif pat_id == 'id011ml':
included_indices = np.concatenate((np.arange(0,18), np.arange(21,68),
np.arange(69,82), np.arange(82,125)))
elif pat_id == 'id012pc':
included_indices = np.concatenate((np.arange(0,4), np.arange(9,17),
np.arange(18,28), np.arange(31,41), np.arange(44,56),
np.arange(57,69), np.arange(70,82), np.arange(83,96),
np.arange(97,153)))
elif pat_id == 'id013pg':
included_indices = np.array([2, 3, 4, 5, 15, 18, 19, 20, 21, 23, 24,
25, 30, 31, 32, 33, 34, 35, 36, 37, 38, 50, 51, 52, 53, 54, 55, 56,
57, 58, 60, 61, 62, 63, 64, 65, 66, 67, 68, 70, 71, 72, 73, 74, 75,
76, 77, 78])
elif pat_id == 'id014rb':
included_indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
130, 131, 132, 133, 135, 136, 140, 141, 142, 143, 144, 145, 146,
147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
160, 161, 162, 163, 164])
elif pat_id == 'id015sf':
included_indices = np.concatenate((np.arange(0,37), np.arange(38,77),
np.arange(78,121)))
return included_indices, onsetelecs, clinresult
def returnnihindices(pat_id, seiz_id):
included_indices = np.array([])
onsetelecs = None
clinresult = -1
if pat_id == 'pt1':
included_indices = np.concatenate((np.arange(0, 36), np.arange(41, 43),
np.arange(45, 69), np.arange(71, 95)))
onsetelecs = set(['ATT1', 'ATT2', 'AD1', 'AD2', 'AD3', 'AD4',
'PD1', 'PD2', 'PD3', 'PD4'])
resectelecs = set(['ATT1', 'ATT2', 'ATT3', 'ATT4', 'ATT5', 'ATT6', 'ATT7', 'ATT8',
'AST1', 'AST2', 'AST3', 'AST4',
'PST1', 'PST2', 'PST3', 'PST4',
'AD1', 'AD2', 'AD3', 'AD4',
'PD1', 'PD2', 'PD3', 'PD4',
'PLT5', 'PLT6', 'SLT1'])
clinresult = 1
elif pat_id == 'pt2':
# [1:14 16:19 21:25 27:37 43 44 47:74]
included_indices = np.concatenate((np.arange(0, 14), np.arange(15, 19),
np.arange(
20, 25), np.arange(
26, 37), np.arange(
42, 44),
np.arange(46, 74)))
onsetelecs = set(['MST1', 'PST1', 'AST1', 'TT1'])
resectelecs = set(['TT1', 'TT2', 'TT3', 'TT4', 'TT6', 'TT6',
'G1', 'G2', 'G3', 'G4', 'G9', 'G10', 'G11', 'G12', 'G18', 'G19',
'G20', 'G26', 'G27',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'])
clinresult = 1
elif pat_id == 'pt3':
# [1:19 21:37 42:43 46:69 71:107]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 37),
np.arange(41, 43), np.arange(45, 69), np.arange(70, 107)))
onsetelecs = set(['SFP1', 'SFP2', 'SFP3',
'IFP1', 'IFP2', 'IFP3',
'MFP2', 'MFP3',
'OF1', 'OF2', 'OF3', 'OF4'])
resectelecs = set(['FG1', 'FG2', 'FG9', 'FG10', 'FG17', 'FG18', 'FG25',
'SFP1', 'SFP2', 'SFP3', 'SFP4', 'SFP5', 'SFP6', 'SFP7', 'SFP8',
'MFP1', 'MFP2', 'MFP3', 'MFP4', 'MFP5', 'MFP6',
'IFP1', 'IFP2', 'IFP3', 'IFP4',
'OF3', 'OF4'])
clinresult = 1
elif pat_id == 'pt4':
# [1:19 21:37 42:43 46:69 71:107]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 26),
np.arange(28, 36)))
onsetelecs = set([])
resectelecs = set([])
clinresult = -1
elif pat_id == 'pt5':
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 26),
np.arange(28, 36)))
onsetelecs = set([])
resectelecs = set([])
clinresult = -1
elif pat_id == 'pt6':
# [1:36 42:43 46 52:56 58:71 73:95]
included_indices = np.concatenate((np.arange(0, 36), np.arange(41, 43),
np.arange(45, 46), np.arange(51, 56), np.arange(57, 71), np.arange(72, 95)))
onsetelecs = set(['LA1', 'LA2', 'LA3', 'LA4',
'LAH1', 'LAH2', 'LAH3', 'LAH4',
'LPH1', 'LPH2', 'LPH3', 'LPH4'])
resectelecs = set(['LALT1', 'LALT2', 'LALT3', 'LALT4', 'LALT5', 'LALT6',
'LAST1', 'LAST2', 'LAST3', 'LAST4',
'LA1', 'LA2', 'LA3', 'LA4', 'LPST4',
'LAH1', 'LAH2', 'LAH3', 'LAH4',
'LPH1', 'LPH2'])
clinresult = 2
elif pat_id == 'pt7':
# [1:17 19:35 37:38 41:62 67:109]
included_indices = np.concatenate((np.arange(0, 17), np.arange(18, 35),
np.arange(36, 38), np.arange(40, 62), np.arange(66, 109)))
onsetelecs = set(['MFP1', 'LFP3',
'PT2', 'PT3', 'PT4', 'PT5',
'MT2', 'MT3',
'AT3', 'AT4',
'G29', 'G30', 'G39', 'G40', 'G45', 'G46'])
resectelecs = set(['G28', 'G29', 'G30', 'G36', 'G37', 'G38', 'G39',
'G41', 'G44', 'G45', 'G46',
'LFP1', 'LFP2', 'LSF3', 'LSF4'])
clinresult = 3
elif pat_id == 'pt8':
# [1:19 21 23 30:37 39:40 43:64 71:76]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 21),
np.arange(
22, 23), np.arange(
29, 37), np.arange(
38, 40),
np.arange(42, 64), np.arange(70, 76)))
onsetelecs = set(['G19', 'G23', 'G29', 'G30', 'G31',
'TO6', 'TO5',
'MST3', 'MST4',
'O8', 'O9'])
resectelecs = set(['G22', 'G23', 'G27', 'G28', 'G29', 'G30', 'G31',
'MST2', 'MST3', 'MST4', 'PST2', 'PST3', 'PST4'])
clinresult = 1
elif pat_id == 'pt10':
# [1:3 5:19 21:35 48:69]
included_indices = np.concatenate((np.arange(0, 3), np.arange(4, 19),
np.arange(20, 35), np.arange(47, 69)))
onsetelecs = set(['TT1', 'TT2', 'TT4', 'TT6',
'MST1',
'AST2'])
resectelecs = set(['G3', 'G4', 'G5', 'G6', 'G11', 'G12', 'G13', 'G14',
'TT1', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6', 'AST1', 'AST2', 'AST3', 'AST4'])
clinresult = 2
elif pat_id == 'pt11':
# [1:19 21:35 37 39 40 43:74 76:81 83:84]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 35),
np.arange(
36, 37), np.arange(
38, 40), np.arange(
42, 74),
np.arange(75, 81), np.arange(82, 84)))
onsetelecs = set(['RG29', 'RG30', 'RG31', 'RG37', 'RG38', 'RG39',
'RG44', 'RG45'])
resectelecs = set(['RG4', 'RG5', 'RG6', 'RG7', 'RG12', 'RG13', 'RG14', 'RG15',
'RG21', 'RG22', 'RG23', 'RG29', 'RG30', 'RG31', 'RG37', 'RG38', 'RG39', 'RG45', 'RG46', 'RG47'])
resectelecs = set(['RG4', 'RG5', 'RG6', 'RG7', 'RG12',
'RG13', 'RG14', 'RG15',
'RG21', 'RG22', 'RG23', 'RG29', 'RG30',
'RG31', 'RG37', 'RG38', 'RG39', 'RG45', 'RG46', 'RG47'])
clinresult = 1
elif pat_id == 'pt12':
# [1:15 17:33 38:39 42:61]
included_indices = np.concatenate((np.arange(0, 15), np.arange(16, 33),
np.arange(37, 39), np.arange(41, 61)))
onsetelecs = set(['AST1', 'AST2',
'TT2', 'TT3', 'TT4', 'TT5'])
resectelecs = set(['G19', 'G20', 'G21', 'G22', 'G23', 'G27', 'G28', 'G29', 'G30', 'G31',
'TT1', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'])
clinresult = 2
elif pat_id == 'pt13':
# [1:36 39:40 43:66 69:74 77 79:94 96:103 105:130]
included_indices = np.concatenate((np.arange(0, 36), np.arange(38, 40),
np.arange(
42, 66), np.arange(
68, 74), np.arange(
76, 77),
np.arange(78, 94), np.arange(95, 103), np.arange(104, 130)))
onsetelecs = set(['G1', 'G2', 'G9', 'G10', 'G17', 'G18'])
resectelecs = set(['G1', 'G2', 'G3', 'G4', 'G9', 'G10', 'G11',
'G17', 'G18', 'G19',
'AP2', 'AP3', 'AP4'])
clinresult = 1
elif pat_id == 'pt14':
# [1:19 21:37 41:42 45:61 68:78]
included_indices = np.concatenate((np.arange(0, 3), np.arange(6, 10),
np.arange(
11, 17), np.arange(
18, 19), np.arange(
20, 37),
np.arange(40, 42), np.arange(44, 61), np.arange(67, 78)))
onsetelecs = set(['MST1', 'MST2',
'TT1', 'TT2', 'TT3',
'AST1', 'AST2'])
resectelecs = set(['TT1', 'TT2', 'TT3', 'AST1', 'AST2',
'MST1', 'MST2', 'PST1'])
clinresult = 4
elif pat_id == 'pt15':
# [2:7 9:30 32:36 41:42 45:47 49:66 69 71:85];
included_indices = np.concatenate((np.arange(1, 7), np.arange(8, 30),
np.arange(
31, 36), np.arange(
40, 42), np.arange(
44, 47),
np.arange(48, 66), np.arange(68, 69), np.arange(70, 85)))
onsetelecs = set(['TT1', 'TT2', 'TT3', 'TT4',
'MST1', 'MST2', 'AST1', 'AST2', 'AST3'])
resectelecs = set(['G2', 'G3', 'G4', 'G5', 'G10', 'G11', 'G12', 'G13',
'TT1', 'TT2', 'TT3', 'TT4', 'TT5',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'])
clinresult = 1
elif pat_id == 'pt16':
# [1:19 21:37 42:43 46:53]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 37),
np.arange(41, 43), np.arange(45, 53)))
onsetelecs = set(['TT1', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6',
'AST1', 'AST2', 'AST3', 'AST4',
'MST3', 'MST4',
'G26', 'G27', 'G28', 'G18', 'G19', 'G20', 'OF4'])
resectelecs = set(['G18', 'G19', 'G20', 'G26', 'G27', 'G28',
'G29', 'G30', 'TT1', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'
])
clinresult = 1
elif pat_id == 'pt17':
# [1:19 21:37 42:43 46:51]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 37),
np.arange(41, 43), np.arange(45, 51)))
onsetelecs = set(['TT1', 'TT2'])
resectelecs = set(['G27', 'G28', 'G29', 'G30',
'TT', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'])
clinresult = 1
return included_indices, onsetelecs, clinresult
def returnlaindices(pat_id, seiz_id):
included_indices = np.array([])
onsetelecs = None
clinresult = -1
spreadelecs = None
if pat_id == 'la01':
# [1 3 7:8 11:13 17:19 22:26 32 34:35 37 42 50:55 58 ...
# 62:65 70:72 77:81 84:97 100:102 105:107 110:114 120:121 130:131];
# onset_electrodes = {'Y''1', 'X''4', ...
# 'T''5', 'T''6', 'O''1', 'O''2', 'B1', 'B2',...% rare onsets
# }
included_indices = np.concatenate((np.arange(0, 3), np.arange(6, 8), np.arange(10, 13),
np.arange(
16, 19), np.arange(
21, 26), np.arange(
31, 32),
np.arange(
33, 35), np.arange(
36, 37), np.arange(
41, 42),
np.arange(
49, 55), np.arange(
57, 58), np.arange(
61, 65),
np.arange(
69, 72), np.arange(
76, 81), np.arange(
83, 97),
np.arange(
99, 102), np.arange(
104, 107), np.arange(
109, 114),
np.arange(119, 121), np.arange(129, 131)))
onsetelecs = ["X'4", "T'5", "T'6", "O'1", "O'2", "B1", "B2"]
spreadelecs = ["P1", "P2", 'P6', "X1", "X8", "X9", "E'2", "E'3"
"T'1"]
if seiz_id == 'inter2':
included_indices = np.concatenate((np.arange(0, 1), np.arange(7, 16), np.arange(21, 28),
np.arange(
33, 36), np.arange(
39, 40), np.arange(
42, 44), np.arange(
46, 50),
np.arange(
56, 58), np.arange(
62, 65), np.arange(
66, 68), np.arange(
69, 75),
np.arange(76, 83), np.arange(85, 89), np.arange(96, 103),
np.arange(106, 109), np.arange(111, 115), np.arange(116, 117),
np.arange(119, 123), np.arange(126, 127), np.arange(130, 134),
np.arange(136, 137), np.arange(138, 144), np.arange(146, 153)))
if seiz_id == 'ictal2':
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 19), np.arange(20, 33),
np.arange(
34, 37), np.arange(
38, 40), np.arange(
42, 98),
np.arange(107, 136), np.arange(138, 158)))
onsetelecs = ["Y'1"]
clinresult = 1
elif pat_id == 'la02':
# [1:4 7 9 11:12 15:18 21:28 30:34 47 50:62 64:67 ...
# 70:73 79:87 90 95:99]
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 7), np.arange(8, 9),
np.arange(
10, 12), np.arange(
14, 18), np.arange(
20, 28),
np.arange(
29, 34), np.arange(
46, 47), np.arange(
49, 62),
np.arange(
63, 67), np.arange(
69, 73), np.arange(
78, 87),
np.arange(89, 90), np.arange(94, 99)))
onsetelecs = ["L'2", "L'3", "L'4"]
clinresult = 1
elif pat_id == 'la03':
# [1:3 6:33 36:68 77:163]
included_indices = np.concatenate((np.arange(0, 3), np.arange(5, 33),
np.arange(35, 68), np.arange(76, 163)))
onsetelecs = ["L7"]
clinresult = 2
elif pat_id == 'la04':
# [1:4 9:13 15:17 22 24:32 44:47 52:58 60 63:64 ...
# 67:70 72:74 77:84 88:91 94:96 98:101 109:111 114:116 121 123:129];
included_indices = np.concatenate((np.arange(0, 4), np.arange(8, 13),
np.arange(
14, 17), np.arange(
21, 22), np.arange(
23, 32),
np.arange(43, 47), np.arange(51, 58), np.arange(59, 60),
np.arange(62, 64), np.arange(66, 70), np.arange(71, 74),
np.arange(76, 84), np.arange(87, 91), np.arange(93, 96),
np.arange(97, 101), np.arange(108, 111), np.arange(113, 116),
np.arange(120, 121), np.arange(122, 129)))
# FIRST ABLATION WAS A FAILURE
onsetelecs = ["L'4", "G'1", # 2ND RESECTION REMOVED ALL OF M' ELECTRODES
"M'1", "M'2", "M'3", "M'4", "M'5", "M'6", "M'7",
"M'8", "M'9", "M'10", "M'11", "M'12", "M'13", "M'14", "M'15", "M'16"]
clinresult = 2
elif pat_id == 'la05':
# [2:4 7:15 21:39 42:82 85:89 96:101 103:114 116:121 ...
# 126:145 147:152 154:157 160:161 165:180 182:191];
included_indices = np.concatenate((np.arange(1, 4), np.arange(6, 15),
np.arange(
20, 39), np.arange(
41, 82), np.arange(
84, 89),
np.arange(95, 101), np.arange(102, 114), np.arange(115, 121),
np.arange(125, 145), np.arange(146, 152), np.arange(153, 157),
np.arange(159, 161), np.arange(164, 180), np.arange(181, 191)))
onsetelecs = ["T'1", "T'2", "D'1", "D'2"]
clinresult = 1
elif pat_id == 'la06':
# [1:4 7:12 14:17 19 21:33 37 46:47 50:58 61:62 70:73 77:82 ...
# 84:102 104:112 114:119];
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 12),
np.arange(
13, 17), np.arange(
18, 19), np.arange(
20, 33),
np.arange(36, 37), np.arange(45, 47), np.arange(49, 58),
np.arange(60, 62), np.arange(69, 73), np.arange(76, 82),
np.arange(83, 102), np.arange(103, 112), np.arange(113, 119)))
onsetelecs = ["Q'3", "Q'4", "R'3", "R'4"]
clinresult = 2
elif pat_id == 'la07':
# [1:18 22:23 25 34:37 44 48:51 54:55 57:69 65:66 68:78 ...
# 82:83 85:93 96:107 114:120];
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 18), np.arange(21, 23),
np.arange(
24, 25), np.arange(
33, 37), np.arange(
43, 44),
np.arange(47, 51), np.arange(53, 55), np.arange(56, 69),
np.arange(64, 66), np.arange(67, 78), np.arange(81, 83),
np.arange(84, 93), np.arange(95, 107), np.arange(113, 120)))
onsetelecs = ["T'1", "T'3", "R'8", "R'9"]
clinresult = 1
elif pat_id == 'la08':
# [1:2 8:13 15:19 22 25 27:30 34:35 46:48 50:57 ...
# 65:68 70:72 76:78 80:84 87:93 100:102 105:108 110:117 123:127 130:131 133:137 ...
# 140:146]
included_indices = np.concatenate((np.arange(0, 2), np.arange(7, 13),
np.arange(
14, 19), np.arange(
21, 22), np.arange(
24, 25),
np.arange(26, 30), np.arange(33, 35), np.arange(45, 48),
np.arange(49, 57), np.arange(64, 68), np.arange(69, 72),
np.arange(75, 78), np.arange(79, 84), np.arange(86, 93),
np.arange(99, 102), np.arange(104, 108), np.arange(109, 117),
np.arange(122, 127), np.arange(129, 131), np.arange(132, 137),
np.arange(139, 146)))
onsetelecs = ["Q2"]
clinresult = 2
elif pat_id == 'la09':
# [3:4 7:17 21:28 33:38 42:47 51:56 58:62 64:69 ...
# 73:80 82:84 88:92 95:103 107:121 123 126:146 150:161 164:169 179:181 ...
# 183:185 187:191]
# 2/7/18 - got rid of F10 = looking at edf was super noisy
included_indices = np.concatenate((np.arange(2, 3), np.arange(6, 17),
np.arange(
20, 28), np.arange(
32, 38), np.arange(
41, 47),
np.arange(
50, 56), np.arange(
57, 62), np.arange(
63, 66), np.arange(
67, 69),
np.arange(72, 80), np.arange(81, 84), np.arange(87, 92),
np.arange(94, 103), np.arange(106, 121), np.arange(122, 123),
np.arange(125, 146), np.arange(149, 161), np.arange(163, 169),
np.arange(178, 181), np.arange(182, 185), np.arange(186, 191)))
onsetelecs = ["X'1", "X'2", "X'3", "X'4", "U'1", "U'2"]
if seiz_id == 'ictal2':
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 19),
np.arange(20, 39), np.arange(41, 189)))
onsetelecs = ["P'1", "P'2"]
clinresult = 2
elif pat_id == 'la10':
# [1:4 7:13 17:19 23:32 36:37 46:47 50 54:59 62:66 68:79 82:96 ...
# 99:106 108:113 117:127 135:159 163:169 172:173 176:179 181:185];
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 13),
np.arange(
16, 19), np.arange(
22, 32), np.arange(
35, 37),
np.arange(45, 47), np.arange(49, 50), np.arange(53, 59),
np.arange(61, 66), np.arange(67, 79), np.arange(81, 96),
np.arange(98, 106), np.arange(107, 113), np.arange(116, 127),
np.arange(134, 159), np.arange(162, 169), np.arange(171, 173),
np.arange(175, 179), np.arange(180, 185)))
onsetelecs = ["S1", "S2", "R2", "R3"]
clinresult = 2
elif pat_id == 'la11':
# [3:4 7:16 22:30 33:39 42 44:49 53:62 64:87 91:100 ...
# 102:117 120:127 131:140 142:191];
included_indices = np.concatenate((np.arange(2, 4), np.arange(6, 16),
np.arange(
21, 30), np.arange(
32, 39), np.arange(
41, 42), np.arange(
43, 49),
np.arange(
52, 62), np.arange(
63, 87), np.arange(
90, 100), np.arange(
101, 117),
np.arange(119, 127), np.arange(130, 140), np.arange(141, 191)))
onsetelecs = ["D6", "Z10"]
clinresult = 2
elif pat_id == 'la12':
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 15),
np.arange(
19, 23), np.arange(
24, 31), np.arange(
34, 36), np.arange(
42, 44), np.arange(
47, 48),
np.arange(
49, 59), np.arange(
61, 66), np.arange(
68, 86), np.arange(
87, 90),
np.arange(
91, 100), np.arange(
101, 119), np.arange(
121, 129), np.arange(
131, 134),
np.arange(136, 150), np.arange(153, 154), np.arange(156, 161),
np.arange(167, 178), np.arange(187, 191)))
onsetelecs = ["S1", "S2", "R2", "R3"]
clinresult = 3
elif pat_id == 'la13':
# [1:4 7:12 23:33 36:37 44:45 48:70 72:93]
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 12),
np.arange(
22, 33), np.arange(
35, 37), np.arange(
43, 45),
np.arange(47, 70), np.arange(71, 93)))
onsetelecs = ["Y13", "Y14"]
clinresult = 2
elif pat_id == 'la15':
# included_channels = [1:4 9:12 15:19 21:27 30:34 36:38 43:57 62:66 ...
# 68:71 76:85 89:106 108:112 114:115 118:124 127:132 135:158 ...
# 161:169 171:186]
included_indices = np.concatenate((np.arange(0, 4), np.arange(8, 12),
np.arange(
14, 19), np.arange(
20, 27), np.arange(
29, 34),
np.arange(35, 38), np.arange(42, 57), np.arange(61, 66),
np.arange(67, 71), np.arange(75, 85), np.arange(88, 106),
np.arange(107, 112), np.arange(113, 115), np.arange(117, 124),
np.arange(126, 132), np.arange(134, 158), np.arange(160, 169), np.arange(170, 186)))
if seiz_id == 'ictal':
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 19),
np.arange(
20, 39), np.arange(
41, 95), np.arange(
96, 112),
np.arange(113, 132), np.arange(134, 187)))
onsetelecs = ["R1", "R2", "R3"]
clinresult = 4
elif pat_id == 'la16':
# [1:3 10:16 23:24 28 31:35 37:39 42:44 46:47 ...
# 49:54 58:62 64:65 68:70 76:89 93:98 100:101 105:124 126 128:130 ...
# 132:134 136:140 142:144 149:156 158:163 165:166 168:170 173:181
# 183:189];
included_indices = np.concatenate((np.arange(0, 3), np.arange(9, 16),
np.arange(
22, 24), np.arange(
27, 28), np.arange(
30, 35),
np.arange(36, 39), np.arange(41, 44), np.arange(45, 47),
np.arange(48, 54), np.arange(57, 62), np.arange(63, 65),
np.arange(67, 70), np.arange(75, 89), np.arange(92, 98),
np.arange(99, 101), np.arange(104, 124), np.arange(125, 126),
np.arange(127, 130), np.arange(131, 134), np.arange(135, 140),
np.arange(141, 144), np.arange(148, 156), np.arange(157, 163),
np.arange(164, 166), np.arange(167, 170), np.arange(172, 181),
np.arange(182, 189)))
onsetelecs = ["Q7", "Q8"]
clinresult = 4
elif pat_id == 'la17':
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 39),
np.arange(41, 64)))
onsetelecs = ["X'1", "Y'1"]
clinresult = 4
return included_indices, onsetelecs, clinresult
def returnummcindices(pat_id, seiz_id):
included_indices = np.array([])
onsetelecs = None
clinresult = -1
if pat_id == 'ummc001':
# included_channels = [1:22 24:29 31:33 35:79 81:92];
included_indices = np.concatenate((np.arange(0, 22), np.arange(23, 29), np.arange(30, 33),
np.arange(34, 79), np.arange(80, 92)))
onsetelecs = ["GP13", 'GP21', 'GP29']
clinresult = 4
elif pat_id == 'ummc002':
# included_channels = [1:22 24:29 31:33 35:52];
included_indices = np.concatenate((np.arange(0, 22), np.arange(23, 29), np.arange(30, 33),
np.arange(34, 52)))
onsetelecs = ['ANT1', 'ANT2', 'ANT3',
'MEST1', 'MEST2', 'MEST3', 'MEST4', 'GRID17', 'GRID25']
# onsetelecs = ['ATT1', 'ATT2', 'ATT3',
# 'MEST1', 'MEST2', 'MEST3', 'MEST4', 'GRID17', 'GRID25']
clinresult = 1
elif pat_id == 'ummc003':
included_indices = np.concatenate((np.arange(0, 22), np.arange(23, 29), np.arange(30, 33),
np.arange(34, 48)))
onsetelecs = ['MEST4', 'MEST5', 'GRID4', 'GRID10', 'GRID12',
'GRID18', 'GRID19', 'GRID20', 'GRID26', 'GRID27']
clinresult = 1
elif pat_id == 'ummc004':
included_indices = np.concatenate((np.arange(0, 22), np.arange(23, 29), np.arange(30, 33),
np.arange(34, 49)))
onsetelecs = ['AT1', 'GRID1', 'GRID9', 'GRID10', 'GRID17', 'GRID18']
clinresult = 1
elif pat_id == 'ummc005':
included_indices = np.concatenate(
(np.arange(0, 33), np.arange(34, 48)))
onsetelecs = ['AT2', 'G17', 'G19', 'G25', 'G27', 'AT1', 'AT2', 'AT3', 'AT4',
'AT5', 'AT6']
onsetelecs = ['AT1']
# , 'GRID1', 'GRID9', 'GRID10', 'GRID17', 'GRID18']
clinresult = 1
elif pat_id == 'ummc005':
included_indices = np.concatenate(
(np.arange(0, 33), np.arange(34, 48)))
onsetelecs = ['AT2', 'G17', 'G19', 'G25', 'G27']
# , 'AT1', 'AT2', 'AT3', 'AT4','AT5', 'AT6']
clinresult = 1
elif pat_id == 'ummc006':
included_indices = np.concatenate((np.arange(0, 22), np.arange(23, 26), np.arange(27, 29),
np.arange(30, 33), np.arange(34, 56)))
onsetelecs = [
'MT2',
'MT3',
'MT4',
'MES2',
'MES3',
'MES5',
'MAT1',
'MAT2']
clinresult = 1
elif pat_id == 'ummc007':
included_indices = np.arange(0, 30)
onsetelecs = ['LMES1', 'LMES2', 'LMES3', 'LMES4', 'LPT3', 'LANT4', 'LANT5',
'RMES1', 'RANT1', 'RANT2', 'RANT3', 'RANT4', 'RPT3', 'RPT4', 'RPT5']
onsetelecs = [
'MT1',
'MT2',
'MT3',
'MT4',
'MEST1',
'MEST2',
'MEST3',
'MEST4',
'MEST5']
clinresult = 1
elif pat_id == 'ummc007':
included_indices = np.arange(0, 30)
onsetelecs = ['LMES1', 'LMES2', 'LMES3', 'LMES4',
'RMES1', 'RANT1', 'RANT2', 'RANT3', 'RANT4']
# 'LPT3','LANT4', 'LANT5',
# 'RPT3', 'RPT4', 'RPT5']
clinresult = 4
elif pat_id == 'ummc008':
included_indices = np.arange(0, 30)
onsetelecs = ['GRID1', 'GRID2', 'GRID3', 'GRID4',
'GRID5', 'GRID11', 'GRID12', 'GRID13',
'GRID17', 'GRID18', 'GRID19', 'GRID20', 'GRID21',
'AT1', 'AT2', 'AT3', 'AT4',
'MT1', 'MT2', 'MT3', 'MT4']
# 'GRID9',
# 'GRID10',
clinresult = 1
elif pat_id == 'ummc009':
included_indices = np.arange(0, 30)
onsetelecs = ['G4', 'G5', 'G6', 'G7', 'G12', 'G14', 'PT1', 'AT1']
clinresult = -1
return included_indices, onsetelecs, clinresult
def returnjhuindices(pat_id, seiz_id):
included_indices = np.array([])
onsetelecs = None
clinresult = -1
if pat_id == 'jh103':
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 12), np.arange(14, 23),
np.arange(
24, 33), np.arange(
46, 63), np.arange(
64, 66),
np.arange(68, 71), np.arange(72, 110)))
onsetelecs = ['RAD1', 'RAD2', 'RAD3', 'RAD4', 'RAD5',
'RAD6', 'RAD7', 'RAD8',
'RHD1', 'RHD2', 'RHD3', 'RHD4', 'RHD5',
'RHD6', 'RHD7', 'RHD8', 'RHD9',
'RTG40', 'RTG48']
clinresult = 4
elif pat_id == 'jh105':
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 12), np.arange(13, 19),
np.arange(
20, 37), np.arange(
41, 43), np.arange(
45, 49),
np.arange(50, 53), np.arange(54, 75), np.arange(77, 99)))
onsetelecs = ['RPG4', 'RPG5', 'RPG6', 'RPG12', 'RPG13', 'RPG14', 'RPG20', 'RPG21',
'APD1', 'APD2', 'APD3', 'APD4', 'APD5', 'APD6', 'APD7', 'APD8',
'PPD1', 'PPD2', 'PPD3', 'PPD4', 'PPD5', 'PPD6', 'PPD7', 'PPD8',
'ASI3', 'PSI5', 'PSI6']
clinresult = 1
return included_indices, onsetelecs, clinresult
def clinregions(patient):
''' THE REAL CLINICALLY ANNOTATED AREAS '''
# 001
if 'id001' in patient:
ezregions = ['ctx-rh-lateralorbitofrontal', 'ctx-rh-temporalpole']
pzregions = [
'ctx-rh-superiorfrontal',
'ctx-rh-rostralmiddlefrontal',
'ctx-lh-lateralorbitofrontal']
if 'id002' in patient:
ezregions = ['ctx-lh-lateraloccipital']
pzregions = ['ctx-lh-inferiorparietal', 'ctx-lh-superiorparietal']
if 'id003' in patient:
ezregions = ['ctx-lh-insula']
pzregions = ['Left-Putamen', 'ctx-lh-postcentral']
if 'id004' in patient:
''' '''
ezregions = [
'ctx-lh-posteriorcingulate',
'ctx-lh-caudalmiddlefrontal',
'ctx-lh-superiorfrontal']
pzregions = ['ctx-lh-precentral', 'ctx-lh-postcentral']
if 'id005' in patient:
''' '''
ezregions = ['ctx-lh-posteriorcingulate', 'ctx-lh-precuneus']
pzregions = ['ctx-lh-postcentral', 'ctx-lh-superiorparietal']
if 'id006' in patient:
''' '''
ezregions = ['ctx-rh-precentral']
pzregions = ['ctx-rh-postcentral', 'ctx-rh-superiorparietal']
if 'id007' in patient:
''' '''
ezregions = [
'Right-Amygdala',
'ctx-rh-temporalpole',
'ctx-rh-lateralorbitofrontal']
pzregions = ['Right-Hippocampus', 'ctx-rh-entorhinal', 'ctx-rh-medialorbitofrontal',
'ctx-rh-inferiortemporal', 'ctx-rh-temporalpole', 'ctx-rh-lateralorbitofrontal'] # 008
if 'id008' in patient:
ezregions = ['Right-Amygdala', 'Right-Hippocampus']
pzregions = [
'ctx-rh-superiortemporal',
'ctx-rh-temporalpole',
'ctx-rh-inferiortemporal',
'ctx-rh-medialorbitofrontal',
'ctx-rh-lateralorbitofrontal']
if 'id009' in patient:
ezregions = ['ctx-rh-lingual', 'ctx-rh-parahippocampal']
pzregions = [
'ctx-rh-lateraloccipital',
'ctx-rh-fusiform',
'ctx-rh-inferiorparietal'] # rlocc, rfug, ripc
if 'id010' in patient:
ezregions = [
'ctx-rh-medialorbitofrontal',
'ctx-rh-frontalpole',
'ctx-rh-rostralmiddlefrontal',
'ctx-rh-parsorbitalis'] # rmofc, rfp, rrmfg, rpor
pzregions = ['ctx-rh-lateralorbitofrontal', 'ctx-rh-rostralmiddlefrontal',
'ctx-rh-superiorfrontal', 'ctx-rh-caudalmiddlefrontal'] # rlofc, rrmfc, rsfc, rcmfg
if 'id011' in patient:
ezregions = ['Right-Hippocampus', 'Right-Amygdala'] # rhi, ramg
pzregions = ['Right-Thalamus-Proper', 'Right-Caudate', 'Right-Putamen',
'ctx-rh-insula', 'ctx-rh-entorhinal', 'ctx-rh-temporalpole'] # rth, rcd, rpu, rins, rentc, rtmp
if 'id012' in patient:
ezregions = [
'Right-Hippocampus',
'ctx-rh-fusiform',
'ctx-rh-entorhinal',
'ctx-rh-temporalpole'] # rhi, rfug, rentc, rtmp
pzregions = ['ctx-lh-fusiform', 'ctx-rh-inferiorparietal', 'ctx-rh-inferiortemporal',
'ctx-rh-lateraloccipital', 'ctx-rh-parahippocampal', 'ctx-rh-precuneus', 'ctx-rh-supramarginal'] # lfug, ripc, ritg, rloc, rphig, rpcunc, rsmg
# 013
if 'id013' in patient:
ezregions = ['ctx-rh-fusiform']
pzregions = ['ctx-rh-inferiortemporal', 'Right-Hippocampus', 'Right-Amygdala',
'ctx-rh-middletemporal', 'ctx-rh-entorhinal']
# 014
if 'id014' in patient:
ezregions = ['Left-Amygdala', 'Left-Hippocampus', 'ctx-lh-entorhinal', 'ctx-lh-fusiform',
'ctx-lh-temporalpole', 'ctx-rh-entorhinal']
pzregions = ['ctx-lh-superiortemporal', 'ctx-lh-middletemporal', 'ctx-lh-inferiortemporal',
'ctx-lh-insula', 'ctx-lh-parahippocampal']
if 'id015' in patient:
ezregions = ['ctx-rh-lingual', 'ctx-rh-lateraloccipital', 'ctx-rh-cuneus',
'ctx-rh-parahippocampal', 'ctx-rh-superiorparietal', 'ctx-rh-fusiform', 'ctx-rh-pericalcarine'] # rlgg, rloc, rcun, rphig, rspc, rfug, rpc
pzregions = [
'ctx-rh-parahippocampal',
'ctx-rh-superiorparietal',
'ctx-rh-fusiform'] # rphig, rspc, rfug
return ezregions, pzregions
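# Usage sketch (hypothetical patient label): the clinically annotated regions are
# looked up directly from the patient identifier, e.g.
# ezregions, pzregions = clinregions('id001ac')
# where 'ezregions' and 'pzregions' are presumably the epileptogenic-zone and
# propagation-zone region labels listed above.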
|
{"hexsha": "6a9f1fc42031b58ef28519e5270318bad5e2fb93", "size": 41983, "ext": "py", "lang": "Python", "max_stars_repo_path": "datainterface/utils/utils.py", "max_stars_repo_name": "adam2392/eegdatastorage", "max_stars_repo_head_hexsha": "fc6fbaaa4d31df83b45e6d420d023fada62cfa8e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "datainterface/utils/utils.py", "max_issues_repo_name": "adam2392/eegdatastorage", "max_issues_repo_head_hexsha": "fc6fbaaa4d31df83b45e6d420d023fada62cfa8e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "datainterface/utils/utils.py", "max_forks_repo_name": "adam2392/eegdatastorage", "max_forks_repo_head_hexsha": "fc6fbaaa4d31df83b45e6d420d023fada62cfa8e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.6538024972, "max_line_length": 164, "alphanum_fraction": 0.4545173046, "include": true, "reason": "import numpy", "num_tokens": 13995}
|
## @type x : character
f <- function(x) x
for (i in 1:10) {
<warning descr="x expected to be of type character, found type numeric">f(i)</warning>
}
|
{"hexsha": "a7c89d57d82eafb85f44443c503a21b1e5c18700", "size": 151, "ext": "r", "lang": "R", "max_stars_repo_path": "testData/typing/slice.r", "max_stars_repo_name": "ktisha/TheRPlugin", "max_stars_repo_head_hexsha": "be2593c26dee8f6287d5ec0a82aebbca9c74f657", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2016-06-17T18:24:45.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-15T13:43:52.000Z", "max_issues_repo_path": "testData/typing/slice.r", "max_issues_repo_name": "ktisha/TheRPlugin", "max_issues_repo_head_hexsha": "be2593c26dee8f6287d5ec0a82aebbca9c74f657", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 31, "max_issues_repo_issues_event_min_datetime": "2015-12-15T14:54:07.000Z", "max_issues_repo_issues_event_max_datetime": "2017-05-30T23:36:08.000Z", "max_forks_repo_path": "testData/typing/slice.r", "max_forks_repo_name": "ktisha/TheRPlugin", "max_forks_repo_head_hexsha": "be2593c26dee8f6287d5ec0a82aebbca9c74f657", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2015-03-02T11:59:49.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-11T16:47:03.000Z", "avg_line_length": 25.1666666667, "max_line_length": 88, "alphanum_fraction": 0.6556291391, "num_tokens": 47}
|
#
# Copyright (c) 2021 The Markovflow Contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A module contatining a collection of generating random matrices to be used in tests."""
from typing import Tuple
import numpy as np
def generate_random_pos_def_matrix(shape: int, batch_shape: Tuple = tuple()) -> np.ndarray:
"""
Generate a random positive definite matrix (i.e. for covariances)
:param shape: order of the square matrix generated by this function
:param batch_shape: the batch dimensions to create at the front
:return: A positive definite matrix of order n, with random entries.
"""
matrix = generate_random_lower_triangular_matrix(shape, batch_shape)
return np.einsum("...ij,...kj->...ik", matrix, matrix)
def generate_random_lower_triangular_matrix(shape: int, batch_shape: Tuple = tuple()) -> np.ndarray:
"""
Generate a random lower triangular matrix.
:param shape: order of the square matrix generated by this function
:param batch_shape: the batch dimensions to create at the front
:return: A lower triangular matrix of order n, with random entries.
"""
return np.tril(np.random.normal(size=batch_shape + (shape, shape)))
def generate_random_time_points(expected_range: float, shape: Tuple) -> np.ndarray:
"""
Generate random time points using an exponential distribution.
:param expected_range: The expected distance between the first and last data point
:param shape: The shape of the distribution to generate
:return: Random time points with the passed in shape
"""
# the last dimension of the shape is number of data points
scale = expected_range / shape[-1]
delta_ts = np.random.exponential(scale=scale, size=shape)
return np.cumsum(delta_ts, axis=-1)
def generate_random_time_observations(
obs_dim: int, num_data: int, batch_shape: Tuple = tuple()
) -> Tuple[np.ndarray, np.ndarray]:
"""
Generate random time points and observations for those time points.
:param obs_dim: the dimension of each observation
:param num_data: the number of observations
:param batch_shape: the batch dimensions to create at the front
:return: time points batch_shape + [num_data] and
observations batch_shape + [num_data, obs_dim]
"""
time_points = generate_random_time_points(expected_range=4.0, shape=batch_shape + (num_data,))
observations = np.cos(100 * time_points)[..., None] + 1e-3 * np.random.normal(
size=batch_shape + (num_data, obs_dim)
)
return time_points, observations
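# Minimal usage sketch (shapes chosen arbitrarily; not part of the original module):
# cov = generate_random_pos_def_matrix(3, batch_shape=(2,))            # shape (2, 3, 3)
# chol = generate_random_lower_triangular_matrix(3)                    # shape (3, 3)
# t, y = generate_random_time_observations(obs_dim=1, num_data=50)     # (50,), (50, 1)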
|
{"hexsha": "825bf0d6f8d8c149a140e7a62e68f038289e4560", "size": 3078, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/tools/generate_random_objects.py", "max_stars_repo_name": "prakharverma/markovflow", "max_stars_repo_head_hexsha": "9b7fafc199dae2f7f3207c2945fd43f674386dc1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2021-09-16T10:34:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T20:24:28.000Z", "max_issues_repo_path": "tests/tools/generate_random_objects.py", "max_issues_repo_name": "prakharverma/markovflow", "max_issues_repo_head_hexsha": "9b7fafc199dae2f7f3207c2945fd43f674386dc1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-12-01T17:53:53.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-16T15:55:49.000Z", "max_forks_repo_path": "tests/tools/generate_random_objects.py", "max_forks_repo_name": "prakharverma/markovflow", "max_forks_repo_head_hexsha": "9b7fafc199dae2f7f3207c2945fd43f674386dc1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-16T09:29:49.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-16T09:29:49.000Z", "avg_line_length": 38.9620253165, "max_line_length": 100, "alphanum_fraction": 0.7277452891, "include": true, "reason": "import numpy", "num_tokens": 693}
|
import os,sys
import imageio
import numpy as np
import argparse
import math
from models import ResNet18
import torchvision.transforms as transforms
import torch
import cv2 as cv
import glob as glob
from numpy import clip
# GPUID = 0
# os.environ["CUDA_VISIBLE_DEVICES"] = str(GPUID)
# print ("PACKAGES LOADED")
""" https://github.com/sundyCoder/IS_MS_SS """
def load_images(args, image_dir):
images = []
for fn in os.listdir(image_dir):
ext = os.path.splitext(fn)[1].lower()
img_path = os.path.join(image_dir, fn)
img = imageio.imread(img_path)
# calculate per-channel means and standard deviations
means = img.mean(axis=(0, 1), dtype='float64')
stds = img.std(axis=(0, 1), dtype='float64')
# per-channel standardization of pixels
pixels = (img - means) / stds
pixels = clip(pixels, -1.0, 1.0)
images.append(pixels)
return images
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x))
return e_x / np.expand_dims(e_x.sum(axis=1), axis=1) # only difference
def preds2score(preds, splits=10):
scores = []
for i in range(splits):
part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))
kl = np.mean(np.sum(kl, 1))
scores.append(np.exp(kl))
return np.mean(scores), np.std(scores)
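# The value computed above is the Inception Score per split:
#   IS = exp( E_x [ KL( p(y|x) || p(y) ) ] )
# where p(y|x) are the per-image class probabilities in 'part' and p(y) is their
# marginal over the split; higher scores indicate confident and diverse predictions.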
def get_inception_score(args, images, device):
splits = args.num_splits
inps = []
input_transform = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
for img in images:
img = img.astype(np.float32)
# print("shape before", img.shape)
# inps.append(np.expand_dims(img, 0))
inps.append(img)
# print("shape before", inps[-1].shape)
preds = []
n_batches = int(math.ceil(float(len(inps)) / float(args.batch_size)))
n_preds = 0
net = ResNet18().to(device)
net.load_state_dict(torch.load(args.model_dir))
# print("load model successfully")
for i in range(n_batches):
sys.stdout.write(".")
sys.stdout.flush()
inp = inps[(i * args.batch_size):min((i + 1) * args.batch_size, len(inps))]
# stack the (H, W) images into a (batch, H, W) array before adding the channel axis
inp = np.stack(inp, 0)
inp = np.expand_dims(inp, axis=1)
inp = torch.from_numpy(inp).to(device)
outputs = net(inp)
pred = outputs.data.tolist()
#pred = softmax(pred)
preds.append(pred)
n_preds += outputs.shape[0]
preds = np.concatenate(preds, 0)
preds = np.exp(preds) / np.sum(np.exp(preds), 1, keepdims=True)
mean_, std_ = preds2score(preds, splits)
return mean_, std_
def main(args):
# select GPU when available; get_inception_score expects an explicit device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
images = load_images(args, args.input_image_dir)
mean, std = get_inception_score(args, images, device)
print('\nInception mean: ', mean)
print('Inception std: ', std)
def crop10x10(in_path, out_path):
# mnist
x_cors = [2,32,62,92,122,152,182,212,242,272]
y_cors = [2,32,62,92,122,152,182,212,242,272]
img_size = 28
number_channel = 1
print(out_path)
if not os.path.exists(out_path):
os.makedirs(out_path)
in_list = glob.glob(in_path + "*.png")
count = 0
for img_name in in_list:
count += 1
if (number_channel == 1):
img = cv.imread(img_name, 0)
else:
img = cv.imread(img_name, 1)
for x in x_cors:
for y in y_cors:
img_crop = img[x:x + img_size, y:y + img_size]
#print(img_crop.shape)
if (number_channel == 1):
h, w = img_crop.shape
else:
h, w, c = img_crop.shape
if (h != img_size) or (w != img_size):
print("ERROR!!!")
exit()
out_name = out_path + str(count) + "_" + str(x) + "_" + str(y) + ".png"
#print(out_name)
cv.imwrite(out_name, img_crop)
if __name__ == '__main__':
in_path = "./data/"
out_path = in_path + "/crops/"
crop10x10(in_path, out_path)
parser = argparse.ArgumentParser()
parser.add_argument('--input_image_dir', default=out_path)
parser.add_argument('--model_dir', default="checkpoints/mnist_model_10.ckpt")
    parser.add_argument('--img_size', type=int, default=28)
    parser.add_argument('--batch_size', type=int, default=100)
    parser.add_argument('--channel', type=int, default=1)
    parser.add_argument('--num_splits', type=int, default=10)
args = parser.parse_args()
main(args)
'''
5gan:
avg:
Inception mean: 4.914344610788481
Inception std: 0.23576682113989486
mmd:
Inception mean: 5.176995156492626
Inception std: 0.2729022959078282
mdgan:
Inception mean: 6.790845002001021
Inception std: 0.24993071111665943
'''
|
{"hexsha": "2f31bdaaf6c0891051653f61430c35b57914daaa", "size": 4881, "ext": "py", "lang": "Python", "max_stars_repo_path": "MNIST_score.py", "max_stars_repo_name": "YilongJu/Implicit-Bias-towards-the-Kernel-RegimeCauses-Mode-Collapse-in-GANs", "max_stars_repo_head_hexsha": "983fcfde19c17b4d61223df8d7433c286db6b3db", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "MNIST_score.py", "max_issues_repo_name": "YilongJu/Implicit-Bias-towards-the-Kernel-RegimeCauses-Mode-Collapse-in-GANs", "max_issues_repo_head_hexsha": "983fcfde19c17b4d61223df8d7433c286db6b3db", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MNIST_score.py", "max_forks_repo_name": "YilongJu/Implicit-Bias-towards-the-Kernel-RegimeCauses-Mode-Collapse-in-GANs", "max_forks_repo_head_hexsha": "983fcfde19c17b4d61223df8d7433c286db6b3db", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.9019607843, "max_line_length": 92, "alphanum_fraction": 0.5992624462, "include": true, "reason": "import numpy,from numpy", "num_tokens": 1345}
|
# ## Animation
z = get_maximal_state(storage)
z = [[z[1] for t = 1:100]..., z..., [z[end] for t = 1:100]...]
T = length(z)
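# 0.01 s timestep => MeshCat.Animation(100), i.e. 100 frames per second playback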
anim = MeshCat.Animation(convert(Int, floor(1.0 / 0.01)))
build_robot(mech, vis=vis)
for t = 1:T
MeshCat.atframe(anim, t) do
set_robot(vis, mech, z[t])
end
end
MeshCat.setanimation!(vis, anim)
set_camera!(vis, cam_pos=[3,-0,0], zoom=0.84)
set_floor!(vis, x=0.2, y=10, z=0.02, color=RGBA(0,0,0,1))
# ## HMTL scene
render_static(vis)
open(joinpath(@__DIR__, "atlas_drop.html"), "w") do file
write(file, static_html(vis))
end
|
{"hexsha": "7296c3c4ce1357fc9c6894c762ed132b2a61b194", "size": 568, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/animations/generate/atlas_drop.jl", "max_stars_repo_name": "mcx/Dojo.jl", "max_stars_repo_head_hexsha": "0d54f76c3458f2eb979af0feb368c9c7b270100e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 70, "max_stars_repo_stars_event_min_datetime": "2022-03-02T01:28:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T15:14:51.000Z", "max_issues_repo_path": "examples/animations/generate/atlas_drop.jl", "max_issues_repo_name": "mcx/Dojo.jl", "max_issues_repo_head_hexsha": "0d54f76c3458f2eb979af0feb368c9c7b270100e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 35, "max_issues_repo_issues_event_min_datetime": "2022-03-02T06:58:54.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T17:17:26.000Z", "max_forks_repo_path": "examples/animations/generate/atlas_drop.jl", "max_forks_repo_name": "mcx/Dojo.jl", "max_forks_repo_head_hexsha": "0d54f76c3458f2eb979af0feb368c9c7b270100e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2022-03-07T01:47:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-12T19:13:07.000Z", "avg_line_length": 25.8181818182, "max_line_length": 62, "alphanum_fraction": 0.6338028169, "num_tokens": 213}
|
import logging
import items
import networkx as nx
import time
import socket
from typing import List
from collections import defaultdict
from nodes import BaseNode, ProcessSpec, SingleItemNode, NodeState
import basecases
from metrics import Metrics
log = logging.getLogger()
class ClusterBlueprint(object):
"""
Represents the cluster blueprint to create Clusters from
Useful for unit tests and designing test cases we want to operate on.
"""
def __init__(self, nodes: List[SingleItemNode]):
# Set of nodes to be used by this cluster
self.nodes = nodes
def _is_port_available(ip, port):
'''
HACK [last-minute]
returns true if the port is available. Will be used by the
publisher thread to bind to publish messages to the subscribers
of zmq
'''
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.bind((ip, port))
        return True
    except Exception:
        return False
    finally:
        # release the probe socket even when bind() raises
        s.close()
class Cluster(object):
"""
Represents the set of nodes interacting
"""
def __init__(self, blueprint, port_range_start=30000):
self.blueprint = blueprint
self.nodes = self.blueprint.nodes
self.node_ids_to_nodes = {node.node_id: node for node in self.nodes}
self.process_specs = None
self.init_process_specs(port_range_start)
def get_distinct_item_types_mapping(self):
item_types = set([node.dependency.get_result_item_type() for node in self.nodes])
item_type_to_id = {item_type: idx for idx, item_type in enumerate(item_types)}
return item_type_to_id
def init_process_specs(self, port_range_start: int):
# assign a process name and port to process
self.process_specs = {}
port = port_range_start
for node in self.nodes:
while not _is_port_available('127.0.0.1', port):
# keep loooking for an open port
port += 1
self.process_specs[node.node_id] = ProcessSpec(
'process-{}'.format(node.node_id),
port,
)
port += 1
def update_deps(self, node_id: int, new_dependency: items.ItemDependency):
for idx in range(len(self.nodes)):
if self.nodes[idx].node_id == node_id:
self.nodes[idx].dependency = new_dependency
def get_node_process_spec(self, node_id: int):
'''
returns the process metadata object for a given node ID
'''
return self.process_specs[node_id]
def __repr__(self):
return 'Cluster:\n\tNodes: {}\n\tProcesses: {}'.format(self.nodes, self.process_specs.values())
class ClusterWideFlow(object):
"""
Object storing details of the cluster wide flow
All nodes should interact e.g. get requests through this interface
"""
def __init__(self, nodes: List[SingleItemNode]):
self.nodes = nodes
self.node_ids = [n.node_id for n in nodes]
self.outgoing_flows = {n: [] for n in self.node_ids}
self.incoming_flows = {n: [] for n in self.node_ids}
def addNode(self, node_id):
if node_id not in self.node_ids:
self.node_ids.append(node_id)
self.outgoing_flows[node_id] = []
self.incoming_flows[node_id] = []
    def removeNode(self, node_id):
        if node_id in self.node_ids:
            self.node_ids.remove(node_id)
            self.outgoing_flows.pop(node_id)
            self.incoming_flows.pop(node_id)
            # also drop any edges in the remaining adjacency lists that still
            # reference the removed node (the original iterated over the dict
            # keys, i.e. plain node ids, instead of the edge lists)
            for flows in self.outgoing_flows.values():
                for tup in list(flows):
                    if node_id in tup:
                        flows.remove(tup)
            for flows in self.incoming_flows.values():
                for tup in list(flows):
                    if node_id in tup:
                        flows.remove(tup)
def addFlow(self, source, dst, item: items.ItemReq):
assert (source in self.node_ids) and (dst in self.node_ids), "Source: {}, Dst: {}".format(source, dst)
self.outgoing_flows[source].append((dst, item))
self.incoming_flows[dst].append((source, item))
def getOutgoingFlowsForNode(self, node_id):
"""
Outgoing flows i.e. nodes to which node_id is supposed to give items
"""
if node_id not in self.outgoing_flows:
log.error("Node %d's outgoing edges not found in cluster flow", node_id)
return {}
return self.outgoing_flows[node_id]
def getIncomingFlowsForNode(self, node_id):
"""
Incoming flows i.e. nodes from which node_id is supposed to recieve items
"""
if node_id not in self.incoming_flows:
log.error("Node %d's incoming edges not found in cluster flow", node_id)
return {}
return self.incoming_flows[node_id]
def clearAll(self):
self.node_ids = []
self.outgoing_flows = {}
self.incoming_flows = {}
def __repr__(self):
return "ClusterWideFlow(\n\tincoming edges:{}\n\toutgoing edges:{}\n\tnode IDs: {}\n)".format(
self.incoming_flows, self.outgoing_flows, self.node_ids,
)
def get_networkx_graph_repr(self):
"""
Returns a representation of self in the form of a networkx graph object
Each graph represents the outgoing flow of the node
"""
graph = nx.DiGraph()
graph.add_nodes_from(self.node_ids)
for n0 in self.node_ids:
for edge in self.outgoing_flows[n0]:
n1, item_req = edge
graph.add_edge(n0, n1, item_req=item_req)
return graph
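def _example_inspect_flow_graph(cluster_flow):
    """
    Illustrative helper (an addition, not used elsewhere in this module): once
    the flow is exported to networkx, standard graph queries become available,
    e.g. checking for cycles and deriving a production order.
    """
    graph = cluster_flow.get_networkx_graph_repr()
    is_dag = nx.is_directed_acyclic_graph(graph)
    # topological_sort is only defined for acyclic graphs
    order = list(nx.topological_sort(graph)) if is_dag else []
    return is_dag, order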
def _bootstrap_all_paths(nodes: List[SingleItemNode]):
"""
Create a flow with all possible dependency paths
"""
cluster_flow = ClusterWideFlow(nodes)
# Iterate over all nodes
for node_input in cluster_flow.nodes:
# Iterate over their input requirements
for input_req in node_input.dependency.input_item_reqs:
# Iterate over all nodes
for node_output in cluster_flow.nodes:
# Iterate over their output requirements
result_req = node_output.dependency.result_item_req
# add flow if item type in result = item type in input
if result_req and input_req and result_req.item.type == input_req.item.type:
cluster_flow.addFlow(node_output.node_id, node_input.node_id, result_req)
return cluster_flow
def output_possible_path(cluster_flow: ClusterWideFlow, start_node_id, end_node_id, path=None):
    """
    Find one possible path for a given ClusterWideFlow. It is a recursive depth-first algorithm
    that tries to find the start_node to validate whether a given path is valid.
    """
    # Avoid a mutable default argument: a shared default list would keep
    # accumulating edges across successive calls.
    if path is None:
        path = []
# This is the return function -> when arriving at the starting node.
if (start_node_id == end_node_id):
return [(start_node_id, start_node_id)]
requirements = [] # Stores only ids of the required item types.
end_node = next((x for x in cluster_flow.nodes if x.node_id == end_node_id), None) # find the end node based off of its id
if end_node:
for input in end_node.dependency.input_item_reqs:
requirements.append(input.item.type)
# Loop over all incoming edges
for incoming in cluster_flow.incoming_flows[end_node_id]:
node = next((x for x in cluster_flow.nodes if x.node_id == incoming[0]), None)
# Only check node out if its type is in requirements, i.e. some items have already been delivered.
if node and node.dependency.result_item_req.item.type in requirements:
# Recursive function starts here -> end_node is changed to current node.
new_path = output_possible_path(cluster_flow, start_node_id, node.node_id, path)
boolean = [item for item in new_path if item[0] == start_node_id] # Check if start_node in path
# If start_node in path, the path is viable -> append it to the path + remove the item type from the requirements
if boolean:
path.append((node.node_id, end_node_id))
requirements.remove(node.dependency.result_item_req.item.type)
if requirements: # If still some item types in the requirements, path is not viable
path = []
return path
def bootstrap_flow(nodes: List[SingleItemNode], metrics, node_id):
"""
Create a flow with a possible path
metrics: metrics object from the calling node
node_id: node ID of the calling node
"""
start_time = time.time()
# Create a cluster_flow with all possible paths first
log.debug("Bootstrapping flow has started. Will calculate time taken.")
cluster_flow = _bootstrap_all_paths(nodes)
metrics.set_metric(node_id, "bootstrap_all_paths_time_sec", time.time() - start_time)
start_node = nodes[0].node_id
end_node = nodes[len(nodes)-1].node_id
log.debug("Cluster flow with all possible paths created: {}, time taken: {}"
.format(cluster_flow, time.time() - start_time))
# Create a new ClusterWideFlow object containing only one possible paths.
cluster_flow_final = ClusterWideFlow(nodes)
# Output one possible path
possible_path = output_possible_path(cluster_flow, start_node, end_node)
possible_path_set = list(set(possible_path))
# Add all edges to ClusterWideFlow object
for edge in possible_path_set:
node = next((x for x in cluster_flow.nodes if x.node_id == edge[0]), None)
cluster_flow_final.addFlow(
edge[0], edge[1], node.dependency.result_item_req
)
metrics.set_metric(node_id, "full_flow_bootstrap_time_sec", time.time() - start_time)
log.debug("Cluster flow with one specific path created: {}, time taken final: {}"
.format(cluster_flow_final, time.time() - start_time))
return cluster_flow_final
def bootstrap_flow_with_active_nodes(nodes: List[SingleItemNode], metrics, node_id):
"""
Create a flow with a possible path
"""
active_nodes = [active_node for active_node in nodes if active_node.state == NodeState.active]
return bootstrap_flow(active_nodes, metrics, node_id)
# for testing purposes
def _test():
number_types = 10
complexity = "medium"
nodes_per_type = 10
demo_nodes = basecases.bootstrap_random_dag(number_types, complexity, nodes_per_type)
# demo_nodes = basecases.bootstrap_demo()
print("Nodes have been created.")
print(demo_nodes)
cluster_flow_obj = bootstrap_flow(demo_nodes, Metrics("flow-test"), 98989898)
print("ClusterWideFlow Object has created.")
print(cluster_flow_obj)
return cluster_flow_obj
if __name__ == "__main__":
_test()
|
{"hexsha": "12b837d99266e409a75ca4c8384dfd483e26fd82", "size": 10748, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/cluster.py", "max_stars_repo_name": "raokrutarth/manufacturing-os", "max_stars_repo_head_hexsha": "3186d5a2f1add5bd87b728056990613f5594148f", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-04-30T05:35:50.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-30T05:35:50.000Z", "max_issues_repo_path": "src/cluster.py", "max_issues_repo_name": "raokrutarth/manufacturing-os", "max_issues_repo_head_hexsha": "3186d5a2f1add5bd87b728056990613f5594148f", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": 107, "max_issues_repo_issues_event_min_datetime": "2020-04-21T02:22:38.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-08T21:47:28.000Z", "max_forks_repo_path": "src/cluster.py", "max_forks_repo_name": "raokrutarth/manufacturing-os", "max_forks_repo_head_hexsha": "3186d5a2f1add5bd87b728056990613f5594148f", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.067114094, "max_line_length": 127, "alphanum_fraction": 0.6574246371, "include": true, "reason": "import networkx", "num_tokens": 2391}
|
import pandas as pd
import codecs
import matplotlib.pyplot as plt
import numpy as np
import odf
import math
import pdfkit
from uncertainties import ufloat, ufloat_fromstr
from sklearn.linear_model import LinearRegression
from lmfit.models import LorentzianModel
from IPython.display import display, Latex
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
X = 'Frequency [$kHz$]'
Y= 'Amplitude [$mV$]'
f = codecs.open('data11.txt').read().replace('\t', ',')
i = open('new.txt', "w")
i.write(f)
i.close()
p1 = pd.read_csv('new.txt', names=[X, Y])
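# Note (an addition, not in the original): pandas can parse the tab-separated
# file directly, which would make the intermediate new.txt unnecessary:
p1_direct = pd.read_csv('data11.txt', sep='\t', names=[X, Y])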
|
{"hexsha": "cd6493cc4cb724e4bc61dc5a516f9a91cc3ee4c0", "size": 618, "ext": "py", "lang": "Python", "max_stars_repo_path": "Advanced Physics Lab I/Ultrasonic Waves/test.py", "max_stars_repo_name": "n-khatiwada/Advanced-Physics-Lab", "max_stars_repo_head_hexsha": "61afdcecac37a5ed8ab94bd5b2ca971843dae32f", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Advanced Physics Lab I/Ultrasonic Waves/test.py", "max_issues_repo_name": "n-khatiwada/Advanced-Physics-Lab", "max_issues_repo_head_hexsha": "61afdcecac37a5ed8ab94bd5b2ca971843dae32f", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Advanced Physics Lab I/Ultrasonic Waves/test.py", "max_forks_repo_name": "n-khatiwada/Advanced-Physics-Lab", "max_forks_repo_head_hexsha": "61afdcecac37a5ed8ab94bd5b2ca971843dae32f", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.72, "max_line_length": 55, "alphanum_fraction": 0.7459546926, "include": true, "reason": "import numpy", "num_tokens": 168}
|
##########################################################################
# NSAp - Copyright (C) CEA, 2016
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
Mocking Popen directly - need to construct a Mock to return, and adjust its
communicate() return_value.
The benefit of this approach is in not needing to do the strip/split on your
fake return string.
"""
# System import
import unittest
import sys
import copy
import numpy
# COMPATIBILITY: since python 3.3 mock is included in unittest module
python_version = sys.version_info
if python_version[:2] <= (3, 3):
import mock
from mock import patch
else:
import unittest.mock as mock
from unittest.mock import patch
# pyConnectomist import
from pyconnectomist.exceptions import ConnectomistBadManufacturerNameError
from pyconnectomist.exceptions import ConnectomistBadFileError
from pyconnectomist.exceptions import ConnectomistError
from pyconnectomist.preproc.qspace import data_import_and_qspace_sampling
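# A minimal, self-contained sketch (an addition, not part of the original test
# suite) of the pattern described in the module docstring: build a Mock whose
# communicate() returns canned output and hand it back from the patched Popen.
def _example_popen_mock():
    import subprocess
    with patch("subprocess.Popen") as popen_cls:
        process = mock.Mock()
        process.configure_mock(**{
            "communicate.return_value": ("fake stdout", "fake stderr"),
            "returncode": 0,
        })
        popen_cls.return_value = process
        proc = subprocess.Popen(["echo", "hello"])  # returns the Mock above
        out, err = proc.communicate()               # no real process is spawned
        return out, err, proc.returncode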
class ConnectomistQspace(unittest.TestCase):
""" Test the Connectomist 'DWI & Q-space' tab:
'pyconnectomist.preproc.qspace.data_import_and_qspace_sampling'
"""
def setUp(self):
""" Run before each test - the mock_popen will be available and in the
right state in every test<something> function.
"""
# Mocking popen
self.popen_patcher = patch("pyconnectomist.wrappers.subprocess.Popen")
self.mock_popen = self.popen_patcher.start()
mock_process = mock.Mock()
attrs = {
"communicate.return_value": ("mock_OK", "mock_NONE"),
"returncode": 0
}
mock_process.configure_mock(**attrs)
self.mock_popen.return_value = mock_process
self.kwargs = {
"dwis": ["/my/path/mock_dwi.nii.gz"],
"bvals": ["/my/path/mock_dwi.bval"],
"bvecs": ["/my/path/mock_dwi.bvec"],
"outdir": "/my/path/mock_outdir",
"subject_id": "Lola",
"b0_magnitude": "/my/path/mock_b0_magnitude",
"b0_phase": "/my/path/mock_b0_phase",
"path_connectomist": "/my/path/mock_connectomist",
"flipX": False,
"flipY": False,
"flipZ": False,
"invertX": True,
"invertY": False,
"invertZ": False,
"manufacturer": "Siemens",
"phase_axis": "y",
"slice_axis": "x",
"subject_id": "Lola"
}
self.bvecs = numpy.array([[0, 0, 0], [0, 0, 0], [1, 0, 0], [0, 1, 0]])
self.bvals = numpy.array([0, 0, 1500, 1500])
def tearDown(self):
""" Run after each test.
"""
self.popen_patcher.stop()
def test_badaxis_raise(self):
""" A wrong slice or phase axis -> raise ValueError.
"""
# Test execution
wrong_kwargs = copy.copy(self.kwargs)
wrong_kwargs["phase_axis"] = "WRONG"
self.assertRaises(ValueError,
data_import_and_qspace_sampling, **wrong_kwargs)
def test_badfileerror_raise(self):
""" A wrong input -> raise ConnectomistBadFileError.
"""
# Test execution
self.assertRaises(ConnectomistBadFileError,
data_import_and_qspace_sampling, **self.kwargs)
def test_badinputlength_raise(self):
""" A wrong input length -> raise ValueError.
"""
# Test execution
wrong_kwargs = copy.copy(self.kwargs)
wrong_kwargs["bvals"] = []
self.assertRaises(ValueError,
data_import_and_qspace_sampling, **wrong_kwargs)
@mock.patch("pyconnectomist.preproc.qspace.ptk_nifti_to_gis")
@mock.patch("os.path")
@mock.patch("shutil.copyfile")
@mock.patch("os.mkdir")
def test_badmanufacturer_raise(self, mock_mkdir, mock_copyfile, mock_path,
mock_conversion):
""" A wrong manufacturer -> raise ConnectomistBadManufacturerNameError.
"""
# Set the mocked functions returned values
mock_path.isfile.side_effect = [True] * 5 + [False]
mock_conversion.side_effect = lambda *x: x[-1]
mock_path.join.side_effect = lambda *x: x[0] + "/" + x[1]
# Test execution
wrong_kwargs = copy.copy(self.kwargs)
wrong_kwargs["manufacturer"] = "WRONG"
self.assertRaises(ConnectomistBadManufacturerNameError,
data_import_and_qspace_sampling, **wrong_kwargs)
@mock.patch("numpy.savetxt")
@mock.patch("pyconnectomist.preproc.qspace.ConnectomistWrapper."
"_connectomist_version_check")
@mock.patch("pyconnectomist.preproc.qspace.ConnectomistWrapper."
"create_parameter_file")
@mock.patch("pyconnectomist.preproc.qspace.read_bvals_bvecs")
@mock.patch("pyconnectomist.preproc.qspace.ptk_nifti_to_gis")
@mock.patch("os.path")
@mock.patch("shutil.copyfile")
@mock.patch("os.mkdir")
def test_normal_execution(self, mock_mkdir, mock_copyfile, mock_path,
mock_conversion, mock_bvecs, mock_params,
mock_version, mock_savetxt):
""" Test the normal behaviour of the function.
"""
# Set the mocked functions returned values
mock_path.isdir.return_value = False
mock_path.isfile.side_effect = [True] * 8 + [False]
mock_path.join.side_effect = lambda *x: x[0] + "/" + x[1]
mock_conversion.side_effect = lambda *x: x[-1]
mock_bvecs.return_value = (self.bvals, self.bvecs, 1, 2)
mock_params.return_value = "/my/path/mock_parameters"
# Test execution
outdir = data_import_and_qspace_sampling(**self.kwargs)
self.assertEqual(outdir, self.kwargs["outdir"])
self.assertTrue(
[mock.call(self.kwargs["outdir"])] == mock_mkdir.call_args_list)
expected_copyfiles = [
mock.call(self.kwargs["bvals"][0],
self.kwargs["outdir"] + "/" + "dwi.bval"),
mock.call(self.kwargs["bvecs"][0],
self.kwargs["outdir"] + "/" + "dwi.bvec")]
self.assertTrue(expected_copyfiles == mock_copyfile.call_args_list)
expected_conversions = [
mock.call(self.kwargs["dwis"][0],
self.kwargs["outdir"] + "/" + "dwi.ima"),
mock.call(self.kwargs["b0_magnitude"],
self.kwargs["outdir"] + "/" + "b0_magnitude.ima"),
mock.call(self.kwargs["b0_phase"],
self.kwargs["outdir"] + "/" + "b0_phase.ima")]
self.assertTrue(expected_conversions == mock_conversion.call_args_list)
self.assertTrue([
mock.call([self.kwargs["outdir"] + "/" + "dwi.bval"],
[self.kwargs["outdir"] + "/" + "dwi.bvec"])] ==
mock_bvecs.call_args_list)
self.assertTrue(len(mock_params.call_args_list) == 1)
expected_saves = [
mock.call(self.kwargs["outdir"] + "/" + "dwi.bval", self.bvals),
mock.call(self.kwargs["outdir"] + "/" + "dwi.bvec", self.bvecs)]
self.assertTrue(expected_saves, mock_savetxt.call_args_list)
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "2da7e42c3f95a51412946144246f6c479020f15e", "size": 7500, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyconnectomist/tests/tests_preproc/test_qspace.py", "max_stars_repo_name": "neurospin/pyconnectomist", "max_stars_repo_head_hexsha": "81881fd88d94b3c2bd401602783261a64e818a05", "max_stars_repo_licenses": ["CECILL-B"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-05-24T13:02:41.000Z", "max_stars_repo_stars_event_max_datetime": "2018-05-24T13:02:41.000Z", "max_issues_repo_path": "pyconnectomist/tests/tests_preproc/test_qspace.py", "max_issues_repo_name": "neurospin/pyconnectomist", "max_issues_repo_head_hexsha": "81881fd88d94b3c2bd401602783261a64e818a05", "max_issues_repo_licenses": ["CECILL-B"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyconnectomist/tests/tests_preproc/test_qspace.py", "max_forks_repo_name": "neurospin/pyconnectomist", "max_forks_repo_head_hexsha": "81881fd88d94b3c2bd401602783261a64e818a05", "max_forks_repo_licenses": ["CECILL-B"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2018-06-25T14:20:34.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-02T14:43:38.000Z", "avg_line_length": 41.2087912088, "max_line_length": 79, "alphanum_fraction": 0.6033333333, "include": true, "reason": "import numpy", "num_tokens": 1743}
|
from pandas.core.indexes.datetimes import date_range
import frappe
from frappe.utils import nowdate, add_to_date, cstr, cint, getdate
import itertools
import pandas as pd
import numpy as np
import time
from frappe import _
import json
import multiprocessing
import os
from multiprocessing.pool import ThreadPool as Pool
from itertools import product
from one_fm.api.notification import create_notification_log
@frappe.whitelist(allow_guest=True)
def get_staff(assigned=1, employee_id=None, employee_name=None, company=None, project=None, site=None, shift=None, department=None, designation=None):
    date = cstr(add_to_date(nowdate(), days=1))
    # Build the optional filters as parameterized conditions so that user
    # supplied values are never formatted directly into the SQL string.
    conds = ""
    values = {}
    if employee_name:
        conds += 'and emp.employee_name=%(employee_name)s '
        values['employee_name'] = employee_name
    if department:
        conds += 'and emp.department=%(department)s '
        values['department'] = department
    if designation:
        conds += 'and emp.designation=%(designation)s '
        values['designation'] = designation
    if company:
        conds += 'and emp.company=%(company)s '
        values['company'] = company
    if project:
        conds += 'and emp.project=%(project)s '
        values['project'] = project
    if site:
        conds += 'and emp.site=%(site)s '
        values['site'] = site
    if shift:
        # filter on the employee's shift field (the original compared emp.name)
        conds += 'and emp.shift=%(shift)s '
        values['shift'] = shift
    if not cint(assigned):
        data = frappe.db.sql("""
            select
                distinct emp.name, emp.employee_id, emp.employee_name, emp.image, emp.one_fm_nationality as nationality, usr.mobile_no, usr.name as email, emp.designation, emp.department, emp.project
            from `tabEmployee` as emp, `tabUser` as usr
            where
                emp.project is NULL
                and emp.site is NULL
                and emp.shift is NULL
                and emp.user_id=usr.name
                {conds}
        """.format(conds=conds), values, as_dict=1)
        return data
    data = frappe.db.sql("""
        select
            distinct emp.name, emp.employee_id, emp.employee_name, emp.image, emp.one_fm_nationality as nationality, usr.mobile_no, usr.name as email, emp.designation, emp.department, emp.shift, emp.site, emp.project
        from `tabEmployee` as emp, `tabUser` as usr
        where
            emp.project is not NULL
            and emp.site is not NULL
            and emp.shift is not NULL
            and emp.user_id=usr.name
            {conds}
    """.format(conds=conds), values, as_dict=1)
    return data
@frappe.whitelist(allow_guest=True)
def get_staff_filters_data():
company = frappe.get_list("Company", limit_page_length=9999, order_by="name asc")
projects = frappe.get_list("Project", limit_page_length=9999, order_by="name asc")
sites = frappe.get_list("Operations Site", limit_page_length=9999, order_by="name asc")
shifts = frappe.get_list("Operations Shift", limit_page_length=9999, order_by="name asc")
departments = frappe.get_list("Department", limit_page_length=9999, order_by="name asc")
designations = frappe.get_list("Designation", limit_page_length=9999, order_by="name asc")
return {
"company": company,
"projects": projects,
"sites": sites,
"shifts": shifts,
"departments": departments,
"designations": designations
}
@frappe.whitelist()
def get_roster_view(start_date, end_date, assigned=0, scheduled=0, employee_search_id=None, employee_search_name=None, project=None, site=None, shift=None, department=None, post_type=None, designation=None, isOt=None, limit_start=0, limit_page_length=100):
start = time.time()
master_data, formatted_employee_data, post_count_data, employee_filters, additional_assignment_filters={}, {}, {}, {}, {}
post_types_list = []
employees = []
filters = {
'date': ['between', (start_date, end_date)]
}
if post_type:
filters.update({'post_type': post_type})
if employee_search_id:
employee_filters.update({'employee_id': employee_search_id})
if employee_search_name:
employee_filters.update({'employee_name': ("like", "%" + employee_search_name + "%")})
additional_assignment_filters.update({'employee_name': ("like", "%" + employee_search_name + "%")})
if project:
employee_filters.update({'project': project})
additional_assignment_filters.update({'project': project})
if site:
employee_filters.update({'site': site})
additional_assignment_filters.update({'site': site})
if shift:
employee_filters.update({'shift': shift})
additional_assignment_filters.update({'shift': shift})
if department:
employee_filters.update({'department': department})
#--------------------- Fetch Employee list ----------------------------#
if isOt:
employee_filters.update({'employee_availability' : 'Working'})
employees = frappe.db.get_list("Employee Schedule", employee_filters, ["distinct employee", "employee_name"], order_by="employee_name asc" ,limit_start=limit_start, limit_page_length=limit_page_length, ignore_permissions=True)
master_data.update({'total' : len(employees)})
employee_filters.update({'date': ['between', (start_date, end_date)], 'post_status': 'Planned'})
employee_filters.pop('employee_availability')
else:
employee_filters.update({'status': 'Active'})
if designation:
employee_filters.update({'designation' : designation})
employees = frappe.db.get_list("Employee", employee_filters, ["employee", "employee_name"], order_by="employee_name asc" ,limit_start=limit_start, limit_page_length=limit_page_length, ignore_permissions=True)
employees_asa = frappe.db.get_list("Additional Shift Assignment", additional_assignment_filters, ["distinct employee", "employee_name"], order_by="employee_name asc" ,limit_start=limit_start, limit_page_length=limit_page_length, ignore_permissions=True)
if len(employees_asa) > 0:
employees.extend(employees_asa)
employees = filter_redundant_employees(employees)
master_data.update({'total': len(employees)})
employee_filters.pop('status', None)
employee_filters.update({'date': ['between', (start_date, end_date)], 'post_status': 'Planned'})
if employee_search_name:
employee_filters.pop('employee_name')
if employee_search_id:
employee_filters.pop('employee_id')
if department:
employee_filters.pop('department', None)
if post_type:
employee_filters.update({'post_type': post_type})
if designation:
employee_filters.pop('designation', None)
#------------------- Fetch post types ------------------------#
post_types_list = frappe.db.get_list("Post Schedule", employee_filters, ["distinct post_type", "post_abbrv"], ignore_permissions=True)
if post_type:
employee_filters.pop('post_type', None)
employee_filters.pop('date')
employee_filters.pop('post_status')
#------------------- Fetch Employee Schedule --------------------#
for key, group in itertools.groupby(employees, key=lambda x: (x['employee'], x['employee_name'])):
filters.update({'date': ['between', (start_date, end_date)], 'employee': key[0]})
if isOt:
filters.update({'roster_type' : 'Over-Time'})
schedules = frappe.db.get_list("Employee Schedule",filters, ["employee", "employee_name", "date", "post_type", "post_abbrv", "shift", "roster_type", "employee_availability", "day_off_ot"], order_by="date asc, employee_name asc", ignore_permissions=True)
if isOt:
filters.pop("roster_type", None)
attendances = frappe.db.get_list("Attendance", {'attendance_date': ['between', (start_date, add_to_date(cstr(getdate()), days=-1))], 'employee': key[0]}, ["status", "attendance_date"], ignore_permissions=True)
schedule_list = []
schedule = {}
default_shift = frappe.db.get_value("Employee", {'employee': key[0]}, ["shift"])
for date in pd.date_range(start=start_date, end=end_date):
if date < getdate() and any(cstr(attendance.attendance_date) == cstr(date).split(" ")[0] for attendance in attendances):
attendance = next((attendance for attendance in attendances if cstr(attendance.attendance_date) == cstr(date).split(" ")[0]), {})
schedule = {
'employee': key[0],
'employee_name': key[1],
'date': cstr(date).split(" ")[0],
'attendance': attendance.status
}
elif not any(cstr(schedule.date) == cstr(date).split(" ")[0] for schedule in schedules):
schedule = {
'employee': key[0],
'employee_name': key[1],
'date': cstr(date).split(" ")[0]
}
else:
schedule = next((sch for sch in schedules if cstr(sch.date) == cstr(date).split(" ")[0]), {})
if schedule.shift and schedule.shift != default_shift:
schedule.update({'asa': default_shift})
schedule_list.append(schedule)
formatted_employee_data.update({key[1]: schedule_list})
master_data.update({'employees_data': formatted_employee_data})
#----------------- Get post type count and check fill status -------------------#
for key, group in itertools.groupby(post_types_list, key=lambda x: (x['post_abbrv'], x['post_type'])):
post_list = []
post_filters = employee_filters
post_filters.update({'date': ['between', (start_date, end_date)], 'post_type': key[1]})
post_filled_count = frappe.db.get_list("Employee Schedule",["name", "employee", "date"] ,{'date': ['between', (start_date, end_date)],'post_type': key[1] }, order_by="date asc", ignore_permissions=True)
post_filters.update({"post_status": "Planned"})
post_schedule_count = frappe.db.get_list("Post Schedule", ["name", "date"], post_filters, ignore_permissions=True)
post_filters.pop("post_status", None)
for date in pd.date_range(start=start_date, end=end_date):
filled_schedule = sum(frappe.utils.cstr(x.date) == cstr(date.date()) for x in post_filled_count)
filled_post = sum(frappe.utils.cstr(x.date) == cstr(date.date()) for x in post_schedule_count)
count = cstr(filled_schedule)+"/"+cstr(filled_post)
highlight = "bggreen"
if filled_schedule > filled_post:
highlight = "bgyellow"
elif filled_schedule < filled_post:
highlight = "bgred"
post_list.append({'count': count, 'post_type': key[0], 'date': cstr(date).split(" ")[0], 'highlight': highlight})
post_count_data.update({key[0]: post_list })
master_data.update({'post_types_data': post_count_data})
end = time.time()
print("[[[[[[]]]]]]]", end-start)
return master_data
def filter_redundant_employees(employees):
return list({employee['employee']:employee for employee in employees}.values())
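# Example (illustrative, not part of the original code): merging the Employee and
# Additional Shift Assignment lists can yield duplicates, which collapse to one
# entry per employee id, keeping the last occurrence:
#   filter_redundant_employees([{'employee': 'EMP-01', 'employee_name': 'A'},
#                               {'employee': 'EMP-01', 'employee_name': 'A'}])
#   -> [{'employee': 'EMP-01', 'employee_name': 'A'}]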
@frappe.whitelist(allow_guest=True)
def get_post_view(start_date, end_date, project=None, site=None, shift=None, post_type=None, active_posts=1, limit_start=0, limit_page_length=100):
user, user_roles, user_employee = get_current_user_details()
if "Operations Manager" not in user_roles and "Projects Manager" not in user_roles and "Site Supervisor" not in user_roles:
frappe.throw(_("Insufficient permissions for Post View."))
filters, master_data, post_data = {}, {}, {}
if project:
filters.update({'project': project})
if site:
filters.update({'site': site})
if shift:
filters.update({'site_shift': shift})
if post_type:
filters.update({'post_template': post_type})
post_total = len(frappe.db.get_list("Operations Post", filters))
post_list = frappe.db.get_list("Operations Post", filters, "name", order_by="name asc", limit_start=limit_start, limit_page_length=limit_page_length)
fields = ['name', 'post', 'post_type','date', 'post_status', 'site', 'shift', 'project']
filters.pop('post_template', None)
filters.pop('site_shift', None)
if post_type:
filters.update({'post_type': post_type})
if shift:
filters.update({'shift': shift})
for key, group in itertools.groupby(post_list, key=lambda x: (x['name'])):
schedule_list = []
filters.update({'date': ['between', (start_date, end_date)], 'post': key})
schedules = frappe.db.get_list("Post Schedule", filters, fields, order_by="date asc, post asc")
for date in pd.date_range(start=start_date, end=end_date):
if not any(cstr(schedule.date) == cstr(date).split(" ")[0] for schedule in schedules):
schedule = {
'post': key,
'date': cstr(date).split(" ")[0]
}
else:
schedule = next((sch for sch in schedules if cstr(sch.date) == cstr(date).split(" ")[0]), {})
schedule_list.append(schedule)
post_data.update({key: schedule_list})
master_data.update({"post_data": post_data, "total": post_total})
return master_data
@frappe.whitelist()
def get_filtered_post_types(doctype, txt, searchfield, start, page_len, filters):
    shift = filters.get('shift')
    # Parameterize the query rather than formatting the value into the SQL string.
    return frappe.db.sql("""
        select distinct post_template
        from `tabOperations Post`
        where site_shift=%(shift)s
    """, {'shift': shift})
def get_current_user_details():
user = frappe.session.user
user_roles = frappe.get_roles(user)
user_employee = frappe.get_value("Employee", {"user_id": user}, ["name", "employee_id", "employee_name", "image", "enrolled", "designation"], as_dict=1)
return user, user_roles, user_employee
@frappe.whitelist()
def schedule_staff(employees, shift, post_type, otRoster, start_date, project_end_date, keep_days_off, request_employee_schedule, day_off_ot=None, end_date=None):
validation_logs = []
user, user_roles, user_employee = get_current_user_details()
if cint(project_end_date) and not end_date:
project = frappe.db.get_value("Operations Shift", shift, ["project"])
if frappe.db.exists("Contracts", {'project': project}):
contract, end_date = frappe.db.get_value("Contracts", {'project': project}, ["name", "end_date"])
if not end_date:
validation_logs.append("Please set contract end date for contract: {contract}".format(contract=contract))
else:
validation_logs.append("No contract linked with project {project}".format(project=project))
elif end_date and not cint(project_end_date):
end_date = end_date
elif not cint(project_end_date) and not end_date:
validation_logs.append("Please set an end date for scheduling the staff.")
elif cint(project_end_date) and end_date:
validation_logs.append("Please select either the project end date or set a custom date. You cannot set both!")
if not cint(request_employee_schedule):
if "Projects Manager" not in user_roles and "Operations Manager" not in user_roles:
for emp in json.loads(employees):
for date in pd.date_range(start=start_date, end=end_date):
shift_es = frappe.db.get_value("Employee Schedule", {'employee': emp, 'employee_availability': 'Working', 'date': date}, ["shift"])
if shift_es:
supervisor = frappe.db.get_value("Operations Shift", shift_es, ["supervisor"])
if supervisor and user_employee.name != supervisor:
validation_logs.append("You are not authorized to change this schedule. Please check the Request Employee Schedule option to place a request.")
break
else:
continue
break
if len(validation_logs) > 0:
        frappe.log_error(validation_logs)
        frappe.throw(validation_logs)
else:
import time
try:
start = time.time()
for employee in json.loads(employees):
if not cint(request_employee_schedule):
frappe.enqueue(schedule, employee=employee, start_date=start_date, end_date=end_date, shift=shift, post_type=post_type, otRoster=otRoster, keep_days_off=keep_days_off, day_off_ot=day_off_ot, is_async=True, queue='long')
else:
from_schedule = frappe.db.sql("""select shift, post_type from `tabEmployee Schedule` where shift!= %(shift)s and date >= %(start_date)s and date <= %(end_date)s and employee = %(employee)s""",{
'shift' : shift,
'start_date': start_date,
'end_date': end_date,
'employee': employee
}, as_dict=1)
if len(from_schedule) > 0:
from_shift = from_schedule[0].shift
from_post_type = from_schedule[0].post_type
frappe.enqueue(create_request_employee_schedule, employee=employee, from_shift=from_shift, from_post_type=from_post_type, to_shift=shift, to_post_type=post_type, otRoster=otRoster, start_date=start_date, end_date=end_date, is_async=True, queue='long')
else:
frappe.throw("This employee is not scheduled. Please uncheck Request Employee Schedule option.")
frappe.log_error("This employee is not scheduled. Please uncheck Request Employee Schedule option.")
frappe.enqueue(update_roster, key="roster_view", is_async=True, queue='long')
end = time.time()
print("[TOTAL]", end-start)
return True
except Exception as e:
frappe.log_error(e)
frappe.throw(_(e))
def create_request_employee_schedule(employee, from_shift, from_post_type, to_shift, to_post_type, otRoster, start_date, end_date):
if otRoster == 'false':
roster_type = 'Basic'
elif otRoster == 'true':
roster_type = 'Over-Time'
req_es_doc = frappe.new_doc("Request Employee Schedule")
req_es_doc.employee = employee
req_es_doc.from_shift = from_shift
req_es_doc.from_post_type = from_post_type
req_es_doc.to_shift = to_shift
req_es_doc.to_post_type = to_post_type
req_es_doc.start_date = start_date
req_es_doc.end_date = end_date
req_es_doc.roster_type = roster_type
req_es_doc.save(ignore_permissions=True)
frappe.db.commit()
print("Created request for employee schedule")
def update_roster(key):
frappe.publish_realtime(key, "Success")
def schedule(employee, shift, post_type, otRoster, start_date, end_date, keep_days_off, day_off_ot):
start = time.time()
if otRoster == 'false':
roster_type = 'Basic'
elif otRoster == 'true':
roster_type = 'Over-Time'
emp_project, emp_site, emp_shift = frappe.db.get_value("Employee", employee, ["project", "site", "shift"])
for date in pd.date_range(start=start_date, end=end_date):
if not cint(keep_days_off):
if frappe.db.exists("Employee Schedule", {"employee": employee, "date": cstr(date.date()), "roster_type" : roster_type}):
site, project, shift_type= frappe.get_value("Operations Shift", shift, ["site", "project", "shift_type"])
post_abbrv = frappe.get_value("Post Type", post_type, "post_abbrv")
roster = frappe.get_value("Employee Schedule", {"employee": employee, "date": cstr(date.date()), "roster_type" : roster_type })
update_existing_schedule(roster, shift, site, shift_type, project, post_abbrv, cstr(date.date()), "Working", post_type, roster_type, day_off_ot)
else:
roster_doc = frappe.new_doc("Employee Schedule")
roster_doc.employee = employee
roster_doc.date = cstr(date.date())
roster_doc.shift = shift
roster_doc.employee_availability = "Working"
roster_doc.post_type = post_type
roster_doc.roster_type = roster_type
roster_doc.day_off_ot = cint(day_off_ot)
roster_doc.save(ignore_permissions=True)
else:
if frappe.db.exists("Employee Schedule", {"employee": employee, "date": cstr(date.date()), "roster_type" : roster_type, 'employee_availability': 'Working'}):
site, project, shift_type= frappe.get_value("Operations Shift", shift, ["site", "project", "shift_type"])
post_abbrv = frappe.get_value("Post Type", post_type, "post_abbrv")
roster = frappe.get_value("Employee Schedule", {"employee": employee, "date": cstr(date.date()), "roster_type" : roster_type })
update_existing_schedule(roster, shift, site, shift_type, project, post_abbrv, cstr(date.date()), "Working", post_type, roster_type, day_off_ot)
elif not frappe.db.exists("Employee Schedule", {"employee": employee, "date": cstr(date.date()), "roster_type" : roster_type}):
roster_doc = frappe.new_doc("Employee Schedule")
roster_doc.employee = employee
roster_doc.date = cstr(date.date())
roster_doc.shift = shift
roster_doc.employee_availability = "Working"
roster_doc.post_type = post_type
roster_doc.roster_type = roster_type
roster_doc.day_off_ot = day_off_ot
roster_doc.save(ignore_permissions=True)
"""Update employee assignment"""
site, project = frappe.get_value("Operations Shift", shift, ["site", "project"])
if emp_project and emp_project != project or emp_site and emp_site != site or emp_shift and emp_shift != shift:
if frappe.db.exists("Additional Shift Assignment", {'employee': employee, 'project': project, 'site': site, 'shift': shift}):
additional_shift_assignment_doc = frappe.get_doc("Additional Shift Assignment", {'employee': employee, 'project': project, 'site': site, 'shift': shift})
additional_shift_assignment_doc.project = project
additional_shift_assignment_doc.site = site
additional_shift_assignment_doc.shift = shift
additional_shift_assignment_doc.save(ignore_permissions=True)
else:
additional_shift_assignment_doc = frappe.new_doc("Additional Shift Assignment")
additional_shift_assignment_doc.employee = employee
additional_shift_assignment_doc.project = project
additional_shift_assignment_doc.site = site
additional_shift_assignment_doc.shift = shift
additional_shift_assignment_doc.save(ignore_permissions=True)
elif emp_project == project and emp_site == site and emp_shift == shift:
frappe.db.sql("""
DELETE FROM
`tabAdditional Shift Assignment`
WHERE
employee=%(employee)s
""", {'employee': employee})
frappe.db.commit()
elif emp_project and emp_site is None and emp_shift is None:
update_employee_assignment(employee, project, site, shift)
end = time.time()
print("Scheduled employee : ", employee, end-start)
def update_employee_assignment(employee, project, site, shift):
""" This function updates the employee project, site and shift in the employee doctype """
frappe.db.set_value("Employee", employee, "project", val=project)
frappe.db.set_value("Employee", employee, "site", val=site)
frappe.db.set_value("Employee", employee, "shift", val=shift)
@frappe.whitelist()
def schedule_leave(employees, leave_type, start_date, end_date):
try:
for employee in json.loads(employees):
for date in pd.date_range(start=start_date, end=end_date):
if frappe.db.exists("Employee Schedule", {"employee": employee["employee"], "date": cstr(date.date())}):
roster = frappe.get_doc("Employee Schedule", {"employee": employee["employee"], "date": cstr(date.date())})
roster.shift = None
roster.shift_type = None
roster.project = None
roster.site = None
else:
roster = frappe.new_doc("Employee Schedule")
roster.employee = employee["employee"]
roster.date = cstr(date.date())
roster.employee_availability = leave_type
roster.save(ignore_permissions=True)
except Exception as e:
print(e)
return frappe.utils.response.report_error(e.http_status_code)
@frappe.whitelist(allow_guest=True)
def unschedule_staff(employees, start_date, end_date=None, never_end=0):
try:
for employee in json.loads(employees):
st = time.time()
if cint(never_end) == 1:
rosters = frappe.get_list("Employee Schedule", {"employee": employee["employee"],"date": ('>=', start_date)}, ignore_permissions=True)
rosters = [roster.name for roster in rosters]
rosters = ', '.join(['"{}"'.format(value) for value in rosters])
if rosters:
frappe.db.sql("""
delete from `tabEmployee Schedule`
where name in ({ids})
""".format(ids=rosters))
if end_date and cint(never_end) != 1:
rosters = frappe.get_list("Employee Schedule", {"employee": employee["employee"], "date": ['between', (start_date, end_date)]}, ignore_permissions=True)
rosters = [roster.name for roster in rosters]
rosters = ', '.join(['"{}"'.format(value) for value in rosters])
if rosters:
frappe.db.sql("""
delete from `tabEmployee Schedule`
where name in ({ids})
""".format(ids=rosters))
frappe.db.commit()
return True
except Exception as e:
print(e)
return frappe.utils.response.report_error(e.http_status_code)
@frappe.whitelist()
def edit_post(posts, values):
user, user_roles, user_employee = get_current_user_details()
if "Operations Manager" not in user_roles and "Projects Manager" not in user_roles:
frappe.throw(_("Insufficient permissions to Edit Post."))
args = frappe._dict(json.loads(values))
if args.post_status == "Plan Post":
if args.plan_end_date and cint(args.project_end_date):
frappe.throw(_("Cannot set both project end date and custom end date!"))
if not args.plan_end_date and not cint(args.project_end_date):
frappe.throw(_("Please set an end date!"))
frappe.enqueue(plan_post, posts=posts, args=args, is_async=True, queue='long')
elif args.post_status == "Cancel Post":
if args.cancel_end_date and cint(args.project_end_date):
frappe.throw(_("Cannot set both project end date and custom end date!"))
if not args.cancel_end_date and not cint(args.project_end_date):
frappe.throw(_("Please set an end date!"))
frappe.enqueue(cancel_post,posts=posts, args=args, is_async=True, queue='long')
elif args.post_status == "Suspend Post":
if args.suspend_to_date and cint(args.project_end_date):
frappe.throw(_("Cannot set both project end date and custom end date!"))
if not args.suspend_to_date and not cint(args.project_end_date):
frappe.throw(_("Please set an end date!"))
frappe.enqueue(suspend_post, posts=posts, args=args, is_async=True, queue='long')
elif args.post_status == "Post Off":
if args.repeat_till and cint(args.project_end_date):
frappe.throw(_("Cannot set both project end date and custom end date!"))
if not args.repeat_till and not cint(args.project_end_date):
frappe.throw(_("Please set an end date!"))
if args.repeat == "Does not repeat" and cint(args.project_end_date):
frappe.throw(_("Cannot set both project end date and choose 'Does not repeat' option!"))
frappe.enqueue(post_off, posts=posts, args=args, is_async=True, queue='long')
frappe.enqueue(update_roster, key="staff_view", is_async=True, queue='long')
def plan_post(posts, args):
""" This function sets the post status to planned provided a post, start date and an end date """
end_date = None
if args.plan_end_date and not cint(args.project_end_date):
end_date = args.plan_end_date
for post in json.loads(posts):
if cint(args.project_end_date) and not args.plan_end_date:
project = frappe.db.get_value("Operations Post", post["post"], ["project"])
if frappe.db.exists("Contracts", {'project': project}):
contract, end_date = frappe.db.get_value("Contracts", {'project': project}, ["name", "end_date"])
if not end_date:
frappe.throw(_("No end date set for contract {contract}".format(contract=contract)))
else:
frappe.throw(_("No contract linked with project {project}".format(project=project)))
for date in pd.date_range(start=args.plan_from_date, end=end_date):
if frappe.db.exists("Post Schedule", {"date": cstr(date.date()), "post": post["post"]}):
doc = frappe.get_doc("Post Schedule", {"date": cstr(date.date()), "post": post["post"]})
else:
doc = frappe.new_doc("Post Schedule")
doc.post = post["post"]
doc.date = cstr(date.date())
doc.post_status = "Planned"
doc.save()
frappe.db.commit()
def cancel_post(posts, args):
end_date = None
if args.cancel_end_date and not cint(args.project_end_date):
        end_date = args.cancel_end_date
for post in json.loads(posts):
if cint(args.project_end_date) and not args.cancel_end_date:
project = frappe.db.get_value("Operations Post", post["post"], ["project"])
if frappe.db.exists("Contracts", {'project': project}):
contract, end_date = frappe.db.get_value("Contracts", {'project': project}, ["name", "end_date"])
if not end_date:
frappe.throw(_("No end date set for contract {contract}".format(contract=contract)))
else:
frappe.throw(_("No contract linked with project {project}".format(project=project)))
for date in pd.date_range(start=args.cancel_from_date, end=end_date):
if frappe.db.exists("Post Schedule", {"date": cstr(date.date()), "post": post["post"]}):
doc = frappe.get_doc("Post Schedule", {"date": cstr(date.date()), "post": post["post"]})
else:
doc = frappe.new_doc("Post Schedule")
doc.post = post["post"]
doc.date = cstr(date.date())
doc.paid = args.suspend_paid
doc.unpaid = args.suspend_unpaid
doc.post_status = "Cancelled"
doc.save()
frappe.db.commit()
def suspend_post(posts, args):
end_date = None
if args.suspend_to_date and not cint(args.project_end_date):
end_date = args.suspend_to_date
for post in json.loads(posts):
if cint(args.project_end_date) and not args.suspend_to_date:
project = frappe.db.get_value("Operations Post", post["post"], ["project"])
if frappe.db.exists("Contracts", {'project': project}):
contract, end_date = frappe.db.get_value("Contracts", {'project': project}, ["name", "end_date"])
if not end_date:
frappe.throw(_("No end date set for contract {contract}".format(contract=contract)))
else:
frappe.throw(_("No contract linked with project {project}".format(project=project)))
for date in pd.date_range(start=args.suspend_from_date, end=end_date):
if frappe.db.exists("Post Schedule", {"date": cstr(date.date()), "post": post["post"]}):
doc = frappe.get_doc("Post Schedule", {"date": cstr(date.date()), "post": post["post"]})
else:
doc = frappe.new_doc("Post Schedule")
doc.post = post["post"]
doc.date = cstr(date.date())
doc.paid = args.suspend_paid
doc.unpaid = args.suspend_unpaid
doc.post_status = "Suspended"
doc.save()
frappe.db.commit()
def post_off(posts, args):
from one_fm.api.mobile.roster import month_range
post_off_paid = args.post_off_paid
post_off_unpaid = args.post_off_unpaid
if args.repeat == "Does not repeat":
for post in json.loads(posts):
set_post_off(post["post"], post["date"], post_off_paid, post_off_unpaid)
else:
if args.repeat and args.repeat in ["Daily", "Weekly", "Monthly", "Yearly"]:
end_date = None
if args.repeat_till and not cint(args.project_end_date):
end_date = args.repeat_till
if args.repeat == "Daily":
for post in json.loads(posts):
if cint(args.project_end_date) and not args.repeat_till:
project = frappe.db.get_value("Operations Post", post["post"], ["project"])
if frappe.db.exists("Contracts", {'project': project}):
contract, end_date = frappe.db.get_value("Contracts", {'project': project}, ["name", "end_date"])
if not end_date:
frappe.throw(_("No end date set for contract {contract}".format(contract=contract)))
else:
frappe.throw(_("No contract linked with project {project}".format(project=project)))
for date in pd.date_range(start=post["date"], end=end_date):
set_post_off(post["post"], cstr(date.date()), post_off_paid, post_off_unpaid)
elif args.repeat == "Weekly":
week_days = []
if args.sunday: week_days.append("Sunday")
if args.monday: week_days.append("Monday")
if args.tuesday: week_days.append("Tuesday")
if args.wednesday: week_days.append("Wednesday")
if args.thursday: week_days.append("Thursday")
if args.friday: week_days.append("Friday")
if args.saturday: week_days.append("Saturday")
for post in json.loads(posts):
if cint(args.project_end_date) and not args.repeat_till:
project = frappe.db.get_value("Operations Post", post["post"], ["project"])
if frappe.db.exists("Contracts", {'project': project}):
contract, end_date = frappe.db.get_value("Contracts", {'project': project}, ["name", "end_date"])
if not end_date:
frappe.throw(_("No end date set for contract {contract}".format(contract=contract)))
else:
frappe.throw(_("No contract linked with project {project}".format(project=project)))
for date in pd.date_range(start=post["date"], end=end_date):
if getdate(date).strftime('%A') in week_days:
set_post_off(post["post"], cstr(date.date()), post_off_paid, post_off_unpaid)
elif args.repeat == "Monthly":
for post in json.loads(posts):
if cint(args.project_end_date) and not args.repeat_till:
project = frappe.db.get_value("Operations Post", post["post"], ["project"])
if frappe.db.exists("Contracts", {'project': project}):
contract, end_date = frappe.db.get_value("Contracts", {'project': project}, ["name", "end_date"])
if not end_date:
frappe.throw(_("No end date set for contract {contract}".format(contract=contract)))
else:
frappe.throw(_("No contract linked with project {project}".format(project=project)))
for date in month_range(post["date"], end_date):
set_post_off(post["post"], cstr(date.date()), post_off_paid, post_off_unpaid)
elif args.repeat == "Yearly":
for post in json.loads(posts):
if cint(args.project_end_date) and not args.repeat_till:
project = frappe.db.get_value("Operations Post", post["post"], ["project"])
if frappe.db.exists("Contracts", {'project': project}):
contract, end_date = frappe.db.get_value("Contracts", {'project': project}, ["name", "end_date"])
if not end_date:
frappe.throw(_("No end date set for contract {contract}".format(contract=contract)))
else:
frappe.throw(_("No contract linked with project {project}".format(project=project)))
for date in pd.date_range(start=post["date"], end=end_date, freq=pd.DateOffset(years=1)):
set_post_off(post["post"], cstr(date.date()), post_off_paid, post_off_unpaid)
frappe.db.commit()
def set_post_off(post, date, post_off_paid, post_off_unpaid):
if frappe.db.exists("Post Schedule", {"date": date, "post": post}):
doc = frappe.get_doc("Post Schedule", {"date": date, "post": post})
else:
doc = frappe.new_doc("Post Schedule")
doc.post = post
doc.date = date
doc.paid = post_off_paid
doc.unpaid = post_off_unpaid
doc.post_status = "Post Off"
doc.save()
@frappe.whitelist()
def dayoff(employees, selected_dates=0, repeat=0, repeat_freq=None, week_days=[], repeat_till=None, project_end_date=None):
if not repeat_till and not cint(project_end_date) and not selected_dates:
frappe.throw(_("Please select either a repeat till date or check the project end date option."))
from one_fm.api.mobile.roster import month_range
if cint(selected_dates):
for employee in json.loads(employees):
set_dayoff(employee["employee"], employee["date"])
else:
if repeat and repeat_freq in ["Daily", "Weekly", "Monthly", "Yearly"]:
end_date = None
if repeat_till and not cint(project_end_date):
end_date = repeat_till
if repeat_freq == "Daily":
for employee in json.loads(employees):
if cint(project_end_date):
project = frappe.db.get_value("Employee", {'employee': employee["employee"]}, ["project"])
if frappe.db.exists("Contracts", {'project': project}):
contract, end_date = frappe.db.get_value("Contracts", {'project': project}, ["name", "end_date"])
if not end_date:
frappe.throw(_("No end date set for contract {contract}".format(contract=contract)))
else:
frappe.throw(_("No contract linked with project {project}".format(project=project)))
for date in pd.date_range(start=employee["date"], end=end_date):
frappe.enqueue(set_dayoff, employee=employee["employee"], date=cstr(date.date()), queue='short')
elif repeat_freq == "Weekly":
for employee in json.loads(employees):
if cint(project_end_date):
project = frappe.db.get_value("Employee", {'employee': employee["employee"]}, ["project"])
if frappe.db.exists("Contracts", {'project': project}):
contract, end_date = frappe.db.get_value("Contracts", {'project': project}, ["name", "end_date"])
if not end_date:
frappe.throw(_("No end date set for contract {contract}".format(contract=contract)))
else:
frappe.throw(_("No contract linked with project {project}".format(project=project)))
for date in pd.date_range(start=employee["date"], end=end_date):
if getdate(date).strftime('%A') in week_days:
frappe.enqueue(set_dayoff, employee=employee["employee"], date=cstr(date.date()), queue='short')
elif repeat_freq == "Monthly":
for employee in json.loads(employees):
if cint(project_end_date):
project = frappe.db.get_value("Employee", {'employee': employee["employee"]}, ["project"])
if frappe.db.exists("Contracts", {'project': project}):
contract, end_date = frappe.db.get_value("Contracts", {'project': project}, ["name", "end_date"])
if not end_date:
frappe.throw(_("No end date set for contract {contract}".format(contract=contract)))
else:
frappe.throw(_("No contract linked with project {project}".format(project=project)))
for date in month_range(employee["date"], end_date):
frappe.enqueue(set_dayoff, employee=employee["employee"], date=cstr(date.date()), queue='short')
elif repeat_freq == "Yearly":
for employee in json.loads(employees):
if cint(project_end_date):
project = frappe.db.get_value("Employee", {'employee': employee["employee"]}, ["project"])
if frappe.db.exists("Contracts", {'project': project}):
contract, end_date = frappe.db.get_value("Contracts", {'project': project}, ["name", "end_date"])
if not end_date:
frappe.throw(_("No end date set for contract {contract}".format(contract=contract)))
else:
frappe.throw(_("No contract linked with project {project}".format(project=project)))
for date in pd.date_range(start=employee["date"], end=end_date, freq=pd.DateOffset(years=1)):
frappe.enqueue(set_dayoff, employee=employee["employee"], date=cstr(date.date()), queue='short')
def set_dayoff(employee, date):
if frappe.db.exists("Employee Schedule", {"date": date, "employee": employee}):
doc = frappe.get_doc("Employee Schedule", {"date": date, "employee": employee})
else:
doc = frappe.new_doc("Employee Schedule")
doc.employee = employee
doc.date = date
doc.shift = None
doc.post_type = None
doc.shift_type = None
doc.site = None
doc.project = None
doc.employee_availability = "Day Off"
doc.post_abbrv = None
doc.roster_type = 'Basic'
doc.save(ignore_permissions=True)
@frappe.whitelist()
def assign_staff(employees, shift, request_employee_assignment):
validation_logs = []
user, user_roles, user_employee = get_current_user_details()
if not cint(request_employee_assignment):
for emp in json.loads(employees):
emp_project, emp_site, emp_shift = frappe.db.get_value("Employee", emp, ["project", "site", "shift"])
supervisor = frappe.db.get_value("Operations Shift", emp_shift, ["supervisor"])
if user_employee.name != supervisor:
validation_logs.append("You are not authorized to change assignment for employee {emp}. Please check the Request Employee Assignment option to place a request.".format(emp=emp))
		if len(validation_logs) > 0:
			# log first: frappe.throw raises, so anything after it never runs
			frappe.log_error(validation_logs)
			frappe.throw(validation_logs)
else:
try:
start = time.time()
for employee in json.loads(employees):
				if not cint(request_employee_assignment):
					# fetch the shift's site and project here, since they are not defined yet in this branch
					site, project = frappe.get_value("Operations Shift", shift, ["site", "project"])
					frappe.enqueue(assign_job, employee=employee, shift=shift, site=site, project=project, is_async=True, queue="long")
else:
emp_project, emp_site, emp_shift = frappe.db.get_value("Employee", employee, ["project", "site", "shift"])
site, project = frappe.get_value("Operations Shift", shift, ["site", "project"])
if emp_project != project or emp_site != site or emp_shift != shift:
frappe.enqueue(create_request_employee_assignment, employee=employee, from_shift=emp_shift, to_shift=shift, is_async=True, queue="long")
frappe.enqueue(update_roster, key="staff_view", is_async=True, queue="long")
end = time.time()
print(end-start, "[TOTS]")
return True
except Exception as e:
frappe.log_error(e)
frappe.throw(_(e))
def create_request_employee_assignment(employee, from_shift, to_shift):
req_ea_doc = frappe.new_doc("Request Employee Assignment")
req_ea_doc.employee = employee
req_ea_doc.from_shift = from_shift
req_ea_doc.to_shift = to_shift
req_ea_doc.save(ignore_permissions=True)
def assign_job(employee, shift, site, project):
start = time.time()
frappe.set_value("Employee", employee, "shift", shift)
frappe.set_value("Employee", employee, "site", site)
frappe.set_value("Employee", employee, "project", project)
# for date in pd.date_range(start=start_date, end=end_date):
# if frappe.db.exists("Employee Schedule", {"employee": employee, "date": cstr(date.date())}):
# roster = frappe.get_value("Employee Schedule", {"employee": employee, "date": cstr(date.date())})
# update_existing_schedule(roster, shift, site, shift_type, project, post_abbrv, cstr(date.date()), "Working", post_type)
# else:
# roster = frappe.new_doc("Employee Schedule")
# roster.employee = employee
# roster.date = cstr(date.date())
# roster.shift = shift
# roster.employee_availability = "Working"
# roster.post_type = post_type
# roster.save(ignore_permissions=True)
end = time.time()
print("------------------[TIME TAKEN]===================", end-start)
@frappe.whitelist(allow_guest=True)
def update_existing_schedule(roster, shift, site, shift_type, project, post_abbrv, date, employee_availability, post_type, roster_type, day_off_ot):
frappe.db.set_value("Employee Schedule", roster, "shift", val=shift)
frappe.db.set_value("Employee Schedule", roster, "site", val=site)
frappe.db.set_value("Employee Schedule", roster, "shift_type", val=shift_type)
frappe.db.set_value("Employee Schedule", roster, "project", val=project)
frappe.db.set_value("Employee Schedule", roster, "post_abbrv", val=post_abbrv)
frappe.db.set_value("Employee Schedule", roster, "date", val=date)
frappe.db.set_value("Employee Schedule", roster, "employee_availability", val=employee_availability)
frappe.db.set_value("Employee Schedule", roster, "post_type", val=post_type)
frappe.db.set_value("Employee Schedule", roster, "roster_type", val=roster_type)
frappe.db.set_value("Employee Schedule", roster, "day_off_ot", val=cint(day_off_ot))
@frappe.whitelist(allow_guest=True)
def search_staff(key, search_term):
conds = ""
if key == "customer" and search_term:
conds += 'and prj.customer like "%{customer}%" and emp.project=prj.name'.format(customer=search_term)
elif key == "employee_id" and search_term:
conds += 'and emp.employee_id like "%{employee_id}%" '.format(employee_id=search_term)
elif key == "project" and search_term:
conds += 'and emp.project like "%{project}%" '.format(project=search_term)
elif key == "site" and search_term:
conds += 'and emp.site like "%{site}%" '.format(site=search_term)
elif key == "employee_name" and search_term:
conds += 'and emp.employee_name like "%{name}%" '.format(name=search_term)
data = frappe.db.sql("""
select
distinct emp.name, emp.employee_id, emp.employee_name, emp.image, emp.one_fm_nationality as nationality, usr.mobile_no, usr.name as email, emp.designation, emp.department, emp.shift, emp.site, emp.project
from `tabEmployee` as emp, `tabUser` as usr, `tabProject` as prj
where
emp.user_id=usr.name
{conds}
""".format(conds=conds), as_dict=1)
return data
|
{"hexsha": "8cfe826b6dab1b259077936ead4daf8018b4e2cd", "size": 43534, "ext": "py", "lang": "Python", "max_stars_repo_path": "one_fm/one_fm/page/roster/roster.py", "max_stars_repo_name": "ks093/One-FM", "max_stars_repo_head_hexsha": "d89b6739f12a8ad527355f7ba31184bb635e1dad", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "one_fm/one_fm/page/roster/roster.py", "max_issues_repo_name": "ks093/One-FM", "max_issues_repo_head_hexsha": "d89b6739f12a8ad527355f7ba31184bb635e1dad", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "one_fm/one_fm/page/roster/roster.py", "max_forks_repo_name": "ks093/One-FM", "max_forks_repo_head_hexsha": "d89b6739f12a8ad527355f7ba31184bb635e1dad", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.3620873269, "max_line_length": 258, "alphanum_fraction": 0.6984425966, "include": true, "reason": "import numpy", "num_tokens": 10810}
|
import logging
import collections
from typing import Dict, List, Optional, Union, Tuple, Any, Callable
import numpy
from dataclasses import dataclass
from cephlib.units import b2ssize, b2ssize_10
from . import html
from .cluster_classes import CephInfo, OSDStatus, DiskType, CephVersion, BlueStoreInfo, FileStoreInfo, LogicBlockDev, \
Disk, CephOSD
from .visualize_utils import tab, partition_by_len, to_html_histo, plot, perf_info_required, seconds_to_str_simple
from .obj_links import osd_link, host_link
from .checks import expected_wr_speed
from .plot_data import get_histo_img
from .table import Table, count, bytes_sz, ident, seconds, exact_count, ok_or_fail, idents_list, \
yes_or_no, extra_columns, to_str
from .groupby import group_by
logger = logging.getLogger('report')
@dataclass
class OSDInfo:
id: int
status: bool
version: CephVersion
daemon_runs: bool
dev_class: str
reweight: float
storage_type: str
storage_total: int
storage_dev_type: DiskType
journal_or_wal_collocated: bool
journal_or_wal_type: DiskType
journal_or_wal_size: int
journal_on_file: bool
db_collocated: Optional[bool]
db_type: Optional[DiskType]
db_size: Optional[int]
def group_osds(ceph: CephInfo) -> List[List[OSDInfo]]:
objs = []
for osd in ceph.sorted_osds:
assert osd.storage_info
data_dev_path = osd.storage_info.data.path
jinfo = osd.storage_info.journal if isinstance(osd.storage_info, FileStoreInfo) else osd.storage_info.wal
if isinstance(osd.storage_info, BlueStoreInfo):
info = osd.storage_info.db.dev_info
db_collocated: Optional[bool] = info.dev_path != data_dev_path
db_type: Optional[DiskType] = info.tp
db_size: Optional[int] = osd.storage_info.db.partition_info.size
else:
db_collocated = None
db_type = None
db_size = None
osd_info = OSDInfo(
id=osd.id,
status=osd.status == OSDStatus.up,
version=osd.version,
daemon_runs=osd.daemon_runs,
dev_class=osd.class_name if osd.class_name else "",
reweight=osd.reweight,
storage_type="bluestore" if isinstance(osd.storage_info, BlueStoreInfo) else "filestore",
storage_total=osd.total_space,
storage_dev_type=osd.storage_info.data.dev_info.tp, # type: ignore
journal_or_wal_collocated=jinfo.dev_info.dev_path == data_dev_path,
journal_or_wal_type=jinfo.dev_info.tp,
journal_or_wal_size=jinfo.partition_info.size,
journal_on_file=(osd.storage_info.journal.partition_name == osd.storage_info.data.partition_name
if isinstance(osd.storage_info, FileStoreInfo) else False),
db_collocated=db_collocated,
db_type=db_type,
db_size=db_size)
objs.append(osd_info)
return [[objs[idx] for idx in group] for group in group_by((obj.__dict__ for obj in objs), mutable_keys='id')]
@tab("OSD's state")
def show_osd_state(ceph: CephInfo) -> html.HTMLTable:
statuses: Dict[OSDStatus, List[str]] = collections.defaultdict(list)
for osd in ceph.osds.values():
statuses[osd.status].append(str(osd.id))
table = html.HTMLTable("table-osds-state", ["Status", "Count", "ID's"])
for status, osds in sorted(statuses.items(), key=lambda x: str(x)):
table.add_row([html.ok(status.name) if status == OSDStatus.up else html.fail(status.name),
str(len(osds)),
"<br>".join(", ".join(grp) for grp in partition_by_len(osds, 120, 1))])
return table
class OSDLoadTableAgg(Table):
ids = ident("OSD id's", dont_sort=True)
node = ident()
class_and_rules = ident("Class<br>rules")
pgs = ident("PG's", dont_sort=True)
open_files = ident("open<br>files")
ip_conn = ident("ip<br>conn")
threads = ident("Thread<br>count")
rss = ident("RSS")
vmm = ident("VMM")
cpu_used = ident("CPU<br>Used, s")
data = ident("Total<br>data")
read_ops = ident("Read<br>ops<br>uptime")
read = ident("Read<br>uptime")
write_ops = ident("Write<br>ops<br>uptime")
write = ident("Write<br>uptime")
@tab("OSD process info aggregated")
def show_osd_proc_info_agg(ceph: CephInfo) -> html.HTMLTable:
records: Dict[Tuple[str, str, Tuple[str, ...]], List[CephOSD]] = collections.defaultdict(list)
for osd in ceph.osds.values():
rules = tuple(ceph.crush.rules[rule_id].name for rule_id in osd.crush_rules_weights)
records[(osd.host.name, osd.class_name if osd.class_name else '', rules)].append(osd)
table = OSDLoadTableAgg()
for (hostname, classname, rules), osds in sorted(records.items()):
row = table.next_row()
ids = [(osd_link(osd.id).link, len(str(osd.id))) for osd in sorted(osds, key=lambda x: x.id)]
row.ids = "<br>".join(", ".join(part) for part in partition_by_len(ids, 20, 1)) # type: ignore
row.node = host_link(hostname).link, hostname
row.pgs = to_html_histo([osd.pg_count for osd in osds], short=True) # type: ignore
rule_names = '<br>'.join("rule: " + rule for rule in rules)
row.class_and_rules = f"cls: {classname}<br>{rule_names}", classname
# RUN INFO - FD COUNT, TCP CONN, THREADS
if all(osd.run_info for osd in osds):
row.open_files = to_html_histo([osd.run_info.fd_count for osd in osds], short=True) # type: ignore
row.ip_conn = to_html_histo([osd.run_info.opened_socks for osd in osds], short=True) # type: ignore
row.threads = to_html_histo([osd.run_info.th_count for osd in osds], short=True) # type: ignore
row.rss = to_html_histo([osd.run_info.vm_rss for osd in osds], short=True, tostr=b2ssize) # type: ignore
row.vmm = to_html_histo([osd.run_info.vm_size for osd in osds], short=True, tostr=b2ssize) # type: ignore
row.cpu_used = to_html_histo([int(osd.run_info.cpu_usage) for osd in osds], short=True) # type: ignore
row.data = to_html_histo([osd.pg_stats.bytes for osd in osds], short=True, tostr=b2ssize) # type: ignore
row.read_ops = to_html_histo([osd.pg_stats.reads for osd in osds], short=True, tostr=b2ssize_10) # type: ignore
row.read = to_html_histo([osd.pg_stats.read_b for osd in osds], short=True, tostr=b2ssize) # type: ignore
row.write_ops = to_html_histo([osd.pg_stats.writes for osd in osds], # type: ignore
short=True, tostr=b2ssize_10) # type: ignore
row.write = to_html_histo([osd.pg_stats.write_b for osd in osds], short=True, tostr=b2ssize) # type: ignore
return table.html(id="table-osd-process-info-agg", align=html.TableAlign.left_right)
class OSDLoadTable(Table):
id = ident()
node = ident()
class_ = ident("Class")
rules = ident("rules")
pgs = exact_count("PG's")
open_files = exact_count("open<br>files")
ip_conn = ident("ip<br>conn")
threads = exact_count("thr")
rss = ident("RSS<br>GiB")
vmm = ident("VMM<br>GiB")
cpu_used = ident("CPU Used<br>per 1h uptime")
data = ident("Total<br>data, GiB")
write_ops = ident("Write<br>Mops<br>total")
write = ident("Write<br>total, TiB")
read_ops = ident("Read<br>Mops<br>total")
read = ident("Read<br>total, TiB")
@tab("OSD process info")
def show_osd_proc_info(ceph: CephInfo) -> html.HTMLTable:
table = OSDLoadTable()
for osd in ceph.sorted_osds:
row = table.next_row()
row.id = osd_link(osd.id).link, osd.id
row.node = host_link(osd.host.name).link, osd.host.name
row.pgs = osd.pg_count
row.class_ = osd.class_name
row.rules = ', '.join(ceph.crush.rules[rule_id].name for rule_id in osd.crush_rules_weights)
def tostr(v: float) -> str:
if v > 10:
return f"{int(v)}"
if v < 0.1:
return "0"
return f"{v:.1f}"
# RUN INFO - FD COUNT, TCP CONN, THREADS
if osd.run_info:
row.open_files = osd.run_info.fd_count
row.ip_conn = str(osd.run_info.opened_socks)
row.threads = osd.run_info.th_count
row.rss = tostr(osd.run_info.vm_rss / 2 ** 30)
row.vmm = tostr(osd.run_info.vm_size / 2 ** 30)
row.cpu_used = seconds_to_str_simple(osd.run_info.cpu_usage * 3600 // osd.host.uptime)
assert osd.pg_stats
row.data = tostr(osd.pg_stats.bytes / 2 ** 30)
row.read_ops = tostr(osd.pg_stats.reads / 10 ** 6)
row.read = tostr(osd.pg_stats.read_b / 2 ** 40)
row.write_ops = tostr(osd.pg_stats.writes / 10 ** 6)
row.write = tostr(osd.pg_stats.write_b / 2 ** 40)
return table.html(id="table-osd-process-info", align=html.TableAlign.left_right)
@tab("OSD info")
def show_osd_info(ceph: CephInfo) -> html.HTMLTable:
class OSDInfoTable(Table):
# id = html.ident()
count = exact_count()
ids = idents_list(chars_per_line=50)
node = ident()
status = ok_or_fail()
version = ident("version [hash]")
daemon_runs = yes_or_no("daemon<br>run")
dev_class = ident("Storage<br>class")
weights = extra_columns(ident(), **{f"w_{rule.name}": f"Weight for<br>{rule.name}"
for rule in ceph.crush.rules.values()})
reweight = ident()
pg = ident("PG")
scrub_err = exact_count("Scrub<br>ERR")
storage_type = ident("Type")
storage_dev = ident("Storage<br>dev type")
storage_total = bytes_sz("Storage<br>total")
journal_or_wal_collocated = ident("Journal<br>or wal<br>colocated")
journal_or_wal_type = ident("Journal<br>or wal<br>dev type")
journal_or_wal_size = ident("Journal<br>or wal<br>size")
journal_on_file = yes_or_no("Journal<br>on file", true_fine=False)
db_collocated = ident("DB<br>colocated")
db_type = ident("DB<br>dev type")
db_size = ident("DB<br>size")
all_versions = [osd.version for osd in ceph.osds.values()]
all_versions += [mon.version for mon in ceph.mons.values()]
all_versions_set = set(all_versions)
largest_ver = max(ver for ver in all_versions_set if ver is not None)
fast_drives = {DiskType.nvme, DiskType.sata_ssd, DiskType.sas_ssd}
table = OSDInfoTable()
for osd_infos in sorted(group_osds(ceph), key=lambda x: x[0].dev_class):
osds = [ceph.osds[osd_info.id] for osd_info in osd_infos]
osd = osds[0]
osd_info = osd_infos[0]
row = table.next_row()
row.count = len(osd_infos)
row.ids = [(host_link(str(osd_info.id)).link, str(osd_info.id)) for osd_info in osd_infos]
row.status = osd_info.status
pgs = [ceph.osds[osd_info.id].pg_count for osd_info in osd_infos]
assert None not in pgs
row.pg = to_html_histo(pgs) # type: ignore
for rule in ceph.crush.rules.values():
weights = [tosd.crush_rules_weights[rule.id] for tosd in osds if rule.id in tosd.crush_rules_weights]
if weights:
row.weights[f"w_{rule.name}"] = to_html_histo(weights, show_int=False)
if osd.version is None:
row.version = html.fail("Unknown"), ""
else:
if len(all_versions_set) != 1:
color: Callable[[str], Union[str, html.TagProxy]] = html.ok if osd.version == largest_ver else html.fail
else:
color = lambda x: x
row.version = color(str(osd.version)), osd.version
row.daemon_runs = osd.daemon_runs
row.dev_class = osd.class_name
rew = f"{osd.reweight:.2f}"
row.reweight = html.fail(rew) if abs(osd.reweight - 1.0) > .01 else rew
row.storage_type = osd_info.storage_type
assert osd.storage_info
data_drive_color_fn = (html.ok if osd.storage_info.data.dev_info.tp in fast_drives # type: ignore
else lambda x: x) # type: ignore
row.storage_dev = data_drive_color_fn(osd.storage_info.data.dev_info.tp.name), \
osd.storage_info.data.dev_info.tp.name
# color = "red" if osd.free_perc < 20 else ( "yellow" if osd.free_perc < 40 else "green")
# avail_perc_str = H.font(osd.free_perc, color=color)
row.storage_total = osd.total_space
data_dev_path = osd.storage_info.data.path
# JOURNAL/WAL/DB info
if isinstance(osd.storage_info, FileStoreInfo):
jinfo = osd.storage_info.journal
if osd.run_info:
osd_sync = float(osd.config['filestore_max_sync_interval'])
min_size = osd_sync * expected_wr_speed[osd.storage_info.data.dev_info.tp] * (1024 ** 2)
else:
min_size = 0
else:
jinfo = osd.storage_info.wal
min_size = 512 * 1024 * 1024
if jinfo.dev_info.dev_path == data_dev_path:
if jinfo.dev_info.tp in (DiskType.nvme, DiskType.sata_ssd, DiskType.sas_ssd):
vl = "yes"
else:
vl = html.fail("yes"), "yes"
else:
vl = "no"
row.journal_or_wal_collocated = vl
color = html.ok if jinfo.dev_info.tp in fast_drives else html.fail
row.journal_or_wal_type = color(jinfo.dev_info.tp.name), jinfo.dev_info.tp.name
size_s = b2ssize(osd_info.journal_or_wal_size)
row.journal_or_wal_size = (size_s if osd_info.journal_or_wal_size >= min_size else html.fail(size_s)), \
osd_info.journal_or_wal_size
if isinstance(osd.storage_info, FileStoreInfo):
jonfile = osd.storage_info.journal.partition_name == osd.storage_info.data.partition_name
row.journal_on_file = (html.fail("yes"), "yes") if jonfile else "no"
if isinstance(osd.storage_info, BlueStoreInfo):
if osd.storage_info.db.dev_info.size is not None:
min_db_size = osd.storage_info.data.dev_info.size * 0.05
else:
min_db_size = 0
info = osd.storage_info.db.dev_info
if info.dev_path == data_dev_path:
if info.tp in (DiskType.nvme, DiskType.sata_ssd, DiskType.sas_ssd):
vl = "yes"
else:
vl = html.fail("yes"), "yes"
else:
vl = "no"
row.db_collocated = vl
color = html.ok if info.tp in fast_drives else html.fail
row.db_type = color(info.tp.name), info.tp.name
size_s = b2ssize(osd_info.db_size)
assert osd_info.db_size is not None
row.db_size = size_s if osd_info.db_size >= min_db_size else html.fail(size_s), osd_info.db_size
return table.html(id="table-osd-info", align=html.TableAlign.left_right)
@tab("OSD's dev uptime average load")
def show_osd_perf_info(ceph: CephInfo) -> html.HTMLTable:
class Tbl(Table):
osd = ident()
cls = ident("Class")
node = ident()
apply_lat = ident("apply<br>lat")
commit_lat = ident("commit<br>lat")
journal_lat = ident("journal<br>lat")
xx = ident("ms<br>avg/osd perf")
data_dev = ident("Data<br>dev")
data_read = ident("Data read<br>MiBps/iops")
data_write = ident("Data write<br>MiBps/iops")
data_lat = ident("Data lat<br>ms")
data_io_time = to_str("Data<br>IO time<br>ms per s")
j_dev = ident("J dev")
j_write = ident("J write<br>MiBps/iops")
j_lat = ident("J lat<br>ms")
j_io_time = to_str("J<br>IO time<br>ms per s")
wal_dev = ident("WAL dev")
wal_write = ident("WAL write<br>MiBps/iops")
wal_lat = ident("WAL lat<br>ms")
wal_io_time = to_str("WAL<br>IO time<br>ms per s")
db_dev = ident("DB dev")
db_read = ident("DB read<br>MiBps/iops")
db_write = ident("DB write<br>MiBps/iops")
db_lat = ident("DB lat<br>ms")
db_io_time = to_str("DB IO<br>time<br>ms per s")
def add_dev_info(row: Any, dev: Union[Disk, LogicBlockDev], attr: str, uptime: float, with_read: bool = True):
setattr(row, attr + '_dev', dev.name if dev.tp == DiskType.nvme else f"{dev.name} ({dev.tp.short_name})")
MBupt = 2 ** 20 * int(uptime)
if with_read:
setattr(row, attr + '_read', (f"{dev.usage.read_bytes // MBupt} / {int(dev.usage.read_iops / uptime)}",
dev.usage.read_iops / uptime))
setattr(row, attr + '_write', (f"{dev.usage.write_bytes // MBupt} / {int(dev.usage.write_iops / uptime)}",
dev.usage.write_iops / uptime))
if dev.usage.lat is None:
setattr(row, attr + '_lat', ('-', 0))
else:
setattr(row, attr + '_lat',
((f"{dev.usage.lat / 1000:.1f}s"
if dev.usage.lat > 1000
else f"{int(dev.usage.lat)}ms"), dev.usage.lat))
setattr(row, attr + '_io_time', int(dev.usage.io_time // uptime))
def get_lat_val(osd: CephOSD, lat_name: str) -> Optional[str]:
if lat_name in osd.osd_perf_dump:
ms = osd.osd_perf_dump[lat_name] * 1000
elif lat_name + '_s' in osd.osd_perf_dump:
ms = osd.osd_perf_dump[lat_name + "_s"] * 1000
elif lat_name + '_ms' in osd.osd_perf_dump:
ms = osd.osd_perf_dump[lat_name + "_ms"]
else:
return None
return f"{ms / 1000:.1f}s" if ms > 1000 else f"{int(ms)}ms"
table = Tbl()
for osd in ceph.sorted_osds:
assert osd.storage_info is not None
row = table.next_row()
row.osd = osd_link(osd.id).link, osd.id
row.cls = osd.class_name
row.node = host_link(osd.host.name).link, osd.host.name
row.apply_lat = get_lat_val(osd, "apply_latency")
clat = get_lat_val(osd, "commitcycle_latency")
if clat is None:
clat = get_lat_val(osd, "commit_latency")
row.commit_lat = clat
jlat = get_lat_val(osd, "journal_latency")
if jlat is not None:
row.journal_lat = jlat
add_dev_info(row, osd.storage_info.data.dev_info, "data", osd.host.uptime)
if isinstance(osd.storage_info, FileStoreInfo):
add_dev_info(row, osd.storage_info.journal.dev_info, "j", osd.host.uptime, with_read=False)
else:
add_dev_info(row, osd.storage_info.wal.dev_info, "wal", osd.host.uptime, with_read=False)
add_dev_info(row, osd.storage_info.db.dev_info, "db", osd.host.uptime)
return table.html("table-osd-dev-uptime-load")
@tab("PG copy per OSD")
def show_osd_pool_pg_distribution(ceph: CephInfo) -> Optional[html.HTMLTable]:
if ceph.sum_per_osd is None:
logger.warning("PG copy per OSD: No pg dump data. Probably too many PG")
return None
pools_order_t = sorted((-count, name) for name, count in ceph.sum_per_pool.items())
# pools = sorted(cluster.sum_per_pool)
pools = [name for _, name in pools_order_t]
name_headers = []
for name in pools:
if len(name) > 10:
parts = name.split(".")
best_idx = 0
best_delta = len(name) + 1
for idx in range(len(parts)):
sz1 = len(".".join(parts[:idx]))
sz2 = len(".".join(parts[idx:]))
if abs(sz2 - sz1) < best_delta:
best_idx = idx
best_delta = abs(sz2 - sz1)
if best_idx != 0 and best_idx != len(parts):
name = ".".join(parts[:best_idx]) + '.' + '<br>' + ".".join(parts[best_idx:])
name_headers.append(name)
table = html.HTMLTable("table-pg-per-osd", ["OSD/pool"] + name_headers + ['sum'])
for osd_id, row in sorted(ceph.osd_pool_pg_2d.items()):
data = [osd_link(osd_id).link] + [row.get(pool_name, 0) for pool_name in pools] # type: ignore
data.append(ceph.sum_per_osd[osd_id]) # type: ignore
table.add_row(map(str, data))
table.add_cell("Total cluster PG", sorttable_customkey=str(max(ceph.osds) + 1))
list(map(table.add_cell, (ceph.sum_per_pool[pool_name] for pool_name in pools))) # type: ignore
table.add_cell(str(sum(ceph.sum_per_pool.values())))
return table
@tab("PG copy per OSD")
def show_osd_pool_agg_pg_distribution(ceph: CephInfo) -> Optional[html.HTMLTable]:
if ceph.sum_per_osd is None:
logger.warning("PG copy per OSD: No pg dump data. Probably too many PG")
return None
pool2osd_count: Dict[str, List[Tuple[int, int]]] = {}
for osd_id, pool_row in ceph.osd_pool_pg_2d.items():
for pool_name, count in pool_row.items():
pool2osd_count.setdefault(pool_name, []).append((count, osd_id))
class PGAggTable(Table):
name = ident()
pg = exact_count("PG copies")
osds = exact_count("OSDS")
min = ident(dont_sort=True)
p10 = exact_count("10%")
p30 = exact_count("30%")
p50 = exact_count("50%")
p70 = exact_count("70%")
p90 = exact_count("90%")
max = ident(dont_sort=True)
table = PGAggTable()
for pool_name, counts in pool2osd_count.items():
pool = ceph.pools[pool_name]
counts.sort()
row = table.next_row()
row.name = pool_name
row.pg = pool.pg * pool.size
row.osds = len(ceph.osds4rule[pool.crush_rule])
row.min = "<br>".join(osd_link(osd_id).link + f": {cnt}" for cnt, osd_id in counts[:5])
row.max = "<br>".join(osd_link(osd_id).link + f": {cnt}" for cnt, osd_id in counts[-5:])
row.p10 = counts[int(len(counts) * 0.1)]
row.p30 = counts[int(len(counts) * 0.3)]
row.p50 = counts[int(len(counts) * 0.5)]
row.p70 = counts[int(len(counts) * 0.7)]
row.p90 = counts[int(len(counts) * 0.9)]
return table.html(id="table-pg-per-osd-agg")
@plot
@tab("PG per OSD")
def show_osd_pg_histo(ceph: CephInfo) -> Optional[str]:
vals = [osd.pg_count for osd in ceph.osds.values() if osd.pg_count is not None]
return get_histo_img(numpy.array(vals)) if vals else None
|
{"hexsha": "332b3c845805db3807f80017f3763562b5308c4b", "size": 22386, "ext": "py", "lang": "Python", "max_stars_repo_path": "ceph_monitoring/visualize_osds.py", "max_stars_repo_name": "izadorozhna/ceph-monitoring", "max_stars_repo_head_hexsha": "71c8c75987ed175b335074db2741d008796078c8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ceph_monitoring/visualize_osds.py", "max_issues_repo_name": "izadorozhna/ceph-monitoring", "max_issues_repo_head_hexsha": "71c8c75987ed175b335074db2741d008796078c8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ceph_monitoring/visualize_osds.py", "max_forks_repo_name": "izadorozhna/ceph-monitoring", "max_forks_repo_head_hexsha": "71c8c75987ed175b335074db2741d008796078c8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.5324675325, "max_line_length": 120, "alphanum_fraction": 0.6189582775, "include": true, "reason": "import numpy", "num_tokens": 6087}
|
import binascii
import numpy as np
from PIL import Image
from encryption import AESCipher
import sys
class ImageSteganography:
def image_to_matrix(self, path):
image = Image.open(path)
matrix = np.array(image)
return matrix
def matrix_to_image(self, matrix, output_image):
image = Image.fromarray(matrix)
image.save(output_image)
return image
def text_to_bits(self, message):
result = []
for c in message:
bits = bin(ord(c))[2:]
bits = '00000000'[len(bits):] + bits
result.extend([int(b) for b in bits])
return result
def text_from_bits(self, bits):
# matrix with 8 elements per row (1 byte)
bits = np.reshape(bits, (-1, 8))
bitvalues = [128, 64, 32, 16, 8, 4, 2, 1]
bytes = np.sum(bits * bitvalues, axis=1) # rows to bytes
# convert each byte to a character and put into a list
chars = [chr(b) for b in bytes]
return ''.join(chars)
def set_bit(self, number, index, flag): # index starts from 0 where 0 is LSB
# Compute mask, an integer with just bit 'index' set.
mask = 1 << index
# Clear the bit indicated by the mask (if flag is False)
number &= ~mask
if flag:
# If flag was True, set the bit indicated by the mask.
number |= mask
return number
def hide_in_pixel(self, path, message, index, plane, output_image):
matrix = self.image_to_matrix(path)
bit_message = self.text_to_bits(message)
flatten_matrix = matrix.reshape(-1, 3)
inverted_plane = []
operational_list = flatten_matrix[:, plane].flatten()
result_matrix = np.ones((flatten_matrix.shape[0], 1))
operational_list_index = (operational_list & (
1 << index)) >> index # Get index bit of a number
bit_message = np.append(
bit_message, operational_list_index[len(bit_message):])
# number ^= (-x ^ number) & (1 << n)
operational_list_temp = (-bit_message ^
operational_list) & (1 << index)
# Set n bit of a number to x
operational_list = operational_list ^ operational_list_temp
operational_matrix = operational_list.reshape(-1, len(plane))
temp_index = 0
for plane_index in range(3):
if plane_index in plane:
result_matrix = np.column_stack(
(result_matrix, np.reshape(operational_matrix[:, temp_index], (-1, 1))))
temp_index += 1
else:
result_matrix = np.column_stack(
(result_matrix, np.reshape(flatten_matrix[:, plane_index], (-1, 1))))
result_matrix = np.reshape(
result_matrix[:, 1:].flatten(), matrix.shape)
result_matrix = result_matrix.astype('uint8')
stegano_image = self.matrix_to_image(result_matrix, output_image)
return stegano_image
def extract_info_from_lsb(self, path, plane, index):
lsb_message_result = []
matrix = self.image_to_matrix(path)
flatten_matrix = matrix.reshape(-1, 3)[:, plane].flatten()
lsb_message_result = (flatten_matrix & (1 << index)) >> index
lsb_message_result = lsb_message_result.tolist()
        end_index = (len(lsb_message_result) // 8) * 8  # integer division: keep whole bytes only
lsb_message_result = self.text_from_bits(
lsb_message_result[:end_index])
return lsb_message_result
def embed(self, cover_file, secret_file, color_plane, pixel_bit):
cover_array = self.image_to_matrix(cover_file)
secret_array = self.image_to_matrix(secret_file)
# every bit except the one at `pixel_bit` position is 1
mask = 0xff ^ (1 << pixel_bit)
# shift the MSB of the secret to the `pixel_bit` position
secret_bits = ((secret_array[..., color_plane] >> 7) << pixel_bit)
height, width, _ = secret_array.shape
cover_plane = (cover_array[:height, :width,
color_plane] & mask) + secret_bits
cover_array[:height, :width, color_plane] = cover_plane
stego_image = self.matrix_to_image(cover_array, cover_file)
return stego_image
def extract_embedded_image(self, stego_file, color_plane, pixel_bit, output_image):
stego_array = self.image_to_matrix(stego_file)
change_index = [0, 1, 2]
change_index.remove(color_plane)
stego_array[..., change_index] = 0
stego_array = ((stego_array >> pixel_bit) & 0x01) << 7
exposed_secret = self.matrix_to_image(stego_array, output_image)
return exposed_secret
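# Minimal usage sketch (added illustration, not part of the original module).
# The file names "cover.png", "secret.png" and "recovered_secret.png" are
# hypothetical placeholders; any RGB images of compatible size should work.
if __name__ == "__main__":
    steg = ImageSteganography()
    # Hide the MSB plane of the secret image's red channel (plane 0) in bit 0
    # of the cover image's red channel. Note that embed() overwrites the cover
    # file with the stego image, so extraction reads the same path back.
    steg.embed("cover.png", "secret.png", color_plane=0, pixel_bit=0)
    steg.extract_embedded_image("cover.png", color_plane=0, pixel_bit=0,
                                output_image="recovered_secret.png")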
|
{"hexsha": "966e4be1ddb24d126e4b1311c420b28cbe9678ed", "size": 4701, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/Steganography.py", "max_stars_repo_name": "MacBox7/Image-Steganography", "max_stars_repo_head_hexsha": "e277afc415960638ce4ecfb25caf0c7c88f0659b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/Steganography.py", "max_issues_repo_name": "MacBox7/Image-Steganography", "max_issues_repo_head_hexsha": "e277afc415960638ce4ecfb25caf0c7c88f0659b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2017-03-13T18:34:50.000Z", "max_issues_repo_issues_event_max_datetime": "2017-03-13T18:34:50.000Z", "max_forks_repo_path": "scripts/Steganography.py", "max_forks_repo_name": "MacBox7/Image-Steganography", "max_forks_repo_head_hexsha": "e277afc415960638ce4ecfb25caf0c7c88f0659b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.9732142857, "max_line_length": 92, "alphanum_fraction": 0.6166773027, "include": true, "reason": "import numpy", "num_tokens": 1109}
|
using ArgParse
include("functions_closest.jl")
function parse_commandline()
s = ArgParseSettings()
@add_arg_table! s begin
"--target"
help = "The alignment to search for closest matches, in fasta format"
required = true
"--query"
help = "The sequence(s) to find matches for, in fasta format"
required = true
"--reference", "-r"
help = "The reference sequence, in fasta format"
required = false
"--csv-out", "-o"
help = "The name of csv file containing closest matches to write"
default = "out.csv"
"--fast"
help = "Restrict comparison to sites that are different from the reference.
Requires --reference to be provided; doesn't calculate a per-site distance"
action = :store_true
end
return parse_args(s)
end
function main()
parsed_args = parse_commandline()
if parsed_args["fast"]
if parsed_args["reference"] == nothing
e = error("need to provide --reference if you use --fast")
throw(e)
end
fast_closest(parsed_args["target"], parsed_args["query"], parsed_args["reference"], parsed_args["csv-out"])
else
closest(parsed_args["target"], parsed_args["query"], parsed_args["csv-out"])
end
end
main()
|
{"hexsha": "e3e04efbe33f72dd99e30b6a41e014411b23b1c2", "size": 1375, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/closest.jl", "max_stars_repo_name": "cov-ert/julialign", "max_stars_repo_head_hexsha": "6bddd1825b6c5d0205876252fffe252411dadc5b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/closest.jl", "max_issues_repo_name": "cov-ert/julialign", "max_issues_repo_head_hexsha": "6bddd1825b6c5d0205876252fffe252411dadc5b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/closest.jl", "max_forks_repo_name": "cov-ert/julialign", "max_forks_repo_head_hexsha": "6bddd1825b6c5d0205876252fffe252411dadc5b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.2553191489, "max_line_length": 115, "alphanum_fraction": 0.5941818182, "num_tokens": 296}
|
\chapter{General Markov Chains}
In this chapter we will study Markov chains on a general state space, i.e., state spaces that are not necessarily finite or countable.
\section{Markov Chain Monte Carlo}
This Section is under \work.
The methods described so far are generally unsuitable for complex multivariate or multi-dimensional distributions. One way to generate from such distributions is based on the simple idea that if a Markov chain is designed with a desired distribution as its stationary distribution, the states of the stationary chain will provide the required sample values. This approach is known as {\it Markov Chain Monte Carlo (MCMC)}.
A distribution with density/mass function $f$ is a {\it stationary distribution} of a Markov chain if $X^{(t)}\sim f$ implies $X^{(t+1)}\sim f$.
To generate from a desired distribution using MCMC, the Markov chain must have the following properties:
\begin{asparaenum}[(a)]
\item The state space of the Markov chain must coincide with the support of the desired distribution.
\item {\it Irreducible}: The Markov chain must be free to move over the entire state space.
\item {\it Harris recurrent}: The Markov chain must not get stuck in any subset of the state space.
\item {\it Positive}: The Markov chain must converge to a unique stationary distribution regardless of the starting state.
\item {\it Aperiodic}: The Markov chain must not exhibit any deterministic pattern of movement.
\end{asparaenum}
\begin{definition}[Ergodic Markov Chain]
An {\it ergodic} Markov chain is one that is irreducible, positive, Harris recurrent and {\it aperiodic}.
\end{definition}
%\begin{flushright} $\boxbox$ \end{flushright}
MCMC is based on the observation that the state of an {\it ergodic} Markov chain will eventually converge to a stationary distribution, no matter which state the chain starts in. Thus, to obtain a sample from a desired distribution $f$, an ergodic Markov chain with $f$ as its stationary distribution can be constructed and then run till it is stationary. The required sample values are given by the states of the stationary chain. Let $X^{(0)},X^{(1)},\ldots$ represent a sequence of states for an ergodic Markov chain and suppose that it reaches its stationary distribution $f$ after transition $T$. Then a sample with distribution $f$ is given by $\{X^{(t)}:t>T\}$. Note that unlike sample points given by the previous methods, which are independent, the sample points given by MCMC are {\it dependent} because they are states from a Markov chain.
Generic algorithms that allow an ergodic Markov chain with a specified stationary distribution to be constructed easily are available. Many of these algorithms can be regarded as variants of the {\it Metropolis-Hastings algorithm}.
\begin{algorithm}%WORK rewrite
\caption{Metropolis-Hastings Sampler}
\label{A:MHSampler}
\begin{algorithmic}[1]
\STATE {
{\it input:}
\begin{itemize}
\item[(1)] shape of a target density $\tilde{f}(x) = \left({\int \tilde{f}(x)dx}\right) f(x)$,
\item[(2)] a {\it transition kernel}, $q(y|x)$.
\end{itemize}
}
\STATE {\it output:} a sequence of samples $x_0, x_1, \ldots$ from the Markov chain $\{X^{(t)}\}_{t \in \Zz_+}$ with stationary distribution $f$
\STATE Choose initial state $X^{(0)}$ and {\it proposal distribution} $g$.
\REPEAT
\STATE At iteration $t$,
\STATE Generate $\tilde{X}\sim g(\,\cdot\,|X^{(t-1)})$ and $U\sim U(0,1)$,
\STATE Compute {\it acceptance probability}
\begin{equation}
\alpha=\min\left\{1,\frac{f(\tilde{X})\,g(X^{(t-1)}|\tilde{X})}{f(X^{(t-1)})\,g(\tilde{X}|X^{(t-1)})} \right\},
\end{equation}
\STATE
{\bf If} $U \leq \alpha$
{\bf then} $X^{(t)} \gets \tilde{X}$, %(accept $\tilde{X}$ with probability $\alpha$)
{\bf else} $X^{(t)} \gets X^{(t-1)}$
\UNTIL the desired number of samples has been obtained from $\{X^{(t)}\}_{t \in \Zz_+}$
\end{algorithmic}
\end{algorithm}
\begin{definition}[Transition Kernel]
The transitions of a Markov chain are governed by a conditional density/mass function known as the {\it transition kernel}, $q(y|x)$. For a discrete state space:
\begin{equation}
q(y|x)=P(X^{(t+1)}=y|X^{(t)}=x),
\end{equation}
while for a continuous state space:
\begin{equation}
\int_Aq(y|x)dy=P(X^{(t+1)}\in A|X^{(t)}=x),
\end{equation}
for any subset $A$ of the state space.
\end{definition}
%\begin{flushright} $\boxbox$ \end{flushright}
\begin{prop}If a Markov chain with transition kernel $q(y|x)$ satisfies the {\it detailed balance condition}:
\begin{equation}
q(x|y)f(y)=q(y|x)f(x),
\end{equation}
where $f$ is a density/mass function, then $f$ is a stationary distribution of the chain.
\begin{proof}
Let $S$ be the state space of the Markov chain and let $A\subset S$. Suppose that $X^{(t)}\sim f$. Then:
\begin{displaymath}
\begin{split}
P(X^{(t+1)}\in A)&=\int_S P(X^{(t+1)}\in A, X^{(t)}=y)\,dy\\
&=\int_S P(X^{(t+1)}\in A\,|\,X^{(t)}=y)f(y)\,dy\\
&=\int_S\int_A q(x|y)f(y)dxdy\\
&=\int_S\int_A q(y|x)f(x)dxdy\\
&=\int_Af(x)dx.
\end{split}
\end{displaymath}
Therefore, $X^{(t+1)}\sim f$ and so $f$ is a stationary distribution.
\end{proof}
\end{prop}
\begin{prop}
In the Metropolis-Hastings algorithm, if the support of the proposal distribution is at least as large as the support of $f$, then the algorithm produces a Markov chain that has a stationary distribution $f$.
\begin{proof}
Let
$$\alpha(x,y)=\min\left\{ 1,\frac{f(y)g(x|y)}{f(x)g(y|x)}\right\}.$$
The transition kernel of the Metropolis-Hastings chain is:
\begin{equation}
q(y|x)=\alpha(x,y)g(y|x)+[1-\beta(x)]\delta_x(y),
\end{equation}
where:
\begin{equation}
\beta(x)=\int_S\alpha(x,y)g(y|x)dy,
\end{equation}
and $\delta_x(\cdotp)$ is the Dirac delta function at $x$.
It is enough to show that the Metropolis-Hastings chain satisfies the detailed balance condition with $f$, i.e. that $q(y|x)f(x)=q(x|y)f(y)$. This follows from:
$$\alpha(x,y)g(y|x)f(x)=\min\{f(x)g(y|x),f(y)g(x|y)\}=\alpha(y,x)g(x|y)f(y),$$
and:
$$[1-\beta(x)]\delta_x(y)f(x)=[1-\beta(y)]\delta_y(x)f(y).$$
\end{proof}
\end{prop}
Since $f$ is used only to compute the acceptance probability, and appears both in the numerator and denominator, the algorithm is applicable even if $f$ is not known completely but only up to a multiplicative constant. This is frequently the case in practice, where $f$ is available as an un-normalised distribution. Some common variants of the Metropolis-Hastings algorithm include the {\it Metropolis sampler, independent Metropolis-Hastings sampler, single-component Metropolis-Hastings sampler} and {\it Gibbs sampler}.
The {\it Metropolis sampler} is obtained when a symmetric proposal distribution is used, i.e. $g(\tilde{X}|X^{(t-1)})=g(X^{(t-1)}|\tilde{X})$. In this case, the acceptance probability simplifies to:
\begin{equation}
\alpha=\min\left\{1,\frac{f(\tilde{X})}{f(X^{(t-1)})}\right\}
\end{equation}
A special case where $g(\tilde{X}|X^{(t-1)})=g(|\tilde{X}-X^{(t-1)}|)$ is known as the {\it random walk Metropolis-Hastings (RWMH) sampler}.
\begin{example}[{\tt rwmh\_vonMises\_uniform}]
The von Mises density with the location parameter $a\in[-\pi,\pi]$ and a scale parameter $b > 0$ is given by:
\begin{equation}
f(x)=\frac{e^{b\cos(x-a)}}{2\pi I_0(b)},
\end{equation}
for $x\in[-\pi ,\pi]$, and where $I_0$ is the modified Bessel function of the first kind and order zero. Implement the RWMH sampler for generating from the von Mises density with $a = 0$ and $b = 3$ by using the $U(-1, 1)$ density to generate steps in the random walk, i.e. $g(\cdot|x)=U(x-1,x+1)$.
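Since the random-walk proposal is symmetric, the acceptance probability reduces to the Metropolis ratio of the target density at the proposed and current states; the normalising constant $2\pi I_0(b)$ cancels, and with $a=0$:
\begin{equation}
\alpha=\min\left\{1,\frac{f(\tilde{X})}{f(X^{(t-1)})}\right\}
=\min\left\{1,\exp\left[b\left(\cos\tilde{X}-\cos X^{(t-1)}\right)\right]\right\},
\end{equation}
which is exactly the quantity computed inside the loop below.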
Matlab code: For $m = 1000$ iterations of the RWMH sampler:
\begin{VrbM}
b = 3;
m = 1000;
x = ones(1,m); % allocate storage and initialise to 1
for k = 2:m
y = x(k-1) + unifrnd(-1,1); % unifrnd(a,b) is the Matlab function for generating
% U(a,b) random variables
alpha = min(1,exp(b * (cos(y) - cos(x(k-1)))));
if rand < alpha
x(k) = y;
else
x(k) = x(k-1);
end % if
end % for
\end{VrbM}
\end{example}
When the proposal distribution is independent of $X^{(t-1)}$, the {\it independent Metropolis-Hastings (IMH) sampler} is obtained, with the acceptance probability given by:
\begin{equation}
\alpha=\min\left\{ 1,\frac{f(\tilde{X})g(X^{(t-1)})}{f(X^{(t-1)})g(\tilde{X})}\right\}
\end{equation}
This algorithm usually works well if $g$ is close to $f$ and has heavier tails than $f$.
\begin{example}
Consider the log-normal distribution whose density is:
\begin{equation}
f(x)=\frac{1}{x\sqrt{2\pi}}\exp\left\{-\frac{(\log x)^2}{2} \right\},
\end{equation}
for $x\geq 0$. Use the IMH sampler with a gamma distribution proposal to generate from the log-normal distribution.
The gamma density with shape parameter $a > 0$ and scale parameter $b > 0$ is given by:
\begin{equation}
g(x)=\frac{1}{b^a\Gamma(a)}x^{a-1}e^{-x/b},
\end{equation}
for $x \geq 0$. Note that the IMH acceptance probability can be written as:
$$\alpha=\min\left\{1, \frac{f(\tilde{X})/g(\tilde{X})}{f(X^{(t-1)})/g(X^{(t-1)})}\right\},$$
which involves the ratio $f/g$ in both the numerator and denominator, thus making multiplicative constants in $f$ and $g$ irrelevant, so they can be discarded. In other words, it is enough to use:
$$\frac{\tilde{f}(x)}{\tilde{g}(x)}=\frac{\exp[-(\log x)^2/2]}{x^ae^{-x/b}}=\frac{1}{x^a}\exp[\frac{x}{b}-\frac{(\log x)^2}{2}]
$$
to compute $\alpha$.
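Substituting this ratio into the acceptance probability, a proposal $\tilde{X}$ is accepted against the current state $X^{(t-1)}$ with probability
$$\alpha=\min\left\{1,\left(\frac{X^{(t-1)}}{\tilde{X}}\right)^{a}\exp\left[\frac{\tilde{X}-X^{(t-1)}}{b}+\frac{(\log X^{(t-1)})^{2}-(\log \tilde{X})^{2}}{2}\right]\right\},$$
which, with $a=1.5$ and $b=2.5$, is the expression evaluated in the loop below.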
Matlab code: For $m = 1000$ iterations of the IMH sampler using $gamma(1.5, 2.5)$ as proposal distribution:
\begin{VrbM}
m = 1000;
x = 0.5 * ones(1,m); % allocate storage and initialise to 0.5
for k = 2:m
y = gamrnd(1.5,2.5); % gamrnd is the Matlab function for generating gamma
% random variables
alpha = (x(k-1) / y)^1.5 * exp((y - x(k-1)) /2.5 + (log(x(k-1))^2 - log(y)^2) /2);
alpha = min(1,alpha);
if rand < alpha
x(k) = y;
else
x(k) = x(k-1);
end % if
end % for
\end{VrbM}
\end{example}
In general, $X$ may be multivariate. The idea behind the {\it single-component Metropolis-Hastings sampler} is to update $X$ using a series of steps at each iteration, rather than a single step. To do this, $X$ is partitioned into $d$ parts: $X=(X_{[1]},X_{[2]},\ldots,X_{[d]})$. Let $X_{[-j]}$ denote $X$ with the $j^{\textrm{th}}$ part omitted, and suppose that the conditional distributions, $f(x_{[j]}|x_{[-j]})$, are known.
% WORK make into algorithm
\subsection{\alg -- {\it Single-component Metropolis-Hastings sampler.}}
Choose initial state $X^{(0)}$ and proposal distribution $g$.\\
\begin{tabbing}
\=At ite\=ration $t$,\\
\>For j = $1, 2,\ldots, d,$\\
\> \>Generate $\tilde{X}_{[j]}\sim g(x_{[j]}|X^{(t)}_{[1]},\ldots,X^{(t)}_{[j-1]},X^{(t-1)}_{[j]},\ldots,X^{(t-1)}_{[d]})$ and $U_j\sim U(0,1)$ ,\\
\>\>Compute\\
\end{tabbing}
\begin{equation}
\begin{split}
\alpha_j=\min\{1,&\frac{f(\tilde{X}_{[j]}|X^{(t)}_{[1]},\ldots,X^{(t)}_{[j-1]},X^{(t-1)}_{[j+1]},\ldots,X^{(t-1)}_{[d]})}{f(X^{(t-1)}_{[j]}|X^{(t)}_{[1]},\ldots,X^{(t)}_{[j-1]},X^{(t-1)}_{[j+1]},\ldots,X^{(t-1)}_{[d]})}\cdot\\
&\frac{g(X^{(t-1)}_{[j]}|X^{(t)}_{[1]},\ldots,X^{(t)}_{[j-1]},\tilde{X}_{[j]},X^{(t-1)}_{[j+1]},\ldots,X^{(t-1)}_{[d]})}{g(\tilde{X}_{[j]}|X^{(t)}_{[1]},\ldots,X^{(t)}_{[j-1]},X^{(t-1)}_{[j]},X^{(t-1)}_{[j+1]},\ldots,X^{(t-1)}_{[d]})}\}
\end{split}
\end{equation}
\begin{tabbing}
\=If $U_j$\=$\leq\alpha_j$\\
\>\>Set $X^{(t)}_{[j]}=\tilde{X}_{[j]}.$\\(accept $\tilde{X}_{[j]}$ with probability $\alpha_j$)\\
\>Else\>\\
\>\>Set $X^{(t)}_{[j]}=X^{(t-1)}_{[j]}.$\\
\end{tabbing}
%\begin{flushright} $\boxbox$ \end{flushright}
If it is possible to generate from the conditional distributions, $f(x_{[j]}|x_{[-j]})$, then by choosing them as the proposal distributions in the single-component Metropolis-Hastings sampler, the acceptance probabilities are always equal to one, so every proposal is accepted (see the calculation below). The resulting algorithm is known as the {\it Gibbs sampler}, which effectively generates from the conditional distributions.
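Indeed, taking $g(\,\cdot\,|x)=f(\,\cdot\,|x_{[-j]})$ in the single-component acceptance probability, the proposal terms cancel against the target terms:
\begin{equation}
\alpha_j=\min\left\{1,\frac{f(\tilde{X}_{[j]}|X_{[-j]})}{f(X^{(t-1)}_{[j]}|X_{[-j]})}\cdot
\frac{f(X^{(t-1)}_{[j]}|X_{[-j]})}{f(\tilde{X}_{[j]}|X_{[-j]})}\right\}=\min\{1,1\}=1,
\end{equation}
where $X_{[-j]}$ collects the most recently updated values of the other components.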
\subsection{\alg --{\it Gibbs sampler.}}
Choose initial state $X^{(0)}$.
\begin{tabbing}
\=At it\=eration \=$t$,\\
\>\>For $j$\>$ = 1, 2, \ldots, d,$\\
\>\>\>Generate $X^{(t)}_{[j]}\sim f(x_{[j]}|X^{(t)}_{[1]},\ldots,X^{(t)}_{[j-1]},X^{(t-1)}_{[j+1]},\ldots,X^{(t-1)}_{[d]})$.\\
\end{tabbing}
\begin{example}[{\tt gibbs\_example.m.}]
Consider the joint density:
$$f(x,y,z)\propto x^4y^3z^2(1-x-y-z)$$
where $x, y, z > 0$ and $x + y + z < 1$. Let $B(a, b)$ represent a beta distribution with parameters $a$ and $b$. The conditional distributions for $x$, $y$ and $z$ are given by:
\begin{displaymath}
\begin{array}{l}
x|y,z\sim (1-y-z)q, \space q\sim B(5,2),\\
y|x,z\sim (1-x-z)r, \space r\sim B(4,2),\\
z|x,y\sim (1-x-y)s, \space s\sim B(3,2).\\
\end{array}
\end{displaymath}
In other words, the conditional distribution of $x$, given $y$ and $z$, is the same as the distribution of $(1-y-z)q$ where $q$ has a $B(5, 2)$ distribution, and so on. Implement a Gibbs sampler to generate samples from the joint density.
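These conditionals follow directly from the joint density. Fixing $y$ and $z$ and keeping only the factors involving $x$,
\begin{displaymath}
f(x|y,z)\propto x^4(1-x-y-z),\qquad 0<x<1-y-z,
\end{displaymath}
and the substitution $x=(1-y-z)q$ gives a density proportional to $q^4(1-q)$ on $(0,1)$, i.e. $q\sim B(5,2)$; the conditionals for $y$ and $z$ are obtained in the same way.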
\Matlab code: For $m = 1000$ iterations of the Gibbs sampler:
\begin{VrbM}
m = 1000;
x = 0.3 * ones(1,m); % allocate storage and initialise to 0.3
y = x;
z = y;
for k = 2:m
x(k) = (1 - y(k-1) - z(k-1)) * betarnd(5,2); % betarnd is the Matlab function for
% generating beta random variables
y(k) = (1 - x(k) - z(k-1)) * betarnd(4,2);
z(k) = (1 - x(k) - y(k)) * betarnd(3,2);
end
\end{VrbM}
\end{example}
Hybrid combinations of single-component Metropolis-Hastings and Gibbs sampling are possible, with some parts of $X$ updated using Gibbs updates, and others (which cannot be generated from their conditional distributions) using Metropolis-Hastings updates.
In practice, a MCMC sampler is used to generate a long sequence of Markov chain states. After an initial {\it burn-in period}, the states are assumed to have the required stationary distribution, at least approximately. The difficulty in using MCMC is deciding how long the burn-in period should be.
\section{Exercises}
\begin{exercise}
Implement the IMH sampler in Example 2.4.9. Perform 10,000 iterations and plot the outputs sequentially. Comment on the appearance of the plot with regard to convergence to the target density. Plot the density histogram for the last 5000 iterations, and superimpose the target and proposal densities onto it.
\end{exercise}
\begin{exercise}
Implement the Gibbs sampler in Example 2.4.12. Perform 10,000 Gibbs iterations, and plot the sequential outputs for $x$, $y$ and $z$. Comment on the appearance of the plots with regard to convergence to the target density. Obtain a three-dimensional scatter plot of the last 5000 sample points (use the {\tt plot3} function).
\end{exercise}
\begin{exercise}
\begin{asparaenum}[(a)]
\item Generate a sample of size 20 from the $N(0.06, 1)$ distribution.
\item Treat the sample from Part (a) as observations from an $N(\theta, 1)$ distribution. Pretend that you do not know $\theta$ and wish to infer its value using the Bayesian approach. Denoting the sample by $z=\{z_1,\ldots,z_{20}\}$, the posterior density of $\theta$, given $z$, is given by Bayes' theorem as:
$$f(\theta|z)\propto f(z|\theta)f(\theta),$$
where $f(z|\theta)$ is the likelihood function, i.e.:
$$f(z|\theta)\propto \exp[ -\frac{1}{2} \sum^n_{i=1} (z_i-\theta)^2 ],$$
and $f(\theta)$ is a prior density for $\theta$. Choosing the Cauchy$(0,1)$ density as the prior density, the posterior density is therefore:
$$f(\theta|z)\propto \exp [-\frac{1}{2}\sum^n_{i=1}(z_i-\theta)^2] \frac{1}{1+\theta^2}.$$
Implement the IMH sampler for generating from the posterior density, using the Cauchy$(0,1)$ as the proposal density.
\item Use your IMH sampler to generate 1000 values from the posterior distribution. Use the generated values to estimate the mean of the posterior distribution and to obtain an approximate 95\% probability interval for $\theta$.
\end{asparaenum}
\end{exercise}
\begin{exercise}
Suppose $x$ and $y$ have conditional distributions that are exponential distributions restricted to the interval $(0, 5)$. Then:
$$f(x|y)\propto ye^{-xy}\textrm{ and } f(y|x)\propto xe^{-xy}.$$
\begin{asparaenum}[(a)]
\item Implement the Gibbs sampler for generating sample points from the joint distribution $f(x,y)$.
\item Use your Gibbs sampler to generate 5000 sample points from $f(x,y)$. Use appropriate plots of the Markov chain outputs to assess convergence to the target distribution.
\item Obtain a two-dimensional scatter plot of your generated sample points.
\end{asparaenum}
\end{exercise}
|
{"hexsha": "1d04d6d452130b782288d80b2cadd871cadbcb48", "size": 16340, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "matlab/csebook/MCMC.tex", "max_stars_repo_name": "raazesh-sainudiin/computational-statistical-experiments", "max_stars_repo_head_hexsha": "edb33db9a05b32645e8337c03729c0b8d02fa728", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-02-19T07:54:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-14T13:55:18.000Z", "max_issues_repo_path": "matlab/csebook/MCMC.tex", "max_issues_repo_name": "raazesh-sainudiin/computational-statistical-experiments", "max_issues_repo_head_hexsha": "edb33db9a05b32645e8337c03729c0b8d02fa728", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "matlab/csebook/MCMC.tex", "max_forks_repo_name": "raazesh-sainudiin/computational-statistical-experiments", "max_forks_repo_head_hexsha": "edb33db9a05b32645e8337c03729c0b8d02fa728", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2018-07-18T07:47:52.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-19T11:28:24.000Z", "avg_line_length": 49.9694189602, "max_line_length": 850, "alphanum_fraction": 0.688127295, "num_tokens": 5351}
|
from itertools import chain
from itertools import combinations
import numpy
def powerset(iterable, min_size):
s = list(iterable)
return chain.from_iterable(combinations(s, r)
for r in range(min_size, len(s) + 1))
def reduced_combinations(candidate, numbers):
max_pos = numpy.searchsorted(numbers, candidate) + 1
reduced_numbers = numbers[:max_pos]
min_size = numpy.searchsorted(numpy.cumsum(numbers[::-1]), candidate) + 1
return powerset(reduced_numbers, min_size=min_size)
def numbers_to_positive_negative(numbers):
numbers = numpy.array(numbers)
split = numpy.searchsorted(numbers, 0)
positive = numbers[split:]
negative = numbers[:split]
# Rearrange negative numbers
negative = abs(negative[::-1])
return positive, negative
def two_sum_zero(numbers):
    if not numbers:
        return False
    positive, negative = numbers_to_positive_negative(numbers)
    # Return True if 0 is in array
    if len(positive) and positive[0] == 0:
        return True
    # Two-pointer scan over the ascending positive values and the ascending
    # absolute values of the negatives: advance the pointer at the smaller
    # value until a pair summing to zero is found.
    p = n = 0
    while p < len(positive) and n < len(negative):
        if positive[p] > negative[n]:
            n += 1
        elif positive[p] < negative[n]:
            p += 1
        else:
            return True
    return False
def has_subset_sum_zero(numbers):
positive, negative = numbers_to_positive_negative(numbers)
# Return True if 0 is in array
    if len(positive) and positive[0] == 0:
return True
# Differenciate by size
if len(positive) > len(negative):
bigger = positive
smaller = negative
else:
smaller = positive
bigger = negative
# Try to reduce a bit the necessary combinations
for c in powerset(smaller, min_size=1):
candidate = sum(c)
for slice_combination in reduced_combinations(candidate, bigger):
if candidate == sum(slice_combination):
return True
return False
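# Minimal usage sketch (added illustration, not part of the original file).
# Both helpers assume the input list is already sorted in ascending order,
# since numbers_to_positive_negative relies on numpy.searchsorted.
if __name__ == "__main__":
    print(two_sum_zero([-3, -1, 2, 3]))          # True: -3 + 3 == 0
    print(has_subset_sum_zero([-5, -1, 2, 4]))   # True: -5 - 1 + 2 + 4 == 0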
|
{"hexsha": "95d98a790179198a6c9fd43e09193b7d12c12846", "size": 1957, "ext": "py", "lang": "Python", "max_stars_repo_path": "201705/peque/main.py", "max_stars_repo_name": "rocasan/vigotech-reto", "max_stars_repo_head_hexsha": "6b45dacb80139121ae6204cc614db8e4441fc444", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "201705/peque/main.py", "max_issues_repo_name": "rocasan/vigotech-reto", "max_issues_repo_head_hexsha": "6b45dacb80139121ae6204cc614db8e4441fc444", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2017-10-23T17:31:59.000Z", "max_issues_repo_issues_event_max_datetime": "2017-10-30T21:08:23.000Z", "max_forks_repo_path": "201705/peque/main.py", "max_forks_repo_name": "rocasan/vigotech-reto", "max_forks_repo_head_hexsha": "6b45dacb80139121ae6204cc614db8e4441fc444", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2017-10-10T15:42:24.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-25T09:34:37.000Z", "avg_line_length": 28.3623188406, "max_line_length": 77, "alphanum_fraction": 0.638221768, "include": true, "reason": "import numpy", "num_tokens": 439}
|
"""Module including useful functions relative to rigid motion.
Functions:
augment_matrix_coord: returns augmented vector
get_rotation_mat_single_axis: computes the rotation matrix around the specified axis (x, y or z)
get_rigid_motion_mat_from_euler: computes 4X4 rigid transformation matrix, from the specified sequence of Euler/Cardan angles.
transform_rigid_motion: applies rigid transformation matrix to a given vector
get_euler_zxy: retrieves the ZXY Cardan sequence of angles from a given rigid transformation matrix.
"""
#### PYTHON MODULES
import numpy as np
def augment_matrix_coord(array):
n = len(array)
return np.concatenate((array, np.ones((n,1))), axis = 1).T
def get_rotation_mat_single_axis( axis, angle ):
"""It computes the 3X3 rotation matrix relative to a single rotation of angle(rad)
about the axis(string 'x', 'y', 'z') for a righr handed CS"""
if axis == 'x' : return np.array(([1,0,0],[0, np.cos(angle), -np.sin(angle)],[0, np.sin(angle), np.cos(angle)]))
if axis == 'y' : return np.array(([np.cos(angle),0,np.sin(angle)],[0, 1, 0],[-np.sin(angle), 0, np.cos(angle)]))
if axis == 'z' : return np.array(([np.cos(angle),-np.sin(angle),0],[np.sin(angle), np.cos(angle), 0],[0, 0, 1]))
def get_rigid_motion_mat_from_euler( alpha, axis_1, beta, axis_2, gamma, axis_3, t_x, t_y, t_z ):
"""It computes the 4X4 rigid motion matrix given a sequence of 3 Euler angles about the 3 axes 1,2,3
and the translation vector t_x, t_y, t_z"""
rot1 = get_rotation_mat_single_axis( axis_1, alpha )
rot2 = get_rotation_mat_single_axis( axis_2, beta )
rot3 = get_rotation_mat_single_axis( axis_3, gamma )
rot_mat = np.dot(rot1, np.dot(rot2,rot3))
t = np.array(([t_x], [t_y], [t_z]))
output = np.concatenate((rot_mat, t), axis = 1)
return np.concatenate((output, np.array([[0.,0.,0.,1.]])), axis = 0)
def transform_rigid_motion( v, alpha, axis_1, beta, axis_2, gamma, axis_3, t_x, t_y, t_z ):
"""It transforms the NX3 array of points v according to the rigid transformation given by
the sequence of 3 Euler angles about the 3 axes 1,2,3 and the translation vector t_x, t_y, t_z
It returns a NX3 array of transformed points
"""
rigid_motion_matrix = get_rigid_motion_mat_from_euler( alpha, axis_1, beta, axis_2, gamma, axis_3, t_x, t_y, t_z )
return np.dot(rigid_motion_matrix, augment_matrix_coord(v))[0:3].T
def get_euler_zxy(rotation_matrix):
"""retrieves ZXY Cardan sequene of angles from given rigid transformation matrix."""
if (rotation_matrix[2,1] != 1. and rotation_matrix[2,1] != -1.):
euler_x_1 = np.arcsin(rotation_matrix[2,1])
euler_y_1 = - np.arctan2( rotation_matrix[2,0]/np.cos(euler_x_1), rotation_matrix[2,2]/np.cos(euler_x_1))
euler_z_1 = - np.arctan2( rotation_matrix[0,1]/np.cos(euler_x_1), rotation_matrix[1,1]/np.cos(euler_x_1))
euler_x_2 = np.pi - np.arcsin(rotation_matrix[2,1])
        euler_y_2 = - np.arctan2( rotation_matrix[2,0]/np.cos(euler_x_2), rotation_matrix[2,2]/np.cos(euler_x_2))
        euler_z_2 = - np.arctan2( rotation_matrix[0,1]/np.cos(euler_x_2), rotation_matrix[1,1]/np.cos(euler_x_2))
        euler_1 = [np.rad2deg(euler_z_1), np.rad2deg(euler_x_1), np.rad2deg(euler_y_1)]
        euler_2 = [np.rad2deg(euler_z_2), np.rad2deg(euler_x_2), np.rad2deg(euler_y_2)]
    else:
        # gimbal lock: the X angle is +-90 deg, only the sum/difference of the Z and Y angles is defined
        print('Gimbal Lock occurred')
        euler_1, euler_2 = None, None
    return euler_1, euler_2
if __name__ == "__main__":
rotation_matrix = np.array([[0.9923, -0.1226, 0.0170],
[0.0112, -0.0483, -0.9988],
[0.1233, 0.9913, -0.0465]])
e1, e2 = get_euler_zxy(rotation_matrix)
print(e1)
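    # additional sketch (illustrative values, not from the original module):
    # rotating the point (1, 0, 0) by pi/2 about the z axis with a translation of
    # (0, 0, 10) should yield approximately (0, 1, 10)
    points = np.array([[1., 0., 0.]])
    moved = transform_rigid_motion(points, np.pi / 2, 'z', 0., 'x', 0., 'y', 0., 0., 10.)
    print(moved)  # approximately [[0. 1. 10.]]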
|
{"hexsha": "6c9e318ecd4995c00046b2f500dd833a917bdbe5", "size": 3734, "ext": "py", "lang": "Python", "max_stars_repo_path": "3D_2D_Registration/modules/RigidMotionModule.py", "max_stars_repo_name": "lekooooook/Opt-GO-3D-2D-Registration", "max_stars_repo_head_hexsha": "b12f22d99ed1d8a44663c8baf5d18f0bc563388c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 20, "max_stars_repo_stars_event_min_datetime": "2020-05-08T05:10:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-29T13:40:24.000Z", "max_issues_repo_path": "3D_2D_Registration/modules/RigidMotionModule.py", "max_issues_repo_name": "FweiGao/Opt-GO-3D-2D-Registration", "max_issues_repo_head_hexsha": "b12f22d99ed1d8a44663c8baf5d18f0bc563388c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-05-24T13:03:42.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-06T08:24:47.000Z", "max_forks_repo_path": "3D_2D_Registration/modules/RigidMotionModule.py", "max_forks_repo_name": "FweiGao/Opt-GO-3D-2D-Registration", "max_forks_repo_head_hexsha": "b12f22d99ed1d8a44663c8baf5d18f0bc563388c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2020-10-05T09:08:15.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-29T13:40:27.000Z", "avg_line_length": 37.7171717172, "max_line_length": 133, "alphanum_fraction": 0.6764863417, "include": true, "reason": "import numpy", "num_tokens": 1156}
|
#!/usr/bin/env python -W ignore
import numpy as np
# Leslie matrix
L = np.array([
[ 0, 6, 15],
[0.5, 0, 0],
[ 0, 0.5, 0]
])
# initial population distribution
x0 = np.array([1, 1, 1])
# the distribution after t = 5 steps is computed as L^t * x0
t = 5
xt = np.dot(np.linalg.matrix_power(L, t), x0)
print(f'x({t}) = {xt}') # x(5) = [239.625 55.125 16.6875]
# find the eigenvalues and eigenvectors
_lambda, x = np.linalg.eig(L)
# find the largest positive eigenvalue
lambda_L = np.max(_lambda)
print(f'lambda_L = {np.float_(lambda_L)}') # lambda_L = 2.173738392044369
# find the eigenvector corresponding to it
x_L = x[:, _lambda == lambda_L].T[0]
# normalize this vector in the || ||_1 norm
x_L /= np.linalg.norm(x_L, 1)
print(f'x_L = {np.abs(np.float_(x_L))}') # x_L = [0.77946759 0.17929195 0.04124046]
# find the minimal t such that ||x(t)||_1 > 75
t = 0
while np.sum(x0) <= 75:
x0 = np.dot(L, x0)
t += 1
print(f't = {t}, X({t}) = {np.sum(x0)}') # t = 3, X(3) = 77.25
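# sanity check (not part of the original task): after many steps the normalized
# age distribution should approach the dominant eigenvector x_L (Perron-Frobenius)
x_many = np.dot(np.linalg.matrix_power(L, 50), np.array([1, 1, 1]))
print(f'x(50) normalized = {x_many / np.linalg.norm(x_many, 1)}')  # close to x_L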
|
{"hexsha": "2e192d4510eba10bb5f53622e949de438673580e", "size": 986, "ext": "py", "lang": "Python", "max_stars_repo_path": "labs/examples/1/py/2.py", "max_stars_repo_name": "Sky-Nik/ecology-and-economics", "max_stars_repo_head_hexsha": "25dfc7bccc9b8c430eb31729805f75daebda24d0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-04-28T08:45:54.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-28T08:45:54.000Z", "max_issues_repo_path": "labs/examples/1/py/2.py", "max_issues_repo_name": "Sky-Nik/ecology-and-economics", "max_issues_repo_head_hexsha": "25dfc7bccc9b8c430eb31729805f75daebda24d0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "labs/examples/1/py/2.py", "max_forks_repo_name": "Sky-Nik/ecology-and-economics", "max_forks_repo_head_hexsha": "25dfc7bccc9b8c430eb31729805f75daebda24d0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-06-16T01:05:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-16T01:05:09.000Z", "avg_line_length": 24.65, "max_line_length": 84, "alphanum_fraction": 0.6156186613, "include": true, "reason": "import numpy", "num_tokens": 493}
|
using Test;
using LinearAlgebra;
using SparseArrays;
using ParamLevelSet;
using Random;
n = 10;
B = getSpMatBuilder(Int64,Float64,n, n, 30);
for k=1:10
setNext!(B,k,k,1.0);
end
II = getSparseMatrix(B);
@test norm(II - SparseMatrixCSC(1.0I,n,n)) < 1e-14
B.V.*=100.0;
reset!(B);
for i=1:10
JJ = randperm(n)[1:5];
for j = 1:5
setNext!(B,i,JJ[j],1.0);
end
end
@test sum(B.V[:]) == 50;
II = getSparseMatrix(B);
IIT = getSparseMatrixTransposed(B);
@test norm(II - IIT') < 1e-14
|
{"hexsha": "523f7aa725b9f9b487a307b3c33b7359056efbcb", "size": 483, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/testSpMatBuilder.jl", "max_stars_repo_name": "JuliaInv/ParamLevelSet.jl", "max_stars_repo_head_hexsha": "d6bd68665080aeb0718e97cc873b2a5e234bd4f8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-09-26T18:19:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-16T19:09:49.000Z", "max_issues_repo_path": "test/testSpMatBuilder.jl", "max_issues_repo_name": "JuliaInv/ParamLevelSet.jl", "max_issues_repo_head_hexsha": "d6bd68665080aeb0718e97cc873b2a5e234bd4f8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/testSpMatBuilder.jl", "max_forks_repo_name": "JuliaInv/ParamLevelSet.jl", "max_forks_repo_head_hexsha": "d6bd68665080aeb0718e97cc873b2a5e234bd4f8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-10-21T15:20:34.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-18T17:55:06.000Z", "avg_line_length": 16.1, "max_line_length": 50, "alphanum_fraction": 0.652173913, "num_tokens": 192}
|
import sys
IN_NOSETESTS = False
if sys.argv and sys.argv[0].endswith('nosetests'): # pragma: no cover
IN_NOSETESTS = True
import warnings
import re
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
                        module=r'^{0}\.'.format(re.escape(__name__)))
# Only show ImportWarning if not running in nosetests
if not IN_NOSETESTS: # pragma: no cover
    warnings.filterwarnings('always', category=ImportWarning,
                            module=r'^{0}\.'.format(re.escape(__name__)))
from .setup_utils import root_version_active, get_config
ROOT_VERSION = root_version_active()
config = get_config()
if config is not None: # pragma: no cover
root_version_at_install = config.get('ROOT_version', ROOT_VERSION)
if ROOT_VERSION != root_version_at_install:
warnings.warn(
"ROOT {0} is currently active but you "
"installed root_numpy against ROOT {1}. "
"Please consider reinstalling root_numpy "
"for this ROOT version.".format(
ROOT_VERSION, root_version_at_install),
RuntimeWarning)
import numpy
numpy_version_at_install = config.get('numpy_version', numpy.__version__)
if numpy.__version__ != numpy_version_at_install:
warnings.warn(
"numpy {0} is currently installed but you "
"installed root_numpy against numpy {1}. "
"Please consider reinstalling root_numpy "
"for this numpy version.".format(
numpy.__version__, numpy_version_at_install),
RuntimeWarning)
del root_version_at_install
del numpy_version_at_install
from ._tree import (
root2array, root2rec,
tree2array, tree2rec,
array2tree, array2root,
list_trees, list_branches,
list_directories, list_structures)
from ._hist import fill_hist, fill_profile, hist2array, array2hist
from ._graph import fill_graph
from ._sample import random_sample
from ._array import array
from ._matrix import matrix
from ._evaluate import evaluate
from ._warnings import RootNumpyUnconvertibleWarning
from ._utils import (
stretch, blockwise_inner_join,
rec2array, stack, dup_idx)
from .info import __version__
__all__ = [
'root2array',
'root2rec',
'tree2array',
'tree2rec',
'array2tree',
'array2root',
'hist2array',
'array2hist',
'fill_hist',
'fill_profile',
'fill_graph',
'random_sample',
'array',
'matrix',
'evaluate',
'list_trees',
'list_branches',
'list_structures',
'list_directories',
'rec2array',
'stack',
'stretch',
'dup_idx',
'blockwise_inner_join',
'RootNumpyUnconvertibleWarning',
]
|
{"hexsha": "a964584dc77e1c4f5fa736f874dfa89b4f57f46e", "size": 2780, "ext": "py", "lang": "Python", "max_stars_repo_path": "root_numpy/__init__.py", "max_stars_repo_name": "douglasdavis/root_numpy", "max_stars_repo_head_hexsha": "d337ed0bae3d59f096bcd4ea427bf3244a9a34be", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 83, "max_stars_repo_stars_event_min_datetime": "2017-04-11T13:46:23.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-29T05:16:12.000Z", "max_issues_repo_path": "root_numpy/__init__.py", "max_issues_repo_name": "ndawe/root_numpy", "max_issues_repo_head_hexsha": "34625988547e8a462cc8e10cba6459e9fa2fa65e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 125, "max_issues_repo_issues_event_min_datetime": "2015-01-05T01:49:26.000Z", "max_issues_repo_issues_event_max_datetime": "2017-03-31T15:37:12.000Z", "max_forks_repo_path": "root_numpy/__init__.py", "max_forks_repo_name": "ndawe/root_numpy", "max_forks_repo_head_hexsha": "34625988547e8a462cc8e10cba6459e9fa2fa65e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 26, "max_forks_repo_forks_event_min_datetime": "2015-01-18T22:54:43.000Z", "max_forks_repo_forks_event_max_datetime": "2017-03-15T17:56:01.000Z", "avg_line_length": 28.6597938144, "max_line_length": 77, "alphanum_fraction": 0.681294964, "include": true, "reason": "import numpy", "num_tokens": 657}
|
''' All DUT alignment functions in space and time are listed here plus additional alignment check functions'''
from __future__ import division
import logging
import sys
import os
from collections import Iterable
import math
import tables as tb
import numpy as np
import scipy
from matplotlib.backends.backend_pdf import PdfPages
from tqdm import tqdm
from beam_telescope_analysis.telescope.telescope import Telescope
from beam_telescope_analysis.tools import analysis_utils
from beam_telescope_analysis.tools import plot_utils
from beam_telescope_analysis.tools import geometry_utils
from beam_telescope_analysis.tools import data_selection
from beam_telescope_analysis.track_analysis import find_tracks, fit_tracks, line_fit_3d, _fit_tracks_kalman_loop
from beam_telescope_analysis.result_analysis import calculate_residuals, histogram_track_angle, get_angles
from beam_telescope_analysis.tools.storage_utils import save_arguments
default_alignment_parameters = ["translation_x", "translation_y", "translation_z", "rotation_alpha", "rotation_beta", "rotation_gamma"]
default_cluster_shapes = [1, 3, 5, 13, 14, 7, 11, 15]
kfa_alignment_descr = np.dtype([('translation_x', np.float64),
('translation_y', np.float64),
('translation_z', np.float64),
('rotation_alpha', np.float64),
('rotation_beta', np.float64),
('rotation_gamma', np.float64),
('translation_x_err', np.float64),
('translation_y_err', np.float64),
('translation_z_err', np.float64),
('rotation_alpha_err', np.float64),
('rotation_beta_err', np.float64),
('rotation_gamma_err', np.float64),
('translation_x_delta', np.float64),
('translation_y_delta', np.float64),
('translation_z_delta', np.float64),
('rotation_alpha_delta', np.float64),
('rotation_beta_delta', np.float64),
('rotation_gamma_delta', np.float64),
('annealing_factor', np.float64)])
@save_arguments
def apply_alignment(telescope_configuration, input_file, output_file=None, local_to_global=True, align_to_beam=False, chunk_size=1000000):
'''Convert local to global coordinates and vice versa.
Note:
-----
This function cannot be easily made faster with multiprocessing since the computation function (apply_alignment_to_chunk) does not
    contribute significantly to the runtime (< 20 %), while the copy overhead for the non-shared memory needed for multiprocessing is higher.
    Also the hard drive I/O can be limiting (30 MB/s read, 20 MB/s write to the same disk).
Parameters
----------
telescope_configuration : string
Filename of the telescope configuration file.
input_file : string
Filename of the input file (merged or tracks file).
output_file : string
Filename of the output file with the converted coordinates (merged or tracks file).
local_to_global : bool
If True, convert from local to global coordinates.
align_to_beam : bool
If True, use telescope alignment to align to the beam (beam along z axis).
chunk_size : uint
Chunk size of the data when reading from file.
Returns
-------
output_file : string
Filename of the output file with new coordinates.
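    Examples
    --------
    A minimal call sketch (file names are placeholders, not taken from this module)::

        apply_alignment(
            telescope_configuration='telescope_prealigned.yaml',
            input_file='Merged.h5',
            local_to_global=False)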
'''
telescope = Telescope(telescope_configuration)
n_duts = len(telescope)
logging.info('=== Apply alignment to %d DUTs ===', n_duts)
if output_file is None:
output_file = os.path.splitext(input_file)[0] + ('_global_coordinates.h5' if local_to_global else '_local_coordinates.h5')
def convert_data(dut, dut_index, node, conv, data):
if isinstance(dut, Telescope):
data['x_dut_%d' % dut_index], data['y_dut_%d' % dut_index], data['z_dut_%d' % dut_index] = conv(
x=data['x_dut_%d' % dut_index],
y=data['y_dut_%d' % dut_index],
z=data['z_dut_%d' % dut_index],
translation_x=dut.translation_x,
translation_y=dut.translation_y,
translation_z=dut.translation_z,
rotation_alpha=dut.rotation_alpha,
rotation_beta=dut.rotation_beta,
rotation_gamma=dut.rotation_gamma)
else:
data['x_dut_%d' % dut_index], data['y_dut_%d' % dut_index], data['z_dut_%d' % dut_index] = conv(
x=data['x_dut_%d' % dut_index],
y=data['y_dut_%d' % dut_index],
z=data['z_dut_%d' % dut_index])
if "Tracks" in node.name:
format_strings = ['offset_{dimension}_dut_{dut_index}']
if "DUT%d" % dut_index in node.name:
format_strings.extend(['offset_{dimension}'])
for format_string in format_strings:
if format_string.format(dimension='x', dut_index=dut_index) in node.dtype.names:
data[format_string.format(dimension='x', dut_index=dut_index)], data[format_string.format(dimension='y', dut_index=dut_index)], data[format_string.format(dimension='z', dut_index=dut_index)] = conv(
x=data[format_string.format(dimension='x', dut_index=dut_index)],
y=data[format_string.format(dimension='y', dut_index=dut_index)],
z=data[format_string.format(dimension='z', dut_index=dut_index)],
translation_x=dut.translation_x,
translation_y=dut.translation_y,
translation_z=dut.translation_z,
rotation_alpha=dut.rotation_alpha,
rotation_beta=dut.rotation_beta,
rotation_gamma=dut.rotation_gamma)
format_strings = ['slope_{dimension}_dut_{dut_index}']
if "DUT%d" % dut_index in node.name:
format_strings.extend(['slope_{dimension}'])
for format_string in format_strings:
if format_string.format(dimension='x', dut_index=dut_index) in node.dtype.names:
data[format_string.format(dimension='x', dut_index=dut_index)], data[format_string.format(dimension='y', dut_index=dut_index)], data[format_string.format(dimension='z', dut_index=dut_index)] = conv(
x=data[format_string.format(dimension='x', dut_index=dut_index)],
y=data[format_string.format(dimension='y', dut_index=dut_index)],
z=data[format_string.format(dimension='z', dut_index=dut_index)],
# no translation for the slopes
translation_x=0.0,
translation_y=0.0,
translation_z=0.0,
rotation_alpha=dut.rotation_alpha,
rotation_beta=dut.rotation_beta,
rotation_gamma=dut.rotation_gamma)
format_strings = ['{dimension}_err_dut_{dut_index}']
for format_string in format_strings:
if format_string.format(dimension='x', dut_index=dut_index) in node.dtype.names:
data[format_string.format(dimension='x', dut_index=dut_index)], data[format_string.format(dimension='y', dut_index=dut_index)], data[format_string.format(dimension='z', dut_index=dut_index)] = np.abs(conv(
x=data[format_string.format(dimension='x', dut_index=dut_index)],
y=data[format_string.format(dimension='y', dut_index=dut_index)],
z=data[format_string.format(dimension='z', dut_index=dut_index)],
# no translation for the errors
translation_x=0.0,
translation_y=0.0,
translation_z=0.0,
rotation_alpha=dut.rotation_alpha,
rotation_beta=dut.rotation_beta,
rotation_gamma=dut.rotation_gamma))
# Looper over the hits of all DUTs of all hit tables in chunks and apply the alignment
with tb.open_file(input_file, mode='r') as in_file_h5:
with tb.open_file(output_file, mode='w') as out_file_h5:
for node in in_file_h5.root: # Loop over potential hit tables in data file
logging.info('== Apply alignment to node %s ==', node.name)
hits_aligned_table = out_file_h5.create_table(
where=out_file_h5.root,
name=node.name,
description=node.dtype,
title=node.title,
filters=tb.Filters(
complib='blosc',
complevel=5,
fletcher32=False))
pbar = tqdm(total=node.shape[0], ncols=80)
for data_chunk, index in analysis_utils.data_aligned_at_events(node, chunk_size=chunk_size): # Loop over the hits
for dut_index, dut in enumerate(telescope): # Loop over the DUTs
if local_to_global:
conv = dut.local_to_global_position
else:
conv = dut.global_to_local_position
if align_to_beam and not local_to_global:
convert_data(dut=telescope, dut_index=dut_index, node=node, conv=conv, data=data_chunk)
convert_data(dut=dut, dut_index=dut_index, node=node, conv=conv, data=data_chunk)
if align_to_beam and local_to_global:
convert_data(dut=telescope, dut_index=dut_index, node=node, conv=conv, data=data_chunk)
hits_aligned_table.append(data_chunk)
pbar.update(data_chunk.shape[0])
pbar.close()
return output_file
def prealign(telescope_configuration, input_correlation_file, output_telescope_configuration=None, select_duts=None, select_reference_dut=0, reduce_background=True, use_location=False, plot=True):
    '''Deduce a pre-alignment from the correlations, by fitting the correlations with a straight line (gives offset and slope, but no tilt angles).
The user can define cuts on the fit error and straight line offset in an interactive way.
Parameters
----------
telescope_configuration : string
Filename of the telescope configuration file.
input_correlation_file : string
Filename of the input correlation file.
output_telescope_configuration : string
Filename of the output telescope configuration file.
select_duts : iterable
List of duts for which the prealignment is done. If None, prealignment is done for all duts.
select_reference_dut : uint
DUT index of the reference plane. Default is DUT 0.
reduce_background : bool
If True, use correlation histograms with reduced background (by applying SVD method to the correlation matrix).
plot : bool
If True, create additional output plots.
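    Examples
    --------
    A minimal call sketch (file names are placeholders)::

        prealign(
            telescope_configuration='telescope.yaml',
            input_correlation_file='Correlation.h5',
            select_reference_dut=0)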
'''
telescope = Telescope(telescope_configuration)
n_duts = len(telescope)
logging.info('=== Pre-alignment of %d DUTs ===' % n_duts)
if output_telescope_configuration is None:
output_telescope_configuration = os.path.splitext(telescope_configuration)[0] + '_prealigned.yaml'
elif output_telescope_configuration == telescope_configuration:
raise ValueError('Output telescope configuration file must be different from input telescope configuration file.')
# remove reference DUT from list of all DUTs
if select_duts is None:
select_duts = list(set(range(n_duts)) - set([select_reference_dut]))
else:
select_duts = list(set(select_duts) - set([select_reference_dut]))
if plot is True:
output_pdf = PdfPages(os.path.splitext(input_correlation_file)[0] + '_prealigned.pdf', keep_empty=False)
else:
output_pdf = None
with tb.open_file(input_correlation_file, mode="r") as in_file_h5:
# loop over DUTs for pre-alignment
for actual_dut_index in select_duts:
actual_dut = telescope[actual_dut_index]
logging.info("== Pre-aligning %s ==" % actual_dut.name)
x_global_pixel, y_global_pixel, z_global_pixel = [], [], []
for column in range(1, actual_dut.n_columns + 1):
global_positions = actual_dut.index_to_global_position(
column=[column] * actual_dut.n_rows,
row=range(1, actual_dut.n_rows + 1))
x_global_pixel = np.hstack([x_global_pixel, global_positions[0]])
y_global_pixel = np.hstack([y_global_pixel, global_positions[1]])
z_global_pixel = np.hstack([z_global_pixel, global_positions[2]])
# calculate rotation matrix for later rotation corrections
rotation_alpha = actual_dut.rotation_alpha
rotation_beta = actual_dut.rotation_beta
rotation_gamma = actual_dut.rotation_gamma
R = geometry_utils.rotation_matrix(
alpha=rotation_alpha,
beta=rotation_beta,
gamma=rotation_gamma)
select = None
# loop over x- and y-axis
for x_direction in [True, False]:
if reduce_background:
node = in_file_h5.get_node(in_file_h5.root, 'Correlation_%s_%d_%d_reduced_background' % ('x' if x_direction else 'y', select_reference_dut, actual_dut_index))
else:
node = in_file_h5.get_node(in_file_h5.root, 'Correlation_%s_%d_%d' % ('x' if x_direction else 'y', select_reference_dut, actual_dut_index))
dut_name = actual_dut.name
ref_name = telescope[select_reference_dut].name
pixel_size = actual_dut.column_size if x_direction else actual_dut.row_size
logging.info('Pre-aligning data from %s', node.name)
bin_size = node.attrs.resolution
ref_hist_extent = node.attrs.ref_hist_extent
ref_hist_size = (ref_hist_extent[1] - ref_hist_extent[0])
dut_hist_extent = node.attrs.dut_hist_extent
dut_hist_size = (dut_hist_extent[1] - dut_hist_extent[0])
# retrieve data
data = node[:]
# Calculate the positions on the x axis
dut_pos = np.linspace(start=dut_hist_extent[0] + bin_size / 2.0, stop=dut_hist_extent[1] - bin_size / 2.0, num=data.shape[0], endpoint=True)
# calculate maximum per column
max_select = np.argmax(data, axis=1)
hough_data = np.zeros_like(data)
hough_data[np.arange(data.shape[0]), max_select] = 1
# transpose for correct angle
hough_data = hough_data.T
accumulator, theta, rho, theta_edges, rho_edges = analysis_utils.hough_transform(hough_data, theta_res=0.1, rho_res=1.0, return_edges=True)
def largest_indices(ary, n):
''' Returns the n largest indices from a numpy array.
https://stackoverflow.com/questions/6910641/how-to-get-indices-of-n-maximum-values-in-a-numpy-array
'''
flat = ary.flatten()
indices = np.argpartition(flat, -n)[-n:]
indices = indices[np.argsort(-flat[indices])]
return np.unravel_index(indices, ary.shape)
# finding correlation
# check for non-zero values to improve speed
count_nonzero = np.count_nonzero(accumulator)
indices = np.vstack(largest_indices(accumulator, count_nonzero)).T
for index in indices:
rho_idx, th_idx = index[0], index[1]
rho_val, theta_val = rho[rho_idx], theta[th_idx]
slope_idx, offset_idx = -np.cos(theta_val) / np.sin(theta_val), rho_val / np.sin(theta_val)
slope = slope_idx
offset = offset_idx * bin_size + ref_hist_extent[0] + 0.5 * bin_size
# check for proper slope
if np.isclose(slope, 1.0, rtol=0.0, atol=0.1) or np.isclose(slope, -1.0, rtol=0.0, atol=0.1):
break
else:
raise RuntimeError('Cannot find %s correlation between %s and %s' % ("X" if x_direction else "Y", telescope[select_reference_dut].name, actual_dut.name))
# offset in the center of the pixel matrix
offset_center = offset + slope * (0.5 * dut_hist_size - 0.5 * bin_size)
# calculate offset for local frame
offset_plot = offset - slope * dut_pos[0]
                # find locations where the max. correlation is close to the expected value
x_list = find_inliers(
x=dut_pos[max_select != 0],
y=(max_select[max_select != 0] * bin_size - ref_hist_size / 2.0 + bin_size / 2.0),
m=slope,
c=offset_plot,
threshold=pixel_size * np.sqrt(12) * 2)
# 1-dimensional clustering of calculated locations
kernel = scipy.stats.gaussian_kde(x_list)
densities = kernel(dut_pos)
max_density = np.max(densities)
# calculate indices where value is close to max. density
indices = np.where(densities > max_density * 0.5)
# get locations from indices
x_list = dut_pos[indices]
# calculate range where correlation exists
dut_pos_limit = [np.min(x_list), np.max(x_list)]
plot_utils.plot_hough(
dut_pos=dut_pos,
data=hough_data,
accumulator=accumulator,
offset=offset_plot,
slope=slope,
dut_pos_limit=dut_pos_limit,
theta_edges=theta_edges,
rho_edges=rho_edges,
ref_hist_extent=ref_hist_extent,
dut_hist_extent=dut_hist_extent,
ref_name=ref_name,
dut_name=dut_name,
x_direction=x_direction,
reduce_background=reduce_background,
output_pdf=output_pdf)
if select is None:
                    select = np.ones_like(x_global_pixel, dtype=bool)
if x_direction:
select &= (x_global_pixel >= dut_pos_limit[0]) & (x_global_pixel <= dut_pos_limit[1])
if slope < 0.0:
R = np.linalg.multi_dot([geometry_utils.rotation_matrix_y(beta=np.pi), R])
translation_x = offset_center
else:
select &= (y_global_pixel >= dut_pos_limit[0]) & (y_global_pixel <= dut_pos_limit[1])
if slope < 0.0:
R = np.linalg.multi_dot([geometry_utils.rotation_matrix_x(alpha=np.pi), R])
translation_y = offset_center
# Setting new parameters
# Only use new limits if they are narrower
# Convert from global to local coordinates
local_coordinates = actual_dut.global_to_local_position(
x=x_global_pixel[select],
y=y_global_pixel[select],
z=z_global_pixel[select])
if actual_dut.x_limit is None:
actual_dut.x_limit = (min(local_coordinates[0]), max(local_coordinates[0]))
else:
actual_dut.x_limit = (max((min(local_coordinates[0]), actual_dut.x_limit[0])), min((max(local_coordinates[0]), actual_dut.x_limit[1])))
if actual_dut.y_limit is None:
actual_dut.y_limit = (min(local_coordinates[1]), max(local_coordinates[1]))
else:
actual_dut.y_limit = (max((min(local_coordinates[1]), actual_dut.y_limit[0])), min((max(local_coordinates[1]), actual_dut.y_limit[1])))
# Setting geometry
actual_dut.translation_x = translation_x
actual_dut.translation_y = translation_y
rotation_alpha, rotation_beta, rotation_gamma = geometry_utils.euler_angles(R=R)
actual_dut.rotation_alpha = rotation_alpha
actual_dut.rotation_beta = rotation_beta
actual_dut.rotation_gamma = rotation_gamma
telescope.save_configuration(configuration_file=output_telescope_configuration)
if output_pdf is not None:
output_pdf.close()
return output_telescope_configuration
def find_inliers(x, y, m, c, threshold=1.0):
''' Find inliers.
Parameters
----------
x : list
X coordinates.
y : list
        Y coordinates.
    m : float
        Slope of the line model.
    c : float
        Y-intercept of the line model.
    threshold : float
        Maximum distance of the data points for inlier selection.
    Returns
    -------
    x_list : array
        X coordinates of inliers.
'''
# calculate distance to reference hit
dist = np.abs(m * x + c - y)
sel = dist < threshold
return x[sel]
def find_line_model(points):
""" find a line model for the given points
:param points selected points for model fitting
:return line model
"""
# [WARNING] vertical and horizontal lines should be treated differently
    # here we just add a small epsilon to the denominator to avoid division by zero
# find a line model for these points
m = (points[1, 1] - points[0, 1]) / (points[1, 0] - points[0, 0] + sys.float_info.epsilon) # slope (gradient) of the line
c = points[1, 1] - m * points[1, 0] # y-intercept of the line
return m, c
def find_intercept_point(m, c, x0, y0):
""" find an intercept point of the line model with
a normal from point (x0,y0) to it
:param m slope of the line model
:param c y-intercept of the line model
:param x0 point's x coordinate
:param y0 point's y coordinate
:return intercept point
"""
# intersection point with the model
x = (x0 + m * y0 - m * c) / (1 + m**2)
y = (m * x0 + (m**2) * y0 - (m**2) * c) / (1 + m**2) + c
return x, y
def find_ransac(x, y, iterations=100, threshold=1.0, ratio=0.5):
''' RANSAC implementation
Note
----
Implementation from Alexey Abramov,
https://salzis.wordpress.com/2014/06/10/robust-linear-model-estimation-using-ransac-python-implementation/
Parameters
----------
x : list
X coordinates.
y : list
Y coordinates.
iterations : int
Maximum number of iterations.
threshold : float
Maximum distance of the data points for inlier selection.
ratio : float
        Fraction of inliers at which the iteration stops early.
Returns
-------
model_ratio : float
        Ratio of inliers to the total number of points.
    model_m : float
        Slope.
    model_c : float
        Offset.
    model_x_list : array
        X coordinates of inliers.
    model_y_list : array
        Y coordinates of inliers.
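    Examples
    --------
    A small synthetic sketch (the data below is illustrative, not from this module)::

        x = np.linspace(0., 10., 100)
        y = 2.0 * x + 1.0 + np.random.normal(0., 0.1, size=x.shape)
        ratio, m, c, x_in, y_in = find_ransac(x, y, iterations=100, threshold=0.5)
        # for this almost noise-free line, m and c should come out close to 2.0 and 1.0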
'''
data = np.column_stack((x, y))
n_samples = x.shape[0]
model_ratio = 0.0
model_m = 0.0
model_c = 0.0
# perform RANSAC iterations
for it in range(iterations):
all_indices = np.arange(n_samples)
np.random.shuffle(all_indices)
indices_1 = all_indices[:2] # pick up two random points
indices_2 = all_indices[2:]
maybe_points = data[indices_1, :]
test_points = data[indices_2, :]
# find a line model for these points
m, c = find_line_model(maybe_points)
x_list = []
y_list = []
num = 0
# find orthogonal lines to the model for all testing points
for ind in range(test_points.shape[0]):
x0 = test_points[ind, 0]
y0 = test_points[ind, 1]
# find an intercept point of the model with a normal from point (x0,y0)
x1, y1 = find_intercept_point(m, c, x0, y0)
# distance from point to the model
dist = math.sqrt((x1 - x0)**2 + (y1 - y0)**2)
# check whether it's an inlier or not
if dist < threshold:
x_list.append(x0)
y_list.append(y0)
num += 1
# in case a new model is better - cache it
if num / float(n_samples) > model_ratio:
model_ratio = num / float(n_samples)
model_m = m
model_c = c
model_x_list = np.array(x_list)
model_y_list = np.array(y_list)
# we are done in case we have enough inliers
if num > n_samples * ratio:
break
return model_ratio, model_m, model_c, model_x_list, model_y_list
def align(telescope_configuration, input_merged_file, output_telescope_configuration=None, select_duts=None, alignment_parameters=None, select_telescope_duts=None, select_extrapolation_duts=None, select_fit_duts=None, select_hit_duts=None, max_iterations=3, max_events=None, fit_method='fit', beam_energy=None, particle_mass=None, scattering_planes=None, track_chi2=10.0, cluster_shapes=None, quality_distances=(250.0, 250.0), isolation_distances=(500.0, 500.0), use_limits=True, plot=True, chunk_size=1000000):
''' This function does an alignment of the DUTs and sets translation and rotation values for all DUTs.
The reference DUT defines the global coordinate system position at 0, 0, 0 and should be well in the beam and not heavily rotated.
    To solve the chicken-and-egg problem that a good DUT alignment needs hits belonging to one track, but good track finding needs a good DUT alignment, this
    function works only on already pre-aligned hits belonging to one track. Thus this function can only be called after track finding.
These steps are done
1. Take the found tracks and revert the pre-alignment
2. Take the track hits belonging to one track and fit tracks for all DUTs
3. Calculate the residuals for each DUT
4. Deduce rotations from the residuals and apply them to the hits
5. Deduce the translation of each plane
6. Store and apply the new alignment
repeat step 3 - 6 until the total residual does not decrease (RMS_total = sqrt(RMS_x_1^2 + RMS_y_1^2 + RMS_x_2^2 + RMS_y_2^2 + ...))
Parameters
----------
telescope_configuration : string
Filename of the telescope configuration file.
input_merged_file : string
Filename of the input merged file.
output_telescope_configuration : string
Filename of the output telescope configuration file.
select_duts : iterable or iterable of iterable
        The combination of DUTs that are aligned at once. One should always align the high-resolution planes first.
        E.g. for a telescope (first and last 3 planes) with 2 devices in the center (3, 4):
select_duts=[[0, 1, 2, 5, 6, 7], # align the telescope planes first
[4], # align first DUT
[3]] # align second DUT
alignment_parameters : list of lists of strings
The list of alignment parameters for each align_dut. Valid parameters:
- translation_x: horizontal axis
- translation_y: vertical axis
- translation_z: beam axis
- rotation_alpha: rotation around x-axis
- rotation_beta: rotation around y-axis
- rotation_gamma: rotation around z-axis (beam axis)
        If None, all parameters will be selected.
select_telescope_duts : iterable
The given DUTs will be used to align the telescope along the z-axis.
Usually the coordinates of these DUTs are well specified.
At least 2 DUTs need to be specified. The z-position of the selected DUTs will not be changed by default.
select_extrapolation_duts : list
The given DUTs will be used for track extrapolation for improving track finding efficiency.
In some rare cases, removing DUTs with a coarse resolution might improve track finding efficiency.
If None, select all DUTs.
If list is empty or has a single entry, disable extrapolation (at least 2 DUTs are required for extrapolation to work).
select_fit_duts : iterable or iterable of iterable
        Defines for each select_duts combination which devices to use in the track fit.
E.g. To use only the telescope planes (first and last 3 planes) but not the 2 center devices
select_fit_duts=[0, 1, 2, 5, 6, 7]
select_hit_duts : iterable or iterable of iterable
        Defines for each select_duts combination which devices must have a hit to use the track for fitting. The hit
does not have to be used in the fit itself! This is useful for time reference planes.
E.g. To use telescope planes (first and last 3 planes) + time reference plane (3)
select_hit_duts = [0, 1, 2, 4, 5, 6, 7]
max_iterations : uint
        Maximum number of iterations of the calculate residuals, apply rotation, refit loop; iterate until a constant result is expected.
Usually the procedure converges rather fast (< 5 iterations).
        Non-telescope DUTs usually require 2 iterations.
max_events: uint
        Randomly select max_events for alignment. If None, use all events, which might slow down the alignment.
fit_method : string
Available methods are 'kalman', which uses a Kalman Filter for track calculation, and 'fit', which uses a simple
straight line fit for track calculation.
beam_energy : float
Energy of the beam in MeV, e.g., 2500.0 MeV for ELSA beam. Only used for the Kalman Filter.
particle_mass : float
Mass of the particle in MeV, e.g., 0.511 MeV for electrons. Only used for the Kalman Filter.
scattering_planes : list or dict
Specifies additional scattering planes in case of DUTs which are not used or additional material in the way of the tracks.
The list must contain dictionaries containing the following keys:
material_budget: material budget of the scattering plane
translation_x/translation_y/translation_z: x/y/z position of the plane (in um)
rotation_alpha/rotation_beta/rotation_gamma: alpha/beta/gamma angle of scattering plane (in radians)
        The material budget is defined as the thickness divided by the radiation length.
If scattering_planes is None, no scattering plane will be added.
track_chi2 : float or list
Setting the limit on the track chi^2. If None or 0.0, no cut will be applied.
A smaller value reduces the number of tracks for the alignment.
        A large value increases the number of tracks but at the cost of alignment efficiency because of potentially bad tracks.
A good start value is 5.0 to 10.0 for high energy beams and 15.0 to 50.0 for low energy beams.
cluster_shapes : iterable or iterable of iterables
List of cluster shapes (unsigned integer) for each DUT. Only the selected cluster shapes will be used for the alignment.
Cluster shapes have impact on precision of the alignment. Larger clusters and certain cluster shapes can have a significant uncertainty for the hit position.
If None, use default cluster shapes [1, 3, 5, 13, 14, 7, 11, 15], i.e. 1x1, 2x1, 1x2, 3-pixel cluster, 4-pixel cluster. If empty list, all cluster sizes will be used.
The cluster shape can be calculated with the help of beam_telescope_analysis.tools.analysis_utils.calculate_cluster_array/calculate_cluster_shape.
quality_distances : 2-tuple or list of 2-tuples
X and y distance (in um) for each DUT to calculate the quality flag. The selected track and corresponding hit
must have a smaller distance to have the quality flag to be set to 1.
The purpose of quality_distances is to find good tracks for the alignment.
A good start value is 1-2x the pixel pitch for large pixels and high-energy beams and 5-10x the pixel pitch for small pixels and low-energy beams.
A too small value will remove good tracks, a too large value will allow bad tracks to contribute to the alignment.
If None, set distance to infinite.
isolation_distances : 2-tuple or list of 2-tuples
        X and y distance (in um) for each DUT to calculate the isolated track/hit flag. Any other occurrence of tracks or hits from the same event
        within this distance will prevent the flag from being set.
The purpose of isolation_distances is to find good tracks for the alignment. Hits and tracks which are too close to each other should be removed.
        The value given by isolation_distances should be larger than the quality_distances value to be effective.
        A too small value will remove almost no tracks, a too large value will remove good tracks.
        Removing tracks and hits that are too close to each other also rejects potentially fake tracks (noisy detector / high beam density).
        If None, set distance to 0.
use_limits : bool
If True, use column and row limits from pre-alignment for selecting the data.
plot : bool
If True, create additional output plots.
chunk_size : uint
Chunk size of the data when reading from file.
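    Examples
    --------
    A minimal call sketch for a six-plane telescope with two devices under test
    (file names and DUT indices are placeholders)::

        align(
            telescope_configuration='telescope_prealigned.yaml',
            input_merged_file='Merged.h5',
            select_duts=[[0, 1, 2, 5, 6, 7], [4], [3]],
            select_telescope_duts=[0, 1, 2, 5, 6, 7],
            select_fit_duts=[[0, 1, 2, 5, 6, 7]] * 3,
            select_hit_duts=[[0, 1, 2, 5, 6, 7]] * 3)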
'''
telescope = Telescope(telescope_configuration)
n_duts = len(telescope)
    # Create list with combinations of DUTs to align
    if select_duts is None:  # If None: align all DUTs
        select_duts = list(range(n_duts))
    logging.info('=== Alignment of %d DUTs ===' % len(set(np.unique(np.hstack(select_duts)).tolist())))
# Check for value errors
if not isinstance(select_duts, Iterable):
raise ValueError("Parameter select_duts is not an iterable.")
elif not select_duts: # empty iterable
raise ValueError("Parameter select_duts has no items.")
# Check if only non-iterable in iterable
if all(map(lambda val: not isinstance(val, Iterable), select_duts)):
select_duts = [select_duts]
# Check if only iterable in iterable
if not all(map(lambda val: isinstance(val, Iterable), select_duts)):
raise ValueError("Not all items in parameter select_duts are iterable.")
# Finally check length of all iterables in iterable
for dut in select_duts:
if not dut: # check the length of the items
raise ValueError("Item in parameter select_duts has length 0.")
# Check if some DUTs will not be aligned
    non_select_duts = set(range(n_duts)) - set(np.unique(np.hstack(select_duts)).tolist())
if non_select_duts:
logging.info('These DUTs will not be aligned: %s' % ", ".join(telescope[dut_index].name for dut_index in non_select_duts))
# Create list
if alignment_parameters is None:
alignment_parameters = [[None] * len(duts) for duts in select_duts]
# Check for value errors
if not isinstance(alignment_parameters, Iterable):
raise ValueError("Parameter alignment_parameters is not an iterable.")
elif not alignment_parameters: # empty iterable
raise ValueError("Parameter alignment_parameters has no items.")
# Finally check length of all arrays
if len(alignment_parameters) != len(select_duts): # empty iterable
raise ValueError("Parameter alignment_parameters has the wrong length.")
for index, alignment_parameter in enumerate(alignment_parameters):
if alignment_parameter is None:
alignment_parameters[index] = [None] * len(select_duts[index])
if len(alignment_parameters[index]) != len(select_duts[index]): # check the length of the items
raise ValueError("Item in parameter alignment_parameter has the wrong length.")
# Create track, hit selection
if select_hit_duts is None: # If None: use all DUTs
select_hit_duts = []
# copy each item
for duts in select_duts:
select_hit_duts.append(duts[:]) # require a hit for each fit DUT
# Check iterable and length
if not isinstance(select_hit_duts, Iterable):
raise ValueError("Parameter select_hit_duts is not an iterable.")
elif not select_hit_duts: # empty iterable
raise ValueError("Parameter select_hit_duts has no items.")
# Check if only non-iterable in iterable
if all(map(lambda val: not isinstance(val, Iterable), select_hit_duts)):
select_hit_duts = [select_hit_duts[:] for _ in select_duts]
# Check if only iterable in iterable
if not all(map(lambda val: isinstance(val, Iterable), select_hit_duts)):
raise ValueError("Not all items in parameter select_hit_duts are iterable.")
# Finally check length of all arrays
if len(select_hit_duts) != len(select_duts): # empty iterable
raise ValueError("Parameter select_hit_duts has the wrong length.")
for hit_dut in select_hit_duts:
if len(hit_dut) < 2: # check the length of the items
raise ValueError("Item in parameter select_hit_duts has length < 2.")
# Create track, hit selection
if select_fit_duts is None: # If None: use all DUTs
select_fit_duts = []
# copy each item from select_hit_duts
for hit_duts in select_hit_duts:
select_fit_duts.append(hit_duts[:]) # require a hit for each fit DUT
# Check iterable and length
if not isinstance(select_fit_duts, Iterable):
raise ValueError("Parameter select_fit_duts is not an iterable.")
elif not select_fit_duts: # empty iterable
raise ValueError("Parameter select_fit_duts has no items.")
# Check if only non-iterable in iterable
if all(map(lambda val: not isinstance(val, Iterable), select_fit_duts)):
select_fit_duts = [select_fit_duts[:] for _ in select_duts]
# Check if only iterable in iterable
if not all(map(lambda val: isinstance(val, Iterable), select_fit_duts)):
raise ValueError("Not all items in parameter select_fit_duts are iterable.")
# Finally check length of all arrays
if len(select_fit_duts) != len(select_duts): # empty iterable
raise ValueError("Parameter select_fit_duts has the wrong length.")
for index, fit_dut in enumerate(select_fit_duts):
if len(fit_dut) < 2: # check the length of the items
raise ValueError("Item in parameter select_fit_duts has length < 2.")
if set(fit_dut) - set(select_hit_duts[index]): # fit DUTs are required to have a hit
raise ValueError("DUT in select_fit_duts is not in select_hit_duts.")
# Create chi2 array
if not isinstance(track_chi2, Iterable):
track_chi2 = [track_chi2] * len(select_duts)
# Finally check length
if len(track_chi2) != len(select_duts):
raise ValueError("Parameter track_chi2 has the wrong length.")
# expand dimensions
# Check iterable and length for each item
for index, chi2 in enumerate(track_chi2):
# Check if non-iterable
if not isinstance(chi2, Iterable):
track_chi2[index] = [chi2] * len(select_duts[index])
# again check for consistency
for index, chi2 in enumerate(track_chi2):
# Check iterable and length
if not isinstance(chi2, Iterable):
raise ValueError("Item in parameter track_chi2 is not an iterable.")
if len(chi2) != len(select_duts[index]): # empty iterable
raise ValueError("Item in parameter track_chi2 has the wrong length.")
# Create cluster shape selection
if cluster_shapes is None: # If None: set default value for all DUTs
cluster_shapes = [cluster_shapes] * len(select_duts)
# Check iterable and length
if not isinstance(cluster_shapes, Iterable):
raise ValueError("Parameter cluster_shapes is not an iterable.")
# elif not cluster_shapes: # empty iterable
# raise ValueError("Parameter cluster_shapes has no items.")
# Check if only non-iterable in iterable
if all(map(lambda val: not isinstance(val, Iterable) and val is not None, cluster_shapes)):
cluster_shapes = [cluster_shapes[:] for _ in select_duts]
# Check if only iterable in iterable
if not all(map(lambda val: isinstance(val, Iterable) or val is None, cluster_shapes)):
raise ValueError("Not all items in parameter cluster_shapes are iterable or None.")
# Finally check length of all arrays
if len(cluster_shapes) != len(select_duts): # empty iterable
raise ValueError("Parameter cluster_shapes has the wrong length.")
# expand dimensions
# Check iterable and length for each item
for index, shapes in enumerate(cluster_shapes):
# Check if only non-iterable in iterable
if shapes is None:
cluster_shapes[index] = [shapes] * len(select_duts[index])
elif all(map(lambda val: not isinstance(val, Iterable) and val is not None, shapes)):
cluster_shapes[index] = [shapes[:] for _ in select_duts[index]]
# again check for consistency
for index, shapes in enumerate(cluster_shapes):
# Check iterable and length
if not isinstance(shapes, Iterable):
raise ValueError("Item in parameter cluster_shapes is not an iterable.")
elif not shapes: # empty iterable
raise ValueError("Item in parameter cluster_shapes has no items.")
# Check if only iterable in iterable
if not all(map(lambda val: isinstance(val, Iterable) or val is None, shapes)):
raise ValueError("Not all items of item in cluster_shapes are iterable or None.")
if len(shapes) != len(select_duts[index]): # empty iterable
raise ValueError("Item in parameter cluster_shapes has the wrong length.")
# Create quality distance
if isinstance(quality_distances, tuple) or quality_distances is None:
quality_distances = [quality_distances] * n_duts
# Check iterable and length
if not isinstance(quality_distances, Iterable):
raise ValueError("Parameter quality_distances is not an iterable.")
elif not quality_distances: # empty iterable
raise ValueError("Parameter quality_distances has no items.")
# Finally check length of all arrays
if len(quality_distances) != n_duts: # empty iterable
raise ValueError("Parameter quality_distances has the wrong length.")
# Check if only iterable in iterable
if not all(map(lambda val: isinstance(val, Iterable) or val is None, quality_distances)):
raise ValueError("Not all items in parameter quality_distances are iterable or None.")
# Finally check length of all arrays
for distance in quality_distances:
if distance is not None and len(distance) != 2: # check the length of the items
raise ValueError("Item in parameter quality_distances has length != 2.")
# Create reject quality distance
if isinstance(isolation_distances, tuple) or isolation_distances is None:
isolation_distances = [isolation_distances] * n_duts
# Check iterable and length
if not isinstance(isolation_distances, Iterable):
raise ValueError("Parameter isolation_distances is no iterable.")
elif not isolation_distances: # empty iterable
raise ValueError("Parameter isolation_distances has no items.")
# Finally check length of all arrays
if len(isolation_distances) != n_duts: # empty iterable
raise ValueError("Parameter isolation_distances has the wrong length.")
# Check if only iterable in iterable
if not all(map(lambda val: isinstance(val, Iterable) or val is None, isolation_distances)):
raise ValueError("Not all items in Parameter isolation_distances are iterable or None.")
# Finally check length of all arrays
for distance in isolation_distances:
if distance is not None and len(distance) != 2: # check the length of the items
raise ValueError("Item in parameter isolation_distances has length != 2.")
if not isinstance(max_iterations, Iterable):
max_iterations = [max_iterations] * len(select_duts)
# Finally check length of all arrays
if len(max_iterations) != len(select_duts): # empty iterable
raise ValueError("Parameter max_iterations has the wrong length.")
if not isinstance(max_events, Iterable):
max_events = [max_events] * len(select_duts)
# Finally check length
if len(max_events) != len(select_duts):
raise ValueError("Parameter max_events has the wrong length.")
if output_telescope_configuration is None:
if 'prealigned' in telescope_configuration:
output_telescope_configuration = telescope_configuration.replace('prealigned', 'aligned')
else:
output_telescope_configuration = os.path.splitext(telescope_configuration)[0] + '_aligned.yaml'
elif output_telescope_configuration == telescope_configuration:
raise ValueError('Output telescope configuration file must be different from input telescope configuration file.')
if os.path.isfile(output_telescope_configuration):
logging.info('Output telescope configuration file already exists. Keeping telescope configuration file.')
aligned_telescope = Telescope(configuration_file=output_telescope_configuration)
# For the case where not all DUTs are aligned,
# only revert the alignment for the DUTs that will be aligned.
for align_duts in select_duts:
for dut in align_duts:
aligned_telescope[dut] = telescope[dut]
aligned_telescope.save_configuration()
else:
telescope.save_configuration(configuration_file=output_telescope_configuration)
prealigned_track_candidates_file = os.path.splitext(input_merged_file)[0] + '_track_candidates_prealigned_tmp.h5'
# clean up remaining files
if os.path.isfile(prealigned_track_candidates_file):
os.remove(prealigned_track_candidates_file)
for index, align_duts in enumerate(select_duts):
# Find pre-aligned tracks for the 1st step of the alignment.
# This file can be used for different sets of alignment DUTs,
# so keep the file and remove later.
if not os.path.isfile(prealigned_track_candidates_file):
logging.info('= Alignment step 1: Finding pre-aligned tracks =')
find_tracks(
telescope_configuration=telescope_configuration,
input_merged_file=input_merged_file,
output_track_candidates_file=prealigned_track_candidates_file,
select_extrapolation_duts=select_extrapolation_duts,
align_to_beam=True,
max_events=None)
logging.info('== Aligning %d DUTs: %s ==', len(align_duts), ", ".join(telescope[dut_index].name for dut_index in align_duts))
_duts_alignment(
output_telescope_configuration=output_telescope_configuration, # aligned configuration
merged_file=input_merged_file,
prealigned_track_candidates_file=prealigned_track_candidates_file,
align_duts=align_duts,
alignment_parameters=alignment_parameters[index],
select_telescope_duts=select_telescope_duts,
select_extrapolation_duts=select_extrapolation_duts,
select_fit_duts=select_fit_duts[index],
select_hit_duts=select_hit_duts[index],
max_iterations=max_iterations[index],
max_events=max_events[index],
fit_method=fit_method,
beam_energy=beam_energy,
particle_mass=particle_mass,
scattering_planes=scattering_planes,
track_chi2=track_chi2[index],
cluster_shapes=cluster_shapes[index],
quality_distances=quality_distances,
isolation_distances=isolation_distances,
use_limits=use_limits,
plot=plot,
chunk_size=chunk_size)
if os.path.isfile(prealigned_track_candidates_file):
os.remove(prealigned_track_candidates_file)
return output_telescope_configuration
def align_kalman(telescope_configuration, input_merged_file, output_telescope_configuration=None, output_alignment_file=None, select_duts=None, alignment_parameters=None, select_telescope_duts=None, select_extrapolation_duts=None, select_fit_duts=None, select_hit_duts=None, max_events=None, beam_energy=None, particle_mass=None, scattering_planes=None, track_chi2=10.0, cluster_shapes=None, annealing_factor=10000, annealing_tracks=5000, max_tracks=10000, alignment_parameters_errors=None, use_limits=True, plot=True, chunk_size=1000):
''' This function does an alignment of the DUTs and sets translation and rotation values for all DUTs.
The reference DUT defines the global coordinate system position at 0, 0, 0 and should be well in the beam and not heavily rotated.
    To solve the chicken-and-egg problem that a good DUT alignment needs hits belonging to one track, but good track finding needs a good DUT alignment, this
    function works only on already pre-aligned hits belonging to one track. Thus this function can only be called after track finding.
These steps are done
1. Take the found tracks and revert the pre-alignment
2. Take the track hits belonging to one track and fit tracks for all DUTs
3. Calculate the residuals for each DUT
4. Deduce rotations from the residuals and apply them to the hits
5. Deduce the translation of each plane
6. Store and apply the new alignment
repeat step 3 - 6 until the total residual does not decrease (RMS_total = sqrt(RMS_x_1^2 + RMS_y_1^2 + RMS_x_2^2 + RMS_y_2^2 + ...))
Parameters
----------
telescope_configuration : string
Filename of the telescope configuration file.
input_merged_file : string
Filename of the input merged file.
output_telescope_configuration : string
Filename of the output telescope configuration file.
select_duts : iterable or iterable of iterable
        The combination of DUTs that are aligned at once. One should always align the high-resolution planes first.
        E.g. for a telescope (first and last 3 planes) with 2 devices in the center (3, 4):
select_duts=[[0, 1, 2, 5, 6, 7], # align the telescope planes first
[4], # align first DUT
[3]] # align second DUT
alignment_parameters : list of lists of strings
The list of alignment parameters for each align_dut. Valid parameters:
- translation_x: horizontal axis
- translation_y: vertical axis
- translation_z: beam axis
- rotation_alpha: rotation around x-axis
- rotation_beta: rotation around y-axis
- rotation_gamma: rotation around z-axis (beam axis)
        If None, all parameters will be selected.
select_telescope_duts : iterable
The given DUTs will be used to align the telescope along the z-axis.
Usually the coordinates of these DUTs are well specified.
At least 2 DUTs need to be specified. The z-position of the selected DUTs will not be changed by default.
select_extrapolation_duts : list
The given DUTs will be used for track extrapolation for improving track finding efficiency.
In some rare cases, removing DUTs with a coarse resolution might improve track finding efficiency.
If None, select all DUTs.
If list is empty or has a single entry, disable extrapolation (at least 2 DUTs are required for extrapolation to work).
select_fit_duts : iterable or iterable of iterable
        Defines for each select_duts combination which devices to use in the track fit.
E.g. To use only the telescope planes (first and last 3 planes) but not the 2 center devices
select_fit_duts=[0, 1, 2, 5, 6, 7]
select_hit_duts : iterable or iterable of iterable
        Defines for each select_duts combination which devices must have a hit to use the track for fitting. The hit
does not have to be used in the fit itself! This is useful for time reference planes.
E.g. To use telescope planes (first and last 3 planes) + time reference plane (3)
select_hit_duts = [0, 1, 2, 4, 5, 6, 7]
max_iterations : uint
        Maximum number of iterations of the calculate residuals, apply rotation, refit loop; iterate until a constant result is expected.
Usually the procedure converges rather fast (< 5 iterations).
Non-telescope DUTs usually require 2 itearations.
max_events: uint
Radomly select max_events for alignment. If None, use all events, which might slow down the alignment.
fit_method : string
Available methods are 'kalman', which uses a Kalman Filter for track calculation, and 'fit', which uses a simple
straight line fit for track calculation.
beam_energy : float
Energy of the beam in MeV, e.g., 2500.0 MeV for ELSA beam. Only used for the Kalman Filter.
particle_mass : float
Mass of the particle in MeV, e.g., 0.511 MeV for electrons. Only used for the Kalman Filter.
scattering_planes : list or dict
Specifies additional scattering planes in case of DUTs which are not used or additional material in the way of the tracks.
The list must contain dictionaries containing the following keys:
material_budget: material budget of the scattering plane
translation_x/translation_y/translation_z: x/y/z position of the plane (in um)
rotation_alpha/rotation_beta/rotation_gamma: alpha/beta/gamma angle of scattering plane (in radians)
The material budget is defined as the thickness divided by the radiation length.
If scattering_planes is None, no scattering plane will be added.
track_chi2 : float or list
Setting the limit on the track chi^2. If None or 0.0, no cut will be applied.
A smaller value reduces the number of tracks for the alignment.
A large value increases the number of tracks but at the cost of alignment efficiency because of potentially bad tracks.
A good start value is 5.0 to 10.0 for high energy beams and 15.0 to 50.0 for low energy beams.
cluster_shapes : iterable or iterable of iterables
List of cluster shapes (unsigned integer) for each DUT. Only the selected cluster shapes will be used for the alignment.
Cluster shapes have impact on precision of the alignment. Larger clusters and certain cluster shapes can have a significant uncertainty for the hit position.
If None, use the default cluster shapes [1, 3, 5, 13, 14, 7, 11, 15], i.e. 1x1, 2x1, 1x2, all 3-pixel clusters and the 2x2 (4-pixel) cluster. If an empty list is given, all cluster shapes will be used.
The cluster shape can be calculated with the help of beam_telescope_analysis.tools.analysis_utils.calculate_cluster_array/calculate_cluster_shape.
annealing_factor : float
Initial annealing factor, i.e. the scaling factor that is applied to the hit covariance matrices at the beginning of the alignment to account for the initially unknown misalignment. The factor is reduced as more tracks are processed (deterministic annealing).
annealing_tracks : uint
Number of processed tracks that defines how fast the annealing factor decays.
max_tracks : uint
Maximum number of processed tracks per DUT. The alignment of the current DUT selection stops as soon as every DUT has processed more than max_tracks tracks.
alignment_parameters_errors : iterable
Initial uncertainties of the six alignment parameters (translation_x/y/z in um, rotation_alpha/beta/gamma in rad). They are used to set up the initial covariance matrix of the alignment parameters for the DUTs that are aligned.
use_limits : bool
If True, use column and row limits from pre-alignment for selecting the data.
plot : bool
If True, create additional output plots.
chunk_size : uint
Chunk size of the data when reading from file.
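Example
-------
A minimal, illustrative call; the file names, DUT indices and parameter values below are hypothetical and have to be adapted to the actual telescope setup:
align_kalman(
telescope_configuration='telescope_prealigned.yaml',
input_merged_file='Merged.h5',
select_duts=[[0, 1, 2, 5, 6, 7], [3], [4]],
select_telescope_duts=[0, 7],
select_fit_duts=[[0, 1, 2, 5, 6, 7]] * 3,
select_hit_duts=[[0, 1, 2, 5, 6, 7]] * 3,
beam_energy=2500.0,
particle_mass=0.511,
track_chi2=[10.0, 15.0, 15.0],
alignment_parameters_errors=[50.0, 50.0, 1000.0, 0.02, 0.02, 0.02])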
'''
telescope = Telescope(telescope_configuration)
n_duts = len(telescope)
# Create list with combinations of DUTs to align
if select_duts is None: # If None: align all DUTs
select_duts = list(range(n_duts))
logging.info('=== Alignment of %d DUTs ===' % len(set(np.unique(np.hstack(select_duts)).tolist())))
# Check for value errors
if not isinstance(select_duts, Iterable):
raise ValueError("Parameter select_duts is not an iterable.")
elif not select_duts: # empty iterable
raise ValueError("Parameter select_duts has no items.")
# Check if only non-iterable in iterable
if all(map(lambda val: not isinstance(val, Iterable), select_duts)):
select_duts = [select_duts]
# Check if only iterable in iterable
if not all(map(lambda val: isinstance(val, Iterable), select_duts)):
raise ValueError("Not all items in parameter select_duts are iterable.")
# Finally check length of all iterables in iterable
for dut in select_duts:
if not dut: # check the length of the items
raise ValueError("Item in parameter select_duts has length 0.")
# Check if some DUTs will not be aligned
non_select_duts = set(range(n_duts)) - set(np.unique(np.hstack(select_duts)).tolist())
if non_select_duts:
logging.info('These DUTs will not be aligned: %s' % ", ".join(telescope[dut_index].name for dut_index in non_select_duts))
# Create list
if alignment_parameters is None:
alignment_parameters = [[None] * len(duts) for duts in select_duts]
# Check for value errors
if not isinstance(alignment_parameters, Iterable):
raise ValueError("Parameter alignment_parameters is not an iterable.")
elif not alignment_parameters: # empty iterable
raise ValueError("Parameter alignment_parameters has no items.")
# Finally check length of all arrays
if len(alignment_parameters) != len(select_duts): # empty iterable
raise ValueError("Parameter alignment_parameters has the wrong length.")
# for index, alignment_parameter in enumerate(alignment_parameters):
# if alignment_parameter is None:
# alignment_parameters[index] = [None] * len(select_duts[index])
# if len(alignment_parameters[index]) != len(select_duts[index]): # check the length of the items
# raise ValueError("Item in parameter alignment_parameter has the wrong length.")
# Create track, hit selection
if select_hit_duts is None: # If None: use all DUTs
select_hit_duts = []
# copy each item
for duts in select_duts:
select_hit_duts.append(duts[:]) # require a hit for each fit DUT
# Check iterable and length
if not isinstance(select_hit_duts, Iterable):
raise ValueError("Parameter select_hit_duts is not an iterable.")
elif not select_hit_duts: # empty iterable
raise ValueError("Parameter select_hit_duts has no items.")
# Check if only non-iterable in iterable
if all(map(lambda val: not isinstance(val, Iterable), select_hit_duts)):
select_hit_duts = [select_hit_duts[:] for _ in select_duts]
# Check if only iterable in iterable
if not all(map(lambda val: isinstance(val, Iterable), select_hit_duts)):
raise ValueError("Not all items in parameter select_hit_duts are iterable.")
# Finally check length of all arrays
if len(select_hit_duts) != len(select_duts): # empty iterable
raise ValueError("Parameter select_hit_duts has the wrong length.")
for hit_dut in select_hit_duts:
if len(hit_dut) < 2: # check the length of the items
raise ValueError("Item in parameter select_hit_duts has length < 2.")
# Create track, hit selection
if select_fit_duts is None: # If None: use all DUTs
select_fit_duts = []
# copy each item from select_hit_duts
for hit_duts in select_hit_duts:
select_fit_duts.append(hit_duts[:]) # require a hit for each fit DUT
# Check iterable and length
if not isinstance(select_fit_duts, Iterable):
raise ValueError("Parameter select_fit_duts is not an iterable.")
elif not select_fit_duts: # empty iterable
raise ValueError("Parameter select_fit_duts has no items.")
# Check if only non-iterable in iterable
if all(map(lambda val: not isinstance(val, Iterable), select_fit_duts)):
select_fit_duts = [select_fit_duts[:] for _ in select_duts]
# Check if only iterable in iterable
if not all(map(lambda val: isinstance(val, Iterable), select_fit_duts)):
raise ValueError("Not all items in parameter select_fit_duts are iterable.")
# Finally check length of all arrays
if len(select_fit_duts) != len(select_duts): # empty iterable
raise ValueError("Parameter select_fit_duts has the wrong length.")
for index, fit_dut in enumerate(select_fit_duts):
if len(fit_dut) < 2: # check the length of the items
raise ValueError("Item in parameter select_fit_duts has length < 2.")
if set(fit_dut) - set(select_hit_duts[index]): # fit DUTs are required to have a hit
raise ValueError("DUT in select_fit_duts is not in select_hit_duts.")
# Create cluster shape selection
if cluster_shapes is None: # If None: set default value for all DUTs
cluster_shapes = [cluster_shapes] * len(select_duts)
# Check iterable and length
if not isinstance(cluster_shapes, Iterable):
raise ValueError("Parameter cluster_shapes is not an iterable.")
# elif not cluster_shapes: # empty iterable
# raise ValueError("Parameter cluster_shapes has no items.")
# Check if only non-iterable in iterable
if all(map(lambda val: not isinstance(val, Iterable) and val is not None, cluster_shapes)):
cluster_shapes = [cluster_shapes[:] for _ in select_duts]
# Check if only iterable in iterable
if not all(map(lambda val: isinstance(val, Iterable) or val is None, cluster_shapes)):
raise ValueError("Not all items in parameter cluster_shapes are iterable or None.")
# Finally check length of all arrays
if len(cluster_shapes) != len(select_duts): # empty iterable
raise ValueError("Parameter cluster_shapes has the wrong length.")
# expand dimensions
# Check iterable and length for each item
for index, shapes in enumerate(cluster_shapes):
# Check if only non-iterable in iterable
if shapes is None:
cluster_shapes[index] = [shapes] * len(select_duts[index])
elif all(map(lambda val: not isinstance(val, Iterable) and val is not None, shapes)):
cluster_shapes[index] = [shapes[:] for _ in select_duts[index]]
# again check for consistency
for index, shapes in enumerate(cluster_shapes):
# Check iterable and length
if not isinstance(shapes, Iterable):
raise ValueError("Item in parameter cluster_shapes is not an iterable.")
elif not shapes: # empty iterable
raise ValueError("Item in parameter cluster_shapes has no items.")
# Check if only iterable in iterable
if not all(map(lambda val: isinstance(val, Iterable) or val is None, shapes)):
raise ValueError("Not all items of item in cluster_shapes are iterable or None.")
if len(shapes) != len(select_duts[index]): # empty iterable
raise ValueError("Item in parameter cluster_shapes has the wrong length.")
if not isinstance(max_events, Iterable):
max_events = [max_events] * len(select_duts)
# Finally check length
if len(max_events) != len(select_duts):
raise ValueError("Parameter max_events has the wrong length.")
if not isinstance(max_tracks, Iterable):
max_tracks = [max_tracks] * len(select_duts)
# Finally check length
if len(max_tracks) != len(select_duts):
raise ValueError("Parameter max_tracks has the wrong length.")
if output_telescope_configuration is None:
if 'prealigned' in telescope_configuration:
output_telescope_configuration = telescope_configuration.replace('prealigned', 'aligned_kalman')
else:
output_telescope_configuration = os.path.splitext(telescope_configuration)[0] + '_aligned_kalman.yaml'
elif output_telescope_configuration == telescope_configuration:
raise ValueError('Output telescope configuration file must be different from input telescope configuration file.')
if os.path.isfile(output_telescope_configuration):
logging.info('Output telescope configuration file already exists. Keeping telescope configuration file.')
aligned_telescope = Telescope(configuration_file=output_telescope_configuration)
# For the case where not all DUTs are aligned,
# only revert the alignment for the DUTs that will be aligned.
for align_duts in select_duts:
for dut in align_duts:
aligned_telescope[dut] = telescope[dut]
aligned_telescope.save_configuration()
else:
telescope.save_configuration(configuration_file=output_telescope_configuration)
if output_alignment_file is None:
output_alignment_file = os.path.splitext(input_merged_file)[0] + '_KFA_alignment.h5'
else:
output_alignment_file = output_alignment_file
for index, align_duts in enumerate(select_duts):
# Find pre-aligned tracks for the 1st step of the alignment.
# This file can be used for different sets of alignment DUTs,
# so keep the file and remove later.
prealigned_track_candidates_file = os.path.splitext(input_merged_file)[0] + '_track_candidates_prealigned_%i_tmp.h5' % index
find_tracks(
telescope_configuration=telescope_configuration,
input_merged_file=input_merged_file,
output_track_candidates_file=prealigned_track_candidates_file,
select_extrapolation_duts=select_extrapolation_duts,
align_to_beam=True,
max_events=max_events[index])
logging.info('== Aligning %d DUTs: %s ==', len(align_duts), ", ".join(telescope[dut_index].name for dut_index in align_duts))
_duts_alignment_kalman(
telescope_configuration=output_telescope_configuration, # aligned configuration
output_alignment_file=output_alignment_file,
input_track_candidates_file=prealigned_track_candidates_file,
select_duts=align_duts,
alignment_parameters=alignment_parameters[index],
select_telescope_duts=select_telescope_duts,
select_fit_duts=select_fit_duts[index],
select_hit_duts=select_hit_duts[index],
beam_energy=beam_energy,
particle_mass=particle_mass,
scattering_planes=scattering_planes,
track_chi2=track_chi2[index],
annealing_factor=annealing_factor,
annealing_tracks=annealing_tracks,
max_tracks=max_tracks[index],
alignment_parameters_errors=alignment_parameters_errors,
use_limits=use_limits,
plot=plot,
chunk_size=chunk_size,
iteration_index=index)
return output_telescope_configuration
def _duts_alignment(output_telescope_configuration, merged_file, align_duts, prealigned_track_candidates_file, alignment_parameters, select_telescope_duts, select_extrapolation_duts, select_fit_duts, select_hit_duts, max_iterations, max_events, fit_method, beam_energy, particle_mass, scattering_planes, track_chi2, cluster_shapes, quality_distances, isolation_distances, use_limits, plot=True, chunk_size=100000): # Called for each list of DUTs to align
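'''Helper function running the iterative alignment loop for one group of DUTs (align_duts):
in each iteration tracks are found and fitted, good tracks are selected and the transformation (alignment) of each DUT is recalculated; this is repeated max_iterations times.
'''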
alignment_duts = "_".join(str(dut) for dut in align_duts)
aligned_telescope = Telescope(configuration_file=output_telescope_configuration)
output_track_candidates_file = None
iteration_steps = range(max_iterations)
for iteration_step in iteration_steps:
# aligning telescope DUTs to the beam axis (z-axis)
if set(align_duts) & set(select_telescope_duts):
align_telescope(
telescope_configuration=output_telescope_configuration,
select_telescope_duts=list(set(align_duts) & set(select_telescope_duts)))
actual_align_duts = align_duts
actual_fit_duts = select_fit_duts
# require hits in each DUT that will be aligned
actual_hit_duts = [list(set(select_hit_duts) | set([dut_index])) for dut_index in actual_align_duts]
actual_quality_duts = actual_hit_duts
fit_quality_distances = np.zeros_like(quality_distances)
for index, item in enumerate(quality_distances):
if index in align_duts:
fit_quality_distances[index, 0] = np.linspace(item[0] * 1.08447**max_iterations, item[0], max_iterations)[iteration_step]
fit_quality_distances[index, 1] = np.linspace(item[1] * 1.08447**max_iterations, item[1], max_iterations)[iteration_step]
else:
fit_quality_distances[index, 0] = item[0]
fit_quality_distances[index, 1] = item[1]
fit_quality_distances = fit_quality_distances.tolist()
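# Worked example (values illustrative): with max_iterations = 3 and item = (50., 50.),
# the x/y quality distance of an aligned DUT shrinks over the iterations from
# 50 * 1.08447**3 ~ 63.8 um down to 50 um (np.linspace(63.8, 50.0, 3)).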
if iteration_step > 0:
logging.info('= Alignment step 1 - iteration %d: Finding tracks for %d DUTs =', iteration_step, len(align_duts))
# remove temporary file
if output_track_candidates_file is not None:
os.remove(output_track_candidates_file)
output_track_candidates_file = os.path.splitext(merged_file)[0] + '_track_candidates_aligned_duts_%s_tmp_%d.h5' % (alignment_duts, iteration_step)
find_tracks(
telescope_configuration=output_telescope_configuration,
input_merged_file=merged_file,
output_track_candidates_file=output_track_candidates_file,
select_extrapolation_duts=select_extrapolation_duts,
align_to_beam=True,
max_events=max_events)
# The quality flag of the actual align DUT depends on the alignment calculated
# in the previous iteration, therefore this step has to be done every time
logging.info('= Alignment step 2 - iteration %d: Fitting tracks for %d DUTs =', iteration_step, len(align_duts))
output_tracks_file = os.path.splitext(merged_file)[0] + '_tracks_aligned_duts_%s_tmp_%d.h5' % (alignment_duts, iteration_step)
fit_tracks(
telescope_configuration=output_telescope_configuration,
input_track_candidates_file=prealigned_track_candidates_file if iteration_step == 0 else output_track_candidates_file,
output_tracks_file=output_tracks_file,
max_events=None if iteration_step > 0 else max_events,
select_duts=actual_align_duts,
select_fit_duts=actual_fit_duts,
select_hit_duts=actual_hit_duts,
exclude_dut_hit=False, # for biased residuals
select_align_duts=actual_align_duts, # correct residual offset for align DUTs
method=fit_method,
beam_energy=beam_energy,
particle_mass=particle_mass,
scattering_planes=scattering_planes,
quality_distances=quality_distances,
isolation_distances=isolation_distances,
use_limits=use_limits,
plot=plot,
chunk_size=chunk_size)
logging.info('= Alignment step 3a - iteration %d: Selecting tracks for %d DUTs =', iteration_step, len(align_duts))
output_selected_tracks_file = os.path.splitext(merged_file)[0] + '_tracks_aligned_selected_tracks_duts_%s_tmp_%d.h5' % (alignment_duts, iteration_step)
# generate query for select_tracks
# generate default selection of cluster shapes: 1x1, 2x1, 1x2, 3-pixel cluster, 4-pixel cluster
for index, shapes in enumerate(cluster_shapes):
if shapes is None:
cluster_shapes[index] = default_cluster_shapes
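# Build one selection string per align DUT, combining the chi2 cut and the allowed
# cluster shapes, e.g. (illustrative): '(track_chi_red < 10.000000) & ((cluster_shape_dut_3 == 1) | (cluster_shape_dut_3 == 3) | ...)'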
query_string = [((('(track_chi_red < %f)' % track_chi2[index]) if track_chi2[index] else '') + (' & ' if (track_chi2[index] and cluster_shapes[index]) else '') + (('(' + ' | '.join([('(cluster_shape_dut_{0} == %d)' % cluster_shape) for cluster_shape in cluster_shapes[index]]).format(dut_index) + ')') if cluster_shapes[index] else '')) for index, dut_index in enumerate(actual_align_duts)]
data_selection.select_tracks(
telescope_configuration=output_telescope_configuration,
input_tracks_file=output_tracks_file,
output_tracks_file=output_selected_tracks_file,
select_duts=actual_align_duts,
select_hit_duts=actual_hit_duts,
select_quality_duts=actual_quality_duts,
select_isolated_track_duts=actual_quality_duts,
select_isolated_hit_duts=actual_quality_duts,
query=query_string,
max_events=None,
chunk_size=chunk_size)
# if fit DUTs were aligned, update telescope alignment
if set(align_duts) & set(select_fit_duts):
logging.info('= Alignment step 3b - iteration %d: Aligning telescope =', iteration_step)
output_track_angles_file = os.path.splitext(merged_file)[0] + '_tracks_angles_aligned_selected_tracks_duts_%s_tmp_%d.h5' % (alignment_duts, iteration_step)
histogram_track_angle(
telescope_configuration=output_telescope_configuration,
input_tracks_file=output_selected_tracks_file,
output_track_angle_file=output_track_angles_file,
select_duts=actual_align_duts,
n_bins=100,
plot=plot)
# Read and store beam angle to improve track finding
if (set(align_duts) & set(select_fit_duts)):
with tb.open_file(output_track_angles_file, mode="r") as in_file_h5:
if not np.isnan(in_file_h5.root.Global_alpha_track_angle_hist.attrs.mean) and not np.isnan(in_file_h5.root.Global_beta_track_angle_hist.attrs.mean):
aligned_telescope = Telescope(configuration_file=output_telescope_configuration)
aligned_telescope.rotation_alpha = in_file_h5.root.Global_alpha_track_angle_hist.attrs.mean
aligned_telescope.rotation_beta = in_file_h5.root.Global_beta_track_angle_hist.attrs.mean
aligned_telescope.save_configuration()
else:
logging.warning("Cannot read track angle histograms, track finding might be spoiled")
os.remove(output_track_angles_file)
if plot:
logging.info('= Alignment step 3c - iteration %d: Calculating residuals =', iteration_step)
output_residuals_file = os.path.splitext(merged_file)[0] + '_residuals_aligned_selected_tracks_%s_tmp_%d.h5' % (alignment_duts, iteration_step)
calculate_residuals(
telescope_configuration=output_telescope_configuration,
input_tracks_file=output_selected_tracks_file,
output_residuals_file=output_residuals_file,
select_duts=actual_align_duts,
use_limits=use_limits,
plot=True,
chunk_size=chunk_size)
os.remove(output_residuals_file)
logging.info('= Alignment step 4 - iteration %d: Calculating transformation matrix for %d DUTs =', iteration_step, len(align_duts))
calculate_transformation(
telescope_configuration=output_telescope_configuration,
input_tracks_file=output_selected_tracks_file,
select_duts=actual_align_duts,
select_alignment_parameters=[(["translation_x", "translation_y", "rotation_alpha", "rotation_beta", "rotation_gamma"] if (dut_index in select_telescope_duts and (alignment_parameters is None or alignment_parameters[i] is None)) else (default_alignment_parameters if (alignment_parameters is None or alignment_parameters[i] is None) else alignment_parameters[i])) for i, dut_index in enumerate(actual_align_duts)],
use_limits=use_limits,
max_iterations=100,
chunk_size=chunk_size)
# Delete temporary files
os.remove(output_tracks_file)
os.remove(output_selected_tracks_file)
# Delete temporary files
if output_track_candidates_file is not None:
os.remove(output_track_candidates_file)
def _duts_alignment_kalman(telescope_configuration, output_alignment_file, input_track_candidates_file, alignment_parameters, select_telescope_duts, select_duts=None, select_hit_duts=None, select_fit_duts=None, min_track_hits=None, beam_energy=2500, particle_mass=0.511, scattering_planes=None, track_chi2=25.0, use_limits=True, iteration_index=0, exclude_dut_hit=False, annealing_factor=10000, annealing_tracks=5000, max_tracks=10000, alignment_parameters_errors=None, plot=True, chunk_size=1000):
'''Helper function that performs the Kalman-Filter-based alignment of the selected DUTs using prealigned track candidates.
Tracks are fitted with a Kalman Filter and the alignment parameters of each DUT are updated track by track.
Parameters
----------
telescope_configuration : string
Filename of the telescope configuration file.
input_track_candidates_file : string
Filename of the input track candidate file.
output_alignment_file : string
Filename of the output alignment file (HDF5) that stores the evolution of the alignment parameters and track chi2 values.
select_duts : list
Specify the DUTs that are aligned.
If None, all DUTs are selected.
select_hit_duts : list or list of lists
Specifying DUTs that are required to have a hit for each selected DUT.
If None, no DUT is required to have a hit.
select_fit_duts : list or list of lists
Specifying DUTs that are used for the track fit for each selected DUT.
If None, all DUTs are used for the track fit.
Note: This parameter needs to be set correctly. Usually not all available DUTs should be used for track fitting.
The list usually only contains DUTs, which are part of the telescope.
min_track_hits : uint or list
Minimum number of track hits for each selected DUT.
If None or list item is None, the minimum number of track hits is the length of select_fit_duts.
exclude_dut_hit : bool or list
Decide whether or not to use hits in the actual fit DUT for track fitting (for unconstrained residuals).
If False (default), use all DUTs as specified in select_fit_duts and use them for track fitting if hits are available (potentially constrained residuals).
If True, do not use hits from the actual fit DUT for track fitting, even if specified in select_fit_duts (unconstrained residuals).
beam_energy : float
Energy of the beam in MeV, e.g., 2500.0 MeV for ELSA beam. Only used for the Kalman Filter.
particle_mass : float
Mass of the particle in MeV, e.g., 0.511 MeV for electrons. Only used for the Kalman Filter.
scattering_planes : list of Dut objects
Specifies additional scattering planes in case of DUTs which are not used or additional material in the way of the tracks.
Scattering planes must contain the following attributes:
name: name of the scattering plane
material_budget: material budget of the scattering plane
translation_x/translation_y/translation_z: x/y/z position of the plane (in um)
rotation_alpha/rotation_beta/rotation_gamma: alpha/beta/gamma angle of scattering plane (in radians)
The material budget is defined as the thickness divided by the radiation length.
If scattering_planes is None, no scattering plane will be added. Only available when using the Kalman Filter.
See the example on how to create scattering planes in the example script folder.
annealing_factor : float
Initial annealing factor, i.e. the scaling factor applied to the hit covariance matrices at the beginning of the alignment to account for the initially unknown misalignment.
annealing_tracks : uint
Number of processed tracks that defines how fast the annealing factor decays.
max_tracks : uint
Maximum number of processed tracks per DUT before the alignment is stopped.
alignment_parameters_errors : iterable
Initial uncertainties of the six alignment parameters (translation_x/y/z in um, rotation_alpha/beta/gamma in rad) used to set up the initial covariance matrix.
use_limits : bool
If True, use column and row limits from pre-alignment for selecting the data.
iteration_index : uint
Index of the current alignment step (i.e. the index of the align_duts group in select_duts). The output alignment file is cleaned up only for the first step.
plot : bool
If True, create additional output plots.
chunk_size : uint
Chunk size of the data when reading from file.
Notes
-----
The alignment results (per-track alignment parameters, track chi2 values and p-values) are written to output_alignment_file and the updated alignment is stored in the telescope configuration file.
'''
def _store_alignment_data(alignment_values, n_tracks_processed, chi2s, chi2s_probs, deviation_cuts):
''' Helper function to write alignment data to output file.
'''
# Do not forget to save configuration to .yaml file.
telescope.save_configuration()
# Store alignment results in file
for dut_index, _ in enumerate(telescope):
try: # Check if table exists already, then append data
alignment_table = out_file_h5.get_node('/Alignment_DUT%i' % dut_index)
except tb.NoSuchNodeError: # Table does not exist, thus create new
alignment_table = out_file_h5.create_table(
where=out_file_h5.root,
name='Alignment_DUT%i' % dut_index,
description=alignment_values[dut_index].dtype,
title='Alignment_DUT%i' % dut_index,
filters=tb.Filters(
complib='blosc',
complevel=5,
fletcher32=False))
alignment_table.append(alignment_values[dut_index])
alignment_table.attrs.deviation_cuts = deviation_cuts
alignment_table.attrs.n_tracks_processed = n_tracks_processed[dut_index]
alignment_table.flush()
# Store chi2 values
try: # Check if table exists already, then append data
out_chi2s = out_file_h5.get_node('/TrackChi2')
out_chi2s_probs = out_file_h5.get_node('/TrackpValue')
except tb.NoSuchNodeError: # Table does not exist, thus create new
out_chi2s = out_file_h5.create_earray(
where=out_file_h5.root,
name='TrackChi2',
title='Track Chi2',
atom=tb.Atom.from_dtype(chi2s.dtype),
shape=(0,),
filters=tb.Filters(
complib='blosc',
complevel=5,
fletcher32=False))
out_chi2s_probs = out_file_h5.create_earray(
where=out_file_h5.root,
name='TrackpValue',
title='Track pValue',
atom=tb.Atom.from_dtype(chi2s.dtype),
shape=(0,),
filters=tb.Filters(
complib='blosc',
complevel=5,
fletcher32=False))
out_chi2s.append(chi2s)
out_chi2s.flush()
out_chi2s_probs.append(chi2s_probs)
out_chi2s_probs.flush()
out_chi2s.attrs.max_track_chi2 = track_chi2
def _alignment_loop(actual_align_state, actual_align_cov, initial_rotation_matrix, initial_position_vector):
''' Helper function which loops over track chunks and performs the alignment.
'''
# Init progressbar
n_tracks = in_file_h5.root.TrackCandidates.shape[0]
pbar = tqdm(total=n_tracks, ncols=80)
# Number of processed tracks for every DUT
n_tracks_processed = np.zeros(shape=(len(telescope)), dtype=np.int64)
# Number of tracks fulfilling hit requirement
total_n_tracks_valid_hits = 0
# Maximum allowed relative change for each alignment parameter. Can be adjusted.
deviation_cuts = [0.05, 0.05, 0.05, 0.05, 0.05, 0.05]
alpha = np.zeros(shape=len(telescope), dtype=np.float64) # annealing factor
# Loop in chunks over tracks. After each chunk, alignment values are stored.
for track_candidates_chunk, index_chunk in analysis_utils.data_aligned_at_events(in_file_h5.root.TrackCandidates, chunk_size=10000):
# Select only tracks for which hit requirement is fulfilled
track_candidates_chunk_valid_hits = track_candidates_chunk[track_candidates_chunk['hit_flag'] & dut_hit_mask == dut_hit_mask]
total_n_tracks_valid_hits_chunk = track_candidates_chunk_valid_hits.shape[0]
total_n_tracks_valid_hits += total_n_tracks_valid_hits_chunk
# Per chunk variables
chi2s = np.zeros(shape=(total_n_tracks_valid_hits_chunk), dtype=np.float64) # track chi2s
chi2s_probs = np.zeros(shape=(total_n_tracks_valid_hits_chunk), dtype=np.float64) # track pvalues
alignment_values = np.full(shape=(len(telescope), total_n_tracks_valid_hits_chunk), dtype=kfa_alignment_descr, fill_value=np.nan) # alignment values
# Loop over tracks in chunk
for track_index, track in enumerate(track_candidates_chunk_valid_hits):
track_hits = np.full((1, n_duts, 6), fill_value=np.nan, dtype=np.float64)
# Compute aligned position and apply the alignment
for dut_index, dut in enumerate(telescope):
# Get local track hits
track_hits[:, dut_index, 0] = track['x_dut_%s' % dut_index]
track_hits[:, dut_index, 1] = track['y_dut_%s' % dut_index]
track_hits[:, dut_index, 2] = track['z_dut_%s' % dut_index]
track_hits[:, dut_index, 3] = track['x_err_dut_%s' % dut_index]
track_hits[:, dut_index, 4] = track['y_err_dut_%s' % dut_index]
track_hits[:, dut_index, 5] = track['z_err_dut_%s' % dut_index]
# Calculate new alignment (takes initial alignment and actual *change* of parameters)
new_rotation_matrix, new_position_vector = _update_alignment(initial_rotation_matrix[dut_index], initial_position_vector[dut_index], actual_align_state[dut_index])
# Get euler angles from rotation matrix
alpha_average, beta_average, gamma_average = geometry_utils.euler_angles(R=new_rotation_matrix)
# Set new alignment to DUT
dut._translation_x = float(new_position_vector[0])
dut._translation_y = float(new_position_vector[1])
dut._translation_z = float(new_position_vector[2])
dut._rotation_alpha = float(alpha_average)
dut._rotation_beta = float(beta_average)
dut._rotation_gamma = float(gamma_average)
alignment_values[dut_index, track_index]['translation_x'] = dut.translation_x
alignment_values[dut_index, track_index]['translation_y'] = dut.translation_y
alignment_values[dut_index, track_index]['translation_z'] = dut.translation_z
alignment_values[dut_index, track_index]['rotation_alpha'] = dut.rotation_alpha
alignment_values[dut_index, track_index]['rotation_beta'] = dut.rotation_beta
alignment_values[dut_index, track_index]['rotation_gamma'] = dut.rotation_gamma
C = actual_align_cov[dut_index]
alignment_values[dut_index, track_index]['translation_x_err'] = np.sqrt(C[0, 0])
alignment_values[dut_index, track_index]['translation_y_err'] = np.sqrt(C[1, 1])
alignment_values[dut_index, track_index]['translation_z_err'] = np.sqrt(C[2, 2])
alignment_values[dut_index, track_index]['rotation_alpha_err'] = np.sqrt(C[3, 3])
alignment_values[dut_index, track_index]['rotation_beta_err'] = np.sqrt(C[4, 4])
alignment_values[dut_index, track_index]['rotation_gamma_err'] = np.sqrt(C[5, 5])
# Calculate deterministic annealing (scaling factor for covariance matrix) in order to take into account misalignment
alpha[dut_index] = _calculate_annealing(k=n_tracks_processed[dut_index], annealing_factor=annealing_factor, annealing_tracks=annealing_tracks)
# Store annealing factor
alignment_values[dut_index, track_index]['annealing_factor'] = alpha[dut_index]
# Run Kalman Filter
try:
offsets, slopes, chi2s_reg, chi2s_red, chi2s_prob, x_err, y_err, cov, cov_obs, obs_mat = _fit_tracks_kalman_loop(track_hits, telescope, fit_duts, beam_energy, particle_mass, scattering_planes, alpha)
except Exception as e:
logging.error('Track fitting failed: %s', e)
continue
# Store chi2 and pvalue
chi2s[track_index] = chi2s_red
chi2s_probs[track_index] = chi2s_prob
# Data quality check I: Check chi2 of track
if chi2s_red > track_chi2:
continue
# Actual track states
p0 = np.column_stack((offsets[0, :, 0], offsets[0, :, 1],
slopes[0, :, 0], slopes[0, :, 1]))
# Covariance matrix (x, y, dx, dy) of track estimates
C0 = cov[0, :, :, :]
# Covariance matrix (x, y, dx, dy) of observations
V = cov_obs[0, :, :, :]
# Measurement matrix
H = obs_mat[0, :, :, :]
# Actual alignment parameters and its covariance
a0 = actual_align_state.copy()
E0 = actual_align_cov.copy()
# Updated alignment parameters and its covariance
E1 = np.zeros_like(E0)
a1 = np.zeros_like(a0)
# Update all alignables
actual_align_state, actual_align_cov, alignment_values, n_tracks_processed = _update_alignment_parameters(
telescope, H, V, C0, p0, a0, E0, track_hits, a1, E1,
alignment_values, deviation_cuts,
actual_align_state, actual_align_cov, n_tracks_processed, track_index)
# Reached number of max. specified tracks. Stop alignment
if n_tracks_processed.min() > max_tracks:
pbar.update(track_index)
pbar.write('Processed {0} tracks (per DUT) out of {1} tracks'.format(n_tracks_processed, total_n_tracks_valid_hits))
pbar.close()
logging.info('Maximum number of tracks reached! Stopping alignment...')
# Store alignment data
_store_alignment_data(alignment_values[:, :track_index + 1], n_tracks_processed, chi2s[:track_index + 1], chi2s_probs[:track_index + 1], deviation_cuts)
return
pbar.update(track_candidates_chunk.shape[0])
pbar.write('Processed {0} tracks (per DUT) out of {1} tracks'.format(n_tracks_processed, total_n_tracks_valid_hits))
# Store alignment data
_store_alignment_data(alignment_values, n_tracks_processed, chi2s, chi2s_probs, deviation_cuts)
pbar.close()
telescope = Telescope(telescope_configuration)
n_duts = len(telescope)
logging.info('= Alignment step 2 - Fitting tracks for %d DUTs =', len(select_duts))
if iteration_index == 0: # clean up before starting alignment. In case different sets of DUTs are aligned after each other only clean up once.
if os.path.exists(output_alignment_file):
os.remove(output_alignment_file)
logging.info('=== Fitting tracks of %d DUTs ===' % n_duts)
if not beam_energy:
raise ValueError('Beam energy not given (in MeV).')
if not particle_mass:
raise ValueError('Particle mass not given (in MeV).')
if select_duts is None:
select_duts = list(range(n_duts)) # standard setting: fit tracks for all DUTs
elif not isinstance(select_duts, Iterable):
select_duts = [select_duts]
# Check for duplicates
if len(select_duts) != len(set(select_duts)):
raise ValueError("Found douplicate in select_duts.")
# Check if any iterable in iterable
if any(map(lambda val: isinstance(val, Iterable), select_duts)):
raise ValueError("Item in parameter select_duts is iterable.")
# Create track, hit selection
if select_fit_duts is None: # If None: use all DUTs
select_fit_duts = list(range(n_duts))
# # copy each item
# for hit_duts in select_hit_duts:
# select_fit_duts.append(hit_duts[:]) # require a hit for each fit DUT
# Check iterable and length
if not isinstance(select_fit_duts, Iterable):
raise ValueError("Parameter select_fit_duts is not an iterable.")
elif not select_fit_duts: # empty iterable
raise ValueError("Parameter select_fit_duts has no items.")
# Check if only non-iterable in iterable
if all(map(lambda val: not isinstance(val, Iterable) and val is not None, select_fit_duts)):
select_fit_duts = [select_fit_duts[:] for _ in select_duts]
# if None use all DUTs
for index, item in enumerate(select_fit_duts):
if item is None:
select_fit_duts[index] = list(range(n_duts))
# Check if only iterable in iterable
if not all(map(lambda val: isinstance(val, Iterable), select_fit_duts)):
raise ValueError("Not all items in parameter select_fit_duts are iterable.")
# Finally check length of all arrays
if len(select_fit_duts) != len(select_duts): # empty iterable
raise ValueError("Parameter select_fit_duts has the wrong length.")
for index, fit_dut in enumerate(select_fit_duts):
if len(fit_dut) < 2: # check the length of the items
raise ValueError("Item in parameter select_fit_duts has length < 2.")
# Create track, hit selection
if select_hit_duts is None: # If None, require no hit
# select_hit_duts = list(range(n_duts))
select_hit_duts = []
# Check iterable and length
if not isinstance(select_hit_duts, Iterable):
raise ValueError("Parameter select_hit_duts is not an iterable.")
# elif not select_hit_duts: # empty iterable
# raise ValueError("Parameter select_hit_duts has no items.")
# Check if only non-iterable in iterable
if all(map(lambda val: not isinstance(val, Iterable) and val is not None, select_hit_duts)):
select_hit_duts = [select_hit_duts[:] for _ in select_duts]
# If None, require no hit
for index, item in enumerate(select_hit_duts):
if item is None:
select_hit_duts[index] = []
# Check if only iterable in iterable
if not all(map(lambda val: isinstance(val, Iterable), select_hit_duts)):
raise ValueError("Not all items in parameter select_hit_duts are iterable.")
# Finally check length of all arrays
if len(select_hit_duts) != len(select_duts): # empty iterable
raise ValueError("Parameter select_hit_duts has the wrong length.")
# Check iterable and length
if not isinstance(exclude_dut_hit, Iterable):
exclude_dut_hit = [exclude_dut_hit] * len(select_duts)
elif not exclude_dut_hit: # empty iterable
raise ValueError("Parameter exclude_dut_hit has no items.")
# Finally check length of all array
if len(exclude_dut_hit) != len(select_duts): # empty iterable
raise ValueError("Parameter exclude_dut_hit has the wrong length.")
# Check if only bools in iterable
if not all(map(lambda val: isinstance(val, (bool,)), exclude_dut_hit)):
raise ValueError("Not all items in parameter exclude_dut_hit are boolean.")
# Check iterable and length
if not isinstance(min_track_hits, Iterable):
min_track_hits = [min_track_hits] * len(select_duts)
# Finally check length of all arrays
if len(min_track_hits) != len(select_duts): # empty iterable
raise ValueError("Parameter min_track_hits has the wrong length.")
fitted_duts = []
with tb.open_file(input_track_candidates_file, mode='r') as in_file_h5:
with tb.open_file(output_alignment_file, mode='a') as out_file_h5:
for fit_dut_index, actual_fit_dut in enumerate(select_duts): # Loop over the DUTs where tracks shall be fitted for
# Test whether other DUTs have identical tracks
# if yes, save some CPU time and fit only once.
# This following list contains all DUT indices that will be fitted
# during this step of the loop.
if actual_fit_dut in fitted_duts:
continue
# calculate all DUTs with identical tracks to save processing time
actual_fit_duts = []
for curr_fit_dut_index, curr_fit_dut in enumerate(select_duts):
if (curr_fit_dut == actual_fit_dut or
(((exclude_dut_hit[curr_fit_dut_index] is False and exclude_dut_hit[fit_dut_index] is False and set(select_fit_duts[curr_fit_dut_index]) == set(select_fit_duts[fit_dut_index])) or
(exclude_dut_hit[curr_fit_dut_index] is False and exclude_dut_hit[fit_dut_index] is True and set(select_fit_duts[curr_fit_dut_index]) == (set(select_fit_duts[fit_dut_index]) - set([actual_fit_dut]))) or
(exclude_dut_hit[curr_fit_dut_index] is True and exclude_dut_hit[fit_dut_index] is False and (set(select_fit_duts[curr_fit_dut_index]) - set([curr_fit_dut])) == set(select_fit_duts[fit_dut_index])) or
(exclude_dut_hit[curr_fit_dut_index] is True and exclude_dut_hit[fit_dut_index] is True and (set(select_fit_duts[curr_fit_dut_index]) - set([curr_fit_dut])) == (set(select_fit_duts[fit_dut_index]) - set([actual_fit_dut])))) and
set(select_hit_duts[curr_fit_dut_index]) == set(select_hit_duts[fit_dut_index]) and
min_track_hits[curr_fit_dut_index] == min_track_hits[fit_dut_index])):
actual_fit_duts.append(curr_fit_dut)
# continue with fitting
logging.info('== Fit tracks for %s ==', ', '.join([telescope[curr_dut].name for curr_dut in actual_fit_duts]))
# select hit DUTs based on input parameters
# hit DUTs are always enforced
hit_duts = select_hit_duts[fit_dut_index]
dut_hit_mask = 0 # DUTs required to have hits
for dut_index in hit_duts:
dut_hit_mask |= ((1 << dut_index))
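# Example: hit_duts = [0, 2, 5] gives dut_hit_mask = 0b100101 = 37; a track candidate
# is kept later only if all of these bits are set in its hit_flag word.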
logging.info('Require hits in %d DUTs for track selection: %s', len(hit_duts), ', '.join([telescope[curr_dut].name for curr_dut in hit_duts]))
# select fit DUTs based on input parameters
# exclude actual DUTs from fit DUTs if exclude_dut_hit parameter is set (for, e.g., unbiased residuals)
fit_duts = list(set(select_fit_duts[fit_dut_index]) - set([actual_fit_dut])) if exclude_dut_hit[fit_dut_index] else select_fit_duts[fit_dut_index]
if min_track_hits[fit_dut_index] is None:
actual_min_track_hits = len(fit_duts)
else:
actual_min_track_hits = min_track_hits[fit_dut_index]
if actual_min_track_hits < 2:
raise ValueError('The number of required hits is smaller than 2. Cannot fit tracks for %s.' % telescope[actual_fit_dut].name)
dut_fit_mask = 0 # DUTs to be used for the fit
for dut_index in fit_duts:
dut_fit_mask |= ((1 << dut_index))
if actual_min_track_hits > len(fit_duts):
raise RuntimeError("min_track_hits for DUT%d is larger than the number of fit DUTs" % (actual_fit_dut,))
logging.info('Require at least %d hits in %d DUTs for track selection: %s', actual_min_track_hits, len(fit_duts), ', '.join([telescope[curr_dut].name for curr_dut in fit_duts]))
if scattering_planes is not None:
logging.info('Adding the following scattering planes: %s', ', '.join([scp.name for scp in scattering_planes]))
# Actual *change* of alignment parameters and covariance
actual_align_state = np.zeros(shape=(len(telescope), 6), dtype=np.float64) # No change at beginning
actual_align_cov = np.zeros(shape=(len(telescope), 6, 6), dtype=np.float64)
# Calculate initial alignment
initial_rotation_matrix, initial_position_vector, actual_align_cov = _calculate_initial_alignment(telescope, select_duts, select_telescope_duts, alignment_parameters, actual_align_cov, alignment_parameters_errors)
# Loop over tracks in chunks and perform alignment.
_alignment_loop(actual_align_state, actual_align_cov, initial_rotation_matrix, initial_position_vector)
fitted_duts.extend(actual_fit_duts)
output_pdf_file = output_alignment_file[:-3] + '.pdf'
# Plot alignment result
plot_utils.plot_kf_alignment(output_alignment_file, telescope, output_pdf_file)
# Delete tmp track candidates file
os.remove(input_track_candidates_file)
def align_telescope(telescope_configuration, select_telescope_duts, reference_dut=None):
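'''Beam-alignment of the telescope.
A straight line is fitted through the current positions of the selected telescope DUTs. All DUTs are then shifted in x/y such that the x/y translation of the reference DUT becomes 0 and the other DUTs are positioned relative to the fitted line; the global telescope alpha/beta rotations are adjusted to compensate.
Parameters
----------
telescope_configuration : string
Filename of the telescope configuration file.
select_telescope_duts : iterable
DUT indices that are used for the beam alignment.
reference_dut : uint
Index of the reference DUT. If None, the selected telescope DUT with the smallest z position is used.
'''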
telescope = Telescope(telescope_configuration)
logging.info('= Beam-alignment of the telescope =')
logging.info('Use %d DUTs for beam-alignment: %s', len(select_telescope_duts), ', '.join([telescope[index].name for index in select_telescope_duts]))
telescope_duts_positions = np.full((len(select_telescope_duts), 3), fill_value=np.nan, dtype=np.float64)
for index, dut_index in enumerate(select_telescope_duts):
telescope_duts_positions[index, 0] = telescope[dut_index].translation_x
telescope_duts_positions[index, 1] = telescope[dut_index].translation_y
telescope_duts_positions[index, 2] = telescope[dut_index].translation_z
# the x and y translation for the reference DUT will be set to 0
if reference_dut is not None:
first_telescope_dut_index = reference_dut
else:
# calculate reference DUT, use DUT with the smallest z position
first_telescope_dut_index = select_telescope_duts[np.argmin(telescope_duts_positions[:, 2])]
offset, slope = line_fit_3d(positions=telescope_duts_positions)
first_telescope_dut = telescope[first_telescope_dut_index]
logging.info('Reference DUT for beam-alignment: %s', first_telescope_dut.name)
first_dut_translation_x = first_telescope_dut.translation_x
first_dut_translation_y = first_telescope_dut.translation_y
first_telescope_dut_intersection = geometry_utils.get_line_intersections_with_dut(
line_origins=offset[np.newaxis, :],
line_directions=slope[np.newaxis, :],
translation_x=first_telescope_dut.translation_x,
translation_y=first_telescope_dut.translation_y,
translation_z=first_telescope_dut.translation_z,
rotation_alpha=first_telescope_dut.rotation_alpha,
rotation_beta=first_telescope_dut.rotation_beta,
rotation_gamma=first_telescope_dut.rotation_gamma)
for actual_dut in telescope:
dut_intersection = geometry_utils.get_line_intersections_with_dut(
line_origins=offset[np.newaxis, :],
line_directions=slope[np.newaxis, :],
translation_x=actual_dut.translation_x,
translation_y=actual_dut.translation_y,
translation_z=actual_dut.translation_z,
rotation_alpha=actual_dut.rotation_alpha,
rotation_beta=actual_dut.rotation_beta,
rotation_gamma=actual_dut.rotation_gamma)
actual_dut.translation_x -= (dut_intersection[0, 0] - first_telescope_dut_intersection[0, 0] + first_dut_translation_x)
actual_dut.translation_y -= (dut_intersection[0, 1] - first_telescope_dut_intersection[0, 1] + first_dut_translation_y)
# set telescope alpha/beta rotation for a better beam alignment and track finding improvement
# this is compensating the previously made changes to the DUT coordinates
total_angles, alpha_angles, beta_angles = get_angles(
slopes=slope[np.newaxis, :],
xz_plane_normal=np.array([0.0, 1.0, 0.0]),
yz_plane_normal=np.array([1.0, 0.0, 0.0]),
dut_plane_normal=np.array([0.0, 0.0, 1.0]))
telescope.rotation_alpha -= alpha_angles[0]
telescope.rotation_beta -= beta_angles[0]
telescope.save_configuration()
def calculate_transformation(telescope_configuration, input_tracks_file, select_duts, select_alignment_parameters=None, use_limits=True, max_iterations=None, chunk_size=1000000):
'''Takes the tracks and calculates and stores the transformation parameters.
Parameters
----------
telescope_configuration : string
Filename of the telescope configuration file.
input_tracks_file : string
Filename of the input tracks file.
select_duts : list
Selecting DUTs that will be processed.
select_alignment_parameters : list
Selecting the transformation parameters that will be stored to the telescope configuration file for each selected DUT.
If None, all 6 transformation parameters will be calculated.
use_limits : bool
If True, use column and row limits from pre-alignment for selecting the data.
max_iterations : uint
Maximum number of iterations of the transformation fit for each data chunk. If None, 100 iterations are used.
chunk_size : int
Chunk size of the data when reading from file.
'''
telescope = Telescope(telescope_configuration)
logging.info('== Calculating transformation for %d DUTs ==' % len(select_duts))
if select_alignment_parameters is None:
select_alignment_parameters = [default_alignment_parameters] * len(select_duts)
if len(select_duts) != len(select_alignment_parameters):
raise ValueError("Parameter select_alignment_parameters has the wrong length.")
for index, actual_alignment_parameters in enumerate(select_alignment_parameters):
if actual_alignment_parameters is None:
select_alignment_parameters[index] = default_alignment_parameters
else:
non_valid_parameters = set(actual_alignment_parameters) - set(default_alignment_parameters)
if non_valid_parameters:
raise ValueError("Found invalid values in parameter select_alignment_parameters: %s." % ", ".join(non_valid_parameters))
with tb.open_file(input_tracks_file, mode='r') as in_file_h5:
for index, actual_dut_index in enumerate(select_duts):
actual_dut = telescope[actual_dut_index]
node = in_file_h5.get_node(in_file_h5.root, 'Tracks_DUT%d' % actual_dut_index)
logging.info('= Calculate transformation for %s =', actual_dut.name)
logging.info("Modify alignment parameters: %s", ', '.join([alignment_paramter for alignment_paramter in select_alignment_parameters[index]]))
if use_limits:
limit_x_local = actual_dut.x_limit # (lower limit, upper limit)
limit_y_local = actual_dut.y_limit # (lower limit, upper limit)
else:
limit_x_local = None
limit_y_local = None
rotation_average = None
translation_average = None
# euler_angles_average = None
# calculate equal chunk size
start_val = max(int(node.nrows / chunk_size), 2)
while True:
chunk_indices = np.linspace(0, node.nrows, start_val).astype(np.int64)
if np.all(np.diff(chunk_indices) <= chunk_size):
break
start_val += 1
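# Worked example: for node.nrows = 2.5e6 and chunk_size = 1e6 the loop stops at
# start_val = 4, i.e. 3 chunks of roughly 0.83e6 tracks each.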
chunk_index = 0
n_tracks = 0
total_n_tracks = 0
while chunk_indices[chunk_index] < node.nrows:
tracks_chunk = node.read(start=chunk_indices[chunk_index], stop=chunk_indices[chunk_index + 1])
# select good hits and tracks
selection = np.logical_and(~np.isnan(tracks_chunk['x_dut_%d' % actual_dut_index]), ~np.isnan(tracks_chunk['track_chi2']))
tracks_chunk = tracks_chunk[selection] # Take only tracks where actual dut has a hit, otherwise residual wrong
# Coordinates in global coordinate system (x, y, z)
hit_x_local, hit_y_local, hit_z_local = tracks_chunk['x_dut_%d' % actual_dut_index], tracks_chunk['y_dut_%d' % actual_dut_index], tracks_chunk['z_dut_%d' % actual_dut_index]
offsets = np.column_stack(actual_dut.local_to_global_position(
x=tracks_chunk['offset_x'],
y=tracks_chunk['offset_y'],
z=tracks_chunk['offset_z']))
slopes = np.column_stack(actual_dut.local_to_global_position(
x=tracks_chunk['slope_x'],
y=tracks_chunk['slope_y'],
z=tracks_chunk['slope_z'],
translation_x=0.0,
translation_y=0.0,
translation_z=0.0,
rotation_alpha=actual_dut.rotation_alpha,
rotation_beta=actual_dut.rotation_beta,
rotation_gamma=actual_dut.rotation_gamma))
if not np.allclose(hit_z_local, 0.0):
raise RuntimeError("Transformation into local coordinate system gives z != 0")
limit_xy_local_sel = np.ones_like(hit_x_local, dtype=bool)
if limit_x_local is not None and np.isfinite(limit_x_local[0]):
limit_xy_local_sel &= hit_x_local >= limit_x_local[0]
if limit_x_local is not None and np.isfinite(limit_x_local[1]):
limit_xy_local_sel &= hit_x_local <= limit_x_local[1]
if limit_y_local is not None and np.isfinite(limit_y_local[0]):
limit_xy_local_sel &= hit_y_local >= limit_y_local[0]
if limit_y_local is not None and np.isfinite(limit_y_local[1]):
limit_xy_local_sel &= hit_y_local <= limit_y_local[1]
hit_x_local = hit_x_local[limit_xy_local_sel]
hit_y_local = hit_y_local[limit_xy_local_sel]
hit_z_local = hit_z_local[limit_xy_local_sel]
hit_local = np.column_stack([hit_x_local, hit_y_local, hit_z_local])
slopes = slopes[limit_xy_local_sel]
offsets = offsets[limit_xy_local_sel]
n_tracks = np.count_nonzero(limit_xy_local_sel)
x_dut_start = actual_dut.translation_x
y_dut_start = actual_dut.translation_y
z_dut_start = actual_dut.translation_z
alpha_dut_start = actual_dut.rotation_alpha
beta_dut_start = actual_dut.rotation_beta
gamma_dut_start = actual_dut.rotation_gamma
delta_t = 0.9 # TODO: optimize
if max_iterations is None:
iterations = 100
else:
iterations = max_iterations
lin_alpha = 1.0
initialize_angles = True
translation_old = None
rotation_old = None
for i in range(iterations):
if initialize_angles:
alpha, beta, gamma = alpha_dut_start, beta_dut_start, gamma_dut_start
rotation = geometry_utils.rotation_matrix(
alpha=alpha,
beta=beta,
gamma=gamma)
translation = np.array([x_dut_start, y_dut_start, z_dut_start], dtype=np.float64)
initialize_angles = False
# start_delta_t = 0.01
# stop_delta_t = 1.0
# delta_t = start_delta_t * np.exp(i * (np.log(stop_delta_t) - np.log(start_delta_t)) / iterations)
tot_matr = None
tot_b = None
identity = np.identity(3, dtype=np.float64)
n_identity = -np.identity(3, dtype=np.float64)
# vectorized calculation of matrix and b vector
outer_prod_slopes = np.einsum('ij,ik->ijk', slopes, slopes)
l_matr = identity - outer_prod_slopes
R_y = np.matmul(rotation, hit_local.T).T
skew_R_y = geometry_utils.skew(R_y)
tot_matr = np.concatenate([np.matmul(l_matr, skew_R_y), np.matmul(l_matr, n_identity)], axis=2)
tot_matr = tot_matr.reshape(-1, tot_matr.shape[2])
tot_b = np.matmul(l_matr, np.expand_dims(offsets, axis=2)) - np.matmul(l_matr, np.expand_dims(R_y + translation, axis=2))
tot_b = tot_b.reshape(-1)
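# Each selected hit contributes three rows L_i * [skew(R*y_i) | -I] to tot_matr and
# L_i * (p_i - R*y_i - t) to tot_b, where L_i = I - s_i*s_i^T projects onto the plane
# perpendicular to the track direction s_i (y_i: local hit, p_i: track offset, t: translation).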
# iterative calculation of matrix and b vector
# for count in range(len(hit_x_local)):
# if count >= max_n_tracks:
# count = count - 1
# break
# slope = slopes[count]
# l_matr = identity - np.outer(slope, slope)
# p = offsets[count]
# y = hit_local[count]
# R_y = np.dot(rotation_dut, y)
# b = np.dot(l_matr, p) - np.dot(l_matr, (R_y + translation))
# if tot_b is None:
# tot_b = b
# else:
# tot_b = np.hstack([tot_b, b])
# skew_R_y = geometry_utils.skew(R_y)
# matr = np.dot(l_matr, np.hstack([skew_R_y, n_identity]))
# if tot_matr is None:
# tot_matr = matr
# else:
# tot_matr = np.vstack([tot_matr, matr])
# SVD
u, s, v = np.linalg.svd(tot_matr, full_matrices=False)
diag = np.diag(s ** -1)
tot_matr_inv = np.dot(v.T, np.dot(diag, u.T))
omega_b_dot = np.dot(tot_matr_inv, -lin_alpha * tot_b)
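# omega_b_dot holds the six correction rates: indices 0-2 form the rotation update
# (used as a skew-symmetric matrix below) and indices 3-5 the translation update.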
# Some alignment parameters can be fixed to initial values
# If parameter is not in list, set infinitesimal change to zero
# Note: An impact on the rotation parameters cannot be completely
# avoided because of the orthogonalization of the rotation matrix
if 'translation_x' not in select_alignment_parameters[index]:
omega_b_dot[3] = 0.0
if 'translation_y' not in select_alignment_parameters[index]:
omega_b_dot[4] = 0.0
if 'translation_z' not in select_alignment_parameters[index]:
omega_b_dot[5] = 0.0
if 'rotation_alpha' not in select_alignment_parameters[index]:
omega_b_dot[0] = 0.0
if 'rotation_beta' not in select_alignment_parameters[index]:
omega_b_dot[1] = 0.0
if 'rotation_gamma' not in select_alignment_parameters[index]:
omega_b_dot[2] = 0.0
translation_old2 = translation_old
rotation_old2 = rotation_old
translation_old = translation
rotation_old = rotation
rotation = np.dot((np.identity(3, dtype=np.float64) + delta_t * geometry_utils.skew(omega_b_dot[:3])), rotation)
# apply UP (polar) decomposition to normalize/orthogonalize rotation matrix (det = 1)
u_rot, _, v_rot = np.linalg.svd(rotation, full_matrices=False)
rotation = np.dot(u_rot, v_rot) # orthogonal matrix U
translation = translation + delta_t * omega_b_dot[3:]
if i >= 2:
allclose_trans = np.allclose(translation, translation_old, rtol=0.0, atol=1e-4)
allclose_rot = np.allclose(rotation, rotation_old, rtol=0.0, atol=1e-5)
allclose_trans2 = np.allclose(translation, translation_old2, rtol=0.0, atol=1e-3)
allclose_rot2 = np.allclose(rotation, rotation_old2, rtol=0.0, atol=1e-4)
# exit if parameters are more or less constant
if (allclose_rot and allclose_trans):
break
# change to smaller step size for smaller transformation parameters
# and check for oscillating result (every second result is identical)
elif (allclose_rot2 or allclose_trans2 or allclose_rot or allclose_trans):
delta_t = 0.3
if translation_average is None:
translation_average = translation
else:
translation_average = np.average([translation, translation_average], weights=[n_tracks, total_n_tracks], axis=0)
# alpha, beta, gamma = geometry_utils.euler_angles(R=rotation)
# euler_angles = np.array([alpha, beta, gamma], dtype=np.float64)
# if euler_angles_average is None:
# euler_angles_average = euler_angles
# else:
# euler_angles_average = np.average([euler_angles, euler_angles_average], weights=[n_tracks, total_n_tracks], axis=0)
if rotation_average is None:
rotation_average = rotation
else:
rotation_average = np.average([rotation, rotation_average], weights=[n_tracks, total_n_tracks], axis=0)
total_n_tracks += n_tracks
chunk_index += 1
# average rotation matrices from different chunks
u_rot, _, v_rot = np.linalg.svd(rotation_average, full_matrices=False)
rotation_average = np.dot(u_rot, v_rot) # orthogonal matrix U
alpha_average, beta_average, gamma_average = geometry_utils.euler_angles(R=rotation_average)
actual_dut.translation_x = translation_average[0]
actual_dut.translation_y = translation_average[1]
actual_dut.translation_z = translation_average[2]
actual_dut.rotation_alpha = alpha_average
actual_dut.rotation_beta = beta_average
actual_dut.rotation_gamma = gamma_average
telescope.save_configuration()
def _calculate_initial_alignment(telescope, select_duts, select_telescope_duts, alignment_parameters, actual_align_cov, alignment_parameters_errors):
''' Calculate initial alignment parameters. Setting initial covariance to zero excludes alignment parameter from alignment.
'''
initial_rotation_matrix = np.zeros(shape=(len(telescope), 3, 3), dtype=np.float64)
initial_position_vector = np.zeros(shape=(len(telescope), 3), dtype=np.float64)
for dut_index, dut in enumerate(telescope):
# Initial (global) position
initial_position_vector[dut_index, 0] = dut.translation_x
initial_position_vector[dut_index, 1] = dut.translation_y
initial_position_vector[dut_index, 2] = dut.translation_z
# Initial rotation matrix
initial_rotation_matrix[dut_index] = geometry_utils.rotation_matrix(
alpha=dut.rotation_alpha,
beta=dut.rotation_beta,
gamma=dut.rotation_gamma)
# Errors on initial alignment parameters
if dut_index in select_duts:
if 'translation_x' in alignment_parameters[dut_index]:
actual_align_cov[dut_index, 0, 0] = np.square(alignment_parameters_errors[0]) # 50 um error
if 'translation_y' in alignment_parameters[dut_index]:
actual_align_cov[dut_index, 1, 1] = np.square(alignment_parameters_errors[1]) # 50 um error
if 'translation_z' in alignment_parameters[dut_index]:
actual_align_cov[dut_index, 2, 2] = np.square(alignment_parameters_errors[2]) # 1 mm error
if 'rotation_alpha' in alignment_parameters[dut_index]:
actual_align_cov[dut_index, 3, 3] = np.square(alignment_parameters_errors[3]) # 20 mrad error
if 'rotation_beta' in alignment_parameters[dut_index]:
actual_align_cov[dut_index, 4, 4] = np.square(alignment_parameters_errors[4]) # 20 mrad error
if 'rotation_gamma' in alignment_parameters[dut_index]:
actual_align_cov[dut_index, 5, 5] = np.square(alignment_parameters_errors[5]) # 20 mrad error
# Fix first and last telescope plane (only rotation_gamma is not fixed).
# In principle only z needs to be fixed to avoid telescope stretching, plus the very first plane (together with beam alignment).
for k in [0, 1, 2, 3, 4]: # leave rotation_gamma floating
actual_align_cov[select_telescope_duts[0], k, k] = 0.0
actual_align_cov[select_telescope_duts[-1], k, k] = 0.0
# actual_align_cov[4, 5, 5] = 0.0 # 20 mrad error
return initial_rotation_matrix, initial_position_vector, actual_align_cov
def _update_alignment_parameters(telescope, H, V, C0, p0, a0, E0, track_hits, a1, E1, alignment_values, deviation_cuts, actual_align_state, actual_align_cov, n_tracks_processed, track_index):
''' Update alignment parameters and check for change. If rel. change too large, change is rejected.
'''
for dut_index, dut in enumerate(telescope):
R = geometry_utils.rotation_matrix(
alpha=dut.rotation_alpha,
beta=dut.rotation_beta,
gamma=dut.rotation_gamma).T
# Calculate update for alignment parameters
a1[dut_index, :], E1[dut_index, :] = _calculate_alignment_parameters(H[dut_index], V[dut_index], C0[dut_index], p0[dut_index],
a0[dut_index], E0[dut_index], R, track_hits[0, dut_index, :2])
# Data quality check II: Check change of alignment parameters
filter_track = False
for par, par_name in enumerate(['translation_x', 'translation_y', 'translation_z', 'rotation_alpha', 'rotation_beta', 'rotation_gamma']): # loop over all parameters for alignable
if deviation_cuts[par] > 0.0:
if np.sqrt(E0[dut_index, par, par]) == 0:
alignment_values[dut_index, track_index][par_name + '_delta'] = 0.0
else:
alignment_values[dut_index, track_index][par_name + '_delta'] = np.abs(a1[dut_index, par] - a0[dut_index, par]) / np.sqrt(E0[dut_index, par, par])
if np.abs(a1[dut_index, par] - a0[dut_index, par]) > deviation_cuts[par] * np.sqrt(E0[dut_index, par, par]):
filter_track = True
break
if filter_track:
continue
# Update actual alignment state
actual_align_state[dut_index] = a1[dut_index].copy()
actual_align_cov[dut_index] = E1[dut_index].copy()
n_tracks_processed[dut_index] += 1
return actual_align_state, actual_align_cov, alignment_values, n_tracks_processed
def _update_alignment(initial_rotation_matrix, initial_position_vector, actual_align_state):
''' Update alignment by applying corrections to initial alignment.
'''
# Extract alignment parameters
dx = actual_align_state[0]
dy = actual_align_state[1]
dz = actual_align_state[2]
dalpha = actual_align_state[3]
dbeta = actual_align_state[4]
dgamma = actual_align_state[5]
# Compute a 'delta' frame from corrections
delta_frame = _create_karimaki_delta(dx, dy, dz, dalpha, dbeta, dgamma)
# Merge initial alignment and corrections ('delta')
return _combine_karimaki(initial_rotation_matrix, initial_position_vector, delta_frame)
def _create_karimaki_delta(dx, dy, dz, dalpha, dbeta, dgamma):
''' Reference: https://cds.cern.ch/record/619975/files/cr03_022.pdf, Eq. 4
Use full rotation matrix here, instead of small angle approximation used in paper.
'''
# Small rotation by dalpha, dbeta, dgamma around the coordinate axes.
delta_rot = geometry_utils.rotation_matrix(
alpha=dalpha,
beta=dbeta,
gamma=dgamma).T
# Shift of sensor center by dx,dy,dz in global coord. system.
delta_offset = np.array([dx, dy, dz])
return delta_rot, delta_offset
def _combine_karimaki(initial_rotation_matrix, initial_position_vector, delta_frame):
''' Apply corrections ('delta') to initial alignment.
Reference: https://cds.cern.ch/record/619975/files/cr03_022.pdf
'''
combined_rot = np.matmul(delta_frame[0], initial_rotation_matrix)
combined_offset = delta_frame[1] + initial_position_vector
return combined_rot, combined_offset
def _calculate_annealing(k, annealing_factor, annealing_tracks):
'''Geometrical annealing scheme according to https://iopscience.iop.org/article/10.1088/0954-3899/29/3/309.
'''
if k < annealing_tracks:
alpha = annealing_factor ** ((annealing_tracks - k) / annealing_tracks)
else:
alpha = 1.0
return alpha
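# Illustrative sketch (hypothetical numbers, not called anywhere in this module): with
# annealing_factor=1e4 and annealing_tracks=1000 the annealing weight decays geometrically
# from 1e4 towards 1.0 as more tracks are processed, i.e. 1e4, 1e3, 1e2, 1e1, 1.0 for
# k = 0, 250, 500, 750, 1000.
def _example_annealing_schedule():
    return [_calculate_annealing(k=k, annealing_factor=1e4, annealing_tracks=1000)
            for k in (0, 250, 500, 750, 1000)]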
def _calculate_alignment_parameters(H, V, C0, p0, a0, E0, R, m):
''' Update formulas for the alignment parameters and their covariance.
Reference: https://iopscience.iop.org/article/10.1088/0954-3899/29/3/309
'''
# Jacobian of alignment parameters
D = _jacobian_alignment(p0, R)
# Weight matrix
W = np.linalg.inv((V + np.matmul(H, np.matmul(C0, H.T)) + np.matmul(D, np.matmul(E0, D.T))))
# Update alignment parameter states and covariance
a1 = a0 + np.matmul(np.matmul(E0, np.matmul(D.T, W)), (m - np.matmul(H, p0)))
E1 = E0 - np.matmul(E0, np.matmul(D.T, np.matmul(W, np.matmul(D, E0.T))))
return a1, E1
def _jacobian_alignment(p0, R):
''' Derivative of measurements with respect to global alignment parameters.
Reference: https://cds.cern.ch/record/619975/files/cr03_022.pdf
'''
# Extract position and slope from track state
u = p0[0]
v = p0[1]
tu = p0[2]
tv = p0[3]
# Jacobian matrix
jaq = np.zeros(shape=(2, 6), dtype=np.float64)
jaq[0, 0] = -1.0 # dfu / ddu
jaq[1, 0] = 0.0 # dfv / ddu
jaq[0, 1] = 0.0 # dfu / ddv
jaq[1, 1] = -1.0 # dfv / ddv
jaq[0, 2] = tu # dfu / ddw
jaq[1, 2] = tv # dfv / ddw
jaq[0, 3] = -v * tu # dfu / ddalpha
jaq[1, 3] = -v * tv # dfv / ddalpha
jaq[0, 4] = u * tu # dfu / ddbeta
jaq[1, 4] = u * tv # dfv / ddbeta
jaq[0, 5] = -v # dfu / ddgamma
jaq[1, 5] = u # dfv / ddgamma
A = np.eye(6, dtype=np.float64)
A[:3, :3] = R
# Apply chain rule to transform into global coordinate system
return np.matmul(jaq, A)
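# Hedged sketch (hypothetical track state, not used elsewhere in this module): for a track
# crossing the sensor origin with zero slope and an identity sensor rotation, only the
# translation derivatives survive, i.e. the Jacobian reduces to
# [[-1, 0, 0, 0, 0, 0], [0, -1, 0, 0, 0, 0]].
def _example_jacobian_alignment():
    p0 = np.array([0.0, 0.0, 0.0, 0.0])  # u, v, slope in u, slope in v
    return _jacobian_alignment(p0=p0, R=np.eye(3, dtype=np.float64))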
|
{"hexsha": "b0d92d6ce6ddf1824814e9c83d5a5866b69734bc", "size": 129777, "ext": "py", "lang": "Python", "max_stars_repo_path": "beam_telescope_analysis/dut_alignment.py", "max_stars_repo_name": "SiLab-Bonn/beam-telescope-analysis", "max_stars_repo_head_hexsha": "69a6a79a93ddb5e32064ec61f7c5bc323165bb99", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "beam_telescope_analysis/dut_alignment.py", "max_issues_repo_name": "SiLab-Bonn/beam-telescope-analysis", "max_issues_repo_head_hexsha": "69a6a79a93ddb5e32064ec61f7c5bc323165bb99", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "beam_telescope_analysis/dut_alignment.py", "max_forks_repo_name": "SiLab-Bonn/beam-telescope-analysis", "max_forks_repo_head_hexsha": "69a6a79a93ddb5e32064ec61f7c5bc323165bb99", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 56.3757602085, "max_line_length": 536, "alphanum_fraction": 0.6604328964, "include": true, "reason": "import numpy,import scipy", "num_tokens": 28783}
|
import os
import warnings
import numpy as np
import pandas as pd
from tensorflow.keras import backend as K
from tensorflow.keras import Model
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.keras.engine import data_adapter
from tensorflow.python.eager import monitoring
from tensorflow.python.keras.engine import training_utils #, training_v2_utils,
from keras_custom.engine import training_generator
# _keras_api_gauge = monitoring.BoolGauge('/tensorflow/api/keras',
# 'keras api usage', 'method')
class Model_custom(Model):
def fit_generator_custom(self,
generator,
steps_per_epoch=None,
epochs=1,
verbose=1,
callbacks=None,
validation_data=None,
validation_steps=None,
validation_freq=1,
train_class_weight=None,
val_class_weight=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
shuffle=True,
initial_epoch=0):
# TODO: failed to pass some monitoring checks.
# _keras_api_gauge.get_cell('fit_generator_custom').set(True)
return training_generator.fit_generator(
self,
generator,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
verbose=verbose,
callbacks=callbacks,
validation_data=validation_data,
validation_steps=validation_steps,
validation_freq=validation_freq,
train_class_weight=train_class_weight,
val_class_weight=val_class_weight,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
shuffle=shuffle,
initial_epoch=initial_epoch,
steps_name='steps_per_epoch')
def train_on_batch_custom(self,
x,
y=None,
sample_weight=None,
class_weight=None,
reset_metrics=True,
return_dict=False):
self._assert_compile_was_called()
self._check_call_args('train_on_batch')
with self.distribute_strategy.scope(), \
training_utils.RespectCompiledTrainableState(self):
iterator = data_adapter.single_batch_iterator(
self.distribute_strategy,
x,
y,
sample_weight,
class_weight)
train_function = self.make_train_function()
logs = train_function(iterator)
if reset_metrics:
self.reset_metrics()
logs = tf_utils.to_numpy_or_python_type(logs)
if return_dict:
return logs
else:
results = [logs.get(name, None) for name in self.metrics_names]
if len(results) == 1:
return results[0]
return results
def test_on_batch_custom(self,
x,
y=None,
class_weight=None,
sample_weight=None,
reset_metrics=True,
return_dict=False):
self._assert_compile_was_called()
self._check_call_args('test_on_batch')
with self.distribute_strategy.scope():
iterator = data_adapter.single_batch_iterator(
self.distribute_strategy,
x,
y,
sample_weight,
class_weight)
test_function = self.make_test_function()
logs = test_function(iterator)
if reset_metrics:
self.reset_metrics()
logs = tf_utils.to_numpy_or_python_type(logs)
if return_dict:
return logs
else:
results = [logs.get(name, None) for name in self.metrics_names]
if len(results) == 1:
return results[0]
return results
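# Usage sketch (illustrative only; `my_inputs`, `my_outputs`, `x_batch`, `y_batch` and the
# class-weight dictionary are placeholders, not part of this module):
#
#   model = Model_custom(inputs=my_inputs, outputs=my_outputs)
#   model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
#   logs = model.train_on_batch_custom(x_batch, y_batch,
#                                      class_weight={0: 1.0, 1: 2.0},
#                                      return_dict=True)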
|
{"hexsha": "5752f6143e8573e9d6d1fd5501fc53e57a33956b", "size": 4810, "ext": "py", "lang": "Python", "max_stars_repo_path": "top_down_attention/keras_custom/engine/training.py", "max_stars_repo_name": "don-tpanic/CostsBenefitsAttention", "max_stars_repo_head_hexsha": "7221b873283c210cafd54fa895ba10b418d8ca67", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-03-20T19:19:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-21T18:12:05.000Z", "max_issues_repo_path": "top_down_attention/keras_custom/engine/training.py", "max_issues_repo_name": "don-tpanic/CostsBenefitsAttention", "max_issues_repo_head_hexsha": "7221b873283c210cafd54fa895ba10b418d8ca67", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "top_down_attention/keras_custom/engine/training.py", "max_forks_repo_name": "don-tpanic/CostsBenefitsAttention", "max_forks_repo_head_hexsha": "7221b873283c210cafd54fa895ba10b418d8ca67", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.1111111111, "max_line_length": 81, "alphanum_fraction": 0.4871101871, "include": true, "reason": "import numpy", "num_tokens": 727}
|
# Linear Discriminant Analysis
#### Type to represent a linear discriminant functional
abstract type Discriminant{T} end
struct LinearDiscriminant{T<:Real} <: Discriminant{T}
w::Vector{T}
b::T
end
length(f::LinearDiscriminant) = length(f.w)
evaluate(f::LinearDiscriminant, x::AbstractVector) = dot(f.w, x) + f.b
function evaluate(f::LinearDiscriminant, X::AbstractMatrix)
R = transpose(X) * f.w
if f.b != 0
broadcast!(+, R, R, f.b)
end
return R
end
predict(f::Discriminant, x::AbstractVector) = evaluate(f, x) > 0
predict(f::Discriminant, X::AbstractMatrix) = (Y = evaluate(f, X); Bool[y > 0 for y in Y])
#### function to solve linear discriminant
function ldacov(C::DenseMatrix{T},
μp::DenseVector{T},
μn::DenseVector{T}) where T<:Real
w = cholesky(C) \ (μp - μn)
ap = w ⋅ μp
an = w ⋅ μn
c = 2 / (ap - an)
LinearDiscriminant(rmul!(w, c), 1 - c * ap)
end
ldacov(Cp::DenseMatrix{T},
Cn::DenseMatrix{T},
μp::DenseVector{T},
μn::DenseVector{T}) where T<:Real = ldacov(Cp + Cn, μp, μn)
#### interface functions
function fit(::Type{LinearDiscriminant}, Xp::DenseMatrix{T}, Xn::DenseMatrix{T};
covestimator::CovarianceEstimator = SimpleCovariance()) where T<:Real
μp = vec(mean(Xp, dims=2))
μn = vec(mean(Xn, dims=2))
Zp = Xp .- μp
Zn = Xn .- μn
Cp = calcscattermat(covestimator, Zp)
Cn = calcscattermat(covestimator, Zn)
ldacov(Cp, Cn, μp, μn)
end
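# Minimal usage sketch (assumes the surrounding package context; the random data below is
# made up purely for illustration):
#
#   Xp = randn(5, 100) .+ 2.0   # positive-class samples, one column per sample
#   Xn = randn(5, 120) .- 2.0   # negative-class samples
#   f  = fit(LinearDiscriminant, Xp, Xn)
#   predict(f, Xp[:, 1])        # expected `true` for well-separated classes
#   predict(f, Xn)              # Vector{Bool}, mostly `false`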
#### Multiclass LDA Stats
mutable struct MulticlassLDAStats{T<:Real, M<:AbstractMatrix{T}, N<:AbstractMatrix{T}}
dim::Int # sample dimensions
nclasses::Int # number of classes
cweights::Vector{T} # class weights
tweight::T # total sample weight
mean::Vector{T} # overall sample mean
cmeans::Matrix{T} # class-specific means
Sw::M # within-class scatter matrix
Sb::N # between-class scatter matrix
end
mean(S::MulticlassLDAStats) = S.mean
classweights(S::MulticlassLDAStats) = S.cweights
classmeans(S::MulticlassLDAStats) = S.cmeans
withclass_scatter(S::MulticlassLDAStats) = S.Sw
betweenclass_scatter(S::MulticlassLDAStats) = S.Sb
function MulticlassLDAStats(cweights::Vector{T},
mean::Vector{T},
cmeans::Matrix{T},
Sw::AbstractMatrix{T},
Sb::AbstractMatrix{T}) where T<:Real
d, nc = size(cmeans)
length(mean) == d || throw(DimensionMismatch("Incorrect length of mean"))
length(cweights) == nc || throw(DimensionMismatch("Incorrect length of cweights"))
tw = sum(cweights)
size(Sw) == (d, d) || throw(DimensionMismatch("Incorrect size of Sw"))
size(Sb) == (d, d) || throw(DimensionMismatch("Incorrect size of Sb"))
MulticlassLDAStats(d, nc, cweights, tw, mean, cmeans, Sw, Sb)
end
function multiclass_lda_stats(nc::Int, X::AbstractMatrix{T}, y::AbstractVector{Int};
covestimator_within::CovarianceEstimator=SimpleCovariance(),
covestimator_between::CovarianceEstimator=SimpleCovariance()) where T<:Real
# check sizes
d = size(X, 1)
n = size(X, 2)
n ≥ nc || throw(ArgumentError("The number of samples is less than the number of classes"))
length(y) == n || throw(DimensionMismatch("Inconsistent array sizes."))
# compute class-specific weights and means
cmeans, cweights, Z = center(X, y, nc)
Sw = calcscattermat(covestimator_within, Z)
# compute between-class scattering
mean = cmeans * (cweights ./ T(n))
U = rmul!(cmeans .- mean, Diagonal(sqrt.(cweights)))
Sb = calcscattermat(covestimator_between, U)
return MulticlassLDAStats(Vector{T}(cweights), mean, cmeans, Sw, Sb)
end
#### Multiclass LDA
mutable struct MulticlassLDA{T<:Real}
proj::Matrix{T}
pmeans::Matrix{T}
stats::MulticlassLDAStats{T}
end
indim(M::MulticlassLDA) = size(M.proj, 1)
outdim(M::MulticlassLDA) = size(M.proj, 2)
projection(M::MulticlassLDA) = M.proj
mean(M::MulticlassLDA) = mean(M.stats)
classmeans(M::MulticlassLDA) = classmeans(M.stats)
classweights(M::MulticlassLDA) = classweights(M.stats)
withclass_scatter(M::MulticlassLDA) = withclass_scatter(M.stats)
betweenclass_scatter(M::MulticlassLDA) = betweenclass_scatter(M.stats)
transform(M::MulticlassLDA, x::AbstractVecOrMat{<:Real}) = M.proj'x
function fit(::Type{MulticlassLDA}, nc::Int, X::DenseMatrix{T}, y::AbstractVector{Int};
method::Symbol=:gevd,
outdim::Int=min(size(X,1), nc-1),
regcoef::T=T(1.0e-6),
covestimator_within::CovarianceEstimator=SimpleCovariance(),
covestimator_between::CovarianceEstimator=SimpleCovariance()) where T<:Real
multiclass_lda(multiclass_lda_stats(nc, X, y;
covestimator_within=covestimator_within,
covestimator_between=covestimator_between);
method=method,
regcoef=regcoef,
outdim=outdim)
end
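# Usage sketch (illustrative; data and labels are made up):
#
#   X = randn(10, 300)          # 10-dimensional samples, one per column
#   y = rand(1:3, 300)          # integer class labels in 1:3
#   M = fit(MulticlassLDA, 3, X, y)
#   Z = transform(M, X)         # projected samples, here (nc - 1) = 2 rows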
function multiclass_lda(S::MulticlassLDAStats{T};
method::Symbol=:gevd,
outdim::Int=min(S.dim, S.nclasses-1),
regcoef::T=T(1.0e-6)) where T<:Real
P = mclda_solve(S.Sb, S.Sw, method, outdim, regcoef)
MulticlassLDA(P, P'S.cmeans, S)
end
mclda_solve(Sb::AbstractMatrix{T}, Sw::AbstractMatrix{T}, method::Symbol, p::Int, regcoef::T) where T<:Real =
mclda_solve!(copy(Sb), copy(Sw), method, p, regcoef)
function mclda_solve!(Sb::AbstractMatrix{T},
Sw::AbstractMatrix{T},
method::Symbol, p::Int, regcoef::T) where T<:Real
p <= size(Sb, 1) || throw(ArgumentError("p cannot exceed sample dimension."))
if method == :gevd
regularize_symmat!(Sw, regcoef)
E = eigen!(Symmetric(Sb), Symmetric(Sw))
ord = sortperm(E.values; rev=true)
P = E.vectors[:, ord[1:p]]
elseif method == :whiten
W = _lda_whitening!(Sw, regcoef)
wSb = transpose(W) * (Sb * W)
Eb = eigen!(Symmetric(wSb))
ord = sortperm(Eb.values; rev=true)
P = W * Eb.vectors[:, ord[1:p]]
else
throw(ArgumentError("Invalid method name $(method)"))
end
return P::Matrix{T}
end
function _lda_whitening!(C::AbstractMatrix{T}, regcoef::T) where T<:Real
n = size(C,1)
E = eigen!(Symmetric(C))
v = E.values
a = regcoef * maximum(v)
for i = 1:n
@inbounds v[i] = 1.0 / sqrt(v[i] + a)
end
return rmul!(E.vectors, Diagonal(v))
end
#### SubspaceLDA
# When the dimensionality is much higher than the number of samples,
# it makes more sense to perform LDA on the space spanned by the
# within-group scatter.
struct SubspaceLDA{T<:Real}
projw::Matrix{T}
projLDA::Matrix{T}
λ::Vector{T}
cmeans::Matrix{T}
cweights::Vector{Int}
end
indim(M::SubspaceLDA) = size(M.projw,1)
outdim(M::SubspaceLDA) = size(M.projLDA, 2)
projection(M::SubspaceLDA) = M.projw * M.projLDA
mean(M::SubspaceLDA) = vec(sum(M.cmeans * Diagonal(M.cweights / sum(M.cweights)), dims=2))
classmeans(M::SubspaceLDA) = M.cmeans
classweights(M::SubspaceLDA) = M.cweights
transform(M::SubspaceLDA, x) = M.projLDA' * (M.projw' * x)
fit(::Type{F}, X::AbstractMatrix{T}, nc::Int, label::AbstractVector{Int}) where {T<:Real, F<:SubspaceLDA} =
fit(F, X, label, nc)
function fit(::Type{F}, X::AbstractMatrix{T},
label::AbstractVector{Int},
nc=maximum(label);
normalize::Bool=false) where {T<:Real, F<:SubspaceLDA}
d, n = size(X, 1), size(X, 2)
n ≥ nc || throw(ArgumentError("The number of samples is less than the number of classes"))
length(label) == n || throw(DimensionMismatch("Inconsistent array sizes."))
# Compute centroids, class weights, and deviation from centroids
# Note Sb = Hb*Hb', Sw = Hw*Hw'
cmeans, cweights, Hw = center(X, label, nc)
dmeans = cmeans .- (normalize ? mean(cmeans, dims=2) : cmeans * (cweights / T(n)))
Hb = normalize ? dmeans : dmeans * Diagonal(convert(Vector{T}, sqrt.(cweights)))
if normalize
Hw /= T(sqrt(n))
end
# Project to the subspace spanned by the within-class scatter
# (essentially, PCA before LDA)
Uw, Σw, _ = svd(Hw, full=false)
keep = Σw .> sqrt(eps(T)) * maximum(Σw)
projw = Uw[:,keep]
pHb = projw' * Hb
pHw = projw' * Hw
λ, G = lda_gsvd(pHb, pHw, cweights)
SubspaceLDA(projw, G, λ, cmeans, cweights)
end
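# Sketch for the high-dimensional case (illustrative sizes):
#
#   X = randn(1000, 60)         # d = 1000 features but only 60 samples
#   y = rand(1:4, 60)
#   S = fit(SubspaceLDA, X, y)  # nc defaults to maximum(y)
#   projection(S)               # combined d × (nc - 1) projection matrix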
# Reference: Howland & Park (2004), "Generalizing discriminant analysis
# using the generalized singular value decomposition", IEEE
# Trans. Patt. Anal. & Mach. Int., 26: 995-1006.
function lda_gsvd(Hb::AbstractMatrix{T}, Hw::AbstractMatrix{T}, cweights::AbstractVector{Int}) where T<:Real
nc = length(cweights)
K = vcat(Hb', Hw')
P, R, Q = svd(K, full=false)
keep = R .> sqrt(eps(T))*maximum(R)
R = R[keep]
Pk = P[1:nc, keep]
U, ΣA, W = svd(Pk)
ncnz = sum(cweights .> 0)
G = Q[:,keep]*(Diagonal(1 ./ R) * W[:,1:ncnz-1])
# Normalize
Gw = G' * Hw
nrm = Gw * Gw'
G = G ./ reshape(sqrt.(diag(nrm)), 1, ncnz-1)
# Also get the eigenvalues
Gw = G' * Hw
Gb = G' * Hb
λ = diag(Gb * Gb')./diag(Gw * Gw')
λ, G
end
function center(X::AbstractMatrix{T}, label::AbstractVector{Int}, nc=maximum(label)) where T<:Real
d, n = size(X,1), size(X,2)
# Calculate the class weights and means
cmeans = zeros(T, d, nc)
cweights = zeros(Int, nc)
for j = 1:n
k = label[j]
for i = 1:d
cmeans[i,k] += X[i,j]
end
cweights[k] += 1
end
for j = 1:nc
cw = cweights[j]
cw == 0 && continue
for i = 1:d
cmeans[i,j] /= cw
end
end
# Compute differences from the means
dX = Matrix{T}(undef, d, n)
for j = 1:n
k = label[j]
for i = 1:d
dX[i,j] = X[i,j] - cmeans[i,k]
end
end
cmeans, cweights, dX
end
|
{"hexsha": "46601146e7066e9e13ac6a3d7911f9a3e6ee1e6d", "size": 10150, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/lda.jl", "max_stars_repo_name": "yingqiuz/MultivariateStats.jl", "max_stars_repo_head_hexsha": "b84cbc94c04057297acc42168a58b599a984f87d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/lda.jl", "max_issues_repo_name": "yingqiuz/MultivariateStats.jl", "max_issues_repo_head_hexsha": "b84cbc94c04057297acc42168a58b599a984f87d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/lda.jl", "max_forks_repo_name": "yingqiuz/MultivariateStats.jl", "max_forks_repo_head_hexsha": "b84cbc94c04057297acc42168a58b599a984f87d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.7419354839, "max_line_length": 109, "alphanum_fraction": 0.6104433498, "num_tokens": 3140}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: © 2021 Massachusetts Institute of Technology.
# SPDX-FileCopyrightText: © 2021 Lee McCuller <mcculler@mit.edu>
# NOTICE: authors should document their contributions concisely in NOTICE
# with details inline in source files, comments, and docstrings.
"""
Calculation of the RMS of a filter with unit white noise passing through a ZPK.
This is done using an integral and residue calculus. The filter must have more
poles (P) than zeros (Z), unless the ZPK is in the Z domain,
where there is a natural cutoff frequency.
"""
import numpy as np
import numbers
from wavestate.bunch.depbunch import (
DepBunch,
depB_property,
# NOARG,
)
from .root_bunch import (
RBAlgorithms,
RootBunch,
root_constraints,
)
from .ratmath import (
ZPKsum,
ZPKprod,
ZPKscalarprod,
ZPKdiv,
ZPKscalardiv,
ZPKscalarsum,
ZPKdivscalar,
)
class ZPKTF(DepBunch):
RBalgo = RBAlgorithms()
root_constraint = RBalgo.root_constraints.mirror_real
def __build__(
self,
ZPK=None,
zeros=None,
poles=None,
gain=None,
F_nyquist_Hz="unknown",
**kwargs
):
super(ZPKTF, self).__build__(**kwargs)
if zeros is None and ZPK is not None:
try:
zeros = ZPK.zeros
except AttributeError:
zeros = ZPK[0]
if zeros is None:
zeros = ()
if poles is None and ZPK is not None:
try:
poles = ZPK.poles
except AttributeError:
poles = ZPK[1]
if poles is None:
poles = ()
if gain is None and ZPK is not None:
try:
gain = ZPK.gain
except AttributeError:
gain = ZPK[2]
if gain is None:
gain = 1
self.zeros = zeros
self.poles = poles
self.gain = gain
self.F_nyquist_Hz = F_nyquist_Hz
def __iter__(self):
yield self.zeros.fullplane
yield self.poles.fullplane
yield self.gain
def __str__(self):
return "ZPKTF(Z={},P={},K={})".format(
str(self.zeros), str(self.poles), str(self.gain)
)
@depB_property
def test(self, val=1):
return val
@depB_property
def gain(self, val):
return val
@depB_property
def zeros(self, val):
val = self.RBalgo.expect_atleast(val, constraint=self.root_constraint)
return val
@depB_property
def poles(self, val):
val = self.RBalgo.expect_atleast(val, constraint=self.root_constraint)
return val
@depB_property
def order(self):
return max(
len(self.poles),
len(self.zeros),
)
@depB_property
def order_sos(self):
return (
max(
len(self.poles),
len(self.zeros),
)
+ 1
) // 2
@depB_property
def order_relative(self):
return len(self.zeros) - len(self.poles)
@depB_property
def order_total(self):
return len(self.poles) + len(self.zeros)
def xfer_eval(self, F_Hz):
# TODO must add pole-zero rephasing for Z filters
# TODO, make this name consistent in all classes
if self.F_nyquist_Hz is None:
X_grid = 1j * F_Hz
else:
# use Z^-1
X_grid = np.exp(1j * np.pi * F_Hz / self.F_nyquist_Hz)
h, lnG = self.poles.val_lnG(X_grid)
h, lnG = self.zeros.val_lnG(X_grid, h=1 / h, lnG=-lnG)
return h * (np.exp(lnG) * self.gain)
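# Usage sketch (illustrative; the pole and gain values are made up). Construction through
# `asZPKTF`, defined later in this module, is assumed to be the convenient entry point:
#
#   zpk = asZPKTF(((), (-10.0,), 10.0), F_nyquist_Hz=None)      # s-domain low-pass
#   mag = np.abs(zpk.xfer_eval(np.array([0.01, 1.0, 100.0])))   # |H| on a grid in Hz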
def __add__(self, other):
if isinstance(other, numbers.Real):
Z3, P3, K3 = ZPKscalarsum(self, other)
F_nyquist_Hz = self.F_nyquist_Hz
elif isinstance(other, tuple):
F_nyquist_Hz = common_nyquist(self, other)
Z3, P3, K3 = ZPKsum(self, other)
else:
return NotImplemented
return self.__class__(Z3, P3, K3, F_nyquist_Hz=F_nyquist_Hz)
def __radd__(self, other):
if isinstance(other, numbers.Real):
Z3, P3, K3 = ZPKscalarsum(self, other)
F_nyquist_Hz = self.F_nyquist_Hz
elif isinstance(other, tuple):
F_nyquist_Hz = common_nyquist(other, self)
Z3, P3, K3 = ZPKsum(self, other)
else:
return NotImplemented
return self.__class__(Z3, P3, K3, F_nyquist_Hz=F_nyquist_Hz)
def __sub__(self, other):
if isinstance(other, numbers.Real):
Z3, P3, K3 = ZPKscalarsum(self, -other)
F_nyquist_Hz = self.F_nyquist_Hz
elif isinstance(other, tuple):
F_nyquist_Hz = common_nyquist(self, other)
Z2, P2, K2 = other
Z3, P3, K3 = ZPKsum(self, (Z2, P2, -K2))
else:
return NotImplemented
return self.__class__(Z3, P3, K3, F_nyquist_Hz=F_nyquist_Hz)
def __rsub__(self, other):
if isinstance(other, numbers.Real):
Z3, P3, K3 = ZPKscalarsum(self, -other)
F_nyquist_Hz = self.F_nyquist_Hz
elif isinstance(other, tuple):
F_nyquist_Hz = common_nyquist(other, self)
Z2, P2, K2 = other
Z3, P3, K3 = ZPKsum(self, (Z2, P2, -K2))
else:
return NotImplemented
return self.__class__(Z3, P3, -K3, F_nyquist_Hz=F_nyquist_Hz)
def __mul__(self, other):
if isinstance(other, numbers.Real):
Z3, P3, K3 = ZPKscalarprod(self, other)
F_nyquist_Hz = self.F_nyquist_Hz
elif isinstance(other, tuple):
F_nyquist_Hz = common_nyquist(self, other)
Z3, P3, K3 = ZPKprod(self, other)
else:
return NotImplemented
return self.__class__(Z3, P3, K3, F_nyquist_Hz=F_nyquist_Hz)
def __rmul__(self, other):
if isinstance(other, numbers.Real):
Z3, P3, K3 = ZPKscalarprod(self, other)
F_nyquist_Hz = self.F_nyquist_Hz
elif isinstance(other, tuple):
F_nyquist_Hz = common_nyquist(other, self)
Z3, P3, K3 = ZPKprod(other, self)
else:
return NotImplemented
return self.__class__(Z3, P3, K3, F_nyquist_Hz=F_nyquist_Hz)
def __truediv__(self, other):
if isinstance(other, numbers.Real):
Z3, P3, K3 = ZPKscalardiv(self, other)
F_nyquist_Hz = self.F_nyquist_Hz
elif isinstance(other, tuple):
F_nyquist_Hz = common_nyquist(self, other)
Z3, P3, K3 = ZPKdiv(self, other)
else:
return NotImplemented
return self.__class__(Z3, P3, K3, F_nyquist_Hz=F_nyquist_Hz)
def __rtruediv__(self, other):
if isinstance(other, numbers.Real):
Z3, P3, K3 = ZPKdivscalar(other, self)
F_nyquist_Hz = self.F_nyquist_Hz
elif isinstance(other, tuple):
F_nyquist_Hz = common_nyquist(other, self)
Z3, P3, K3 = ZPKdiv(other, self)
else:
return NotImplemented
return self.__class__(Z3, P3, K3, F_nyquist_Hz=F_nyquist_Hz)
# TODO, version check here?
__div__ = __truediv__
__rdiv__ = __rtruediv__
def __pow__(self, other):
if isinstance(other, numbers.Complex):
if other.imag != 0:
return NotImplemented
other = other.real
if not isinstance(other, numbers.Integral):
residual = other % 1
if residual < -1e-14 or residual > 1e-14:
return NotImplemented
other = int((other + 1e-14) // 1)
if other == 0:
return self.__class__((), (), 1)
elif other == 1:
return self
elif other == -1:
return self.__class__(
self.poles, self.zeros, 1 / self.gain, F_nyquist_Hz=self.F_nyquist_Hz
)
elif other > 1:
return self.__class__(
tuple(self.zeros) * other,
tuple(self.poles) * other,
self.gain ** other,
F_nyquist_Hz=self.F_nyquist_Hz,
)
elif other < 1:
other = -other
return self.__class__(
tuple(self.poles) * other,
tuple(self.zeros) * other,
self.gain ** (-other),
F_nyquist_Hz=self.F_nyquist_Hz,
)
def __neg__(self):
return self.__class__(
self.poles, self.zeros, -self.gain, F_nyquist_Hz=self.F_nyquist_Hz
)
def __pos__(self):
return self
def abs_sq(self, F_nyquist_Hz="unknown"):
if self.F_nyquist_Hz != "unknown":
F_nyquist_Hz = self.F_nyquist_Hz
if F_nyquist_Hz == "unknown":
raise RuntimeError(
" F_nyquist_Hz must be known to perform the correct"
" ZPK manipulation for abs_sq"
)
elif F_nyquist_Hz is None:
return self.__class__(
tuple(self.poles) + tuple(np.asarray(self.poles).conjugate()),
tuple(self.zeros) + tuple(np.asarray(self.zeros).conjugate()),
self.gain ** 2,
F_nyquist_Hz=self.F_nyquist_Hz,
)
else:
return self.__class__(
tuple(self.poles) + tuple(1 / np.asarray(self.poles)),
tuple(self.zeros) + tuple(1 / np.asarray(self.zeros)),
self.gain ** 2,
F_nyquist_Hz=self.F_nyquist_Hz,
)
def assert_F_nyquist_Hz(self, F_nyquist_Hz):
if self.F_nyquist_Hz == "unknown":
return
if self.F_nyquist_Hz != F_nyquist_Hz:
raise RuntimeError("Incompatible ZPK representations!")
return F_nyquist_Hz
def common_nyquist(LHS, RHS):
"""
Assumes LHS and RHS are each a ZPK or have an
F_nyquist_Hz property.
"""
if isinstance(LHS, ZPKTF):
LHS_F_nyquist_Hz = LHS.F_nyquist_Hz
else:
LHS_F_nyquist_Hz = "unknown"
if isinstance(RHS, ZPKTF):
RHS_F_nyquist_Hz = RHS.F_nyquist_Hz
else:
RHS_F_nyquist_Hz = "unknown"
if LHS_F_nyquist_Hz == "unknown":
return RHS_F_nyquist_Hz
elif RHS_F_nyquist_Hz == "unknown":
return LHS_F_nyquist_Hz
if LHS_F_nyquist_Hz != RHS_F_nyquist_Hz:
raise RuntimeError(
(
"Math Operation should not be done on ZPKs arising from "
"different representations, LHS is {}, RHS is {}"
).format(LHS_F_nyquist_Hz, RHS_F_nyquist_Hz)
)
return LHS_F_nyquist_Hz
# TODO,
def asZPKTF(
ZPK,
complete=False,
F_nyquist_Hz=None,
delay_s=None,
):
from .zpk_with_data import ZPKwData
if isinstance(ZPK, ZPKTF):
return ZPK
elif isinstance(ZPK, (tuple, list)):
Z, P, K = ZPK
return ZPKTF(
zeros=asMRRB(Z, complete=complete),
poles=asMRRB(P, complete=complete),
gain=K,
F_nyquist_Hz=F_nyquist_Hz,
)
elif isinstance(ZPK, ZPKwData):
return ZPKTF(
zeros=ZPK.zeros * ZPK.zeros_overlay,
poles=ZPK.poles * ZPK.poles_overlay,
gain=ZPK.gain,
F_nyquist_Hz=F_nyquist_Hz,
)
# last ditch effort if it is some other wavestate.iirrational type
return asZPKTF(ZPK.ZPKrep, complete=False)
def asMRRB(
roots=None,
r=(),
c=(),
complete=False,
):
"""
Convenience Method to generate root_bunches with mirror real constraints
from raw root lists
"""
rb0 = RootBunch(r=r, c=c, constraint=root_constraints.mirror_real)
if roots is not None:
roots = np.asarray(roots)
select_real = roots.imag == 0
rr = roots[select_real]
rc = roots[~select_real]
if np.all(rc.imag < 0) and complete:
return RootBunch(
r=np.concatenate([rb0.r, rr]),
c=np.concatenate([rb0.c, rc.conjugate()]),
constraint=root_constraints.mirror_real,
)
elif np.all(rc.imag > 0) and complete:
return RootBunch(
r=np.concatenate([rb0.r, rr]),
c=np.concatenate([rb0.c, rc]),
constraint=root_constraints.mirror_real,
)
else:
rb = ZPKTF.RBalgo.expect(roots, constraint=root_constraints.mirror_real)
return rb * rb0
else:
return rb0
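# Usage sketch (illustrative root locations): `asZPKTF` normalizes plain (Z, P, K) tuples
# into ZPKTF objects with mirror-real root bookkeeping.
#
#   zpk_s = asZPKTF(((-1.0,), (-10.0, -100.0), 5.0), F_nyquist_Hz=None)
#   zpk_s.order_relative   # -1, one more pole than zero
#   zpk_s.order_total      # 3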
|
{"hexsha": "817576b09521c527afccf13d690703f05029eeb9", "size": 12575, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/wavestate/iirrational/representations/zpktf.py", "max_stars_repo_name": "wavestate/wavestate-iirrational", "max_stars_repo_head_hexsha": "01d6dba8b2131fa2a099a74f17e6540f30cee606", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/wavestate/iirrational/representations/zpktf.py", "max_issues_repo_name": "wavestate/wavestate-iirrational", "max_issues_repo_head_hexsha": "01d6dba8b2131fa2a099a74f17e6540f30cee606", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/wavestate/iirrational/representations/zpktf.py", "max_forks_repo_name": "wavestate/wavestate-iirrational", "max_forks_repo_head_hexsha": "01d6dba8b2131fa2a099a74f17e6540f30cee606", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6707317073, "max_line_length": 85, "alphanum_fraction": 0.5716898608, "include": true, "reason": "import numpy", "num_tokens": 3424}
|
import numpy as np
import tensorflow as tf
from model import GCN_LPA
def print_statistics(features, labels, adj):
n_nodes = features[2][0]
n_edges = (len(adj[0]) - labels.shape[0]) // 2
n_features = features[2][1]
n_labels = labels.shape[1]
labeled_node_rate = 20 * n_labels / n_nodes
n_intra_class_edge = 0
for i, j in adj[0]:
if i < j and np.argmax(labels[i]) == np.argmax(labels[j]):
n_intra_class_edge += 1
intra_class_edge_rate = n_intra_class_edge / n_edges
print('n_nodes: %d' % n_nodes)
print('n_edges: %d' % n_edges)
print('n_features: %d' % n_features)
print('n_labels: %d' % n_labels)
print('labeled node rate: %.4f' % labeled_node_rate)
print('intra-class edge rate: %.4f' % intra_class_edge_rate)
def train(args, data, batch_test=False):
features, labels, adj, train_mask, val_mask, test_mask = [data[i] for i in range(6)]
# uncomment the next line if you want to print statistics of the current dataset
# print_statistics(features, labels, adj)
model = GCN_LPA(args, features, labels, adj)
def get_feed_dict(mask, dropout):
feed_dict = {model.label_mask: mask, model.dropout: dropout}
return feed_dict
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
best_val_acc = 0
final_test_acc = 0
for epoch in range(args.epochs):
# train
_, train_loss, train_acc = sess.run(
[model.optimizer, model.loss, model.accuracy], feed_dict=get_feed_dict(train_mask, args.dropout))
# validation
val_loss, val_acc = sess.run([model.loss, model.accuracy], feed_dict=get_feed_dict(val_mask, 0.0))
# test
test_loss, test_acc = sess.run([model.loss, model.accuracy], feed_dict=get_feed_dict(test_mask, 0.0))
if val_acc >= best_val_acc:
best_val_acc = val_acc
final_test_acc = test_acc
if not batch_test:
print('epoch %d train loss: %.4f acc: %.4f val loss: %.4f acc: %.4f test loss: %.4f acc: %.4f'
% (epoch, train_loss, train_acc, val_loss, val_acc, test_loss, test_acc))
if not batch_test:
print('final test acc: %.4f' % final_test_acc)
else:
return final_test_acc
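# Usage sketch (illustrative; `args` and `data` are produced elsewhere in this repository,
# e.g. by the argument parser and the data loader -- that wiring is an assumption here):
#
#   final_test_acc = train(args, data, batch_test=True)
#   print('test acc: %.4f' % final_test_acc)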
|
{"hexsha": "170f6e329cc1064dc67bdb2445128f0989efac20", "size": 2374, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/train.py", "max_stars_repo_name": "hwwang55/GCN-LPA", "max_stars_repo_head_hexsha": "21df23afee0912380ac682b0f80ac244140c33e7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 70, "max_stars_repo_stars_event_min_datetime": "2020-02-18T09:06:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T06:05:49.000Z", "max_issues_repo_path": "src/train.py", "max_issues_repo_name": "hwwang55/GCN-LPA", "max_issues_repo_head_hexsha": "21df23afee0912380ac682b0f80ac244140c33e7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-06-03T14:33:22.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-23T17:39:11.000Z", "max_forks_repo_path": "src/train.py", "max_forks_repo_name": "hwwang55/GCN-LPA", "max_forks_repo_head_hexsha": "21df23afee0912380ac682b0f80ac244140c33e7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2020-04-05T10:12:42.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-03T01:15:03.000Z", "avg_line_length": 35.4328358209, "max_line_length": 119, "alphanum_fraction": 0.6225779275, "include": true, "reason": "import numpy", "num_tokens": 624}
|
import sys
import pandas as pd
import numpy as np
import re
import nltk
import pickle
from sqlalchemy import create_engine
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, accuracy_score
from sklearn.metrics.scorer import make_scorer
from sklearn.model_selection import GridSearchCV
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.tokenize import word_tokenize
def load_data(database_filepath):
"""Load processed dataset from a SQL database
Args:
database_filepath: the path to the SQL database
Returns:
X: the messages, Y: the category labels, category_names: the list of category names
"""
engine = create_engine('sqlite:///{}'.format(database_filepath))
df = pd.read_sql_table('Data', engine)
X = df['message']
Y = df.drop(columns=['message','original','genre'])
category_names = Y.columns
return X, Y, category_names
def tokenize(text):
"""Tokenize a message
1. Replace non-alphabetical letter and non-digit by space
2. Tokenize words in the message
3. Remove stop words
4. Lemmatize word tokens
Args:
text: a text message
Returns:
List: the token list
"""
text = re.sub(r'\W', ' ', text.lower())
tokens = word_tokenize(text)
stop_words = stopwords.words('english')
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
if tok not in stop_words:
clean_tok = lemmatizer.lemmatize(tok).strip()
clean_tokens.append(clean_tok)
return clean_tokens
def average_accuracy(y_true, y_pred):
"""Custom scorer calculates the average accuracy over all categories"""
y_true = np.array(y_true).T
y_pred = np.array(y_pred).T
accuracies = []
for i in range(len(y_true)):
accuracies.append(accuracy_score(y_true[i], y_pred[i]))
return np.mean(accuracies)
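# Quick worked example (illustrative arrays): two categories over three samples, each
# category predicted correctly for two of the three samples, so the average is 2/3.
#
#   y_true = [[1, 0], [0, 1], [1, 1]]
#   y_pred = [[1, 0], [1, 1], [1, 0]]
#   average_accuracy(y_true, y_pred)   # -> 0.666...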
def build_model():
"""Build a pipeline with GridSearchCV to train the dataset"""
pipeline = Pipeline([
('vect', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer()),
('clf', MultiOutputClassifier(RandomForestClassifier(), n_jobs=-1))
])
parameters = {
'tfidf__norm': ['l1', 'l2'],
'clf__estimator__max_depth': [2, 5, 10, 20],
'clf__estimator__criterion': ['gini', 'entropy']
}
scorer = make_scorer(average_accuracy, greater_is_better=True)
cv = GridSearchCV(pipeline, parameters, scoring=scorer, verbose=10)
return cv
def evaluate_model(model, X_test, Y_test, category_names):
"""Predict the test dataset and print out the precision, callback, f1-score and accuracy for each category
Args:
model: the trained model to evaluate
X_test: the test set
Y_test: the test labels
category_names: list of categories
"""
Y_pred = model.predict(X_test)
print('average accuracy:', average_accuracy(Y_test, Y_pred))
print('---------------------')
Y_test = pd.DataFrame(Y_test, columns=category_names)
Y_pred = pd.DataFrame(Y_pred, columns=category_names)
for category in category_names:
print('category:', category)
print(classification_report(Y_test[category], Y_pred[category]))
print('---------------------')
def save_model(model, model_filepath):
"""Save the trained model
Args:
model: the trained model
model_filepath: the path to save the trained model
"""
pickle.dump(model, open(model_filepath, 'wb'))
def main():
if len(sys.argv) == 3:
database_filepath, model_filepath = sys.argv[1:]
print('Loading data...\n DATABASE: {}'.format(database_filepath))
X, Y, category_names = load_data(database_filepath)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
print('Building model...')
model = build_model()
print('Training model...')
model.fit(X_train, Y_train)
print('Evaluating model...')
evaluate_model(model.best_estimator_, X_test, Y_test, category_names)
print('Saving model...\n MODEL: {}'.format(model_filepath))
save_model(model.best_estimator_, model_filepath)
print('Trained model saved!')
else:
print('Please provide the filepath of the disaster messages database '\
'as the first argument and the filepath of the pickle file to '\
'save the model to as the second argument. \n\nExample: python '\
'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
if __name__ == '__main__':
main()
|
{"hexsha": "fd4800c3dfd941068bbd9800305444bee7504a3c", "size": 4994, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/train_classifier.py", "max_stars_repo_name": "minhtuan2000/disaster-response-pipeline-project", "max_stars_repo_head_hexsha": "bef0f33e2c5a397e6525d53dc636553edd73d002", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "models/train_classifier.py", "max_issues_repo_name": "minhtuan2000/disaster-response-pipeline-project", "max_issues_repo_head_hexsha": "bef0f33e2c5a397e6525d53dc636553edd73d002", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/train_classifier.py", "max_forks_repo_name": "minhtuan2000/disaster-response-pipeline-project", "max_forks_repo_head_hexsha": "bef0f33e2c5a397e6525d53dc636553edd73d002", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.6075949367, "max_line_length": 110, "alphanum_fraction": 0.6712054465, "include": true, "reason": "import numpy", "num_tokens": 1131}
|
from __future__ import print_function
from __future__ import with_statement
import os
import math
import numpy
from tqdm import tqdm
from PIL import Image
from configparser import ConfigParser, Error
from tigre.utilities.geometry import Geometry
def BrukerDataLoader(filepath, **kwargs):
# BrukerDataLoader(filepath) Loads Bruker Skyscan datasets into TIGRE standard
#
# BrukerDataLoader(filepath, OPT=VAL, ...) uses options and values.
# These are options in case you don't want to load the entire
# dataset, but only particular sets of projections.
# The possible arguments are:
# 'dataset_number': Some folders will have several scans.
# Set to 'all' to load all of them, or give a number (starting from 0)
# 'sampling': type of sampling. default 'equidistant' Can be:
# 'equidistant': equidistantly sample the entire set
# of angles. 'num_angles' should be set for partial
# loading of the data.
# 'step': sample the entire set of projections every
# 'sampling_step' angles.
# 'continuous': Load the first 'num_angles' amount of
# angles only.
#
# 'num_angles': Number of total angles to load. Default all of
# them. Useful for 'equidistant' and 'continous' loading
#
# 'sampling_step': step to load when loading projections.
# Default=1. Useful for 'step' loading.
dataset_number = kwargs["dataset_number"] if "dataset_number" in kwargs else None
if dataset_number == "all":
print("Loading all scans in folder, assuming the same geometry for all \n\n")
del kwargs["dataset_number"]
num_scans = find_number_of_datasets(filepath)
angles = [None] * num_scans
projections = [None] * num_scans
for scan in range(num_scans):
print("Loading scan number " + str(scan) + "...\n")
folder, geometry, angles[scan] = read_Bruker_geometry(
filepath, dataset_number=scan, **kwargs
)
projections[scan], geometry, angles[scan] = load_Bruker_projections(
folder, geometry, angles[scan], dataset_number=scan, **kwargs
)
projections = numpy.concatenate(projections)
angles = numpy.concatenate(angles)
return projections, geometry, angles
else:
folder, geometry, angles = read_Bruker_geometry(filepath, **kwargs)
return load_Bruker_projections(folder, geometry, angles, **kwargs)
def read_Bruker_geometry(filepath, **kwargs):
# check if input was log file itself, or just the folder
if filepath.endswith(".log"):
folder, ini = os.path.split(filepath)
else:
folder = filepath
files = [file for file in os.listdir(folder) if file.endswith(".log")]
if not files:
raise ValueError("No .log file found in folder: " + folder)
num_scans = find_number_of_datasets(filepath)
if num_scans is not None:
dataset_number = kwargs["dataset_number"] if "dataset_number" in kwargs else None
if dataset_number is None:
raise ValueError(
"This folder contains many datasets, please select which one to load with BrukerDataLoader(..., dataset_number=a_number)"
)
if dataset_number >= num_scans:
raise ValueError("Dataset number given larger than total number of datasets")
matching = [s for s in files if "{0:0=2d}".format(dataset_number) + ".log" in s]
if len(matching) > 1:
raise AssertionError(
"More than 1 file for the same dataset found, confused what to do, so I error"
)
ini = matching[0]
else:
ini = files[0]
# create configuration parser
cfg = ConfigParser()
cfg.read(os.path.join(folder, ini))
cfg_system = cfg["System"]
# start empty geometry
geometry = Geometry()
geometry.accuracy = 0.5
## Detector information
# Size of pixels in the detector
try:
ratio = float(cfg_system["CameraXYRatio"])
except KeyError:
ratio = float(cfg_system["Camera X/Y Ratio"])
except:
ratio = 1
# need to verify if this is correct.
ratio = 1
cfg_aq = cfg["Acquisition"]
try:
binu = float(cfg_aq["Camera binning"][0])
binv = float(cfg_aq["Camera binning"][2])
except:
binu = 1
binv = 1
geometry.dDetector = numpy.array(
(
float(cfg_system["Camera Pixel Size (um)"]) / 1000.0 * binv*ratio,
float(cfg_system["Camera Pixel Size (um)"]) / 1000.0 * binu,
)
)
# Number of pixel in the detector
geometry.nDetector = numpy.array(
(float(cfg_aq["Number of Rows"]), float(cfg_aq["Number of Columns"]))
)
# Total size of the detector
geometry.sDetector = geometry.nDetector * geometry.dDetector
geometry.nDetector = geometry.nDetector.astype(int)
## Offset of the detector:
try:
offs = (
-(geometry.nDetector[0] / 2 - float(cfg_aq["Optical Axis (line)"]))
* geometry.dDetector[0]
)
except:
offs = 0.0
geometry.offDetector = numpy.array((offs, 0.0))
# Size of each voxel
geometry.dVoxel = numpy.array(
(
float(cfg_aq["Image Pixel Size (um)"]) / 1000,
float(cfg_aq["Image Pixel Size (um)"]) / 1000,
float(cfg_aq["Image Pixel Size (um)"]) / 1000,
)
)
geometry.nVoxel = numpy.array(
(geometry.nDetector[0], geometry.nDetector[1], geometry.nDetector[1])
)
geometry.sVoxel = geometry.nVoxel * geometry.dVoxel
geometry.nVoxel = geometry.nVoxel.astype(int)
#% Global geometry
geometry.DSO = float(cfg_aq["Object to Source (mm)"])
geometry.DSD = float(cfg_aq["Camera to Source (mm)"])
geometry.whitelevel = 2 ** int(cfg_aq["Depth (bits)"])
angles = numpy.arange(
0.0,
float(cfg_aq["Number of Files"]) * float(cfg_aq["Rotation Step (deg)"]),
float(cfg_aq["Rotation Step (deg)"]),
)
angles = angles[:-1] * numpy.pi / 180
files = [file for file in os.listdir(folder) if file.endswith(".csv")]
if files:
offset = numpy.genfromtxt(os.path.join(folder, files[0]), delimiter=',',skip_header=5,dtype=float)
offset = numpy.delete(offset, 0, 1)
geometry.offDetector=geometry.offDetector+numpy.fliplr(offset)*(geometry.dDetector/[binv,binu])
return folder, geometry, angles
def load_Bruker_projections(folder, geometry, angles, **kwargs):
angles, indices, dataset_number = parse_inputs(geometry, angles, **kwargs)
# load images
files = sorted([file for file in os.listdir(folder) if file.lower().endswith(".tif")])
if dataset_number is not None:
files = [file for file in files if file[-10:-8] == "{0:0=2d}".format(dataset_number)]
image = Image.open(os.path.join(folder, files[indices[0]]))
image = numpy.asarray(image).astype(numpy.float32)
projections = numpy.zeros([len(indices), image.shape[0], image.shape[1]], dtype=numpy.single)
projections[0, :, :] = -numpy.log(image / float(geometry.whitelevel))
index = 1
print("Loading Bruker Skyscan dataset: " + folder)
for i in tqdm(indices[1:]):
image = Image.open(os.path.join(folder, files[i]))
image = numpy.asarray(image).astype(numpy.float32)
projections[index, :, :] = -numpy.log(image / float(geometry.whitelevel))
index = index + 1
del geometry.whitelevel
return numpy.asarray(projections), geometry, angles
## This should be on a separate "io_common.py" file.
def parse_inputs(geometry, angles, **kwargs):
# TODO: warn user about invalid options or values
sampling = kwargs["sampling"] if "sampling" in kwargs else "equidistant"
nangles = int(kwargs["num_angles"]) if "num_angles" in kwargs else len(angles)
step = int(kwargs["sampling_step"]) if "sampling_step" in kwargs else 1
dataset_number = kwargs["dataset_number"] if "dataset_number" in kwargs else None
indices = numpy.arange(0, len(angles))
if sampling == "equidistant":
step = int(round(len(angles) / nangles))
indices = indices[::step]
angles = angles[::step]
elif sampling == "continuous":
indices = indices[:nangles]
angles = angles[:nangles]
elif sampling == "step":
indices = indices[::step]
angles = angles[::step]
else:
raise ValueError("Unknown sampling type: " + str(sampling))
return angles, indices, dataset_number
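# Illustrative sketch (not called anywhere): 'step' sampling keeps every 4th projection,
# so 360 input angles collapse to 90 angles with indices 0, 4, 8, ...
def _example_parse_inputs():
    angles = numpy.linspace(0.0, 2.0 * numpy.pi, 360, endpoint=False)
    sub_angles, indices, _ = parse_inputs(None, angles, sampling="step", sampling_step=4)
    return sub_angles, indices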
def find_number_of_datasets(filepath):
# check if input was log file itself, or just the folder
if filepath.endswith(".log"):
folder, ini = os.path.split(filepath)
else:
folder = filepath
files = [file for file in os.listdir(folder) if file.endswith(".log")]
if not files:
raise ValueError("No .log file found in folder: " + folder)
ini = min(files, key=len) # shortest one is the main one?
cfg = ConfigParser()
cfg.read(os.path.join(folder, ini))
cfg_aq = cfg["Acquisition"]
if cfg.has_option("Acquisition", "Number of connected scans"):
return int(cfg_aq["Number of connected scans"])
else:
return None
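# Usage sketch (the folder path is a placeholder; it must contain the Bruker Skyscan .log
# file plus the projection .tif images):
#
#   projections, geometry, angles = BrukerDataLoader(
#       "/path/to/skyscan_scan_folder", sampling="equidistant", num_angles=180)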
|
{"hexsha": "c492ffcff48aa1df9b8570de2400eaa139a6b110", "size": 9586, "ext": "py", "lang": "Python", "max_stars_repo_path": "Python/tigre/utilities/io/BrukerDataLoader.py", "max_stars_repo_name": "tsadakane/TIGRE", "max_stars_repo_head_hexsha": "a853cd2d4a6bc9509c01414b85ca75b4448fd700", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 326, "max_stars_repo_stars_event_min_datetime": "2016-07-01T10:48:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T07:34:52.000Z", "max_issues_repo_path": "Python/tigre/utilities/io/BrukerDataLoader.py", "max_issues_repo_name": "tsadakane/TIGRE", "max_issues_repo_head_hexsha": "a853cd2d4a6bc9509c01414b85ca75b4448fd700", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 311, "max_issues_repo_issues_event_min_datetime": "2016-07-05T16:00:06.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T12:14:55.000Z", "max_forks_repo_path": "Python/tigre/utilities/io/BrukerDataLoader.py", "max_forks_repo_name": "tsadakane/TIGRE", "max_forks_repo_head_hexsha": "a853cd2d4a6bc9509c01414b85ca75b4448fd700", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 157, "max_forks_repo_forks_event_min_datetime": "2016-08-08T12:13:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T00:37:45.000Z", "avg_line_length": 37.4453125, "max_line_length": 141, "alphanum_fraction": 0.6197579804, "include": true, "reason": "import numpy", "num_tokens": 2274}
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 2 07:06:17 2020
@author: A. Goulart
"""
import numpy as np
import matplotlib.pyplot as plt
from M1_FileReader_v2 import FileReader_1
class Method_1:
def __init__(self, foldername, filename):
self.it_lim = 1000
self.erro_max = 1e-4
self.fontsize = 10
self.offset = 26
self.dpi_value = 800
self.fmt = '| {{:^{}s}} | {{:^{}s}} |'.format(25, 15) # widths only
self.fr = FileReader_1()
self.fr.setFolderName(foldername)
self.fr.setFileName(filename)
def set_it_lim(self,n):
self.it_lim = n
def get_it_lim(self):
print("it_lim = ", self.it_lim)
def set_erro_max(self,n):
self.erro_max = n
def get_erro_max(self):
print("erro_max = ", self.erro_max)
def set_textbox_fontsize(self,fs):
self.fontsize = int(fs)
def get_textbox_fontsize(self):
print("TextBox fontise = ", self.fontsize)
def set_textbox_offset(self,off):
self.offset = off
def get_textbox_offset(self):
print("TextBox offset = ", self.offset)
def show_plot_inj(self,answer):
self.sh_plot_inj = answer
def show_plot_error(self,answer):
self.sh_plot_error = answer
def set_dpi(self,n):
self.dpi_value = n
def get_dpi(self):
print("DPI = ", self.dpi_value)
def print_data(self, label, value):
print(self.fmt.format(str(label), str(value)))
def check_value(self, var_name, min_value, max_value, var_value):
if(var_value > max_value):
print("ERROR: %s value out of bounds [%.2f, %.2f] (current value: %.2f)"(var_name,min_value,max_value,var_value))
elif(var_value < min_value):
print("ERROR: %s value out of bounds [%.2f, %.2f] (current value: %.2f)"(var_name,min_value,max_value,var_value))
##### Methods - Ideal Injector #####
def calc_phi(self, alpha2):
return 2/(2+(np.tan(alpha2/2*np.pi/180))**2)
def calc_geom_char(self, phi):
return (1-phi)*np.sqrt(2/phi)/phi
def calc_mu(self, phi):
return phi*np.sqrt(phi/(2-phi))
def calc_a(self,phi):
return 2*((1-phi)**2)/(2-phi)
def calc_2alpha(self, phi):
return 2*180/np.pi*np.arctan(np.sqrt(2*(1-phi)/phi))
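    ## Illustrative worked example (not used by the sizing routine): for a requested spray
    ## angle of 2*alpha = 90 deg the ideal-injector relations above give approximately
    ## phi ~ 0.667, A ~ 0.866, mu ~ 0.471, and calc_2alpha(phi) recovers 90 deg.
    def _example_ideal_injector_relations(self):
        phi = self.calc_phi(90.0)
        return phi, self.calc_geom_char(phi), self.calc_mu(phi), self.calc_2alpha(phi)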
##### Methods - Real Injector #####
## Computes the Reynolds number in the injector's tangential channels
def calc_reynolds_in(self,m_in,din_visc,n,r_in_orf,rho):
return (2*m_in)/(din_visc*n*np.pi*r_in_orf)
#return (0.637*m_in)/(np.sqrt(n)*r_in_orf*rho*din_visc)
## computes the friction coefficient (Lambda)
def calc_friction_coef(self, reynolds_in):
#return 10**(25.8/(np.log(reynolds_in))-2) #Bayvel
return 0.3164*reynolds_in**(-0.25) #Bazarov
## Computes the hydraulic loss in the injector (Ksi)
def calc_hyd_loss(self, inlet_type, r_s, l_in, inlet_radius, r_in_orf, friction_coef):
if(inlet_type == "curved\n"):
self.alpha_in = 90 - 180/np.pi * np.arctan(r_s/l_in)
self.ksi_in = -1*self.alpha_in/150 + 1.1
elif(inlet_type == "straigth\n"):
self.ksi_in = 0.5*np.exp(-1.4*(inlet_radius/(2*r_in_orf)))
else:
print("Error in calc_hyd_loss:\n Inlet type not recognized")
return self.ksi_in + friction_coef*l_in/(2*r_in_orf)
## computes the corrected characteristic geometric parameter (A_eq) [Bazarov, eq(100)]
def calc_geom_char_eq(self, r_in_pos, r_n, n, r_in_orf, friction_coef):
return (r_in_pos*r_n)/(n*r_in_orf**2+(friction_coef/2)*r_in_pos*(r_in_pos-r_n))
## Computes the corrected filling coefficient (Phi_eq) [Bazarov, eq(99), eq(100)]
def calc_phi_eq(self, geom_char_eq):
self.coefs = [geom_char_eq**2, -2, 4, -2]
self.roots = np.roots(self.coefs)
self.count = 0
self.phi_eq = -1
for root in self.roots:
if(0 < root < 1 and root.imag == 0):
self.count+=1
self.phi_eq = root.real
if(self.phi_eq == -1):
print("Error in Calc_Phi_eq:\n No valid root found.")
if(self.count > 1):
print("Error in Calc_Phi_eq:\n Multiple valid values found")
print(self.roots)
return self.phi_eq
## Computes the equivalent discharge coefficient [Bazarov, eq(99)]
def calc_mu_eq(self, phi_eq):
return phi_eq**1.5/np.sqrt(2-phi_eq)
## Computes the injector's actual discharge coefficient [Bazarov, eq(99)]
def calc_mu_i(self, mu_eq, geom_char, r_in_pos, r_n, ksi):
return mu_eq/np.sqrt(1+ksi*mu_eq*(geom_char*r_in_pos/r_n)**2)
##### Methods - Additional Calculations #####
## Computes the injector's expected percentage non-uniformity [Bayvel]
def calc_non_uniformity(self, r_in_pos, r_n, n, l_s, r_s, phi_eq):
return 23.7/((r_in_pos/r_n)**2.7*n**1.34*phi_eq**1.1*(l_s/(2*r_s))**0.15)
##### Runs the injector sizing method #####
def run_M1(self):
self.fr.read()
print("Config: ", self.fr.config)
##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### #####
##### ##### ##### Part 1 - ST1 Calculation ### ##### ##### ##### ##### #####
##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### #####
self.phi_1 = self.calc_phi(self.fr.alpha2_1)
self.A_1 = self.calc_geom_char(self.phi_1)
self.mu_1 = self.calc_mu(self.phi_1)
#self.r_n_1 = 1e3/2* np.sqrt(4/np.pi) * np.sqrt(self.fr.m_1*1e-3/(self.mu_1*np.sqrt(2*self.fr.rho_1*self.fr.delta_p_1*1e5)))
#self.r_n_1 = 1e3 * np.sqrt(self.fr.m_1*1e-3/(np.pi*self.mu_1*np.sqrt(2*self.fr.rho_1*self.fr.delta_p_1*1e5)))
self.r_n_1 = 1e3*0.475*np.sqrt(self.fr.m_1*1e-3 / (self.mu_1 * np.sqrt(self.fr.rho_1*self.fr.delta_p_1*1e5)))
print("\n\n ST1 Initial Values:")
self.print_data("A_1", "%.2f"%(self.A_1))
self.print_data("Phi_1", "%.3f"%(self.phi_1))
self.print_data("R_n_1", "%.3f mm"%(self.r_n_1))
self.print_data("2Alpha", "%.1f deg"%(self.fr.alpha2_1))
self.inlet_diameter = []
self.outlet_error = []
self.erro_r = self.erro_max + 1
self.contador = 0
print("\n ST1 Iteration data:")
while self.erro_r >= self.erro_max and self.contador < self.it_lim:
##### Ideal injector iteration #####
self.r_in_pos_1 = self.fr.opening_coef_1*self.r_n_1
self.r_in_orf_1 = np.sqrt(self.r_in_pos_1*self.r_n_1/(self.fr.n_1*self.A_1))
self.inlet_diameter.append(2*self.r_in_orf_1)
self.r_s_1 = self.r_in_pos_1+self.r_in_orf_1
self.l_in_1 = self.fr.ratio_l_in_1*2*self.r_in_orf_1
self.l_n_1 = self.fr.ratio_l_n_1*2*self.r_n_1
self.l_s_1 = self.fr.ratio_l_s_1*2*self.r_s_1
self.a_1 = self.calc_a(self.phi_1)
self.alpha2_1 = self.calc_2alpha(self.phi_1)
            ##### Real injector iteration #####
self.rey_in_1 = self.calc_reynolds_in(self.fr.m_1, self.fr.din_visc_1, self.fr.n_1, self.r_in_orf_1,self.fr.rho_1)
self.lambda_1 = self.calc_friction_coef(self.rey_in_1)
self.ksi_1 = self.calc_hyd_loss(self.fr.in_type_1, self.r_s_1, self.l_in_1, self.fr.inlet_radius_1, self.r_in_orf_1, self.lambda_1)
self.check_value("ksi_1", 0, float("inf"), self.ksi_1)
self.A_eq_1 = self.calc_geom_char_eq(self.r_in_pos_1, self.r_n_1, self.fr.n_1, self.r_in_orf_1, self.lambda_1)
self.phi_eq_1 = self.calc_phi_eq(self.A_eq_1)
self.mu_eq_1 = self.calc_mu_eq(self.phi_eq_1)
self.mu_i_1 = self.calc_mu_i(self.mu_eq_1, self.A_eq_1, self.r_in_pos_1, self.r_n_1, self.ksi_1)
self.alpha2_eq_1 = self.calc_2alpha(self.phi_1)
            ## calculation of the new nozzle radius
self.r_n_old = self.r_n_1
#self.r_n_1 = 1e3/2* np.sqrt(4/np.pi) * np.sqrt(self.fr.m_1*1e-3/(self.mu_i_1*np.sqrt(2*self.fr.rho_1*self.fr.delta_p_1*1e5)))
#self.r_n_1 = 1e3 * np.sqrt(self.fr.m_1*1e-3/(np.pi*self.mu_i_1*np.sqrt(2*self.fr.rho_1*self.fr.delta_p_1*1e5)))
self.r_n_1 = 1e3*0.475*np.sqrt(self.fr.m_1*1e-3 / (self.mu_i_1 * np.sqrt(self.fr.rho_1*self.fr.delta_p_1*1e5)))
self.erro_r = abs(self.r_n_old-self.r_n_1)/self.r_n_old
self.outlet_error.append(self.erro_r)
self.A_1 = self.r_in_pos_1*self.r_n_1/(self.fr.n_1*self.r_in_orf_1**2)
self.phi_1 = self.calc_phi_eq(self.A_1)
self.K_1 = self.A_eq_1/self.A_1
self.check_value("K_1", 0, 1, self.K_1)
self.r_mn_1 = self.r_n_1*np.sqrt(1-self.phi_eq_1)
self.r_mk_1 = self.r_mn_1 * np.sqrt(2*(1-self.phi_eq_1)/(2-self.phi_eq_1))
self.t_fluid_1 = self.r_n_1-self.r_mn_1
            ## print the iteration data
print("\nIt. %d)"%(self.contador+1))
self.print_data("A_1", "%.4f"%(self.A_1))
self.print_data("A_eq_1", "%.4f"%(self.A_eq_1))
self.print_data("K_1", "%.4f"%(self.K_1))
self.print_data("Phi_1", "%.4f"%(self.phi_1))
self.print_data("Phi_eq_1", "%.4f"%(self.phi_eq_1))
self.print_data("rey_in_1", "%.4f"%(self.rey_in_1))
self.print_data("ksi_1", "%.4f"%(self.ksi_1))
self.print_data("lambda_1", "%.4f"%(self.lambda_1))
self.print_data("2Alpha ideal", "%.2f deg"%(self.alpha2_1))
self.print_data("2Alpha real", "%.2f deg"%(self.alpha2_eq_1))
self.print_data("R_n_1", "%.3f mm"%(self.r_n_1))
self.print_data("r_in_1", "%.3f mm"%(self.r_in_orf_1))
self.contador += 1
print("\n\n")
        ## Expected injector non-uniformity calculation
self.I_1 = self.calc_non_uniformity(self.r_in_pos_1, self.r_n_1, self.fr.n_1, self.l_s_1, self.r_s_1, self.phi_eq_1)
if(self.contador >= self.it_lim):
print ("\tWarning:\n ST1 reached iteration limit.")
if(np.pi*self.r_n_1**2 < self.fr.n_1*np.pi*self.r_in_orf_1**2):
print("\tWarning:\n Outlet area smaller than total inlet area.")
##### ST1 design console report #####
print("\n\nST1 Injector Geometry: \n")
self.print_data("# of Inlet Channels", "%.1f"%(self.fr.n_1))
print("\n\tRadial Dimensions:")
self.print_data("Nozzle Radius", "%.3f \tmm"%(self.r_n_1))
self.print_data("Inlet Radius", "%.3f \tmm"%(self.r_in_pos_1))
self.print_data("Swirl chamber Radius", "%.3f \tmm"%(self.r_s_1))
self.print_data("Inlet Channel Radius", "%.3f \tmm"%(self.r_in_orf_1))
print("\n\tLinear dimensions:")
self.print_data("Nozzle Length", "%.3f \tmm"%(self.l_n_1))
self.print_data("Swirl Chamber Length", "%.3f \tmm"%(self.l_s_1))
self.print_data("Inlet Channel Length", "%.3f \tmm"%(self.l_in_1))
print("\n\nST1 Injector Properties: ")
self.print_data("Delta P", "%.2f bar"%(self.fr.delta_p_1))
self.print_data("Mass flow target", "%.2f g/s"%(self.fr.m_1))
self.print_data("A", "%.2f"%(self.A_1))
self.print_data("A_eq", "%.2f"%(self.A_eq_1))
self.print_data("K", "%.2f"%(self.K_1))
self.print_data("Phi", "%.2f"%(self.phi_1))
self.print_data("Phi_eq", "%.2f"%(self.phi_eq_1))
self.print_data("Ksi", "%.2f"%(self.ksi_1))
self.print_data("Lambda", "%.2f"%(self.lambda_1))
self.print_data("r_mk_1", "%.2f mm"%(self.r_mk_1))
self.print_data("r_mn_1", "%.2f mm"%(self.r_mn_1))
self.print_data("t_fluid_1", "%.2f mm"%(self.t_fluid_1))
self.print_data("Expected non-uniformity", "%.2f %%"%(self.I_1))
##### ST1 - Convergence plot #####
self.step_x = np.ceil(self.contador/10)
if(self.sh_plot_error == 1):
fig, (ax1, ax2) = plt.subplots(2, dpi = self.dpi_value)
fig.suptitle("Stage 1 - Solution convergence")
ax1.plot(range(self.contador),self.inlet_diameter)
ax1.set_xlabel("Iteration")
ax1.set_ylabel("d_in [mm]")
ax1.set_xticks(np.arange(0,self.contador+self.step_x,self.step_x))
ax1.grid()
ax2.plot(range(self.contador),self.outlet_error)
ax2.set_xlabel("Iteration")
ax2.set_ylabel("R_n error [ad.]")
ax2.set_xticks(np.arange(0,self.contador+self.step_x,self.step_x))
ax2.grid()
##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### #####
        ##### ##### ##### Part 2 - ST2 Calculation ### ##### ##### ##### ##### #####
##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### #####
if(self.fr.config == "Bi\n"):
self.phi_2 = self.calc_phi(self.fr.alpha2_2)
self.A_2 = self.calc_geom_char(self.phi_2)
self.mu_2 = self.calc_mu(self.phi_2)
#self.r_n_2 = 1e3/2* np.sqrt(4/np.pi) * np.sqrt(self.fr.m_2*1e-3/(self.mu_2*np.sqrt(2*self.fr.rho_2*self.fr.delta_p_2*1e5)))
#self.r_n_2 = 1e3 * np.sqrt(self.fr.m_2*1e-3/(np.pi*self.mu_2*np.sqrt(2*self.fr.rho_2*self.fr.delta_p_2*1e5)))
self.r_n_2 = 1e3*0.475*np.sqrt(self.fr.m_2*1e-3 / (self.mu_2 * np.sqrt(self.fr.rho_2*self.fr.delta_p_2*1e5)))
print("\n\n\n\n\n ST2 Initial Values:")
self.print_data("A_2", "%.2f"%(self.A_2))
self.print_data("Phi_2", "%.3f"%(self.phi_2))
self.print_data("R_n_2", "%.3f mm"%(self.r_n_2))
self.print_data("2Alpha", "%.1f deg"%(self.fr.alpha2_2))
self.inlet_diameter = []
self.outlet_error = []
self.erro_r = 100
self.contador = 0
print("\n ST2 Iteration data:")
while self.erro_r >= self.erro_max and self.contador < self.it_lim:
                ##### Ideal injector iteration #####
self.r_in_pos_2 = self.fr.opening_coef_2*self.r_n_2
self.r_in_orf_2 = np.sqrt((self.r_in_pos_2*self.r_n_2)/(self.fr.n_2*self.A_2))
self.inlet_diameter.append(2*self.r_in_orf_2)
self.r_s_2 = self.r_in_pos_2+self.r_in_orf_2
self.l_in_2 = self.fr.ratio_l_in_2*2*self.r_in_orf_2
self.l_n_2 = self.fr.ratio_l_n_2*2*self.r_n_2
self.l_s_2 = self.fr.ratio_l_s_2*2*self.r_s_2
self.a_2 = self.calc_a(self.phi_2)
self.alpha2_2 = self.calc_2alpha(self.phi_2)
                ##### Real injector iteration #####
self.rey_in_2 = self.calc_reynolds_in(self.fr.m_2, self.fr.din_visc_2, self.fr.n_2, self.r_in_orf_2, self.fr.rho_2)
self.lambda_2 = self.calc_friction_coef(self.rey_in_2)
self.ksi_2 = self.calc_hyd_loss(self.fr.in_type_2, self.r_s_2, self.l_in_2, self.fr.inlet_radius_2, self.r_in_orf_2, self.lambda_2)
self.check_value("ksi_2", 0, float("inf"), self.ksi_2)
self.A_eq_2 = self.calc_geom_char_eq(self.r_in_pos_2, self.r_n_2, self.fr.n_2, self.r_in_orf_2, self.lambda_2)
self.phi_eq_2 = self.calc_phi_eq(self.A_eq_2)
self.mu_eq_2 = self.calc_mu(self.phi_eq_2)
self.mu_i_2 = self.calc_mu_i(self.mu_eq_2, self.A_2, self.r_in_pos_2, self.r_n_2, self.ksi_2)
self.alpha2_eq_2 = self.calc_2alpha(self.phi_eq_2)
                ## calculation of the new nozzle radius
self.r_n_old = self.r_n_2
#self.r_n_2 = 1e3/2* np.sqrt(4/np.pi) * np.sqrt(self.fr.m_2*1e-3/(self.mu_i_2*np.sqrt(2*self.fr.rho_2*self.fr.delta_p_2*1e5)))
#self.r_n_2 = 1e3 * np.sqrt(self.fr.m_2*1e-3/(np.pi*self.mu_i_2*np.sqrt(2*self.fr.rho_2*self.fr.delta_p_2*1e5)))
self.r_n_2 = 1e3*0.475*np.sqrt(self.fr.m_2*1e-3 / (self.mu_i_2 * np.sqrt(self.fr.rho_2*self.fr.delta_p_2*1e5)))
self.erro_r = abs(self.r_n_old-self.r_n_2)/self.r_n_old
self.outlet_error.append(self.erro_r)
self.A_2 = self.r_in_pos_2*self.r_n_2/(self.fr.n_2*self.r_in_orf_2**2)
self.phi_2 = self.calc_phi_eq(self.A_2)
self.K_2 = self.A_eq_2/self.A_2
self.check_value("K_2", 0, 1, self.K_2)
self.r_mn_2 = self.r_n_2*np.sqrt(1-self.phi_eq_2)
self.r_mk_2 = self.r_mn_2 * np.sqrt(2*(1-self.phi_eq_2)/(2-self.phi_eq_2))
self.t_fluid_2 = self.r_n_2-self.r_mn_2
                ## print the iteration data
print("\nIt. %d)"%(self.contador+1))
self.print_data("A_2", "%.4f"%(self.A_2))
self.print_data("A_eq_2", "%.4f"%(self.A_eq_2))
self.print_data("K_2", "%.4f"%(self.K_2))
self.print_data("Phi_2", "%.4f"%(self.phi_2))
self.print_data("Phi_eq_2", "%.4f"%(self.phi_eq_2))
self.print_data("rey_in_2", "%.4f"%(self.rey_in_2))
self.print_data("ksi_2", "%.4f"%(self.ksi_2))
self.print_data("lambda_2", "%.4f"%(self.lambda_2))
self.print_data("2Alpha ideal", "%.2f deg"%(self.alpha2_2))
self.print_data("2Alpha real", "%.2f deg"%(self.alpha2_eq_2))
self.print_data("R_n_2", "%.3f mm"%(self.r_n_2))
self.print_data("r_in_2", "%.3f mm"%(self.r_in_orf_2))
self.print_data("Erro", "%.2E"%(self.erro_r))
self.contador += 1
print("\n\n")
            ## Expected injector non-uniformity calculation
self.I_2 = self.calc_non_uniformity(self.r_in_pos_2, self.r_n_2, self.fr.n_2, self.l_s_2, self.r_s_2, self.phi_eq_2)
if(self.contador == self.it_lim):
print ("\tWarning:\n ST1 reached iteration limit.")
if(np.pi*self.r_n_2**2 < self.fr.n_2*np.pi*self.r_in_orf_2**2):
print("\tWarning:\n Outlet area smaller than total inlet area.")
##### ST2 design console report #####
print("\n\nST2 Injector Geometry: \n")
self.print_data("# of Inlet Channels", "%.1f"%(self.fr.n_2))
print("\n\tRadial Dimensions:")
self.print_data("Nozzle Radius", "%.3f \tmm"%(self.r_n_2))
self.print_data("Inlet Radius", "%.3f \tmm"%(self.r_in_pos_2))
self.print_data("Swirl chamber Radius", "%.3f \tmm"%(self.r_s_2))
self.print_data("Inlet Channel Radius", "%.3f \tmm"%(self.r_in_orf_2))
print("\n\tLinear dimensions:")
self.print_data("Nozzle Length", "%.3f \tmm"%(self.l_n_2))
self.print_data("Swirl Chamber Length", "%.3f \tmm"%(self.l_s_2))
self.print_data("Inlet Channel Length", "%.3f \tmm"%(self.l_in_2))
print("\n\nST2 Injector Properties: ")
self.print_data("Delta P", "%.2f bar"%(self.fr.delta_p_2))
self.print_data("Mass flow target", "%.2f g/s"%(self.fr.m_2))
self.print_data("A", "%.2f"%(self.A_2))
self.print_data("A_eq", "%.2f"%(self.A_eq_2))
self.print_data("K", "%.2f"%(self.K_2))
self.print_data("Phi", "%.2f"%(self.phi_2))
self.print_data("Phi_eq", "%.2f"%(self.phi_eq_2))
self.print_data("Ksi", "%.2f"%(self.ksi_2))
self.print_data("Lambda", "%.2f"%(self.lambda_2))
self.print_data("r_mk_2", "%.2f mm"%(self.r_mk_2))
self.print_data("r_mn_2", "%.2f mm"%(self.r_mn_2))
self.print_data("t_fluid_2", "%.2f mm"%(self.t_fluid_2))
self.print_data("Expected non-uniformity", "%.2f %%"%(self.I_2))
##### ST2 - Convergence plot #####
self.step_x = np.ceil(self.contador/10)
if(self.sh_plot_error == 1):
fig, (ax3, ax4) = plt.subplots(2, dpi = self.dpi_value)
fig.suptitle("Stage 2 - Solution convergence")
ax3.plot(range(self.contador),self.inlet_diameter)
ax3.set_xlabel("Iteration")
ax3.set_ylabel("d_in [mm]")
ax3.set_xticks(np.arange(0,self.contador+self.step_x,self.step_x))
ax3.grid()
ax4.plot(range(self.contador),self.outlet_error)
ax4.set_xlabel("Iteration")
ax4.set_ylabel("R_n error [ad.]")
ax4.set_xticks(np.arange(0,self.contador+self.step_x,self.step_x))
ax4.grid()
##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### #####
            ##### ##### ##### Part 3 - Stage Integration ## ##### ##### ##### #####
##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### ##### #####
self.gap = self.r_mk_2 - (self.r_n_1 + self.fr.t_w)
self.fluid_t_2_s = self.r_s_2 - self.r_mk_2
print("\n\nGeometry Check: \t")
self.print_data("D_n_1", "%.3f mm"%(2*self.r_n_1))
self.print_data("D_w_1", "%.3f mm"%(2*self.r_n_1+2*self.fr.t_w))
self.print_data("D_n_2", "%.3f mm"%(2*self.r_n_2))
self.print_data("fluid_t_2_s", "%.3f mm"%(self.fluid_t_2_s))
if(self.gap >= self.fr.delta/100*self.fluid_t_2_s):
print("\n\tHydraulic Independance OK! \n\tDeltaR = %.3f mm (mín: %.2f mm)"%(self.gap, self.fr.delta/100*self.fluid_t_2_s))
##### ##### ##### ##### ######
                # ST2 spray angle modification #
##### ##### ##### ##### ######
self.alpha2_goal = 2*(self.alpha2_eq_1/2 - self.fr.angle_dif)
if(self.alpha2_goal < self.alpha2_eq_2):
#self.a_2 = 2*(1-self.phi_eq_2)**2/(2-self.phi_eq_2)
#self.a_2 = 2*(1-self.phi_2)**2/(2-self.phi_2)
self.a_2 = self.calc_a(self.phi_2)
self.r_out = self.r_n_2*np.sqrt(self.a_2*(1+(np.tan(self.alpha2_goal/2*np.pi/180))**-2))
self.l_out = self.fr.ratio_l_n_out*2*self.r_n_2
#self.phi_out = self.phi_eq_2/np.sqrt(3-2*self.phi_eq_2)
self.phi_out = self.phi_2/np.sqrt(3-2*self.phi_2)
self.r_mn_out = self.r_out*np.sqrt(1-self.phi_out)
self.t_fluid_out = self.r_out-self.r_mn_out
print("\n\nST2 Modified Injector Geometry: \t")
self.print_data("2Alpha_2_goal", "%.1f deg"%(self.alpha2_goal))
self.print_data("Phi_out", "%.3f mm"%(self.phi_out))
self.print_data("R_out", "%.3f mm"%(self.r_out))
self.print_data("L_out", "%.3f"%(self.l_out))
self.print_data("R_mn_out", "%.3f mm"%(self.r_mn_out))
self.print_data("t_fluid_out", "%.3f mm"%(self.t_fluid_out))
self.check_out = 1
else:
print("\n\tWarning: Intercepting angle greater than input. \n\tInt_angle = %.1f deg"%(self.alpha2_eq_1/2 - self.alpha2_eq_2/2))
self.check_out = -1
else:
print("\n\tERROR: Hydraulic Independance FAIL! \n\tDeltaR = %.3f mm (mín: %.2f mm)"%(self.gap, self.fr.delta/100*self.fluid_t_2_s))
self.check_out = 0
##### ##### ##### ##### #
##### Injector plot #####
##### ##### ##### ##### #
if(self.fr.config == "Mono\n"):
##### conventional injector plot #####
## ST1
self.st1_n_bottom = 0
self.st1_n_top = self.st1_n_bottom + self.l_n_1
self.st1_s_bottom = self.st1_n_top + (self.r_s_1-self.r_n_1)/np.tan(self.fr.trans_angle_1*np.pi/180)
self.st1_s_top = self.st1_s_bottom + self.l_s_1
self.st1_x = [self.r_n_1, self.r_n_1, self.r_s_1, self.r_s_1, 0]
self.st1_y = [self.st1_n_bottom, self.st1_n_top, self.st1_s_bottom, self.st1_s_top, self.st1_s_top]
            ## ST1 tangential orifice points
self.st1_in_top = []
self.st1_in_bottom = []
self.circ_x_1 = np.arange(self.r_s_1-2*self.r_in_orf_1,self.r_s_1,0.005)
for x in self.circ_x_1:
self.st1_in_top.append((self.st1_s_top-self.r_in_orf_1) + np.sqrt(abs(self.r_in_orf_1**2-(x-(self.r_s_1-self.r_in_orf_1))**2)))
self.st1_in_bottom.append((self.st1_s_top-self.r_in_orf_1) - np.sqrt(abs(self.r_in_orf_1**2-(x-(self.r_s_1-self.r_in_orf_1))**2)))
            ## ST1 internal fluid points
self.st1_fluid_n_bottom = 0
self.st1_fluid_n_top = self.st1_n_bottom + self.l_n_1
self.st1_fluid_s_bottom = self.st1_n_top + (self.r_s_1-self.r_n_1)/np.tan(self.fr.trans_angle_1*np.pi/180)
self.st1_fluid_s_top = self.st1_s_bottom + self.l_s_1
self.st1_fluid_x = [self.r_mn_1, self.r_mn_1, self.r_mk_1, self.r_mk_1]
self.st1_fluid_y = [self.st1_fluid_n_bottom, self.st1_fluid_n_top, self.st1_fluid_s_bottom, self.st1_fluid_s_top]
            ## ST1 spray points
self.st1_spray_y_i = 0
self.st1_spray_y_f = -1*self.st1_s_top*1.1
self.st1_spray_y_step = (self.st1_spray_y_f-self.st1_spray_y_i)/100
self.st1_spray_y = np.arange(self.st1_spray_y_i,self.st1_spray_y_f+self.st1_spray_y_step,self.st1_spray_y_step)
self.st1_spray_x = []
self.r_av_1 = (self.r_n_1 + self.r_mn_1)/2
for y in self.st1_spray_y:
self.st1_spray_x.append(self.r_av_1*np.sqrt(1+y**2/(self.r_av_1*np.tan((90-0.5*self.alpha2_eq_1)*np.pi/180))**2))
if(self.sh_plot_inj == 1):
fig, ax5 = plt.subplots(dpi = self.dpi_value)
ax5.plot(self.st1_x,self.st1_y, linewidth = 2, color = '0.3', linestyle = '-')
ax5.plot(self.circ_x_1,self.st1_in_top, linewidth = 1, color = '0.3', linestyle = '-')
ax5.plot(self.circ_x_1,self.st1_in_bottom, linewidth = 1, color = '0.3', linestyle = '-')
ax5.plot(self.st1_fluid_x, self.st1_fluid_y, linewidth = 1, color = 'b', linestyle = '--')
ax5.plot(self.st1_spray_x, self.st1_spray_y, linewidth = 2, color = 'b', linestyle = '--')
ax5.grid()
ax5.set_xlabel("[mm]")
ax5.set_ylabel("[mm]")
self.x_min = 0
self.x_max = 2*self.st1_s_top*1.1
self.y_min = -1*self.st1_s_top*1.1
self.y_max = self.st1_s_top*1.1
ax5.set_xlim(self.x_min, self.x_max)
ax5.set_ylim(self.y_min, self.y_max)
ax5.set_aspect("equal")
self.st1_legend = "ST1:\n R_n: %.2f mm\n R_in: %.2f mm\n R_s: %.2f mm\n r_in: %.2f mm\n n_in: %d \n"%(self.r_n_1, self.r_in_pos_1, self.r_s_1, self.r_in_orf_1, self.fr.n_1)
self.bbox_props = dict(boxstyle="round", fc="w", ec="0.5", alpha=0.9)
ax5.text(self.r_s_1*1.5, 0, self.st1_legend, fontsize=self.fontsize, ha="left", va="bottom", bbox=self.bbox_props)
        ##### plots for bipropellant injectors #####
elif(self.fr.config == "Bi\n"):
if(self.sh_plot_inj == 1 and (self.check_out == 1 or self.check_out == -1)):
##### conventional injector plot #####
## ST1
self.st1_n_bottom = self.fr.recess
self.st1_n_top = self.st1_n_bottom + self.l_n_1
self.st1_s_bottom = self.st1_n_top + (self.r_s_1-self.r_n_1)/np.tan(self.fr.trans_angle_1*np.pi/180)
self.st1_s_top = self.st1_s_bottom + self.l_s_1
self.st1_x = [self.r_n_1, self.r_n_1, self.r_s_1, self.r_s_1, 0]
self.st1_y = [self.st1_n_bottom, self.st1_n_top, self.st1_s_bottom, self.st1_s_top, self.st1_s_top]
                ## ST1 tangential orifice points
self.st1_in_top = []
self.st1_in_bottom = []
self.circ_x_1 = np.arange(self.r_s_1-2*self.r_in_orf_1,self.r_s_1,0.005)
for x in self.circ_x_1:
self.st1_in_top.append((self.st1_s_top-self.r_in_orf_1) + np.sqrt(abs(self.r_in_orf_1**2-(x-(self.r_s_1-self.r_in_orf_1))**2)))
self.st1_in_bottom.append((self.st1_s_top-self.r_in_orf_1) - np.sqrt(abs(self.r_in_orf_1**2-(x-(self.r_s_1-self.r_in_orf_1))**2)))
                ## ST1 internal fluid points
self.st1_fluid_n_bottom = self.fr.recess
self.st1_fluid_n_top = self.st1_n_bottom + self.l_n_1
self.st1_fluid_s_bottom = self.st1_n_top + (self.r_s_1-self.r_n_1)/np.tan(self.fr.trans_angle_1*np.pi/180)
self.st1_fluid_s_top = self.st1_s_bottom + self.l_s_1
self.st1_fluid_x = [self.r_mn_1, self.r_mn_1, self.r_mk_1, self.r_mk_1]
self.st1_fluid_y = [self.st1_fluid_n_bottom, self.st1_fluid_n_top, self.st1_fluid_s_bottom, self.st1_fluid_s_top]
                ## ST1 spray points
self.st1_spray_y_i = 0
self.st1_spray_y_f = -1*self.st1_s_top*1.1 - self.fr.recess
self.st1_spray_y_step = (self.st1_spray_y_f-self.st1_spray_y_i)/100
self.st1_spray_y = np.arange(self.st1_spray_y_i,self.st1_spray_y_f+self.st1_spray_y_step,self.st1_spray_y_step)
self.st1_spray_x = []
self.r_av_1 = (self.r_n_1 + self.r_mn_1)/2
for y in self.st1_spray_y:
self.st1_spray_x.append(self.r_av_1*np.sqrt(1+y**2/(self.r_av_1*np.tan((90-0.5*self.alpha2_eq_1)*np.pi/180))**2))
for i in range(len(self.st1_spray_y)):
self.st1_spray_y[i] = self.st1_spray_y[i] + self.fr.recess
## ST2
self.st2_n_bottom = 0
self.st2_n_top = self.st2_n_bottom + self.l_n_2
self.st2_s_bottom = self.st2_n_top + (self.r_s_2-self.r_n_2)/np.tan(self.fr.trans_angle_2*np.pi/180)
self.st2_s_top = self.st2_s_bottom + self.l_s_2
                ## ST2 structure points
self.st2_x = [self.r_n_2, self.r_n_2, self.r_s_2, self.r_s_2, self.r_n_1+self.fr.t_w, self.r_n_1+self.fr.t_w, self.r_n_1]
self.st2_y = [self.st2_n_bottom, self.st2_n_top, self.st2_s_bottom, self.st2_s_top, self.st2_s_top, self.fr.recess, self.fr.recess]
                ## ST2 tangential orifice points
self.st2_in_top = []
self.st2_in_bottom = []
self.circ_x_2 = np.arange(self.r_s_2-2*self.r_in_orf_2,self.r_s_2,0.005)
for x in self.circ_x_2:
self.st2_in_top.append((self.st2_s_top-self.r_in_orf_2) + np.sqrt(abs(self.r_in_orf_2**2-(x-(self.r_s_2-self.r_in_orf_2))**2)))
self.st2_in_bottom.append((self.st2_s_top-self.r_in_orf_2) - np.sqrt(abs(self.r_in_orf_2**2-(x-(self.r_s_2-self.r_in_orf_2))**2)))
                ## ST2 internal fluid points
self.st2_fluid_n_bottom = 0
self.st2_fluid_n_top = self.st2_fluid_n_bottom + self.l_n_2
self.st2_fluid_s_bottom = self.st2_fluid_n_top + (self.r_s_2-self.r_n_2)/np.tan(self.fr.trans_angle_2*np.pi/180)
self.st2_fluid_s_top = self.st2_fluid_s_bottom + self.l_s_2
self.st2_fluid_x = [self.r_mn_2, self.r_mn_2, self.r_mk_2, self.r_mk_2]
self.st2_fluid_y = [self.st2_fluid_n_bottom, self.st2_fluid_n_top, self.st2_fluid_s_bottom, self.st2_fluid_s_top]
                ## ST2 spray points
self.st2_spray_y_i = 0
self.st2_spray_y_f = -1*self.st1_s_top*1.1
self.st2_spray_y_step = (self.st2_spray_y_f-self.st2_spray_y_i)/100
self.st2_spray_y = np.arange(self.st2_spray_y_i,self.st2_spray_y_f+self.st2_spray_y_step,self.st2_spray_y_step)
self.st2_spray_x = []
self.r_av_2 = (self.r_n_2 + self.r_mn_2)/2
for y in self.st2_spray_y:
self.st2_spray_x.append(self.r_av_2*np.sqrt(1+y**2/(self.r_av_2*np.tan((90-0.5*self.alpha2_eq_2)*np.pi/180))**2))
fig, ax6 = plt.subplots(dpi = self.dpi_value)
ax6.plot(self.st1_x,self.st1_y, linewidth = 1, color = '0.3', linestyle = '-')
ax6.plot(self.circ_x_1,self.st1_in_top, linewidth = 1, color = '0.3', linestyle = '-')
ax6.plot(self.circ_x_1,self.st1_in_bottom, linewidth = 1, color = '0.3', linestyle = '-')
ax6.plot(self.st1_fluid_x, self.st1_fluid_y, linewidth = 1, color = 'b', linestyle = '--')
ax6.plot(self.st1_spray_x, self.st1_spray_y, linewidth = 2, color = 'b', linestyle = '--')
ax6.plot(self.st2_x,self.st2_y, linewidth = 1, color = '0.3', linestyle = '-')
ax6.plot(self.circ_x_2,self.st2_in_top, linewidth = 1, color = '0.3', linestyle = '-')
ax6.plot(self.circ_x_2,self.st2_in_bottom, linewidth = 1, color = '0.3', linestyle = '-')
ax6.plot(self.st2_fluid_x, self.st2_fluid_y, linewidth = 1, color = 'y', linestyle = '--')
ax6.plot(self.st2_spray_x, self.st2_spray_y, linewidth = 2, color = 'y', linestyle = '--')
ax6.grid()
ax6.set_xlabel("[mm]")
ax6.set_ylabel("[mm]")
self.x_min = 0
self.x_max = 2*self.st1_s_top*1.1
self.y_min = -1*self.st1_s_top*1.1
self.y_max = self.st1_s_top*1.1
ax6.set_xlim(self.x_min, self.x_max)
ax6.set_ylim(self.y_min, self.y_max)
ax6.set_aspect("equal")
self.st1_legend = "ST1:\n R_n: %.2f mm\n R_in: %.2f mm\n R_s: %.2f mm\n r_in: %.2f mm\n n_in: %d \n recess: %.2f mm"%(self.r_n_1, self.r_in_pos_1, self.r_s_1, self.r_in_orf_1, self.fr.n_1, self.fr.recess)
self.st2_legend = "ST2:\n R_n: %.2f mm\n R_in: %.2f mm\n R_s: %.2f mm\n r_in: %.2f mm\n n_in: %d \n int_angle: %.1f°"%(self.r_n_2, self.r_in_pos_2, self.r_s_2, self.r_in_orf_2, self.fr.n_2, self.alpha2_eq_1/2 - self.alpha2_eq_2/2)
self.bbox_props = dict(boxstyle="round", fc="w", ec="0.5", alpha=0.9)
ax6.text(max([self.r_s_1,self.r_s_2])*1.5, 0, self.st1_legend, fontsize=self.fontsize, ha="left", va="bottom", bbox=self.bbox_props)
ax6.text(max([self.r_s_1,self.r_s_2])*1.5+self.offset, 0, self.st2_legend, fontsize=self.fontsize, ha="left", va="bottom", bbox=self.bbox_props)
if(self.check_out == 1):
##### modified injector plot #####
## ST1
self.st1_n_bottom = self.fr.recess
self.st1_n_top = self.st1_n_bottom + self.l_n_1
self.st1_s_bottom = self.st1_n_top + (self.r_s_1-self.r_n_1)/np.tan(self.fr.trans_angle_1*np.pi/180)
self.st1_s_top = self.st1_s_bottom + self.l_s_1
self.st1_x = [self.r_n_1, self.r_n_1, self.r_s_1, self.r_s_1, 0]
self.st1_y = [self.st1_n_bottom, self.st1_n_top, self.st1_s_bottom, self.st1_s_top, self.st1_s_top]
                    ## ST1 tangential orifice points
self.st1_in_top = []
self.st1_in_bottom = []
self.circ_x_1 = np.arange(self.r_s_1-2*self.r_in_orf_1,self.r_s_1,0.005)
for x in self.circ_x_1:
self.st1_in_top.append((self.st1_s_top-self.r_in_orf_1) + np.sqrt(abs(self.r_in_orf_1**2-(x-(self.r_s_1-self.r_in_orf_1))**2)))
self.st1_in_bottom.append((self.st1_s_top-self.r_in_orf_1) - np.sqrt(abs(self.r_in_orf_1**2-(x-(self.r_s_1-self.r_in_orf_1))**2)))
                    ## ST1 internal fluid points
self.st1_fluid_n_bottom = self.fr.recess
self.st1_fluid_n_top = self.st1_n_bottom + self.l_n_1
self.st1_fluid_s_bottom = self.st1_n_top + (self.r_s_1-self.r_n_1)/np.tan(self.fr.trans_angle_1*np.pi/180)
self.st1_fluid_s_top = self.st1_s_bottom + self.l_s_1
self.st1_fluid_x = [self.r_mn_1, self.r_mn_1, self.r_mk_1, self.r_mk_1]
self.st1_fluid_y = [self.st1_fluid_n_bottom, self.st1_fluid_n_top, self.st1_fluid_s_bottom, self.st1_fluid_s_top]
                    ## ST1 spray points
self.st1_spray_y_i = 0
self.st1_spray_y_f = -1*self.st1_s_top*1.1 - self.fr.recess
self.st1_spray_y_step = (self.st1_spray_y_f-self.st1_spray_y_i)/100
self.st1_spray_y = np.arange(self.st1_spray_y_i,self.st1_spray_y_f+self.st1_spray_y_step,self.st1_spray_y_step)
self.st1_spray_x = []
self.r_av_1 = (self.r_n_1 + self.r_mn_1)/2
for y in self.st1_spray_y:
self.st1_spray_x.append(self.r_av_1*np.sqrt(1+y**2/(self.r_av_1*np.tan((90-0.5*self.alpha2_eq_1)*np.pi/180))**2))
for i in range(len(self.st1_spray_y)):
self.st1_spray_y[i] = self.st1_spray_y[i] + self.fr.recess
## ST2
self.st2_out_bottom = 0
self.st2_out_top = self.l_out
self.st2_n_bottom = self.st2_out_top + (self.r_out-self.r_n_2)/np.tan(self.fr.trans_angle_out*np.pi/180)
self.st2_n_top = self.st2_n_bottom + self.l_n_2
self.st2_s_bottom = self.st2_n_top + (self.r_s_2-self.r_n_2)/np.tan(self.fr.trans_angle_2*np.pi/180)
self.st2_s_top = self.st2_s_bottom + self.l_s_2
                    ## ST2 structure points
self.st2_x = [self.r_out, self.r_out, self.r_n_2, self.r_n_2, self.r_s_2, self.r_s_2, self.r_n_1+self.fr.t_w, self.r_n_1+self.fr.t_w, self.r_n_1]
self.st2_y = [self.st2_out_bottom, self.st2_out_top, self.st2_n_bottom, self.st2_n_top, self.st2_s_bottom, self.st2_s_top, self.st2_s_top, self.fr.recess, self.fr.recess]
                    ## ST2 tangential orifice points
self.st2_in_top = []
self.st2_in_bottom = []
self.circ_x_2 = np.arange(self.r_s_2-2*self.r_in_orf_2,self.r_s_2,0.005)
for x in self.circ_x_2:
self.st2_in_top.append((self.st2_s_top-self.r_in_orf_2) + np.sqrt(abs(self.r_in_orf_2**2-(x-(self.r_s_2-self.r_in_orf_2))**2)))
self.st2_in_bottom.append((self.st2_s_top-self.r_in_orf_2) - np.sqrt(abs(self.r_in_orf_2**2-(x-(self.r_s_2-self.r_in_orf_2))**2)))
                    ## ST2 internal fluid points
self.st2_fluid_out_bottom = 0
self.st2_fluid_out_top = self.l_out
self.st2_fluid_n_bottom = self.st2_out_top + (self.r_out-self.r_n_2)/np.tan(self.fr.trans_angle_out*np.pi/180)
self.st2_fluid_n_top = self.st2_n_bottom + self.l_n_2
self.st2_fluid_s_bottom = self.st2_n_top + (self.r_s_2-self.r_n_2)/np.tan(self.fr.trans_angle_2*np.pi/180)
self.st2_fluid_s_top = self.st2_s_bottom + self.l_s_2
self.st2_fluid_x = [self.r_mn_out, self.r_mn_out, self.r_mn_2, self.r_mn_2, self.r_mk_2, self.r_mk_2]
self.st2_fluid_y = [self.st2_fluid_out_bottom, self.st2_fluid_out_top, self.st2_fluid_n_bottom, self.st2_fluid_n_top, self.st2_fluid_s_bottom, self.st2_fluid_s_top]
                    ## ST2 spray points
self.st2_spray_y_i = 0
self.st2_spray_y_f = -1*self.st1_s_top*1.1
self.st2_spray_y_step = (self.st2_spray_y_f-self.st2_spray_y_i)/100
self.st2_spray_y = np.arange(self.st2_spray_y_i,self.st2_spray_y_f+self.st2_spray_y_step,self.st2_spray_y_step)
self.st2_spray_x = []
self.r_av_2 = (self.r_out + self.r_mn_out)/2
for y in self.st2_spray_y:
self.st2_spray_x.append(self.r_av_2*np.sqrt(1+y**2/(self.r_av_2*np.tan((90-0.5*self.alpha2_goal)*np.pi/180))**2))
fig, ax7 = plt.subplots(dpi = self.dpi_value)
ax7.plot(self.st1_x,self.st1_y, linewidth = 1, color = '0.3', linestyle = '-')
ax7.plot(self.circ_x_1,self.st1_in_top, linewidth = 1, color = '0.3', linestyle = '-')
ax7.plot(self.circ_x_1,self.st1_in_bottom, linewidth = 1, color = '0.3', linestyle = '-')
ax7.plot(self.st1_fluid_x, self.st1_fluid_y, linewidth = 1, color = 'b', linestyle = '--')
ax7.plot(self.st1_spray_x, self.st1_spray_y, linewidth = 2, color = 'b', linestyle = '--')
ax7.plot(self.st2_x,self.st2_y, linewidth = 1, color = '0.3', linestyle = '-')
ax7.plot(self.circ_x_2,self.st2_in_top, linewidth = 1, color = '0.3', linestyle = '-')
ax7.plot(self.circ_x_2,self.st2_in_bottom, linewidth = 1, color = '0.3', linestyle = '-')
ax7.plot(self.st2_fluid_x, self.st2_fluid_y, linewidth = 1, color = 'y', linestyle = '--')
ax7.plot(self.st2_spray_x, self.st2_spray_y, linewidth = 2, color = 'y', linestyle = '--')
ax7.grid()
ax7.set_xlabel("[mm]")
ax7.set_ylabel("[mm]")
self.x_min = 0
self.x_max = 2*self.st1_s_top*1.1
self.y_min = -1*self.st1_s_top*1.1
self.y_max = self.st1_s_top*1.1
ax7.set_xlim(self.x_min, self.x_max)
ax7.set_ylim(self.y_min, self.y_max)
ax7.set_aspect("equal")
self.st1_legend = "ST1:\n R_n: %.2f mm\n R_in: %.2f mm\n R_s: %.2f mm\n r_in: %.2f mm\n n_in: %d \n recess: %.2f mm"%(self.r_n_1, self.r_in_pos_1, self.r_s_1, self.r_in_orf_1, self.fr.n_1, self.fr.recess)
self.st2_legend = "ST2:\n R_n: %.2f mm\n R_in: %.2f mm\n R_s: %.2f mm\n r_in: %.2f mm\n n_in: %d \n R_out: %.2f mm"%(self.r_n_2, self.r_in_pos_2, self.r_s_2, self.r_in_orf_2, self.fr.n_2, self.r_out)
self.bbox_props = dict(boxstyle="round", fc="w", ec="0.5", alpha=0.9)
ax7.text(max([self.r_s_1,self.r_s_2])*1.5, 0, self.st1_legend, fontsize=self.fontsize, ha="left", va="bottom", bbox=self.bbox_props)
ax7.text(max([self.r_s_1,self.r_s_2])*1.5+self.offset, 0, self.st2_legend, fontsize=self.fontsize, ha="left", va="bottom", bbox=self.bbox_props)
elif(self.check_out == -1):
print("\n\nPlot: Modified injector not plotted\n\t(Angle modification not needed)")
elif(self.check_out == 0):
print("\n\nPlot: injector not plotted\n\t(Hydraulic Independance FAIL!)")
elif(self.sh_plot_inj == 0):
print("\n\nPlot: injector not plotted\n\t(Disabled by user)")
else:
print("\n\nPlot: injector not plotted\n\t(Unknow Error)")
elif(self.sh_plot_inj == 1):
print("Plot: configuration not recognized")
|
{"hexsha": "04821209fb94dfd2c60caad3c7a6b2b754509223", "size": 49623, "ext": "py", "lang": "Python", "max_stars_repo_path": "M1/M1_Method_v2.py", "max_stars_repo_name": "alexandrecg/SID", "max_stars_repo_head_hexsha": "2da81a2e18d4faa329e5c722b4b544212cf3530a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "M1/M1_Method_v2.py", "max_issues_repo_name": "alexandrecg/SID", "max_issues_repo_head_hexsha": "2da81a2e18d4faa329e5c722b4b544212cf3530a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "M1/M1_Method_v2.py", "max_forks_repo_name": "alexandrecg/SID", "max_forks_repo_head_hexsha": "2da81a2e18d4faa329e5c722b4b544212cf3530a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 49.5239520958, "max_line_length": 259, "alphanum_fraction": 0.5015214719, "include": true, "reason": "import numpy", "num_tokens": 13662}
|
# coding: utf-8
"""
description:
Scikit-learn compatible implementation of the Gibberish detector
based on https://github.com/rrenaud/Gibberish-Detector
original author: rrenaud@github
author: Suraj Iyer
"""
__all__ = ['GibberishDetectorClassifier']
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.validation import check_is_fitted
import numpy as np
from typing import Iterable, Any
class GibberishDetectorClassifier(BaseEstimator, ClassifierMixin):
def __init__(
self, accepted_chars: str = 'abcdefghijklmnopqrstuvwxyz ',
smoothing_factor: int = 10):
self.accepted_chars = accepted_chars
self.smoothing_factor = smoothing_factor
def set_params(self, **params):
return super(GibberishDetectorClassifier, self).set_params(**params)
@property
def accepted_chars(self):
return self._accepted_chars
@accepted_chars.setter
def accepted_chars(self, value: str):
self._accepted_chars = value
self._pos = dict([(char, idx) for idx, char in enumerate(value)])
def _normalize(self, line: str) -> list:
""" Return only the subset of chars from accepted_chars.
This helps keep the model relatively small by ignoring punctuation, infrequent symbols, etc. """
return [c.lower() for c in line if c.lower() in self.accepted_chars]
def _ngram(self, n: int, line: str) -> Iterable[str]:
""" Return all n grams from line after normalizing """
filtered = self._normalize(line)
for start in range(0, len(filtered) - n + 1):
yield ''.join(filtered[start:start + n])
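        # Example (derived from the code above, shown for illustration):
        # list(self._ngram(2, "Ab, c")) -> ['ab', 'b ', ' c']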
def fit(self, X: Iterable[str], y: Any = None):
""" Write a simple model as a pickle file """
k = len(self._accepted_chars)
# Assume we have seen `self.smoothing_factor` of each character pair.
# This acts as a kind of prior or smoothing factor. This way, if we see a
# character transition live that we've never observed in the past, we won't
# assume the entire string has 0 probability.
counts = [[self.smoothing_factor for i in range(k)] for i in range(k)]
# Count transitions between characters in lines from X, taken
# from http://norvig.com/spell-correct.html
for line in X:
for a, b in self._ngram(2, line):
counts[self._pos[a]][self._pos[b]] += 1
# _normalize the counts so that they become log probabilities.
# We use log probabilities rather than straight probabilities to avoid
# numeric underflow issues with long texts.
# This contains a justification:
# http://squarecog.wordpress.com/2009/01/10/dealing-with-underflow-in-joint-probability-calculations/
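        # Illustrative numbers: with smoothing_factor = 10 and a row whose
        # counts sum to 1000, a never-observed character pair keeps probability
        # 10/1000 = 0.01, i.e. log(0.01) ~= -4.6 rather than -inf.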
for i, row in enumerate(counts):
s = float(sum(row))
for j in range(len(row)):
row[j] = np.log(row[j] / s)
self._log_prob_mat = counts
return self
def _avg_transition_prob(self, line: str) -> float:
""" Return the average transition probability of line with the log probability matrix. """
log_prob = 0.0
transition_ct = 0
for a, b in self._ngram(2, line):
log_prob += self._log_prob_mat[self._pos[a]][self._pos[b]]
transition_ct += 1
# The exponentiation translates from log probability to regular probability.
return np.exp(log_prob / (transition_ct or 1))
def predict_proba(self, X: Iterable[str]) -> Iterable[float]:
check_is_fitted(self, '_log_prob_mat')
return np.array([self._avg_transition_prob(x) for x in X])
def predict(self, X: Iterable[str], threshold: float) -> Iterable[int]:
        # if the transition probability is lower than the threshold, it's gibberish, i.e., return 1 else 0
return (self.predict_proba(X) < threshold) * 1
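if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): the training lines and the
    # 0.05 decision threshold are made-up values, not tuned defaults.
    train_lines = [
        "the quick brown fox jumps over the lazy dog",
        "a modest amount of ordinary english text gives a rough bigram model",
        "character transition frequencies separate words from keyboard mashing",
    ]
    clf = GibberishDetectorClassifier().fit(train_lines)
    samples = ["this looks like a normal sentence", "zxcvq wqxzkj vbnmql"]
    print(clf.predict_proba(samples))            # average transition probabilities
    print(clf.predict(samples, threshold=0.05))  # 1 = flagged as gibberish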
|
{"hexsha": "6cfe419d26b1fb196bc3e4d6d89f9b5bb4cc1a08", "size": 3920, "ext": "py", "lang": "Python", "max_stars_repo_path": "python_data_utils/nlp/gibberish_detector.py", "max_stars_repo_name": "surajiyer/python-data-utils", "max_stars_repo_head_hexsha": "d6e9bf81204a01545a3edb165c5724eb24f37c18", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-01-06T00:09:21.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-28T06:03:13.000Z", "max_issues_repo_path": "python_data_utils/nlp/gibberish_detector.py", "max_issues_repo_name": "surajiyer/python-data-utils", "max_issues_repo_head_hexsha": "d6e9bf81204a01545a3edb165c5724eb24f37c18", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python_data_utils/nlp/gibberish_detector.py", "max_forks_repo_name": "surajiyer/python-data-utils", "max_forks_repo_head_hexsha": "d6e9bf81204a01545a3edb165c5724eb24f37c18", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.595959596, "max_line_length": 109, "alphanum_fraction": 0.6568877551, "include": true, "reason": "import numpy", "num_tokens": 910}
|
#include <boost/test/unit_test.hpp>
#include <boost/algorithm/string/predicate.hpp>
#include <enumivo/testing/tester.hpp>
#include <enumivo/chain/abi_serializer.hpp>
#include <enumivo/chain/wasm_enumivo_constraints.hpp>
#include <enumivo/chain/resource_limits.hpp>
#include <enumivo/chain/exceptions.hpp>
#include <enumivo/chain/wast_to_wasm.hpp>
#include <enumivo/chain_plugin/chain_plugin.hpp>
#include <asserter/asserter.wast.hpp>
#include <asserter/asserter.abi.hpp>
#include <stltest/stltest.wast.hpp>
#include <stltest/stltest.abi.hpp>
#include <enu.system/enu.system.wast.hpp>
#include <enu.system/enu.system.abi.hpp>
#include <enu.token/enu.token.wast.hpp>
#include <enu.token/enu.token.abi.hpp>
#include <fc/io/fstream.hpp>
#include <Runtime/Runtime.h>
#include <fc/variant_object.hpp>
#include <fc/io/json.hpp>
#include <array>
#include <utility>
#ifdef NON_VALIDATING_TEST
#define TESTER tester
#else
#define TESTER validating_tester
#endif
using namespace enumivo;
using namespace enumivo::chain;
using namespace enumivo::testing;
using namespace fc;
BOOST_AUTO_TEST_SUITE(get_table_tests)
BOOST_FIXTURE_TEST_CASE( get_scope_test, TESTER ) try {
produce_blocks(2);
create_accounts({ N(enu.token), N(enu.ram), N(enu.ramfee), N(enu.stake),
N(enu.blockpay), N(enu.votepay), N(enu.savings), N(enu.names) });
std::vector<account_name> accs{N(inita), N(initb), N(initc), N(initd)};
create_accounts(accs);
produce_block();
set_code( N(enu.token), enu_token_wast );
set_abi( N(enu.token), enu_token_abi );
produce_blocks(1);
// create currency
auto act = mutable_variant_object()
("issuer", "enumivo")
("maximum_supply", enumivo::chain::asset::from_string("1000000000.0000 SYS"));
push_action(N(enu.token), N(create), N(enu.token), act );
// issue
for (account_name a: accs) {
push_action( N(enu.token), N(issue), "enumivo", mutable_variant_object()
("to", name(a) )
("quantity", enumivo::chain::asset::from_string("999.0000 SYS") )
("memo", "")
);
}
produce_blocks(1);
// iterate over scope
enumivo::chain_apis::read_only plugin(*(this->control), fc::microseconds(INT_MAX));
enumivo::chain_apis::read_only::get_table_by_scope_params param{N(enu.token), N(accounts), "inita", "", 10};
enumivo::chain_apis::read_only::get_table_by_scope_result result = plugin.read_only::get_table_by_scope(param);
BOOST_REQUIRE_EQUAL(4, result.rows.size());
BOOST_REQUIRE_EQUAL("", result.more);
if (result.rows.size() >= 4) {
BOOST_REQUIRE_EQUAL(name(N(enu.token)), result.rows[0].code);
BOOST_REQUIRE_EQUAL(name(N(inita)), result.rows[0].scope);
BOOST_REQUIRE_EQUAL(name(N(accounts)), result.rows[0].table);
BOOST_REQUIRE_EQUAL(name(N(enumivo)), result.rows[0].payer);
BOOST_REQUIRE_EQUAL(1, result.rows[0].count);
BOOST_REQUIRE_EQUAL(name(N(initb)), result.rows[1].scope);
BOOST_REQUIRE_EQUAL(name(N(initc)), result.rows[2].scope);
BOOST_REQUIRE_EQUAL(name(N(initd)), result.rows[3].scope);
}
param.lower_bound = "initb";
param.upper_bound = "initd";
result = plugin.read_only::get_table_by_scope(param);
BOOST_REQUIRE_EQUAL(2, result.rows.size());
BOOST_REQUIRE_EQUAL("", result.more);
if (result.rows.size() >= 2) {
BOOST_REQUIRE_EQUAL(name(N(initb)), result.rows[0].scope);
BOOST_REQUIRE_EQUAL(name(N(initc)), result.rows[1].scope);
}
param.limit = 1;
result = plugin.read_only::get_table_by_scope(param);
BOOST_REQUIRE_EQUAL(1, result.rows.size());
BOOST_REQUIRE_EQUAL("initc", result.more);
param.table = name(0);
result = plugin.read_only::get_table_by_scope(param);
BOOST_REQUIRE_EQUAL(1, result.rows.size());
BOOST_REQUIRE_EQUAL("initc", result.more);
param.table = N(invalid);
result = plugin.read_only::get_table_by_scope(param);
BOOST_REQUIRE_EQUAL(0, result.rows.size());
BOOST_REQUIRE_EQUAL("", result.more);
} FC_LOG_AND_RETHROW() /// get_scope_test
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "b77afc716b6289919f6bb2435b85dfe690dc17be", "size": 4117, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tests/get_table_tests.cpp", "max_stars_repo_name": "dvlaicu/enumivo", "max_stars_repo_head_hexsha": "f347de9dde0571b55e3ef2b624377e49a30c6f4e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/get_table_tests.cpp", "max_issues_repo_name": "dvlaicu/enumivo", "max_issues_repo_head_hexsha": "f347de9dde0571b55e3ef2b624377e49a30c6f4e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/get_table_tests.cpp", "max_forks_repo_name": "dvlaicu/enumivo", "max_forks_repo_head_hexsha": "f347de9dde0571b55e3ef2b624377e49a30c6f4e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.936, "max_line_length": 114, "alphanum_fraction": 0.698809813, "num_tokens": 1062}
|
sleep <- function(time=1)
{
message("Sleeping...")
flush.console()
Sys.sleep(time)
message("Awake!")
}
sleep()
|
{"hexsha": "12ba992a0db59d172d35b848b78196d4fffff1d5", "size": 124, "ext": "r", "lang": "R", "max_stars_repo_path": "Task/Sleep/R/sleep.r", "max_stars_repo_name": "LaudateCorpus1/RosettaCodeData", "max_stars_repo_head_hexsha": "9ad63ea473a958506c041077f1d810c0c7c8c18d", "max_stars_repo_licenses": ["Info-ZIP"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-11-09T22:08:38.000Z", "max_stars_repo_stars_event_max_datetime": "2018-11-09T22:08:38.000Z", "max_issues_repo_path": "Task/Sleep/R/sleep.r", "max_issues_repo_name": "seanwallawalla-forks/RosettaCodeData", "max_issues_repo_head_hexsha": "9ad63ea473a958506c041077f1d810c0c7c8c18d", "max_issues_repo_licenses": ["Info-ZIP"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Task/Sleep/R/sleep.r", "max_forks_repo_name": "seanwallawalla-forks/RosettaCodeData", "max_forks_repo_head_hexsha": "9ad63ea473a958506c041077f1d810c0c7c8c18d", "max_forks_repo_licenses": ["Info-ZIP"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-11-09T22:08:40.000Z", "max_forks_repo_forks_event_max_datetime": "2018-11-09T22:08:40.000Z", "avg_line_length": 12.4, "max_line_length": 25, "alphanum_fraction": 0.5967741935, "num_tokens": 32}
|
From Heapster Require Import
Permissions
PermissionsSpred2.
From Coq Require Import
Classes.RelationClasses.
Section PermSet.
Context {config : Type}.
Record Perms2 :=
{
in_Perms2 : forall {spred}, @perm { x | spred x } -> Prop;
(* Perms_upwards_closed1 : forall (spred1 spred2 : config -> Prop) *)
(* (Hspred : forall x, spred1 x -> spred2 x) *)
(* (p1 : @perm {x | spred1 x}) (p2 : @perm {x | spred2 x}), *)
(* in_Perms2 p1 -> *)
(* hlte_perm1 config spred1 spred2 Hspred p1 p2 -> *)
(* in_Perms2 p2 (* p2 has bigger spred *); *)
Perms_upwards_closed2 : forall (spred1 spred2 : config -> Prop)
(Hspred : forall x, spred1 x -> spred2 x)
(p1 : @perm {x | spred2 x}) (p2 : @perm {x | spred1 x}),
in_Perms2 p1 ->
hlte_perm2 config spred1 spred2 Hspred p1 p2 ->
in_Perms2 p2 (* p2 has smaller spred *)
}.
Notation "p ∈2 P" := (in_Perms2 P p) (at level 60).
Definition lte_Perms2 (P Q : Perms2) : Prop :=
forall spred (p : @perm {x | spred x}), p ∈2 Q -> p ∈2 P.
Notation "P ⊑2 Q" := (lte_Perms2 P Q) (at level 60).
Program Definition top_Perms2 : Perms2 :=
{|
in_Perms2 := fun _ _ => False
|}.
Program Definition bottom_Perms2 : Perms2 :=
{|
in_Perms2 := fun _ _ => True
|}.
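(* Ordering sketch (not part of the original file): lte_Perms2 P Q holds when
   every permission in Q is also in P, so for any permission set P we expect
   bottom_Perms2 ⊑2 P (bottom accepts every perm) and P ⊑2 top_Perms2
   (top accepts no perm, so the implication is vacuous). *)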
Program Definition sep_conj_Perms2 (P Q : Perms2) : Perms2 :=
{|
in_Perms2 := fun spred r =>
exists spred' Hspred (p q : @perm {x | spred' x}),
in_Perms2 P p /\
in_Perms2 Q q /\
hlte_perm2 config spred spred' Hspred (p ** q) r
|}.
Next Obligation.
rename H into spred', H1 into Hspred', H2 into p, H3 into q.
exists spred'. eexists. Unshelve.
2: { intros. auto. }
exists p, q. split; [| split]; auto. eapply hlte_perm2_transitive; eauto.
Qed.
End PermSet.
|
{"author": "GaloisInc", "repo": "heapster-formalization", "sha": "1c5ed40a556a2b45f8f137ba475eb4bca76dcc70", "save_path": "github-repos/coq/GaloisInc-heapster-formalization", "path": "github-repos/coq/GaloisInc-heapster-formalization/heapster-formalization-1c5ed40a556a2b45f8f137ba475eb4bca76dcc70/src/PermS.v"}
|
import unittest
import os
from shutil import rmtree
import numpy as np
import nifty
import nifty.graph.rag as nrag
class TestAccumulateStacked(unittest.TestCase):
shape = (10, 256, 256)
# shape = (3, 128, 128)
@staticmethod
def make_labels(shape):
labels = np.zeros(shape, dtype='uint32')
label = 0
for z in range(shape[0]):
for y in range(shape[1]):
for x in range(shape[2]):
labels[z, y, x] = label
if np.random.random() > .95:
have_increased = True
label += 1
else:
have_increased = False
if not have_increased:
label += 1
return labels
def setUp(self):
self.data = np.random.random(size=self.shape).astype('float32')
self.labels = self.make_labels(self.shape)
self.n_labels = self.labels.max() + 1
self.tmp_dir = './tmp'
if not os.path.exists(self.tmp_dir):
os.mkdir(self.tmp_dir)
def tearDown(self):
if os.path.exists(self.tmp_dir):
rmtree(self.tmp_dir)
def check_features(self, features, expected_length):
self.assertEqual(len(features), expected_length)
for feat_id in range(features.shape[1]):
self.assertFalse(np.allclose(features[:, feat_id], 0.))
def accumulation_in_core_test(self, accumulation_function):
rag = nrag.gridRagStacked2D(self.labels,
numberOfLabels=self.n_labels,
numberOfThreads=-1)
n_edges_xy = rag.totalNumberOfInSliceEdges
n_edges_z = rag.totalNumberOfInBetweenSliceEdges
# test complete accumulation
# print("Complete Accumulation ...")
feats_xy, feats_z = accumulation_function(rag,
self.data,
numberOfThreads=-1)
self.check_features(feats_xy, n_edges_xy)
self.check_features(feats_z, n_edges_z)
# print("... passed")
# test xy-feature accumulation
# print("Complete XY Accumulation ...")
feats_xy, feats_z = accumulation_function(rag,
self.data,
keepXYOnly=True,
numberOfThreads=-1)
self.check_features(feats_xy, n_edges_xy)
self.assertEqual(len(feats_z), 1)
# print("... passed")
# test z-feature accumulation for all 3 directions
# print("Complete Z Accumulations ...")
for z_direction in (0, 1, 2):
feats_xy, feats_z = accumulation_function(rag,
self.data,
keepZOnly=True,
zDirection=z_direction,
numberOfThreads=-1)
self.assertEqual(len(feats_xy), 1)
self.check_features(feats_z, n_edges_z)
# print("... passed")
def test_standard_features_in_core(self):
self.accumulation_in_core_test(nrag.accumulateEdgeStandardFeatures)
@unittest.skipUnless(nifty.Configuration.WITH_FASTFILTERS, "skipping fastfilter tests")
def test_features_from_filters_in_core(self):
self.accumulation_in_core_test(nrag.accumulateEdgeFeaturesFromFilters)
def accumulation_z5_test(self, accumulation_function, n_feats):
import z5py
import nifty.z5
label_path = os.path.join(self.tmp_dir, 'labels.n5')
f_labels = z5py.File(label_path, use_zarr_format=False)
dsl = f_labels.create_dataset('data',
dtype='uint32',
shape=self.shape,
chunks=(1, 25, 25),
compression='raw')
dsl[:] = self.labels
rag = nrag.gridRagStacked2DZ5(nifty.z5.datasetWrapper('uint32',
os.path.join(label_path, 'data')),
numberOfLabels=self.n_labels,
numberOfThreads=1)
n_edges_xy = rag.totalNumberOfInSliceEdges
n_edges_z = rag.totalNumberOfInBetweenSliceEdges
data_path = os.path.join(self.tmp_dir, 'data.n5')
f_data = z5py.File(data_path, use_zarr_format=False)
dsd = f_data.create_dataset('data',
dtype='float32',
shape=self.shape,
chunks=(1, 25, 25),
compression='raw')
dsd[:] = self.data
def open_features(keep_xy=False, keep_z=False):
p_xy = os.path.join(self.tmp_dir, 'xy.n5')
p_z = os.path.join(self.tmp_dir, 'z.n5')
f_xy = z5py.File(p_xy, use_zarr_format=False)
f_z = z5py.File(p_z, use_zarr_format=False)
f_xy.create_dataset('data',
dtype='float32',
shape=(1 if keep_z else n_edges_xy, n_feats),
chunks=(1 if keep_z else 500, n_feats),
compression='raw')
f_z.create_dataset('data',
dtype='float32',
shape=(1 if keep_xy else n_edges_z, n_feats),
chunks=(1 if keep_xy else 500, n_feats),
compression='raw')
return p_xy, p_z
def load_features(p_xy, p_z):
xy_feats = z5py.File(p_xy)['data'][:]
z_feats = z5py.File(p_z)['data'][:]
rmtree(p_xy)
rmtree(p_z)
return xy_feats, z_feats
path_xy, path_z = open_features()
# test complete accumulation
print("Complete Accumulation ...")
accumulation_function(rag,
nifty.z5.datasetWrapper('float32',
os.path.join(data_path, 'data')),
nifty.z5.datasetWrapper('float32',
os.path.join(path_xy, 'data')),
nifty.z5.datasetWrapper('float32',
os.path.join(path_z, 'data')),
numberOfThreads=1)
feats_xy, feats_z = load_features(path_xy, path_z)
self.check_features(feats_xy, n_edges_xy)
self.check_features(feats_z, n_edges_z)
print("... passed")
# test xy-feature accumulation
print("Complete XY Accumulation ...")
path_xy, path_z = open_features(keep_xy=True)
accumulation_function(rag,
nifty.z5.datasetWrapper('float32',
os.path.join(data_path, 'data')),
nifty.z5.datasetWrapper('float32',
os.path.join(path_xy, 'data')),
nifty.z5.datasetWrapper('float32',
os.path.join(path_z, 'data')),
keepXYOnly=True,
numberOfThreads=-1)
feats_xy, feats_z = load_features(path_xy, path_z)
self.check_features(feats_xy, n_edges_xy)
self.assertEqual(len(feats_z), 1)
print("... passed")
# test z-feature accumulation for all 3 directions
print("Complete Z Accumulations ...")
for z_direction in (0, 1, 2):
path_xy, path_z = open_features(keep_z=True)
accumulation_function(rag,
nifty.z5.datasetWrapper('float32',
os.path.join(data_path, 'data')),
nifty.z5.datasetWrapper('float32',
os.path.join(path_xy, 'data')),
nifty.z5.datasetWrapper('float32',
os.path.join(path_z, 'data')),
keepZOnly=True,
numberOfThreads=-1)
feats_xy, feats_z = load_features(path_xy, path_z)
self.assertEqual(len(feats_xy), 1)
self.check_features(feats_z, n_edges_z)
print("... passed")
@unittest.skipUnless(nifty.Configuration.WITH_Z5, "skipping z5 tests")
def test_z5_standard_features(self):
self.accumulation_z5_test(nrag.accumulateEdgeStandardFeatures, n_feats=9)
@unittest.skipUnless(nifty.Configuration.WITH_Z5 and nifty.Configuration.WITH_FASTFILTERS,
"skipping z5 fastfilter tests")
def test_z5_features_from_filters(self):
self.accumulation_z5_test(nrag.accumulateEdgeFeaturesFromFilters, n_feats=9 * 12)
@unittest.skipUnless(nifty.Configuration.WITH_Z5, "skipping z5 tests")
def test_in_vs_out_of_core(self):
accumulation_function = nrag.accumulateEdgeStandardFeatures
n_feats = 9
import z5py
import nifty.z5
###############
# get features with out of core calculation
label_path = os.path.join(self.tmp_dir, 'labels.n5')
f_labels = z5py.File(label_path, use_zarr_format=False)
dsl = f_labels.create_dataset('data',
dtype='uint32',
shape=self.shape,
chunks=(1, 25, 25),
compression='raw')
dsl[:] = self.labels
rag_ooc = nrag.gridRagStacked2DZ5(nifty.z5.datasetWrapper('uint32',
os.path.join(label_path,
'data')),
numberOfLabels=self.n_labels,
numberOfThreads=1)
data_path = os.path.join(self.tmp_dir, 'data.n5')
f_data = z5py.File(data_path, use_zarr_format=False)
dsd = f_data.create_dataset('data',
dtype='float32',
shape=self.shape,
chunks=(1, 25, 25),
compression='raw')
dsd[:] = self.data
n_edges_xy = rag_ooc.totalNumberOfInSliceEdges
n_edges_z = rag_ooc.totalNumberOfInBetweenSliceEdges
def open_features(keep_xy=False, keep_z=False):
p_xy = os.path.join(self.tmp_dir, 'xy.n5')
p_z = os.path.join(self.tmp_dir, 'z.n5')
f_xy = z5py.File(p_xy, use_zarr_format=False)
f_z = z5py.File(p_z, use_zarr_format=False)
f_xy.create_dataset('data',
dtype='float32',
shape=(1 if keep_z else n_edges_xy, n_feats),
chunks=(1 if keep_z else 500, n_feats),
compression='raw')
f_z.create_dataset('data',
dtype='float32',
shape=(1 if keep_xy else n_edges_z, n_feats),
chunks=(1 if keep_xy else 500, n_feats),
compression='raw')
return p_xy, p_z
def load_features(p_xy, p_z):
xy_feats = z5py.File(p_xy)['data'][:]
z_feats = z5py.File(p_z)['data'][:]
rmtree(p_xy)
rmtree(p_z)
return xy_feats, z_feats
path_xy, path_z = open_features()
# test complete accumulation
accumulation_function(rag_ooc,
nifty.z5.datasetWrapper('float32',
os.path.join(data_path, 'data')),
nifty.z5.datasetWrapper('float32',
os.path.join(path_xy, 'data')),
nifty.z5.datasetWrapper('float32',
os.path.join(path_z, 'data')),
                              numberOfThreads=-1)
feats_xy_ooc, feats_z_ooc = load_features(path_xy, path_z)
###############
# get features with in core calculation
rag = nrag.gridRagStacked2D(self.labels,
numberOfLabels=self.n_labels,
numberOfThreads=1)
# test complete accumulation
feats_xy, feats_z = accumulation_function(rag,
self.data,
numberOfThreads=1)
self.assertEqual(feats_xy.shape, feats_xy_ooc.shape)
self.assertTrue(np.allclose(feats_xy, feats_xy_ooc))
self.assertEqual(feats_z.shape, feats_z_ooc.shape)
self.assertTrue(np.allclose(feats_z, feats_z_ooc))
@staticmethod
def make_labels_with_ignore(shape):
labels = np.zeros(shape, dtype='uint32')
mask = np.random.choice([0, 1], size=shape, p=[1. / 10., 9. / 10.]).astype('bool')
label = 0
for z in range(shape[0]):
for y in range(shape[1]):
for x in range(shape[2]):
if not mask[z, y, x]:
continue
labels[z, y, x] = label
if np.random.random() > .95:
have_increased = True
label += 1
else:
have_increased = False
if not have_increased:
label += 1
return labels
def ignore_label_test_test(self, accumulation_function):
labels_with_ignore = self.make_labels_with_ignore(self.shape)
rag = nrag.gridRagStacked2D(labels_with_ignore,
numberOfLabels=labels_with_ignore.max() + 1,
ignoreLabel=0,
numberOfThreads=1)
n_edges_xy = rag.totalNumberOfInSliceEdges
n_edges_z = rag.totalNumberOfInBetweenSliceEdges
# test complete accumulation
print("Complete Accumulation ...")
feats_xy, feats_z = accumulation_function(rag,
self.data,
numberOfThreads=1)
self.check_features(feats_xy, n_edges_xy)
self.check_features(feats_z, n_edges_z)
print("... passed")
# test xy-feature accumulation
print("Complete XY Accumulation ...")
feats_xy, feats_z = accumulation_function(rag,
self.data,
keepXYOnly=True,
numberOfThreads=-1)
self.check_features(feats_xy, n_edges_xy)
self.assertEqual(len(feats_z), 1)
print("... passed")
# test z-feature accumulation for all 3 directions
print("Complete Z Accumulations ...")
for z_direction in (0, 1, 2):
feats_xy, feats_z = accumulation_function(rag,
self.data,
keepZOnly=True,
zDirection=z_direction,
numberOfThreads=-1)
self.assertEqual(len(feats_xy), 1)
self.check_features(feats_z, n_edges_z)
print("... passed")
def test_standard_features_ignore(self):
self.ignore_label_test_test(nrag.accumulateEdgeStandardFeatures)
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "6439e4c49ee906cd7c0f4e67a756f1e3ec572bd3", "size": 16326, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/python/test/graph/rag/test_accumulate_stacked.py", "max_stars_repo_name": "konopczynski/nifty", "max_stars_repo_head_hexsha": "dc02ac60febaabfaf9b2ee5a854bb61436ebdc97", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/python/test/graph/rag/test_accumulate_stacked.py", "max_issues_repo_name": "konopczynski/nifty", "max_issues_repo_head_hexsha": "dc02ac60febaabfaf9b2ee5a854bb61436ebdc97", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/python/test/graph/rag/test_accumulate_stacked.py", "max_forks_repo_name": "konopczynski/nifty", "max_forks_repo_head_hexsha": "dc02ac60febaabfaf9b2ee5a854bb61436ebdc97", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.606557377, "max_line_length": 96, "alphanum_fraction": 0.4885458777, "include": true, "reason": "import numpy", "num_tokens": 3115}
|
function visualize(states::AbstractArray, states2::AbstractArray; statenames = string.(1:length(states)), statenames2 = string.(1:length(states2)), aspect = false, resolution = (2880, 1080), statistics = false, title = "Field = ", title2 = "Field = ", units1 = ["" for i in eachindex(states)], units2 = ["" for i in eachindex(states)])
# Create scene
scene, layout = layoutscene(resolution = resolution)
lscene = layout[2:4, 2:4] = LScene(scene)
lscene2 = layout[2:4, 5:7] = LScene(scene)
width = round(Int, resolution[1] / 6) # make menu 1/4 of preliminary resolution
# Create choices and nodes
stateindex = collect(1:length(states))
statenode = Node(stateindex[1])
colorchoices = [:balance, :thermal, :dense, :deep, :curl, :thermometer]
colornode = Node(colorchoices[1])
stateindex2 = collect(1:length(states2))
statenode2 = Node(stateindex2[1])
colorchoices2 = [:balance, :thermal, :dense, :deep, :curl, :thermometer]
colornode2 = Node(colorchoices2[1])
if statistics
llscene = layout[4,1] = Axis(scene, xlabel = @lift(statenames[$statenode] * units1[$statenode]),
xlabelcolor = :black, ylabel = "pdf",
ylabelcolor = :black, xlabelsize = 40, ylabelsize = 40,
xticklabelsize = 25, yticklabelsize = 25,
xtickcolor = :black, ytickcolor = :black,
xticklabelcolor = :black, yticklabelcolor = :black)
layout[3, 1] = Label(scene, "Statistics", width = width, textsize = 50)
llscene2 = layout[4,8] = Axis(scene, xlabel = @lift(statenames2[$statenode2] * units2[$statenode2]),
xlabelcolor = :black, ylabel = "pdf",
ylabelcolor = :black, xlabelsize = 40, ylabelsize = 40,
xticklabelsize = 25, yticklabelsize = 25,
xtickcolor = :black, ytickcolor = :black,
xticklabelcolor = :black, yticklabelcolor = :black)
layout[3, 8] = Label(scene, "Statistics", width = width, textsize = 50)
end
# x,y,z are for determining the aspect ratio of the box
if (typeof(aspect) <: Tuple) & (length(aspect) == 3)
x, y, z = aspect
else
x, y, z = size(states[1])
end
# Clim sliders
upperclim_slider = Slider(scene, range = range(0, 1, length = 101), startvalue = 0.99)
upperclim_node = upperclim_slider.value
lowerclim_slider = Slider(scene, range = range(0, 1, length = 101), startvalue = 0.01)
lowerclim_node = lowerclim_slider.value
upperclim_slider2 = Slider(scene, range = range(0, 1, length = 101), startvalue = 0.99)
upperclim_node2 = upperclim_slider2.value
lowerclim_slider2 = Slider(scene, range = range(0, 1, length = 101), startvalue = 0.01)
lowerclim_node2 = lowerclim_slider2.value
# Lift Nodes
state = @lift(states[$statenode])
statename = @lift(statenames[$statenode])
clims = @lift((quantile($state[:], $lowerclim_node) , quantile($state[:], $upperclim_node)))
cmap_rgb = @lift(to_colormap($colornode))
titlename = @lift(title * $statename) # use padding and appropriate centering
state2 = @lift(states2[$statenode2])
statename2 = @lift(statenames2[$statenode2])
clims2 = @lift((quantile($state2[:], $lowerclim_node2) , quantile($state2[:], $upperclim_node2)))
cmap_rgb2 = @lift(to_colormap($colornode2))
titlename2 = @lift(title2 * $statename2) # use padding and appropriate centering
# Statistics
if statistics
histogram_node = @lift(histogram($state, bins = 300))
xs = @lift($histogram_node[1])
ys = @lift($histogram_node[2])
pdf = GLMakie.AbstractPlotting.barplot!(llscene, xs, ys, color = :red,
strokecolor = :red,
strokewidth = 1)
@lift(GLMakie.AbstractPlotting.xlims!(llscene, extrema($state)))
@lift(GLMakie.AbstractPlotting.ylims!(llscene, extrema($histogram_node[2])))
vlines!(llscene, @lift($clims[1]), color = :black, linewidth = width / 100)
vlines!(llscene, @lift($clims[2]), color = :black, linewidth = width / 100)
# 2
histogram_node2 = @lift(histogram($state2, bins = 300))
xs2 = @lift($histogram_node2[1])
ys2 = @lift($histogram_node2[2])
pdf2 = GLMakie.AbstractPlotting.barplot!(llscene2, xs2, ys2, color = :red,
strokecolor = :red,
strokewidth = 1)
@lift(GLMakie.AbstractPlotting.xlims!(llscene2, extrema($state2)))
@lift(GLMakie.AbstractPlotting.ylims!(llscene2, extrema($histogram_node2[2])))
vlines!(llscene2, @lift($clims2[1]), color = :black, linewidth = width / 100)
vlines!(llscene2, @lift($clims2[2]), color = :black, linewidth = width / 100)
end
# Volume Plot
volume!(lscene, 0..x, 0..y, 0..z, state,
camera = cam3d!,
colormap = cmap_rgb,
colorrange = clims)
volume!(lscene2, 0..x, 0..y, 0..z, state2,
camera = cam3d!,
colormap = cmap_rgb2,
colorrange = clims2)
# Title
supertitle = layout[1, 2:4] = Label(scene, titlename , textsize = 50, color = :black)
supertitle2 = layout[1, 5:7] = Label(scene, titlename2 , textsize = 50, color = :black)
# Menus 1
statemenu = Menu(scene, options = zip(statenames, stateindex))
on(statemenu.selection) do s
statenode[] = s
end
colormenu = Menu(scene, options = zip(colorchoices, colorchoices))
on(colormenu.selection) do s
colornode[] = s
end
lowerclim_string = @lift("quantile = " * @sprintf("%0.2f", $lowerclim_node) * ", value = " * @sprintf("%0.1e", $clims[1]))
upperclim_string = @lift("quantile = " * @sprintf("%0.2f", $upperclim_node) * ", value = " * @sprintf("%0.1e", $clims[2]))
# depends on makie version, vbox for old, vgrid for new
layout[2, 1] = vgrid!(
Label(scene, "State", width = nothing),
statemenu,
Label(scene, "Color", width = nothing),
colormenu,
Label(scene, lowerclim_string, width = nothing),
lowerclim_slider,
Label(scene, upperclim_string, width = nothing),
upperclim_slider,
)
layout[1,1] = Label(scene, "Menu", width = width, textsize = 50)
# Menus 2
statemenu2 = Menu(scene, options = zip(statenames2, stateindex2))
on(statemenu2.selection) do s
statenode2[] = s
end
colormenu2 = Menu(scene, options = zip(colorchoices2, colorchoices2))
on(colormenu2.selection) do s
colornode2[] = s
end
lowerclim_string2 = @lift("quantile = " * @sprintf("%0.2f", $lowerclim_node2) * ", value = " * @sprintf("%0.1e", $clims2[1]))
upperclim_string2 = @lift("quantile = " * @sprintf("%0.2f", $upperclim_node2) * ", value = " * @sprintf("%0.1e", $clims2[2]))
# depends on makie version, vbox for old, vgrid for new
layout[2, 8] = vgrid!(
Label(scene, "State", width = nothing),
statemenu2,
Label(scene, "Color", width = nothing),
colormenu2,
Label(scene, lowerclim_string2, width = nothing),
lowerclim_slider2,
Label(scene, upperclim_string2, width = nothing),
upperclim_slider2,
)
layout[1,8] = Label(scene, "Menu", width = width, textsize = 50)
## Axis
# Modify Axis
axis = scene.children[1][OldAxis]
    axis[:names][:axisnames] = ("↓ Zonal [m] ", "Meridional [m]↓ ", "Depth [m]↓ ")
axis[:names][:align] = ((:left, :center), (:right, :center), (:right, :center))
# need to adjust size of ticks first and then size of axis names
axis[:names][:textsize] = (50.0, 50.0, 50.0)
axis[:ticks][:textsize] = (00.0, 00.0, 00.0)
# axis[:ticks][:ranges_labels].val # current axis labels
xticks = collect(range(-0, aspect[1], length = 2))
yticks = collect(range(-0, aspect[2], length = 6))
zticks = collect(range(-0, aspect[3], length = 2))
ticks = (xticks, yticks, zticks)
axis[:ticks][:ranges] = ticks
xtickslabels = [@sprintf("%0.1f", (xtick)) for xtick in xticks]
xtickslabels[end] = "1e6"
ytickslabels = ["", "south","", "", "north", ""]
    ztickslabels = [@sprintf("%0.1f", (ztick)) for ztick in zticks]
labels = (xtickslabels, ytickslabels, ztickslabels)
axis[:ticks][:labels] = labels
# Modify Axis
axis = scene.children[2][OldAxis]
    axis[:names][:axisnames] = ("↓ Zonal [m] ", "Meridional [m]↓ ", "Depth [m]↓ ")
axis[:names][:align] = ((:left, :center), (:right, :center), (:right, :center))
# need to adjust size of ticks first and then size of axis names
axis[:names][:textsize] = (50.0, 50.0, 50.0)
axis[:ticks][:textsize] = (00.0, 00.0, 00.0)
# axis[:ticks][:ranges_labels].val # current axis labels
xticks = collect(range(-0, aspect[1], length = 2))
yticks = collect(range(-0, aspect[2], length = 6))
zticks = collect(range(-0, aspect[3], length = 2))
ticks = (xticks, yticks, zticks)
axis[:ticks][:ranges] = ticks
xtickslabels = [@sprintf("%0.1f", (xtick)) for xtick in xticks]
xtickslabels[end] = "1e6"
ytickslabels = ["", "south","", "", "north", ""]
    ztickslabels = [@sprintf("%0.1f", (ztick)) for ztick in zticks]
labels = (xtickslabels, ytickslabels, ztickslabels)
axis[:ticks][:labels] = labels
display(scene)
return scene
end
function grabtitle(filename)
resolutionnumber = parse(Int64, split(filename, "_")[end-2])
return string(round(Int, 1000 * 16 / 192 / resolutionnumber)) * " km resolution, "
end
|
{"hexsha": "ba6d7b198d1317453eb347bfbc9719f2a32e84e5", "size": 9656, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "scripts/compare.jl", "max_stars_repo_name": "sandreza/Mesoscale", "max_stars_repo_head_hexsha": "cd3f00d9697ca3c97a51a5bf617edeb3a69e4178", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-06T18:27:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-06T18:27:53.000Z", "max_issues_repo_path": "scripts/compare.jl", "max_issues_repo_name": "sandreza/Mesoscale", "max_issues_repo_head_hexsha": "cd3f00d9697ca3c97a51a5bf617edeb3a69e4178", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-04-23T15:13:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-07T22:28:37.000Z", "max_forks_repo_path": "scripts/compare.jl", "max_forks_repo_name": "sandreza/Mesoscale", "max_forks_repo_head_hexsha": "cd3f00d9697ca3c97a51a5bf617edeb3a69e4178", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.2009569378, "max_line_length": 335, "alphanum_fraction": 0.6064623032, "num_tokens": 2975}
|
C
C************************** FXVDMC ************************************
C
C calculate derivative of moisture content with respect to pressure
C head using extended van Genuchten characteristic equation.
C for the extended van Genuchten equation this gives the value of the
C overall storage coefficient, or general storage term.
C
C***********************************************************************
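C   as a reading aid (restated from the expression coded below, with the
C   node index INOD on the soil arrays omitted): for PSI < VGPNOT the
C   function returns
C      VGN1*(POR-VGRMC)*|PSI|**VGN1/VGPSN * (1+BETA)**VGNR/(1+BETA)**2
C   with BETA = (PSI/VGPSAT)**VGN, otherwise the specific storage SS.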
C
DOUBLE PRECISION FUNCTION FXVDMC(PSI,SS,POR,INOD)
C
IMPLICIT NONE
INCLUDE 'CATHY.H'
INTEGER INOD
REAL*8 BETA,B1,B1R
REAL*8 PSI,SS,POR
INCLUDE 'SOILCHAR.H'
C
IF (PSI .LT. VGPNOT(INOD)) THEN
BETA=(PSI/VGPSAT(INOD))**VGN(INOD)
B1=BETA+1.0D0
B1R=1.0D0/B1
FXVDMC=VGN1(INOD)*(POR-VGRMC(INOD))*((DABS(PSI)**VGN1(INOD))
1 /VGPSN(INOD))*(B1**VGNR(INOD))*B1R*B1R
ELSE
FXVDMC=SS
END IF
C
RETURN
END
|
{"hexsha": "b89a09c590bd399c224d457cb303153fa93ef445", "size": 943, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "examples/weil_exemple_withDA/my_cathy_prj/src/fxvdmc.f", "max_stars_repo_name": "BenjMy/pycathy_wrapper", "max_stars_repo_head_hexsha": "8639713acbb5bc5c5cd293b94bdafe017d03f199", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/weil_exemple_withDA/my_cathy_prj/src/fxvdmc.f", "max_issues_repo_name": "BenjMy/pycathy_wrapper", "max_issues_repo_head_hexsha": "8639713acbb5bc5c5cd293b94bdafe017d03f199", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2021-10-14T13:07:46.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-02T03:32:05.000Z", "max_forks_repo_path": "examples/weil_exemple_withDA/my_cathy_prj/src/fxvdmc.f", "max_forks_repo_name": "BenjMy/pycathy_wrapper", "max_forks_repo_head_hexsha": "8639713acbb5bc5c5cd293b94bdafe017d03f199", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.46875, "max_line_length": 72, "alphanum_fraction": 0.5355249205, "num_tokens": 289}
|
# Copyright 2021 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
The FastEstimator implementation of Neural Architecture Search without Training on CIFAR10.
The model architecture implementation took reference from https://github.com/D-X-Y/AutoDL-Projects.
"""
import os
import tempfile
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import wget
from scipy import stats
import fastestimator as fe
from fastestimator.dataset.data import cifar10
from fastestimator.op.numpyop.univariate import ChannelTranspose, Normalize
from fastestimator.search import GridSearch
from fastestimator.util.wget_util import bar_custom, callback_progress
wget.callback_progress = callback_progress
# Predefined operation set
OPS = {
'none':
lambda C_in,
C_out,
stride,
affine,
track_running_stats: Zero(C_in, C_out, stride),
'avg_pool_3x3':
lambda C_in,
C_out,
stride,
affine,
track_running_stats: POOLING(C_in, C_out, stride, affine, track_running_stats),
'nor_conv_3x3':
lambda C_in,
C_out,
stride,
affine,
track_running_stats: ReLUConvBN(C_in, C_out, (3, 3), (stride, stride), (1, 1), (1, 1), affine, track_running_stats),
'nor_conv_1x1':
lambda C_in,
C_out,
stride,
affine,
track_running_stats: ReLUConvBN(C_in, C_out, (1, 1), (stride, stride), (0, 0), (1, 1), affine, track_running_stats),
'skip_connect':
lambda C_in,
C_out,
stride,
affine,
track_running_stats: Identity()
if stride == 1 and C_in == C_out else FactorizedReduce(C_in, C_out, stride, affine, track_running_stats),
}
class ResNetBasicblock(nn.Module):
def __init__(self, inplanes, planes, stride, affine=True):
super(ResNetBasicblock, self).__init__()
assert stride == 1 or stride == 2, 'invalid stride {:}'.format(stride)
self.conv_a = ReLUConvBN(inplanes, planes, 3, stride, 1, 1, affine)
self.conv_b = ReLUConvBN(planes, planes, 3, 1, 1, 1, affine)
if stride == 2:
self.downsample = nn.Sequential(nn.AvgPool2d(kernel_size=2, stride=2, padding=0),
nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, padding=0, bias=False))
elif inplanes != planes:
self.downsample = ReLUConvBN(inplanes, planes, 1, 1, 0, 1, affine)
else:
self.downsample = None
self.in_dim = inplanes
self.out_dim = planes
self.stride = stride
def forward(self, inputs):
basicblock = self.conv_a(inputs)
basicblock = self.conv_b(basicblock)
if self.downsample is not None:
residual = self.downsample(inputs)
else:
residual = inputs
return residual + basicblock
class ReLUConvBN(nn.Module):
def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine, track_running_stats=True):
super(ReLUConvBN, self).__init__()
self.op = nn.Sequential(
nn.ReLU(inplace=False),
nn.Conv2d(C_in, C_out, kernel_size, stride=stride, padding=padding, dilation=dilation, bias=False),
nn.BatchNorm2d(C_out, affine=affine, track_running_stats=track_running_stats))
def forward(self, x):
return self.op(x)
class POOLING(nn.Module):
def __init__(self, C_in, C_out, stride, affine=True, track_running_stats=True):
super(POOLING, self).__init__()
if C_in == C_out:
self.preprocess = None
else:
self.preprocess = ReLUConvBN(C_in, C_out, 1, 1, 0, 1, affine, track_running_stats)
self.op = nn.AvgPool2d(3, stride=stride, padding=1, count_include_pad=False)
def forward(self, inputs):
if self.preprocess: x = self.preprocess(inputs)
else: x = inputs
return self.op(x)
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
class Zero(nn.Module):
def __init__(self, C_in, C_out, stride):
super(Zero, self).__init__()
self.C_in = C_in
self.C_out = C_out
self.stride = stride
self.is_zero = True
def forward(self, x):
if self.C_in == self.C_out:
if self.stride == 1: return x.mul(0.)
else: return x[:, :, ::self.stride, ::self.stride].mul(0.)
else:
shape = list(x.shape)
shape[1] = self.C_out
zeros = x.new_zeros(shape, dtype=x.dtype, device=x.device)
return zeros
class FactorizedReduce(nn.Module):
def __init__(self, C_in, C_out, stride, affine, track_running_stats):
super(FactorizedReduce, self).__init__()
self.stride = stride
self.C_in = C_in
self.C_out = C_out
self.relu = nn.ReLU(inplace=False)
if stride == 2:
C_outs = [C_out // 2, C_out - C_out // 2]
self.convs = nn.ModuleList()
for i in range(2):
self.convs.append(nn.Conv2d(C_in, C_outs[i], 1, stride=stride, padding=0, bias=False))
self.pad = nn.ConstantPad2d((0, 1, 0, 1), 0)
elif stride == 1:
self.conv = nn.Conv2d(C_in, C_out, 1, stride=stride, padding=0, bias=False)
else:
raise ValueError('Invalid stride : {:}'.format(stride))
self.bn = nn.BatchNorm2d(C_out, affine=affine, track_running_stats=track_running_stats)
def forward(self, x):
if self.stride == 2:
x = self.relu(x)
y = self.pad(x)
out = torch.cat([self.convs[0](x), self.convs[1](y[:, :, 1:, 1:])], dim=1)
else:
out = self.conv(x)
out = self.bn(out)
return out
def str2structure(xstr):
"""Process the architecture string from NAS-Bench-201. Referenced from https://github.com/D-X-Y/AutoDL-Projects.
"""
assert isinstance(xstr, str), 'must take string (not {:}) as input'.format(type(xstr))
nodestrs = xstr.split('+')
genotypes = []
for node_str in nodestrs:
inputs = list(filter(lambda x: x != '', node_str.split('|')))
for xinput in inputs:
assert len(xinput.split('~')) == 2, 'invalid input length : {:}'.format(xinput)
inputs = (xi.split('~') for xi in inputs)
input_infos = tuple((op, int(IDX)) for (op, IDX) in inputs)
genotypes.append(input_infos)
return genotypes
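# Hypothetical usage sketch (not from the original file), traced from the parser
# above: an architecture string such as
#   '|nor_conv_3x3~0|+|skip_connect~0|nor_conv_1x1~1|'
# is parsed into
#   [(('nor_conv_3x3', 0),), (('skip_connect', 0), ('nor_conv_1x1', 1))]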
class InferCell(nn.Module):
def __init__(self, genotype, C_in, C_out, stride):
super(InferCell, self).__init__()
self.layers = nn.ModuleList()
self.node_IN = []
self.node_IX = []
for i in range(len(genotype)):
node_info = genotype[i]
cur_index = []
cur_innod = []
for (op_name, op_in) in node_info:
if op_in == 0:
layer = OPS[op_name](C_in, C_out, stride, True, True)
else:
layer = OPS[op_name](C_out, C_out, 1, True, True)
cur_index.append(len(self.layers))
cur_innod.append(op_in)
self.layers.append(layer)
self.node_IX.append(cur_index)
self.node_IN.append(cur_innod)
self.nodes = len(genotype)
self.in_dim = C_in
self.out_dim = C_out
def forward(self, inputs):
nodes = [inputs]
for (node_layers, node_innods) in zip(self.node_IX, self.node_IN):
node_feature = sum(self.layers[_il](nodes[_ii]) for _il, _ii in zip(node_layers, node_innods))
nodes.append(node_feature)
return nodes[-1]
class NasbenchNetwork(nn.Module):
def __init__(self, genotype, C, N, num_classes, batch_size=128):
super(NasbenchNetwork, self).__init__()
self._C = C
self._layerN = N
self.stem = nn.Sequential(nn.Conv2d(3, C, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(C))
layer_channels = [C] * N + [C * 2] + [C * 2] * N + [C * 4] + [C * 4] * N
layer_reductions = [False] * N + [True] + [False] * N + [True] + [False] * N
C_prev = C
self.cells = nn.ModuleList()
for (C_curr, reduction) in zip(layer_channels, layer_reductions):
if reduction:
cell = ResNetBasicblock(C_prev, C_curr, 2, True)
else:
cell = InferCell(genotype, C_prev, C_curr, 1)
self.cells.append(cell)
C_prev = cell.out_dim
self._Layer = len(self.cells)
self.lastact = nn.Sequential(nn.BatchNorm2d(C_prev), nn.ReLU(inplace=True))
self.global_pooling = nn.AdaptiveAvgPool2d(1)
self.classifier = nn.Linear(C_prev, num_classes)
self.relu_out = {}
for _, module in self.named_modules():
if 'ReLU' in str(type(module)):
module.register_forward_hook(self.relu_hook)
def relu_hook(self, module, inp, out):
try:
self.relu_out[inp[0].device].append(out.view(out.size(0), -1))
except:
self.relu_out[inp[0].device] = [out.view(out.size(0), -1)]
def forward(self, inputs):
feature = self.stem(inputs)
for cell in self.cells:
feature = cell(feature)
out = self.lastact(feature)
out = self.global_pooling(out)
out = out.view(out.size(0), -1)
logits = self.classifier(out)
return logits
def get_pipeline_data(batch_size=128):
train_data, _ = cifar10.load_data()
pipeline = fe.Pipeline(
train_data=train_data,
batch_size=batch_size,
ops=[
Normalize(inputs="x", outputs="x", mean=(0.4914, 0.4822, 0.4465), std=(0.2471, 0.2435, 0.2616)),
ChannelTranspose(inputs="x", outputs="x")
])
result = pipeline.get_results()
return result
def score_fn(search_idx, uid, batch_data, config_info, batch_size):
config = config_info.loc[uid, :]
model = fe.build(
model_fn=lambda: NasbenchNetwork(
str2structure(config["architecture"]), config["C"], config["N"], 10, batch_size),
optimizer_fn=None)
if torch.cuda.is_available():
batch_data["x"] = batch_data["x"].to("cuda")
model = model.to("cuda")
_ = fe.backend.feed_forward(model, batch_data["x"], training=False)
if torch.cuda.device_count() > 1:
model = model.module
key_set = []
for key in model.relu_out.keys():
key_set.append(key)
matrix = np.zeros((batch_size, batch_size))
for i in range(len(model.relu_out[key_set[0]])):
x = np.concatenate([(model.relu_out[key][i] > 0).float().cpu().numpy() for key in key_set], axis=0)
x_t = np.transpose(x)
mat = x @ x_t
mat2 = (1. - x) @ (1. - x_t)
matrix = matrix + mat + mat2
_, score = np.linalg.slogdet(matrix)
return score
def fastestimator_run(batch_size=128, num_archs=1000, save_dir=tempfile.mkdtemp()):
download_link = "https://github.com/fastestimator-util/fastestimator-misc/raw/master/resource/nasbench201_info.csv"
uid_list = np.random.choice(15625, size=num_archs, replace=False) # Select random set of networks
wget.download(download_link, save_dir, bar=bar_custom)
config_info = pd.read_csv(os.path.join(save_dir, "nasbench201_info.csv"))
batch_data = get_pipeline_data(batch_size)
search = GridSearch(
eval_fn=lambda search_idx,
uid: score_fn(search_idx, uid, batch_data=batch_data, config_info=config_info, batch_size=batch_size),
params={"uid": uid_list},
best_mode="max")
search.fit()
best_results = search.get_best_results()
score_list = [result['result']['value'] for result in search.get_search_summary()]
acc_list = [config_info.loc[i, :]["accuracy"] for i in uid_list]
tau, _ = stats.kendalltau(acc_list, score_list)
print("Kendall's Tau correlation coefficient: ", tau)
print("Maximum accuracy among all the networks tested: ", np.max(acc_list))
print("Params for best network: {}, best score: {} and corresponding accuracy: {}".format(
best_results['param'],
best_results['result']['value'],
config_info.loc[best_results['param']["uid"], :]["accuracy"]))
print(
"The best network is the top - {} network among the selected networks, based on trained performance (accuracy)".
format(
len(acc_list) -
list(np.sort(acc_list)).index(config_info.loc[best_results['param']["uid"], :]["accuracy"])))
if __name__ == "__main__":
fastestimator_run()
|
{"hexsha": "e17312c24a9ce0cafe32206111db432e05327f67", "size": 13197, "ext": "py", "lang": "Python", "max_stars_repo_path": "apphub/neural_architecture_search/naswot/naswot_torch.py", "max_stars_repo_name": "DwijayDS/fastestimator", "max_stars_repo_head_hexsha": "9b288cb2bd870f971ec4cee09d0b3205e1316a94", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "apphub/neural_architecture_search/naswot/naswot_torch.py", "max_issues_repo_name": "DwijayDS/fastestimator", "max_issues_repo_head_hexsha": "9b288cb2bd870f971ec4cee09d0b3205e1316a94", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "apphub/neural_architecture_search/naswot/naswot_torch.py", "max_forks_repo_name": "DwijayDS/fastestimator", "max_forks_repo_head_hexsha": "9b288cb2bd870f971ec4cee09d0b3205e1316a94", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.9591280654, "max_line_length": 120, "alphanum_fraction": 0.6193831931, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3352}
|
from scipy.integrate import solve_dde
import matplotlib.pyplot as plt
import numpy as np
def fun(t,y,Z):
if y[0] >= 0.0:
return [-1.0]
else:
return [-10.0]
y0 = 1.0
jumps = [1.]
tf = 2.0
tspan = [0.0, tf]
delays = []
rtol = 1e-5
atol = 1e-10
sol45 = solve_dde(fun, tspan, delays, [y0], [y0],
method='RK45', atol=atol, rtol=rtol)
sol45_j = solve_dde(fun, tspan, delays, [y0], [y0], #tracked_stages=0,
method='RK45', jumps=jumps, atol=atol, rtol=rtol)
print('nfev of f without jumps option : %s' % (sol45.nfev))
print('nfev of f with jumps option : %s' % (sol45_j.nfev))
print('nfailed without jumps option : %s' % (sol45.nfailed))
print('nfailed with jumps option : %s' % (sol45_j.nfailed))
def anaf(t):
if t <= 1.:
return 1. - t
else:
return -10. * (t - 1.)
t_j = sol45_j.t
y_j = sol45_j.y[0,:]
t_ = sol45.t
y_ = sol45.y[0,:]
ana_j = np.zeros(y_j.shape)
ana_ = np.zeros(y_.shape)
for i in range(len(t_)):
ana_[i] = anaf(t_[i])
for i in range(len(t_j)):
ana_j[i] = anaf(t_j[i])
eps = 1e-2
mask_ = np.abs(y_) > eps
mask_j = np.abs(y_j) > eps
err_j = np.abs(np.abs(ana_j[mask_j]-y_j[mask_j])/ana_j[mask_j])
err_ = np.abs(np.abs(ana_[mask_]-y_[mask_])/ana_[mask_])
plt.figure()
plt.plot(t_[mask_], err_, 'o-', label='without jump')
plt.plot(t_j[mask_j], err_j, 'o-', label='with jump')
plt.yscale('log')
plt.legend()
plt.xlabel(r'$t$')
plt.ylabel(r'$\varepsilon$')
plt.savefig('figures/discODEex2/error_RK45')
plt.figure()
plt.plot(t_, ana_, 'o-', label='ana')
plt.plot(t_, y_, 'o-', label='no jump')
plt.plot(t_j, y_j, 'o-', label=' jump')
plt.legend()
plt.xlabel("t")
plt.ylabel("y")
plt.legend()
plt.savefig('figures/discODEex2/y_RK45')
plt.figure()
plt.plot(t_[0:-1], np.diff(t_), 'o', label='dt')
plt.plot(t_j[0:-1], np.diff(t_j), 'o', label='dt jump')
plt.yscale('log')
plt.legend()
plt.xlabel("t")
plt.ylabel("dt")
plt.legend()
plt.savefig('figures/discODEex2/dt_RK45')
plt.show()
|
{"hexsha": "21290f477d9d982272d6914bd42036cb4ca98093", "size": 2005, "ext": "py", "lang": "Python", "max_stars_repo_path": "DDEs_models_test/discODEex2_RK45.py", "max_stars_repo_name": "jrmejansen/scipy", "max_stars_repo_head_hexsha": "77f4f5172f8e718de96b89bf3f015a8729a7613c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-05-14T13:09:58.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-14T13:09:58.000Z", "max_issues_repo_path": "DDEs_models_test/discODEex2_RK45.py", "max_issues_repo_name": "jrmejansen/scipy", "max_issues_repo_head_hexsha": "77f4f5172f8e718de96b89bf3f015a8729a7613c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "DDEs_models_test/discODEex2_RK45.py", "max_forks_repo_name": "jrmejansen/scipy", "max_forks_repo_head_hexsha": "77f4f5172f8e718de96b89bf3f015a8729a7613c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.7934782609, "max_line_length": 70, "alphanum_fraction": 0.6134663342, "include": true, "reason": "import numpy,from scipy", "num_tokens": 722}
|
# Copyright 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import datetime
import multiprocessing
import random
import string
import time
import numpy
import paho.mqtt.client as mqtt
from paho.mqtt import publish
BASE_TOPIC = 'pybench'
SUB_QUEUE = multiprocessing.Queue()
PUB_QUEUE = multiprocessing.Queue()
class Sub(multiprocessing.Process):
def __init__(self, hostname, port=1883, tls=None, auth=None, topic=None,
timeout=60, max_count=10, qos=0):
super(Sub, self).__init__()
self.hostname = hostname
self.port = port
self.tls = tls
self.topic = topic or BASE_TOPIC
self.auth = auth
self.msg_count = 0
self.start_time = None
self.max_count = max_count
self.end_time = None
self.timeout = timeout
self.qos = qos
self.end_time_lock = multiprocessing.Lock()
def run(self):
def on_connect(client, userdata, flags, rc):
client.subscribe(BASE_TOPIC + '/#', qos=self.qos)
def on_message(client, userdata, msg):
if self.start_time is None:
self.start_time = datetime.datetime.utcnow()
self.msg_count += 1
if self.msg_count >= self.max_count:
self.end_time_lock.acquire()
if self.end_time is None:
self.end_time = datetime.datetime.utcnow()
self.end_time_lock.release()
self.client = mqtt.Client()
self.client.on_connect = on_connect
self.client.on_message = on_message
if self.tls:
self.client.tls_set(**self.tls)
if self.auth:
self.client.username_pw_set(**self.auth)
self.client.connect(self.hostname, port=self.port)
self.client.loop_start()
while True:
time.sleep(1)
self.end_time_lock.acquire()
if self.end_time:
delta = self.end_time - self.start_time
SUB_QUEUE.put(delta.total_seconds())
self.client.loop_stop()
break
self.end_time_lock.release()
if self.start_time:
current_time = datetime.datetime.utcnow()
curr_delta = current_time - self.start_time
if curr_delta.total_seconds() > self.timeout:
raise Exception('We hit the sub timeout!')
class Pub(multiprocessing.Process):
def __init__(self, hostname, port=1883, tls=None, auth=None, topic=None,
timeout=60, max_count=10, msg_size=1024, qos=0):
super(Pub, self).__init__()
self.hostname = hostname
self.port = port
self.tls = tls
self.topic = topic or BASE_TOPIC
self.auth = auth
self.start_time = None
self.max_count = max_count
self.end_time = None
self.timeout = timeout
self.msg = ''.join(
            random.choice(string.ascii_lowercase) for i in range(msg_size))
self.qos = qos
def run(self):
self.start_time = datetime.datetime.utcnow()
for i in range(self.max_count):
publish.single(self.topic, self.msg, hostname=self.hostname,
port=self.port, auth=self.auth, tls=self.tls,
qos=self.qos)
if self.start_time:
current_time = datetime.datetime.utcnow()
curr_delta = current_time - self.start_time
if curr_delta.total_seconds() > self.timeout:
raise Exception('We hit the pub timeout!')
end_time = datetime.datetime.utcnow()
delta = end_time - self.start_time
PUB_QUEUE.put(delta.total_seconds())
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--pub-clients', type=int, dest='pub_clients',
default=10,
help='The number of publisher client workers to use. '
'By default 10 are used.')
parser.add_argument('--sub-clients', type=int, dest='sub_clients',
default=10,
help='The number of subscriber client workers to use. '
'By default 10 are used')
parser.add_argument('--pub-count', type=int, dest='pub_count',
default=10,
help='The number of messages each publisher client '
'will publish for completing. The default count '
'is 10')
parser.add_argument('--sub-count', type=int, dest='sub_count',
default=10,
help='The number of messages each subscriber client '
                             'will wait to receive before completing. The '
'default count is 10.')
parser.add_argument('--msg-size', type=int, dest='msg_size', default=1024,
help='The payload size to use in bytes')
parser.add_argument('--sub-timeout', type=int, dest='sub_timeout',
default=60,
help='The amount of time, in seconds, a subscriber '
'client will wait for messages. By default this '
'is 60.')
parser.add_argument('--pub-timeout', type=int, dest='pub_timeout',
default=60,
help="The amount of time, in seconds, a publisher "
"client will wait to successfully publish it's "
"messages. By default this is 60")
parser.add_argument('--hostname', required=True,
help='The hostname (or ip address) of the broker to '
'connect to')
parser.add_argument('--port', default=1883, type=int,
help='The port to use for connecting to the broker. '
'The default port is 1883.')
parser.add_argument('--topic',
help='The MQTT topic to use for the benchmark. The '
'default topic is pybench')
parser.add_argument('--cacert',
help='The certificate authority certificate file that '
'are treated as trusted by the clients')
parser.add_argument('--username',
help='An optional username to use for auth on the '
'broker')
parser.add_argument('--password',
help='An optional password to use for auth on the '
'broker. This requires a username is also set')
parser.add_argument('--brief', action='store_true', default=False,
help='Print results in a colon separated list instead'
' of a human readable format. See the README for '
'the order of results in this format')
parser.add_argument('--qos', default=0, type=int, choices=[0, 1, 2],
help='The qos level to use for the benchmark')
opts = parser.parse_args()
sub_threads = []
pub_threads = []
topic = getattr(opts, 'topic') or BASE_TOPIC
tls = None
if getattr(opts, 'cacert'):
tls = {'ca_certs': opts.cacert}
auth = None
if opts.username:
auth = {'username': opts.username,
'password': getattr(opts, 'password')}
if opts.pub_count * opts.pub_clients < opts.sub_count:
print('The configured number of publisher clients and published '
'message count is too small for the configured subscriber count.'
' Increase the value of --pub-count and/or --pub-clients, or '
'decrease the value of --sub-count.')
exit(1)
for i in range(opts.sub_clients):
sub = Sub(opts.hostname, opts.port, tls, auth, topic, opts.sub_timeout,
opts.sub_count, opts.qos)
sub_threads.append(sub)
sub.start()
for i in range(opts.pub_clients):
pub = Pub(opts.hostname, opts.port, tls, auth, topic, opts.pub_timeout,
opts.pub_count, opts.qos)
pub_threads.append(pub)
pub.start()
start_timer = datetime.datetime.utcnow()
for client in sub_threads:
client.join(opts.sub_timeout)
curr_time = datetime.datetime.utcnow()
        delta = curr_time - start_timer
if delta.total_seconds() >= opts.sub_timeout:
raise Exception('Timed out waiting for threads to return')
start_timer = datetime.datetime.utcnow()
for client in pub_threads:
client.join(opts.pub_timeout)
curr_time = datetime.datetime.utcnow()
        delta = curr_time - start_timer
        if delta.total_seconds() >= opts.pub_timeout:
raise Exception('Timed out waiting for threads to return')
# Let's do some maths
    if SUB_QUEUE.qsize() < opts.sub_clients:
print('Something went horribly wrong, there are less results than '
'sub threads')
exit(1)
    if PUB_QUEUE.qsize() < opts.pub_clients:
print('Something went horribly wrong, there are less results than '
'pub threads')
exit(1)
sub_times = []
for i in range(opts.sub_clients):
try:
sub_times.append(SUB_QUEUE.get(opts.sub_timeout))
except multiprocessing.queues.Empty:
continue
if len(sub_times) < opts.sub_clients:
failed_count = opts.sub_clients - len(sub_times)
sub_times = numpy.array(sub_times)
pub_times = []
for i in range(opts.pub_clients):
try:
pub_times.append(PUB_QUEUE.get(opts.pub_timeout))
except multiprocessing.queues.Empty:
continue
if len(pub_times) < opts.pub_clients:
failed_count = opts.pub_clients - len(pub_times)
pub_times = numpy.array(pub_times)
if len(sub_times) < opts.sub_clients:
failed_count = opts.sub_clients - len(sub_times)
print("%s subscription workers failed" % failed_count)
if len(pub_times) < opts.pub_clients:
failed_count = opts.pub_clients - len(pub_times)
print("%s publishing workers failed" % failed_count)
sub_mean_duration = numpy.mean(sub_times)
sub_avg_throughput = float(opts.sub_count) / float(sub_mean_duration)
sub_total_thpt = float(
opts.sub_count * opts.sub_clients) / float(sub_mean_duration)
pub_mean_duration = numpy.mean(pub_times)
pub_avg_throughput = float(opts.pub_count) / float(pub_mean_duration)
pub_total_thpt = float(
opts.pub_count * opts.pub_clients) / float(pub_mean_duration)
if opts.brief:
output = '%s:%s:%s:%s:%s:%s:%s:%s:%s:%s'
else:
output = """\
[ran with %s subscribers and %s publishers]
================================================================================
Subscription Results
================================================================================
Avg. subscriber duration: %s
Subscriber duration std dev: %s
Avg. Client Throughput: %s
Total Throughput (msg_count * clients) / (avg. sub time): %s
================================================================================
Publisher Results
================================================================================
Avg. publisher duration: %s
Publisher duration std dev: %s
Avg. Client Throughput: %s
Total Throughput (msg_count * clients) / (avg. sub time): %s
"""
print(output % (
opts.sub_clients,
opts.pub_clients,
sub_mean_duration,
numpy.std(sub_times),
sub_avg_throughput,
sub_total_thpt,
pub_mean_duration,
numpy.std(pub_times),
pub_avg_throughput,
pub_total_thpt,
))
if __name__ == '__main__':
main()
|
{"hexsha": "6939ffeb3aca0c2287b26e1f61dbeb21fbaf96f2", "size": 12415, "ext": "py", "lang": "Python", "max_stars_repo_path": "pymqttbench/bench.py", "max_stars_repo_name": "mtreinish/pymqttbench", "max_stars_repo_head_hexsha": "7b3bbd1a4de747492c3803667b2c48f9f432fb2d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2017-12-22T22:49:55.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-21T18:38:53.000Z", "max_issues_repo_path": "pymqttbench/bench.py", "max_issues_repo_name": "mtreinish/pymqttbench", "max_issues_repo_head_hexsha": "7b3bbd1a4de747492c3803667b2c48f9f432fb2d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-04-19T11:08:09.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-09T18:28:00.000Z", "max_forks_repo_path": "pymqttbench/bench.py", "max_forks_repo_name": "mtreinish/pymqttbench", "max_forks_repo_head_hexsha": "7b3bbd1a4de747492c3803667b2c48f9f432fb2d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-06-08T13:40:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-09T16:44:50.000Z", "avg_line_length": 40.1779935275, "max_line_length": 80, "alphanum_fraction": 0.5768828031, "include": true, "reason": "import numpy", "num_tokens": 2501}
|
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['text.usetex'] = True
import numpy as np
# vector field
def f(X, Y):
U = np.maximum(0.3, X + 1)
V = 0.5 * Y
return 0.07 * U, 0.07 * V
# action of the group
def act(X, Y, a, tx=0, ty=0):
newX = np.cos(a) * X - np.sin(a) * Y + tx
newY = np.sin(a) * X + np.cos(a) * Y + ty
return newX, newY
x = np.linspace(-1, 1, 6)
X, Y = np.meshgrid(x, x)
a = np.pi / 2
quiver_param = {
'units': 'width',
'scale': 1,
'pivot': 'middle',
'headwidth': 2.5,
'headlength': 5
}
br = 0.02
bb = 0.25
h = 3.3
# (1 - bb) h == w (1/3 - 2 br)
plt.figure(figsize=(h * (1 - bb), h * (1/3 - 2 * br)))
view = 1.2
ty = -1.8 # text position
plt.axes([0, bb, 1/3 - 2*br, 1 - bb])
plt.gca().axis('off')
U, V = f(X, Y)
plt.quiver(X, Y, U, V, **quiver_param)
plt.xlim(-view, view)
plt.ylim(-view, view)
plt.text(0, ty, r"$f(x)$", horizontalalignment='center')
plt.axes([1/2 - 1/6 + br, bb, 1/3 - 2*br, 1 - bb])
plt.gca().axis('off')
U, V = f(*act(X, Y, -a))
plt.quiver(X, Y, U, V, **quiver_param)
plt.xlim(-view, view)
plt.ylim(-view, view)
plt.text(0, ty, r"$f(g^{-1} x)$", horizontalalignment='center')
plt.axes([1 - 1/3 + 2*br, bb, 1/3 - 2*br, 1 - bb])
plt.gca().axis('off')
U, V = act(*f(*act(X, Y, -a)), a)
plt.quiver(X, Y, U, V, **quiver_param)
plt.xlim(-view, view)
plt.ylim(-view, view)
plt.text(0, ty, r"$\rho(g) f(g^{-1} x)$", horizontalalignment='center')
plt.savefig("rotate_field.pdf", transparent=True)
|
{"hexsha": "a302dd57d2d415c94b653eaa5c8a7c8c0a96f976", "size": 1508, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/image/plots/rotate_field.py", "max_stars_repo_name": "mariogeiger/se3cnn", "max_stars_repo_head_hexsha": "afd027c72e87f2c390e0a2e7c6cfc8deea34b0cf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 170, "max_stars_repo_stars_event_min_datetime": "2018-07-03T17:18:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T15:30:40.000Z", "max_issues_repo_path": "examples/image/plots/rotate_field.py", "max_issues_repo_name": "CNNs4QSPR/se3cnn", "max_issues_repo_head_hexsha": "513f5f827c4c511bdc96e3c6ea663c8fbce60f57", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 34, "max_issues_repo_issues_event_min_datetime": "2018-10-09T08:56:24.000Z", "max_issues_repo_issues_event_max_datetime": "2020-01-30T09:46:52.000Z", "max_forks_repo_path": "examples/image/plots/rotate_field.py", "max_forks_repo_name": "CNNs4QSPR/se3cnn", "max_forks_repo_head_hexsha": "513f5f827c4c511bdc96e3c6ea663c8fbce60f57", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 40, "max_forks_repo_forks_event_min_datetime": "2018-07-03T13:40:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-28T16:40:48.000Z", "avg_line_length": 21.5428571429, "max_line_length": 71, "alphanum_fraction": 0.5616710875, "include": true, "reason": "import numpy", "num_tokens": 611}
|
import json
from mpl_toolkits.mplot3d import Axes3D
import matplotlib
import pandas as pd
import numpy as np
import scipy.io as sio
import _pickle as cPickle
import time, os, math
import collections
from tqdm import tqdm
import matplotlib.pyplot as plt
import pickle
import math
# Load the data and select the important joints; all three cameras can be used
src_dir = '/media/chen/4CBEA7F1BEA7D1AE/Download/hand_dataset/NYU/train/'
test_model = 'rich/'
level_model = 'point/'
detal_name = 'Method_disturbance_fc/'
methon_name = 'NYU_suoluan'
detal_name += methon_name + '/'
save_dataset_dir = "data/hand_gen/"+test_model+level_model+detal_name
is_normlize = True
def get_param(dataset):
if dataset == 'icvl':
return 240.99, 240.96, 160, 120
elif dataset == 'nyu':
return 588.03, -587.07, 320, 240
elif dataset == 'msra':
return 241.42, 241.42, 160, 120
def pixel2world(x, fx, fy, ux, uy):
x[:, :, 0] = (x[:, :, 0] - ux) * x[:, :, 2] / fx
x[:, :, 1] = (x[:, :, 1] - uy) * x[:, :, 2] / fy
return x
# # Compute the maximum range along the x, y, z axes and take 80% of it
#
# path = os.path.join(src_dir, 'joint_data.mat')
# mat = sio.loadmat(path)
# joint_uvd = mat['joint_uvd']
# joint_uvd = joint_uvd.reshape(-1, 36, 3)
# # Select joints
# select_list = [0, 3, 6, 9, 12, 15, 18, 21, 24, 25, 27, 30, 31, 32]
# delete_list = list(range(36))
# for i in select_list:
# delete_list.remove(i)
# joint_uvd = np.delete(joint_uvd, delete_list, axis=1)
# # Convert to the world coordinate system (in mm)
# params = get_param('nyu')
# joint_uvd = pixel2world(joint_uvd, *params)
# x = joint_uvd[:,:,0]
# y = joint_uvd[:,:,1]
# z = joint_uvd[:,:,2]
# x = np.max(np.max(x,axis = 1) - np.min(x,axis = 1))
# y = np.max(np.max(y,axis = 1) - np.min(y,axis = 1))
# z = np.max(np.max(z,axis = 1) - np.min(z,axis = 1))
# xyz_scale = [x,y,z]
# print(xyz_scale) #[220.07752665833493, 219.73800684885566, 219.53804748548316]
def load_Method_disturbance_fc(this_file_path):
path = os.path.join(src_dir, 'joint_data.mat')
mat = sio.loadmat(path)
joint_uvd = mat['joint_uvd']
joint_uvd = joint_uvd.reshape(-1,36,3)
    # Select joints
select_list =[0, 3, 6, 9, 12, 15, 18, 21, 24, 25, 27, 30, 31, 32]
delete_list = list(range(36))
for i in select_list:
delete_list.remove(i)
joint_uvd = np.delete(joint_uvd,delete_list,axis=1)
    # Convert to the world coordinate system (in mm)
params = get_param('nyu')
joint_uvd = pixel2world(joint_uvd, *params)
    # Shuffle the order
# permutation = np.random.permutation(joint_uvd.shape[0])
# joint_uvd = joint_uvd[permutation, :, :]
if is_normlize:
joint_uvd = normlize(joint_uvd)
data_shape = joint_uvd.shape
output = np.loadtxt(this_file_path)
output = np.reshape(output, (-1, 14, 3))
ground_truth = np.loadtxt(
'/home/chen/Documents/awesome-hand-pose-estimation-master/evaluation/groundtruth/nyu/nyu_test_groundtruth_label.txt')
ground_truth = np.reshape(ground_truth, (-1, 14, 3))
params = get_param('nyu')
ground_truth = pixel2world(ground_truth, *params)
output = pixel2world(output, *params)
if is_normlize:
ground_truth, output = normlize_double(ground_truth, output)
move_bias = output - ground_truth
    # Generate two random integers in 0~8251 and one random weight in 0~1, data_shape[0] of them
move_bias = np.tile(move_bias,(math.ceil(data_shape[0]/move_bias.shape[0]),1,1))
# permutation = np.random.permutation(move_bias.shape[0])
# move_bias = move_bias[permutation, :, :]
move_bias = move_bias[:data_shape[0]]
    # Add anomalies
# permutation1 = np.random.permutation(move_bias.shape[0])
# permutation2 = np.random.permutation(move_bias.shape[0])
# proportion_array = np.random.rand(data_shape[0])
# proportion_array = np.expand_dims(proportion_array,axis=-1)
# proportion_array = np.expand_dims(proportion_array,axis=-1)
# proportion_array = np.tile(proportion_array,(1,data_shape[1],data_shape[2]))
# move_bias = move_bias[permutation1, :, :]*proportion_array \
# + move_bias[permutation2, :, :]*(1-proportion_array)
output = move_bias + joint_uvd
return joint_uvd, output
def normlize(joint_uvd):
joint_xyz_norm = []
for i in range(3):
joint_mean = np.mean(joint_uvd[:,:,i], axis=1)[:,np.newaxis]
joint_xyz_norm.append((joint_uvd[:,:,i]-joint_mean)/220)
joint_xyz_norm = np.concatenate([joint_xyz_norm[0][:,:,np.newaxis],joint_xyz_norm[1][:,:,np.newaxis],joint_xyz_norm[2][:,:,np.newaxis]],axis = -1)
return joint_xyz_norm
def normlize_double(joint_uvd, joint_bias):
joint_xyz_norm = []
joint_xyz_norm_b = []
for i in range(3):
joint_mean = np.mean(joint_bias[:,:,i], axis=1)[:,np.newaxis]
joint_xyz_norm.append((joint_uvd[:,:,i]-joint_mean)/220)
joint_xyz_norm_b.append((joint_bias[:,:,i]-joint_mean)/220)
joint_xyz_norm = np.concatenate([joint_xyz_norm[0][:,:,np.newaxis],joint_xyz_norm[1][:,:,np.newaxis],joint_xyz_norm[2][:,:,np.newaxis]],axis = -1)
joint_xyz_norm_b = np.concatenate([joint_xyz_norm_b[0][:,:,np.newaxis],joint_xyz_norm_b[1][:,:,np.newaxis],joint_xyz_norm_b[2][:,:,np.newaxis]],axis = -1)
return joint_xyz_norm, joint_xyz_norm_b
def get_file_path(root_path,file_list):
    # Get all file and directory names under this directory
    dir_or_files = os.listdir(root_path)
    for dir_file in dir_or_files:
        # Get the path of this directory or file
        dir_file_path = os.path.join(root_path,dir_file)
        # Determine whether this path is a file or a directory
        if os.path.isdir(dir_file_path):
            # Recursively collect the paths of all files and directories
get_file_path(dir_file_path,file_list)
else:
file_list.append(dir_file_path)
def draw_3d_point(labels, outputs, step, method):
#['Palm', 'Wrist1', 'Wrist2', 'Thumb.R1', 'Thumb.R2', 'Thumb.T', 'Index.R', 'Index.T', 'Mid.R', 'Mid.T', 'Ring.R', 'Ring.T', 'Pinky.R', 'Pinky.T', 'Mean']
label_id = [0,0,1,1,2,2,3,3,4,4,4,5,5,5]
fig = plt.figure(1)
plt.clf()
ax = fig.add_subplot(111, projection='3d')
bone_len = []
bone_fake_len = []
for draw_i in range(2):
if draw_i == 0:
pose_show = labels
fig_color = ['b', 'b', 'b', 'b', 'b', 'b']
else:
pose_show = outputs
fig_color = ['r', 'r', 'r', 'r', 'r', 'r']
ax.view_init(azim=20.0, elev=40.0) # aligns the 3d coord with the camera view
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
# if not method.startswith('no_norm_'):
# ax.set_xlim((-3.5, 1.5))
# ax.set_ylim((-2.5, 2.5))
# ax.set_zlim((-1, 6))
"""
        Blue: 'b' (blue)
        Green: 'g' (green)
        Red: 'r' (red)
        Blue-green (dark cyan): 'c' (cyan)
        Red-purple (magenta): 'm' (magenta)
        Yellow: 'y' (yellow)
        Black: 'k' (black)
        White: 'w' (white)
"""
#fig_color = ['c', 'm', 'y', 'g', 'r', 'b']
for f in range(6):
            cur_id = [i for i, x in enumerate(label_id) if x == f]  # indices belonging to the current class
for point_id in cur_id:
ax.scatter(pose_show[point_id, 0], pose_show[point_id, 1],pose_show[point_id, 2], s=30, c=fig_color[f])
if point_id!=cur_id[-1]:
ax.plot([pose_show[point_id, 0], pose_show[point_id + 1, 0]],
[pose_show[point_id, 1], pose_show[point_id + 1, 1]],
[pose_show[point_id, 2], pose_show[point_id + 1, 2]], color=fig_color[f],
linewidth=2)
len = np.sqrt(np.sum((pose_show[point_id] - pose_show[point_id + 1]) ** 2))
if draw_i == 0:
bone_len.append(round(len,1))
else:
bone_fake_len.append(round(len,1))
# concat
if point_id==cur_id[-1]:
ax.plot([pose_show[point_id, 0], pose_show[13, 0]],
[pose_show[point_id, 1], pose_show[13, 1]],
[pose_show[point_id, 2], pose_show[13, 2]], color=fig_color[f],
linewidth=2)
len = np.sqrt(np.sum((pose_show[point_id] - pose_show[13]) ** 2))
if draw_i == 0:
bone_len.append(round(len,1))
else:
bone_fake_len.append(round(len,1))
ax.set_title(str(bone_len)+"\n"+str(bone_fake_len),fontsize=10)
# plt.show()
# plt.pause(0.01)
if not os.path.isdir("./data/image/"):
os.makedirs("./data/image/")
plt.savefig("./data/image/" + method + str(step).zfill(10) + ".png")
def save_obj(obj, name, method):
if not os.path.isdir(save_dataset_dir + method + '/'):
os.makedirs(save_dataset_dir + method + '/')
with open(save_dataset_dir + method + '/' + name + '.pkl', 'wb') as f:
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
def load_obj(name, method):
with open(save_dataset_dir + method + '/' + name + '.pkl', 'rb') as f:
return pickle.load(f)
method_name = 'hand_test'
a = load_obj(method_name, '3DV18_NYU_DeepHPS')
errors = []
step = 0
for sample_id in tqdm(range(0,len(a),5)):
sample =a[sample_id]
draw_3d_point(sample["targets"], sample["node_features"], step, method_name)
step = step + 1
errors.append(np.mean(np.sqrt(np.sum((sample["targets"] - sample["node_features"]) ** 2, axis=1))))
errors = np.mean(np.array(errors))
# Start writing the output files
test_file_list = []
test_file_list_clear = []
method_name = []
get_file_path('/home/chen/Documents/awesome-hand-pose-estimation-master/evaluation/results/nyu/', test_file_list)
for file_path in test_file_list:
if file_path.endswith('.txt'):# and ('19' in file_path):# or '18' in file_path):
file_path_list = file_path.split('/')
file_path_list = file_path_list[-1][:-4]
method_name.append(file_path_list)
test_file_list_clear.append(file_path)
# Iterate over the two arrays
for file_id, this_file_path in enumerate(test_file_list_clear):
this_method_name = method_name[file_id]
print(this_file_path+'\n')
print(this_method_name+'\n')
    # Save the test set
output = np.loadtxt(this_file_path)
output = np.reshape(output, (-1, 14, 3))
hand_dict_array = []
ground_truth = np.loadtxt(
'/home/chen/Documents/awesome-hand-pose-estimation-master/evaluation/groundtruth/nyu/nyu_test_groundtruth_label.txt')
ground_truth = np.reshape(ground_truth, (-1, 14, 3))
joint_uvd = ground_truth
move_bias = output
params = get_param('nyu')
joint_uvd = pixel2world(joint_uvd, *params)
move_bias = pixel2world(move_bias, *params)
if is_normlize:
joint_uvd, move_bias = normlize_double(joint_uvd, move_bias)
for id in tqdm(range(joint_uvd.shape[0])):
one_joint_xyz_norm = joint_uvd[id, :, :]
one_joint_xyz_norm_mask = move_bias[id, :, :]
test_dict = {"targets": one_joint_xyz_norm,
"graph": [[0, 1, 1]],
"id": "handGen: " + str(id).zfill(7),
"node_features": one_joint_xyz_norm_mask}
hand_dict_array.append(test_dict)
save_obj(hand_dict_array, 'hand_test', this_method_name)
    # Save the training set
joint_uvd, move_bias = load_Method_disturbance_fc(this_file_path)
hand_dict_array = []
for id in tqdm(range(joint_uvd.shape[0])):
one_joint_xyz_norm = joint_uvd[id,:,:]
one_joint_xyz_norm_mask = move_bias[id,:,:]
test_dict = {"targets": one_joint_xyz_norm,
"graph": [[0, 1, 1]],
"id": "handGen: "+str(id).zfill(7),
"node_features": one_joint_xyz_norm_mask}
hand_dict_array.append(test_dict)
save_obj(hand_dict_array,'hand_train', this_method_name)
|
{"hexsha": "b64aca59dc7a48bbd5b8e8195ab968ebd0f26b89", "size": 11564, "ext": "py", "lang": "Python", "max_stars_repo_path": "watch_all_output_image_NYU_suoluan.py", "max_stars_repo_name": "Chenzhoujia/Mobile_hand_tf-gnn-samples-master", "max_stars_repo_head_hexsha": "56236752bb06bcc60b4befb024cdd100aa9ce9ac", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "watch_all_output_image_NYU_suoluan.py", "max_issues_repo_name": "Chenzhoujia/Mobile_hand_tf-gnn-samples-master", "max_issues_repo_head_hexsha": "56236752bb06bcc60b4befb024cdd100aa9ce9ac", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "watch_all_output_image_NYU_suoluan.py", "max_forks_repo_name": "Chenzhoujia/Mobile_hand_tf-gnn-samples-master", "max_forks_repo_head_hexsha": "56236752bb06bcc60b4befb024cdd100aa9ce9ac", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.5949367089, "max_line_length": 158, "alphanum_fraction": 0.6153580076, "include": true, "reason": "import numpy,import scipy", "num_tokens": 3557}
|
# author : Group 27
# date : 2021-11-25
"""Performs some statistical or machine learning analysis and summarizes the results as a figure(s) and a table(s)
Usage: preprocess_n_model.py --file_path=<file_path> --out_file=<out_file>
Options:
--file_path=<file_path>   Path to the directory containing the processed train and test data files
--out_file=<out_file>     Path to the directory where the output files will be written locally
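Example (illustrative paths, not from the original project):
    python preprocess_n_model.py --file_path=data/processed --out_file=results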
"""
import altair as alt
import numpy as np
import pandas as pd
import os
from docopt import docopt
from sklearn.compose import ColumnTransformer, make_column_transformer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression, Ridge, RidgeCV
from sklearn.preprocessing import (
OneHotEncoder,
OrdinalEncoder,
StandardScaler,
)
from sklearn.model_selection import (
GridSearchCV,
RandomizedSearchCV,
ShuffleSplit,
cross_val_score,
cross_validate,
train_test_split,
)
opt = docopt(__doc__)
def main(file_path, out_file):
df_train = pd.read_csv(f'{file_path}/train_df.csv')
df_test = pd.read_csv(f'{file_path}/test_df.csv')
X_train, y_train = df_train.drop(columns=["spotify_track_popularity"]), df_train["spotify_track_popularity"]
X_test, y_test = df_test.drop(columns=["spotify_track_popularity"]), df_test["spotify_track_popularity"]
numeric_features = [
'spotify_track_duration_ms',
'danceability',
'energy','key',
'loudness','mode',
'speechiness',
'acousticness',
'instrumentalness',
'liveness',
'valence',
'tempo',
'time_signature']
categorical_features = ['performer']
binary_features = ['spotify_track_explicit']
drop_features = ['song_id', 'spotify_track_id', 'spotify_track_album']
preprocessor = make_column_transformer(
(StandardScaler(), numeric_features),
(OneHotEncoder(handle_unknown="ignore"), categorical_features),
(OneHotEncoder(drop='if_binary', handle_unknown="ignore"), binary_features),
(CountVectorizer(max_features = 20000, stop_words="english"), "spotify_genre"),
(CountVectorizer(max_features = 20000, stop_words="english"), "song"),
("drop", drop_features)
)
pipe = make_pipeline(preprocessor, Ridge())
cv_df = pd.DataFrame(cross_validate(pipe, X_train, y_train, cv=10, return_train_score=True))
try:
cv_df.to_csv(f'{out_file}/cv_df.csv', index = False) # save the cv file
except:
os.makedirs(os.path.dirname(f'{out_file}/cv_df.csv'))
cv_df.to_csv(f'{out_file}/cv_df.csv', index = False)
    # random search over hyperparameters for model tuning
param_grid = {
"ridge__alpha": np.logspace(-3,2,6),
"columntransformer__countvectorizer-1__binary": np.array([True, False]),
"columntransformer__countvectorizer-1__max_features": np.arange(1000, 10000, 20000),
"columntransformer__countvectorizer-2__binary": np.array([True, False]),
"columntransformer__countvectorizer-2__max_features": np.arange(1000, 10000, 20000)
}
random_search = RandomizedSearchCV(
pipe, param_distributions=param_grid, n_jobs=-1, n_iter=10, cv=5
)
random_search.fit(X_train, y_train)
random_search_results = pd.DataFrame(random_search.cv_results_)[
[
"mean_test_score",
"param_ridge__alpha",
"param_columntransformer__countvectorizer-1__max_features",
"param_columntransformer__countvectorizer-1__binary",
"param_columntransformer__countvectorizer-2__max_features",
"param_columntransformer__countvectorizer-2__binary",
"rank_test_score",
]
].set_index("rank_test_score").sort_index()
try:
random_search_results.to_csv(f'{out_file}/best_hyperparameters.csv', index = False) # save the random_search results
except:
os.makedirs(os.path.dirname(f'{out_file}/best_hyperparameters.csv'))
random_search_results.to_csv(f'{out_file}/best_hyperparameters.csv', index = False)
#Evaluating on the test set
y_predicted = random_search.predict(X_test)
df = pd.DataFrame({'y_test':y_test, 'y_predicted':y_predicted})
plot = alt.Chart(df, title= "Predicted versus true Spotify popularities").mark_point(filled=True, clip=True).encode(
        alt.X('y_test', title='True values of Spotify popularities'),
        alt.Y('y_predicted', title='Predicted values of Spotify popularities', scale=alt.Scale(domain=(0, 100)))
)
plot_2 = plot + plot.mark_line(color = 'black').encode(
alt.Y('y_test')
)
try:
plot_2.save(f'{out_file}/predict_vs_test.png') #needs altair_saver package
except:
os.makedirs(os.path.dirname(f'{out_file}/predict_vs_test.png'))
        plot_2.save(f'{out_file}/predict_vs_test.png')
if __name__ == "__main__":
main(opt["--file_path"], opt["--out_file"])
|
{"hexsha": "d032c09722116e24b79016ac0200d7d3d21dbe39", "size": 5132, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/preprocess_n_model.py", "max_stars_repo_name": "rezam747/DSCI_522_Spotify_Track_Popularity_Predictor", "max_stars_repo_head_hexsha": "56abceb68a57f72de855e65bdc18bdd02d229311", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/preprocess_n_model.py", "max_issues_repo_name": "rezam747/DSCI_522_Spotify_Track_Popularity_Predictor", "max_issues_repo_head_hexsha": "56abceb68a57f72de855e65bdc18bdd02d229311", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2021-11-21T04:53:58.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-10T20:07:37.000Z", "max_forks_repo_path": "src/preprocess_n_model.py", "max_forks_repo_name": "rezam747/DSCI_522_Spotify_Track_Popularity_Predictor", "max_forks_repo_head_hexsha": "56abceb68a57f72de855e65bdc18bdd02d229311", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-11-20T02:40:30.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-02T23:59:47.000Z", "avg_line_length": 33.7631578947, "max_line_length": 126, "alphanum_fraction": 0.6960249415, "include": true, "reason": "import numpy", "num_tokens": 1232}
|
Euler Problem 51
================
By replacing the 1st digit of the 2-digit number \*3, it turns out that six of the nine possible values: 13, 23, 43, 53, 73, and 83, are all prime.
By replacing the 3rd and 4th digits of 56\*\*3 with the same digit, this 5-digit number is the first example having seven primes among the ten generated numbers, yielding the family: 56003, 56113, 56333, 56443, 56663, 56773, and 56993. Consequently 56003, being the first member of this family, is the smallest prime with this property.
Find the smallest prime which, by replacing part of the number (not necessarily adjacent digits) with the same digit, is part of an eight prime value family.
```python
from sympy import sieve
from itertools import combinations, product
primes = set(sieve.primerange(10**5, 10**6))
powers = [10, 100, 1000, 10000, 100000]
smallest_prime = 999999
for i in [1,2,3,4]:
for j in range(i):
pattern = 111110 - powers[i] - powers[j]
for a in range(10):
for b in range(10):
for c in 1, 3, 7, 9:
current_prime = 0
strikes = 0
N = a*powers[i] + b*powers[j] + c
for k in range(10):
if N > smallest_prime:
break
if not (N in primes):
strikes += 1
if strikes > 2:
break
elif current_prime == 0:
current_prime = N
N += pattern
else:
print(current_prime)
smallest_prime = min(smallest_prime, current_prime)
```
121313
**Explanation:** We search for candidates with up to six digits. The number of asterisks must be a multiple of 3:
otherwise, at least three of the ten numbers obtained by replacing the asterisks with the same digit are divisible by 3, so at most seven of them can be prime.
For example, the pattern 56\*\*3 generates the 10 numbers {56003, 56113, 56223, ..., 56993}, and three of these numbers
are divisible by 3 (56223, 56553, and 56883). Clearly the last digit in the pattern cannot be an asterisk, since a prime
greater than 5 can end only in 1, 3, 7, or 9.
In the code, `pattern` is a six-digit number with three 0s and three 1s, such as `110100`. The ones indicate the positions of the asterisks, and the zeros indicate positions where other digits `a`, `b`, `c` can be placed. For example, if `pattern = 110100`, `a = 5`, `b = 6`, and `c = 7`, then the candidate primes are 005067, 115167, 225267, ..., 995967. We search for examples of this sort in which 8 out of the 10 candidates are prime.
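As a quick illustration of the divisibility argument (an added check, not part of the original solution), the snippet below builds the ten members of the 56\*\*3 family from the problem statement and lists those divisible by 3; the variable names are chosen only for this example.
```python
# Build the 56**3 family by replacing both asterisks with the same digit d (added illustration).
family = [56003 + d * 110 for d in range(10)]
multiples_of_3 = [n for n in family if n % 3 == 0]
print(multiples_of_3)  # [56223, 56553, 56883] -- three members, so at most seven can be prime
```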
|
{"hexsha": "84b8522e4872c6835633386fe82d58673a2c3697", "size": 3907, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "Euler 051 - Prime digit replacements.ipynb", "max_stars_repo_name": "Radcliffe/project-euler", "max_stars_repo_head_hexsha": "5eb0c56e2bd523f3dc5329adb2fbbaf657e7fa38", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2016-05-11T18:55:35.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-27T21:38:43.000Z", "max_issues_repo_path": "Euler 051 - Prime digit replacements.ipynb", "max_issues_repo_name": "Radcliffe/project-euler", "max_issues_repo_head_hexsha": "5eb0c56e2bd523f3dc5329adb2fbbaf657e7fa38", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Euler 051 - Prime digit replacements.ipynb", "max_forks_repo_name": "Radcliffe/project-euler", "max_forks_repo_head_hexsha": "5eb0c56e2bd523f3dc5329adb2fbbaf657e7fa38", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.8673469388, "max_line_length": 446, "alphanum_fraction": 0.5323777835, "converted": true, "num_tokens": 709}
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contributor(s):
# - Wirawan Purwanto
# - Qiming Sun <osirpt.sun@gmail.com>
#
#
import numpy
import h5py
from pyscf.lib.chkfile import load
from pyscf.lib.chkfile import dump, save
from pyscf.lib.chkfile import load_mol, save_mol
def load_mcscf(chkfile):
return load_mol(chkfile), load(chkfile, 'mcscf')
def dump_mcscf(mc, chkfile=None, key='mcscf',
e_tot=None, mo_coeff=None, ncore=None, ncas=None,
mo_occ=None, mo_energy=None, e_cas=None, ci_vector=None,
casdm1=None, overwrite_mol=True):
'''Save CASCI/CASSCF calculation results or intermediates in chkfile.
'''
if chkfile is None: chkfile = mc.chkfile
if ncore is None: ncore = mc.ncore
if ncas is None: ncas = mc.ncas
if e_tot is None: e_tot = mc.e_tot
if e_cas is None: e_cas = mc.e_cas
if mo_coeff is None: mo_coeff = mc.mo_coeff
#if ci_vector is None: ci_vector = mc.ci
if h5py.is_hdf5(chkfile):
fh5 = h5py.File(chkfile, 'a')
if key in fh5:
del(fh5[key])
else:
fh5 = h5py.File(chkfile, 'w')
if 'mol' not in fh5:
fh5['mol'] = mc.mol.dumps()
elif overwrite_mol:
del(fh5['mol'])
fh5['mol'] = mc.mol.dumps()
fh5[key+'/mo_coeff'] = mo_coeff
def store(subkey, val):
if val is not None:
fh5[key+'/'+subkey] = val
store('e_tot', e_tot)
store('e_cas', e_cas)
store('ci', ci_vector)
store('ncore', ncore)
store('ncas', ncas)
store('mo_occ', mo_occ)
store('mo_energy', mo_energy)
store('casdm1', casdm1)
fh5.close()
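# ---------------------------------------------------------------------------
# Hypothetical usage sketch (added for illustration; not part of the original
# PySCF module). It assumes PySCF is installed; the molecule, basis set, and
# active-space size below are arbitrary example choices.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from pyscf import gto, scf, mcscf
    mol = gto.M(atom='N 0 0 0; N 0 0 1.1', basis='sto-3g')
    mf = scf.RHF(mol).run()
    mc = mcscf.CASSCF(mf, 4, 4)        # 4 active orbitals, 4 active electrons
    mc.chkfile = 'n2_casscf.chk'
    mc.run()
    dump_mcscf(mc, ci_vector=mc.ci)    # store the CASSCF results under the 'mcscf' key
    mol2, mcscf_data = load_mcscf(mc.chkfile)
    print(mcscf_data['e_tot'], mcscf_data['ncas'])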
|
{"hexsha": "9ad0058f1ab7325e2fc26d3a117473e0d2eb5cbd", "size": 2232, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyscf/mcscf/chkfile.py", "max_stars_repo_name": "crisely09/pyscf", "max_stars_repo_head_hexsha": "cb92f7974bd9c87c0ef5b2b52abf5d3219b3d6b6", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-06-30T22:33:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-22T18:02:36.000Z", "max_issues_repo_path": "pyscf/mcscf/chkfile.py", "max_issues_repo_name": "crisely09/pyscf", "max_issues_repo_head_hexsha": "cb92f7974bd9c87c0ef5b2b52abf5d3219b3d6b6", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyscf/mcscf/chkfile.py", "max_forks_repo_name": "crisely09/pyscf", "max_forks_repo_head_hexsha": "cb92f7974bd9c87c0ef5b2b52abf5d3219b3d6b6", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-06-01T05:31:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-08T02:38:33.000Z", "avg_line_length": 30.5753424658, "max_line_length": 74, "alphanum_fraction": 0.6581541219, "include": true, "reason": "import numpy", "num_tokens": 650}
|
# -*- coding: utf-8 -*-
#
# plot_sequence_I_networks_perlin_size.py
#
# Copyright 2017 Sebastian Spreizer
# The MIT License
import numpy as np
import matplotlib as mpl
import pylab as pl
from lib.circular_colormap import gen_circular_cmap
from lib.panel_label import panel_label
from lib.ploscb_formatting import set_fontsize
import lib.activity_sequence as seq
import lib.ax_spines as ax_spines
import lib.connectivity_landscape as cl
import lib.protocol as protocol
set_fontsize()
def plot_landscape_map(ax, landscape):
    ax.matshow(landscape.reshape(nrow, ncol), vmin=0, vmax=7, origin='lower', cmap=cmap)
ax.xaxis.set_ticks_position('bottom')
ax.set_xlim(0, ncol)
ax.set_ylim(0, nrow)
ax.set_axis_off()
return ax
def plot_rate_map(ax, gids):
x, y = gids % ncol, gids // nrow
h = np.histogram2d(x, y, bins=[range(ncol + 1), range(nrow + 1)])[0]
    ax.matshow(np.log10(h.T / 10.), origin='lower', vmin=0, vmax=np.log10(50), cmap=cmap)
ax.xaxis.set_ticks_position('bottom')
ax.set_xlim(0, ncol)
ax.set_ylim(0, nrow)
ax.set_axis_off()
def plot_direction_changing(das, label):
bins = np.linspace(-np.pi, np.pi, 51)
hd = np.histogram(das, bins=bins)[0]
ax_da.plot(bins[:-1], hd, label=label, lw=1)
def plot_speed(speed, label):
bins = np.linspace(0, 3, 51)
hs = np.histogram(speed, bins=bins)[0]
ax_s.plot(bins[:-1] / 25. * 1000., hs / float(np.sum(hs)), label=label, lw=1)
def plot(idx, params, label):
landscape = cl.__dict__[params['landscape']['mode']](
nrow, params['landscape'].get('specs', {}))
gids, ts = protocol.get_or_simulate(simulation, params)
ii = ts > recstart
clusters, sequences = seq.identify_vectors(ts[ii], gids[ii] - 1, nrow, ncol, steps=steps, width=width, td=td, eps=eps)
das = []
t, x, y, c, a, s = sequences
for cid in np.unique(c):
cidx = c == cid
da = np.diff(a[cidx])
das.extend(da[~np.isnan(da)])
nclu = []
for i in range(0, 9000, 100):
q = (t >= (i + 500)) * (t < (i + 1500))
nclu.append((np.unique(c[q]).size))
nclusters.append(nclu)
ax1 = pl.subplot2grid((gridy, gridx), (0, idx))
plot_landscape_map(ax1, landscape)
ax1.set_title(label)
ax2 = pl.subplot2grid((gridy, gridx), (1, idx))
plot_rate_map(ax2, gids[ii])
plot_speed(s[~np.isnan(s)], label.split('\n')[-1])
plot_direction_changing(das, label.split('\n')[-1])
return ax1, ax2
simulation = 'sequence_I_networks'
params = protocol.get_parameters(simulation).as_dict()
nrow, ncol = params['nrow'], params['ncol']
npop = nrow * ncol
landscapes = [
{'mode': 'homogeneous', 'specs': {'phi': 3}},
{'mode': 'Perlin_uniform', 'specs': {'size': 3}},
{'mode': 'random'},
]
steps = 10
width = 25
td = 20
eps = 4
recstart = 500.
perlin_sizes = [3, 5, 10, 20, 50]
gridx = len(perlin_sizes) + 2
gridy = 5
nclusters = []
fig = pl.figure(dpi=300)
ax_c = pl.subplot2grid((10, 9), (5, 0), rowspan=3, colspan=2)
ax_s = pl.subplot2grid((10, 9), (5, 3), rowspan=3, colspan=2)
ax_da = pl.subplot2grid((10, 9), (5, 5), rowspan=3, colspan=3, projection='polar')
cmap = gen_circular_cmap()
params['landscape'] = landscapes[0]
ax1, ax2 = plot(0, params, 'Homogeneous\n$\infty$')
for idx, size in enumerate(perlin_sizes):
params['landscape'] = landscapes[1]
params['landscape']['specs']['size'] = size
plot(idx + 1, params, 'Perlin\n%i' % (100 // size))
params['landscape'] = landscapes[-1]
plot(idx + 2, params, 'Random\n1')
box = ax_c.boxplot(nclusters, notch=False, sym='', widths=0.6)
colors = pl.rcParams['axes.prop_cycle'].by_key()['color']
for i in range(7):
box['boxes'][i].set_color(colors[i])
box['medians'][i].set_color(colors[i])
box['whiskers'][i * 2].set_color(colors[i])
box['whiskers'][i * 2 + 1].set_color(colors[i])
box['caps'][i * 2].set_color(colors[i])
box['caps'][i * 2 + 1].set_color(colors[i])
ax_spines.set_default(ax_c)
ax_da.legend(bbox_to_anchor=(2.1, 1.2), title='Perlin scale')
ax_c.set_xticks([1, 4, 7])
ax_c.set_xticklabels(['$\infty$', 10, 1])
ax_c.set_xlabel('Perlin scale')
ax_c.set_ylim(20, 70)
ax_c.set_ylabel('Number of sequences')
ax_da.set_theta_offset(np.pi / 2)
# ax_da.set_ylabel('Count')
ax_da.set_xlabel('Direction changing')
ax_spines.set_default(ax_da)
ax_da.locator_params(nbins=4)
ax_da.set_xticks([0, np.pi / 2, np.pi, np.pi * 3 / 2])
ax_da.set_xticklabels([0, '$\pi/2$', '$\pi$', '$-\pi/2$'])
ax_da.set_rticks([500, 1000, 1500])
ax_da.set_yticklabels([])
ax_da.set_rlabel_position(np.pi)
ax_s.set_ylabel('Probability')
ax_s.set_xlabel('Velocity [grid points / sec]')
ax_spines.set_default(ax_s)
ax_s.locator_params(nbins=3)
panel_label(ax1, 'a', x=-.84)
panel_label(ax_c, 'b', x=-.5)
panel_label(ax_s, 'c', x=-.5)
panel_label(ax_da, 'd', x=-.25)
filename = 'sequence_I_networks_perlin_size'
fig.savefig(filename + '.png', format='png', dpi=300)
fig.savefig(filename + '.pdf', format='pdf')
pl.show()
|
{"hexsha": "77de7b2ffe12d48ce7b26e49ae36913a9069316a", "size": 4980, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/plot/sequence_I_networks_perlin_size.py", "max_stars_repo_name": "babsey/spatio-temporal-activity-sequence", "max_stars_repo_head_hexsha": "bb82c7d4d3b2f85e9d2635a5479d0478868d33bb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-11-12T01:11:00.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-12T15:15:28.000Z", "max_issues_repo_path": "scripts/plot/sequence_I_networks_perlin_size.py", "max_issues_repo_name": "ModelDBRepository/263711", "max_issues_repo_head_hexsha": "eb9ce4e4773c561a1e7ac82423f41139a7036462", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/plot/sequence_I_networks_perlin_size.py", "max_forks_repo_name": "ModelDBRepository/263711", "max_forks_repo_head_hexsha": "eb9ce4e4773c561a1e7ac82423f41139a7036462", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-01-17T20:58:17.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-17T20:58:17.000Z", "avg_line_length": 28.4571428571, "max_line_length": 122, "alphanum_fraction": 0.6564257028, "include": true, "reason": "import numpy", "num_tokens": 1625}
|
import copy
import time
import torch
import argparse
import numpy as np
from torch.utils.data import DataLoader
from deepsnap.batch import Batch
from deepsnap.dataset import GraphDataset
from torch_geometric.datasets import TUDataset
def arg_parse():
parser = argparse.ArgumentParser(description='Pagerank arguments.')
parser.add_argument('--device', type=str,
help='CPU / GPU device.')
parser.add_argument('--netlib', type=str,
help='Backend network library, nx or sx.')
parser.add_argument('--batch_size', type=int,
help='Batch size.')
parser.add_argument('--num_runs', type=int,
help='Number of runs averaged on.')
parser.add_argument('--dataset', type=str,
help='Dataset.')
parser.add_argument('--print_run', action='store_true',
help='Print out current run.')
parser.set_defaults(
device='cuda:0',
netlib="nx",
batch_size=1,
num_runs=100,
dataset='COX2',
print_run=False,
)
return parser.parse_args()
def ego_nets(graph, radius=2):
egos = []
# time_1 = time.time()
for i in range(graph.num_nodes):
if radius > 4:
            egos.append(graph.G)  # graph.G is the backing NetworkX/SnapX graph; for large radii use the whole graph as the ego net
        else:
            egos.append(netlib.ego_graph(graph.G, i, radius=radius))  # induced subgraph of all neighbors within `radius` of node i
# time_2 = time.time() #0.0007936954498291016
G = graph.G.__class__()
id_bias = graph.num_nodes
for i in range(len(egos)):
G.add_node(i, **egos[i].nodes(data=True)[i])
# time_3 = time.time() #0.0017819404602050781
for i in range(len(egos)):
# time_1 = time.time()
keys = list(egos[i].nodes)
# time_2 = time.time()
keys.remove(i)
# time_3 = time.time()
id_cur = egos[i].number_of_nodes() - 1
# time_4 = time.time()
vals = range(id_bias, id_bias + id_cur)
# time_5 = time.time()
id_bias += id_cur
mapping = dict(zip(keys, vals))
#time_6 = time.time()
ego = netlib.relabel_nodes(egos[i], mapping, copy=True) #10 seconds
#time_7 = time.time()
#G.add_nodes_from(range(2))
G.add_nodes_from(ego.nodes(data=True)) #3 seconds
#time_8 = time.time()
#G.add_edges_from([[0, 1]])
G.add_edges_from(ego.edges(data=True)) #10 seconds
#time_9 = time.time()
# print("For loop time")
# print("Ego Net 2: ", time_2 - time_1)
# print("Ego Net 3: ", time_3 - time_2)
# print("Ego Net 4: ", time_4 - time_3)
# print("Ego Net 5: ", time_5 - time_4)
# print("Ego Net 6: ", time_6 - time_5)
# print("Ego Net 7: ", time_7 - time_6)
# print("Ego Net 8: ", time_8 - time_7)
# print("Ego Net 9: ", time_9 - time_8)
#print("For loop times")
graph.G = G
# time_4 = time.time()#0.05971074104309082
graph.node_id_index = torch.arange(len(egos))
#time_5 = time.time()
# print("Ego Net Time 1: ", time_1)
# print("Ego Net 2: ", time_2 - time_1)
# print("Ego Net 3: ", time_3 - time_2)
# print("Ego Net 4: ", time_4 - time_3)
# print("Ego Net 5: ", time_5 - time_4)
def ego_graph(edge_index, num_nodes, node, radius=2):
edge_list = {}
for i in range(edge_index.shape[1]):
if edge_index[0][i].item() in edge_list:
edge_list[edge_index[0][i].item()].add(edge_index[1][i].item())
else:
edge_list[edge_index[0][i].item()] = set([edge_index[1][i].item()])
if radius > 4:
return set(range(num_nodes)), edge_list
neighbors = set([node])
for i in range(radius):
neighbors_temp = copy.copy(neighbors)
for neighbor in neighbors:
neighbor_neighbors = edge_list[neighbor]
for nn in neighbor_neighbors:
neighbors_temp.add(nn)
neighbors = neighbors_temp
ego_edge_list = {}
for neighbor in neighbors:
if neighbor in edge_list:
neighbor_neighbors = edge_list[neighbor]
for nn in neighbor_neighbors:
if nn in neighbors:
if neighbor in ego_edge_list:
ego_edge_list[neighbor].append(nn)
else:
ego_edge_list[neighbor] = [nn]
return list(neighbors), ego_edge_list
def pyg_ego_nets(graph, radius=2):
x = []
edges = []
egos = []
for i in range(graph.num_nodes):
egos.append(ego_graph(graph.edge_index, graph.num_nodes, i, radius=radius))
id_bias = graph.num_nodes
for i in range(len(egos)):
x.append(graph.x[i])
for i in range(len(egos)):
for neighbor in egos[i][0]:
if neighbor != i:
x.append(graph.x[neighbor])
keys = list(egos[i][0])
keys.remove(i)
id_cur = len(egos[i][0])
vals = range(id_bias, id_bias + id_cur)
id_bias += id_cur
mapping = dict(zip(keys, vals))
ego_edge_list = egos[i][1]
for node in ego_edge_list:
neighbors = ego_edge_list[node]
for neighbor in neighbors:
if node == i:
src = node
else:
src = mapping[node]
if neighbor == i:
dst = neighbor
else:
dst = mapping[neighbor]
edges.append([src, dst])
edge_index = torch.tensor(edges, dtype=torch.long).t()
x = torch.stack(x)
return x, edge_index
def pyg_ego(args, pyg_dataset):
avg_time = 0
for i in range(args.num_runs):
if args.print_run:
print("Run {}".format(i + 1))
graphs = []
ds = pyg_dataset[:373]
from torch_geometric.data import DataLoader
loader = DataLoader(ds, batch_size=1)
s = time.time()
for batch in loader:
x, edge_index = pyg_ego_nets(batch)
avg_time += (time.time() - s)
print("Tensor has average time: {}".format(avg_time / args.num_runs))
def deepsnap_ego(args, pyg_dataset):
avg_time = 0
task = "graph"
for i in range(args.num_runs):
if args.print_run:
print("Run {}".format(i + 1))
# time_1 = time.time()
graphs = GraphDataset.pyg_to_graphs(pyg_dataset, verbose=True, netlib=netlib)
# time_2 = time.time()
dataset = GraphDataset(graphs, task=task)
datasets = {}
datasets['train'], datasets['val'], datasets['test'] = dataset.split(transductive=False, split_ratio = [0.8, 0.1, 0.1], shuffle=False)
# time_3 = time.time()
dataloaders = {
split: DataLoader(
dataset, collate_fn=Batch.collate(),
batch_size=1, shuffle=False
) for split, dataset in datasets.items()
}
#time_4 = time.time()
s = time.time()
for batch in dataloaders['train']:
batch = batch.apply_transform(ego_nets, update_tensor=True)
# time_5 = time.time()
# print("Deepsnap Ego")
# print("Time 1: ", time_1)
# print("Time 2: ", time_2 - time_1)
# print("Time 3: ", time_3 - time_2)
# print("Time 4: ", time_4 - time_3)
# print("Time 5: ", time_5 - time_4)
# print("Deepsnap Ego")
avg_time += (time.time() - s)
print("DeepSNAP has average time: {}".format(avg_time / args.num_runs))
if __name__ == '__main__':
args = arg_parse()
if args.netlib == "nx":
print("Use NetworkX as the DeepSNAP backend network library.")
import networkx as netlib
elif args.netlib == "sx":
print("Use SnapX as the DeepSNAP backend network library.")
import snap
import snapx as netlib
else:
import networkx as netlib
print("Use NetworkX as the DeepSNAP backend network library.")
if args.dataset == 'COX2':
pyg_dataset = TUDataset('./tu', args.dataset)
print("Start benchmark DeepSNAP:")
deepsnap_ego(args, pyg_dataset)
print("Start benchmark Tensor:")
pyg_ego(args, pyg_dataset)
|
{"hexsha": "54f335aaa08f79ee2a345113fcae32ab1d9765eb", "size": 8202, "ext": "py", "lang": "Python", "max_stars_repo_path": "benchmark/transform/ego_net.py", "max_stars_repo_name": "ruth-ann/deepsnap", "max_stars_repo_head_hexsha": "35eeb5abdb304c53b2e0a68cbbeeaa55dca286a0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "benchmark/transform/ego_net.py", "max_issues_repo_name": "ruth-ann/deepsnap", "max_issues_repo_head_hexsha": "35eeb5abdb304c53b2e0a68cbbeeaa55dca286a0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "benchmark/transform/ego_net.py", "max_forks_repo_name": "ruth-ann/deepsnap", "max_forks_repo_head_hexsha": "35eeb5abdb304c53b2e0a68cbbeeaa55dca286a0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.0331950207, "max_line_length": 154, "alphanum_fraction": 0.5743721044, "include": true, "reason": "import numpy,import networkx", "num_tokens": 2159}
|