#
# simulate phenotypes from a LMM
#
# using Revise
using MendelIHT
using SnpArrays
using Random
using GLM
using DelimitedFiles
using Distributions
using LinearAlgebra
using CSV
using DataFrames
using StatsBase
using TraitSimulation
using Knockoffs
BLAS.set_num_threads(1)

"""
βi ~ Uniform([-0.5, -0.45, ..., -0.05, 0.05, ..., 0.5]) chosen uniformly on the
odd indices (i.e. real genotypes) across genome

k = Number of causal SNPs
p = Total number of SNPs
traits = Number of traits (phenotypes)
overlap = number of pleiotropic SNPs (affects each trait with effect size βi / r)
"""
function simulate_fixed_beta(k::Int, p::Int, traits::Int; overlap::Int=0)
    true_b = zeros(p, traits)
    effect_sizes = collect(0.05:0.05:0.5)
    k_indep = k - 2overlap # pleiotropic SNPs affect 2 phenotypes
    num_causal_snps = k_indep + overlap
    @assert num_causal_snps > 0 "number of causal SNPs should be positive but was $num_causal_snps"
    idx_causal_snps = sample(1:p, num_causal_snps, replace=false)
    @assert length(idx_causal_snps) == num_causal_snps "length(idx_causal_snps) = $(length(idx_causal_snps)) != num_causal_snps"
    shuffle!(idx_causal_snps)
    # pleiotropic SNPs affect 2 phenotypes
    for i in 1:overlap
        j = idx_causal_snps[i]
        rs = sample(1:traits, 2, replace=false)
        for r in rs
            true_b[j, r] = rand(-1:2:1) * effect_sizes[rand(1:10)]
        end
    end
    # non pleiotropic SNPs affect only 1 phenotype
    for i in (overlap+1):length(idx_causal_snps)
        idx = idx_causal_snps[i]
        true_b[idx, rand(1:traits)] = rand(-1:2:1) * effect_sizes[rand(1:10)]
    end
    @assert count(!iszero, true_b) == k "count(!iszero, true_b) = $(count(!iszero, true_b)) != k = $k"
    return true_b
end

"""
Trait covariance matrix is σg * Φ + σe * I where Φ is the GRM.
"""
function simulate_polygenic(
    plinkname::String,
    k::Int,
    r::Int;
    seed::Int=2021,
    σg=0.1,
    σe=0.9,
    βoverlap=2,
    )
    # set seed
    Random.seed!(seed)

    # simulate `.bed` file with no missing data
    x = SnpArray(plinkname * ".bed")
    xla = SnpLinAlg{Float64}(x, model=ADDITIVE_MODEL, impute=true, center=true, scale=true)
    n, p = size(x)

    # intercept is the only nongenetic covariate
    Z = ones(n, 1)
    intercepts = zeros(r)' # each trait have 0 intercept

    # simulate β
    B = simulate_fixed_beta(k, p, r, overlap=βoverlap)
    writedlm("sim$(seed)/trueb.txt", B)

    # between trait covariance matrix
    Σ = random_covariance_matrix(r)
    writedlm("sim$(seed)/true_cov.txt", Σ)

    # between sample covariance is identity + GRM
    Φ = readdlm(plinkname * ".grm")
    V = σg * Φ + σe * I

    # simulate y using TraitSimulations.jl (https://github.com/OpenMendel/TraitSimulation.jl/blob/master/src/modelframework.jl#L137)
    vc = @vc Σ ⊗ V
    μ = zeros(n, r)
    μ_null = zeros(n, r)
    LinearAlgebra.mul!(μ_null, Z, intercepts)
    mul!(μ, xla, B)
    BLAS.axpby!(1.0, μ_null, 1.0, μ)
    VCM_model = VCMTrait(Z, intercepts, xla, B, vc, μ)
    Y = Matrix(Transpose(simulate(VCM_model)))

    # simulate using Distributions.jl
    # μ = z * intercepts + xla * B
    # Y = rand(MatrixNormal(μ', Σ, V))

    return xla, Matrix(Z'), B, Σ, Y
end

"""
Computes power and false positive rates
- p: total number of SNPs
- pleiotropic_snps: Indices (or ID) of the true causal SNPs that affect >1 phenotype
- independent_snps: Indices (or ID) of the true causal SNPs that affect exactly 1 phenotype
- signif_snps: Indices (or ID) of SNPs that are significant after testing

returns: pleiotropic SNP's power, independent SNP's power, number of false positives,
and false discovery rate
"""
function power_and_fdr(p::Int, pleiotropic_snps, independent_snps, signif_snps)
    pleiotropic_power = length(signif_snps ∩ pleiotropic_snps) / length(pleiotropic_snps)
    independent_power = length(signif_snps ∩ independent_snps) / length(independent_snps)
    correct_snps = pleiotropic_snps ∪ independent_snps
    FP = length(signif_snps) - length(signif_snps ∩ correct_snps) # number of false positives
    TN = p - length(signif_snps) # number of true negatives
    # FPR = FP / (FP + TN)
    FDR = FP / length(signif_snps)
    return pleiotropic_power, independent_power, FP, FDR
end

function make_grm(chr::Int)
    dir = "/scratch/users/bbchu/ukb/subset/"
    cd(dir)
    isfile(dir * "ukb.10k.chr$chr.bed") || error("PLINK file not present!")
    if !isfile(dir * "ukb.10k.chr$chr.grm")
        println("GRM file not present, generating robust GRM")
        Φ = SnpArrays.grm(SnpArray(dir * "ukb.10k.chr$chr.bed"), method=:Robust)
        writedlm("ukb.10k.chr$chr.grm", Φ)
    end
end

function one_simulation(
    k::Int,
    r::Int;
    seed::Int=2021,
    σg=0.1,
    σe=0.9,
    βoverlap=2,
    path=5:5:50,
    init_beta=false,
    model=:polygenic,
    debias=100,
    fdr=0.1
    )
    isdir("sim$seed") ? (return nothing) : mkdir("sim$seed")
    plinkname = "/scratch/users/bbchu/ukb/subset/ukb.10k.chr10"
    knockoff_file = "/scratch/users/bbchu/ukb/subset/ukb.10k.merged.chr10"
    grid = path[2] - path[1] - 1

    # simulate data
    Random.seed!(seed)
    if model == :polygenic
        xla, Z, B, Σ, Y = simulate_polygenic(plinkname, k, r, seed=seed,
            σg=σg, σe=σe, βoverlap=βoverlap)
    elseif model == :sparse
        xla, Z, B, Σ, Y = simulate_sparse(plinkname, k, r, seed=seed,
            σg=σg, σe=σe, βoverlap=βoverlap)
    else
        error("model misspecified!")
    end
    cd("sim$seed")
    writedlm("simulated_phenotypes.phen", Y', ',')
    correct_snps = unique([x[1] for x in findall(!iszero, B)])
    pleiotropic_snps, independent_snps = Int[], Int[]
    for snp in correct_snps
        count(x -> abs(x) > 0, @view(B[snp, :])) > 1 ?
            push!(pleiotropic_snps, snp) : push!(independent_snps, snp)
    end
    # snpdata = SnpData("../" * plinkname)
    # pleiotropic_snp_rsid = snpdata.snp_info[pleiotropic_snps, :snpid]
    # independent_snp_rsid = snpdata.snp_info[independent_snps, :snpid]

    # run GEMMA (GRM is precomputed already)
    # run(`cp ../../$(plinkname).bed .`)
    # run(`cp ../../$(plinkname).bim .`)
    # run(`cp ../../$(plinkname).cXX.txt .`)
    # make_GEMMA_fam_file(xla, Y, plinkname)
    # pheno_columns = [string(ri) for ri in 1:r]
    # gemma_time = @elapsed begin
    #     run(`../../gemma -bfile $plinkname -k $(plinkname).cXX.txt -notsnp -lmm 1 -n $pheno_columns -o gemma.sim$seed`)
    # end
    # gemma_pleiotropic_power, gemma_independent_power, gemma_FP, gemma_FPR, gemma_λ =
    #     process_gemma_result("output/gemma.sim$seed.assoc.txt", pleiotropic_snp_rsid, independent_snp_rsid)
    # println("GEMMA time = $gemma_time, pleiotropic power = $gemma_pleiotropic_power, independent power = $gemma_independent_power, FP = $gemma_FP, FDR = $gemma_FDR, gemma_λ=$gemma_λ")
    # mv("output/gemma.sim$seed.assoc.txt", "gemma.sim$seed.assoc.txt")
    # mv("output/gemma.sim$seed.log.txt", "gemma.sim$seed.log.txt")

    # run multivariate IHT
    mIHT_time = @elapsed begin
        mses = cross_validate(plinkname, MvNormal, path=path, phenotypes="simulated_phenotypes.phen";
            init_beta=init_beta, debias=debias)
        k_rough_guess = path[argmin(mses)]
        dense_path = (k_rough_guess - grid):(k_rough_guess + grid)
        mses_new = cross_validate(plinkname, MvNormal, path=dense_path, phenotypes="simulated_phenotypes.phen";
            init_beta=init_beta, debias=debias, cv_summaryfile="miht.cviht.summary.txt")
        iht_result = iht(plinkname, dense_path[argmin(mses_new)], MvNormal, phenotypes="simulated_phenotypes.phen";
            init_beta=init_beta, debias=debias, summaryfile="miht.summary.txt", betafile="miht.beta.txt")
    end
    detected_snps = Int[]
    for i in 1:r
        β = iht_result.beta[i, :]
        detected_snps = detected_snps ∪ findall(!iszero, β)
    end
    mIHT_pleiotropic_power, mIHT_independent_power, mIHT_FP, mIHT_FDR =
        power_and_fdr(size(B, 1), pleiotropic_snps, independent_snps, detected_snps)
    println("multivariate IHT time = $mIHT_time, pleiotropic power = $mIHT_pleiotropic_power, independent power = $mIHT_independent_power, FP = $mIHT_FP, FDR = $mIHT_FDR")

    # run knockoff + multivariate IHT
    mIHT_ko_time = @elapsed begin
        mses = cross_validate(knockoff_file, MvNormal, path=path, phenotypes="simulated_phenotypes.phen";
            init_beta=init_beta, debias=debias)
        k_rough_guess = path[argmin(mses)]
        dense_path = (k_rough_guess - grid):(k_rough_guess + grid)
        mses_new = cross_validate(knockoff_file, MvNormal, path=dense_path, phenotypes="simulated_phenotypes.phen";
            init_beta=init_beta, debias=debias, cv_summaryfile="miht.ko.cviht.summary.txt")
        iht_result = iht(knockoff_file, dense_path[argmin(mses_new)], MvNormal, phenotypes="simulated_phenotypes.phen";
            init_beta=init_beta, debias=debias, summaryfile="miht.ko.summary.txt", betafile="miht.knockoff.beta.txt")
    end
    W = coefficient_diff(iht_result.beta', :interleaved)
    τ = threshold(W, fdr)
    detected_snps = findall(W .> τ)
    mIHT_ko_pleiotropic_power, mIHT_ko_independent_power, mIHT_ko_FP, mIHT_ko_FDR =
        power_and_fdr(size(B, 1), pleiotropic_snps, independent_snps, detected_snps)
    println("multivariate IHT with knockoffs time = $mIHT_ko_time, pleiotropic power = $mIHT_ko_pleiotropic_power, independent power = $mIHT_ko_independent_power, FP = $mIHT_ko_FP, FDR = $mIHT_ko_FDR")

    # run multiple univariate IHT
    # detected_snps = Int[]
    # uIHT_time = @elapsed begin
    #     for trait in 1:r
    #         mses = cross_validate(plinkname, Normal, path=path, phenotypes=trait+5;
    #             init_beta=init_beta, debias=debias)
    #         k_rough_guess = path[argmin(mses)]
    #         dense_path = (k_rough_guess == 5) ? (0:5) : ((k_rough_guess - 4):(k_rough_guess + 4))
    #         mses_new = cross_validate(plinkname, Normal, path=dense_path, phenotypes=trait+5;
    #             init_beta=init_beta, debias=debias, cv_summaryfile="uiht.cviht.summary$trait.txt")
    #         best_k = dense_path[argmin(mses_new)]
    #         if best_k > 0
    #             iht_result = iht(plinkname, best_k, Normal, phenotypes=trait+5;
    #                 init_beta=init_beta, debias=debias, summaryfile="uiht.summary$trait.txt")
    #             β = iht_result.beta
    #         else
    #             β = zeros(size(B, 2))
    #         end
    #         # save results
    #         detected_snps = detected_snps ∪ findall(!iszero, β)
    #         writedlm("univariate_iht_beta$trait.txt", β)
    #     end
    # end
    # uIHT_pleiotropic_power, uIHT_independent_power, uIHT_FP, uIHT_FPR = power_and_fdr(size(B, 1), pleiotropic_snps, independent_snps, detected_snps)
    # println("univariate IHT time = $uIHT_time, pleiotropic power = $uIHT_pleiotropic_power, independent power = $uIHT_independent_power, FP = $uIHT_FP, FPR = $uIHT_FPR")

    # run MVPLINK
    # phenofile = plinkname * ".phen"
    # make_MVPLINK_fam_and_phen_file(xla, Y, plinkname)
    # mvplink_time = @elapsed run(`../../plink.multivariate --bfile $plinkname --noweb --mult-pheno $phenofile --mqfam`)
    # mvPLINK_pleitropic_power, mvPLINK_independent_power, mvPLINK_FP, mvPLINK_FPR, mvPLINK_λ =
    #     process_mvPLINK("plink.mqfam.total", pleiotropic_snps, independent_snps)
    # println("mvPLINK time = $mvplink_time, pleiotropic power = $mvPLINK_pleitropic_power, independent power = $mvPLINK_independent_power, FP = $mvPLINK_FP, FPR = $mvPLINK_FPR, mvPLINK_λ=$mvPLINK_λ")

    # clean up
    # rm("plink.hh", force=true)
    # rm("$(plinkname).fam", force=true)
    # rm("$(plinkname).bed", force=true)
    # rm("$(plinkname).bim", force=true)
    # rm("$(plinkname).cXX.txt", force=true)

    # save summary stats
    n, p = size(xla)
    open("summary.txt", "w") do io
        println(io, "Simulation $seed summary")
        println(io, "n = $n, p = $p, k = $k, r = $r, βoverlap=$βoverlap")
        println(io, "debias=$debias, init_beta=$init_beta")
        model == :polygenic ? println(io, "model = $model, σg=$σg, σe=$σe") : println(io, "model = $model")
        println(io, "")
        println(io, "mIHT time = $mIHT_time seconds, pleiotropic power = $mIHT_pleiotropic_power, independent power = $mIHT_independent_power, FP = $mIHT_FP, FDR = $mIHT_FDR, λ = NaN")
        println(io, "mIHT knockoff time = $mIHT_ko_time seconds, pleiotropic power = $mIHT_ko_pleiotropic_power, independent power = $mIHT_ko_independent_power, FP = $mIHT_ko_FP, FDR = $mIHT_ko_FDR, λ = NaN")
        # println(io, "uIHT time = $uIHT_time seconds, pleiotropic power = $uIHT_pleiotropic_power, independent power = $uIHT_independent_power, FP = $uIHT_FP, FDR = $uIHT_FDR, λ = NaN")
        # println(io, "mvPLINK time = $mvplink_time seconds, pleiotropic power = $mvPLINK_pleitropic_power, independent power = $mvPLINK_independent_power, FP = $mvPLINK_FP, FDR = $mvPLINK_FDR, λ = $mvPLINK_λ")
        # println(io, "GEMMA time = $gemma_time seconds, pleiotropic power = $gemma_pleiotropic_power, independent power = $gemma_independent_power, FP = $gemma_FP, FDR = $gemma_FDR, λ = $gemma_λ")
    end
    cd("../")

    return nothing
end

function run_simulation(set::Int, model::Symbol)
    σg = 0.1
    σe = 0.9
    fdr = 0.1
    init_beta = true
    debias = false
    βoverlap = [3, 5, 7]
    k = [10, 20, 100]
    r = [2, 3, 3]
    path = set ≥ 3 ? (10:10:200) : (5:5:50)
    println("Simulation model = $model, set $set has k = $(k[set]), r = $(r[set]), βoverlap = $(βoverlap[set])")
    cur_dir = pwd() * "/set$set"
    isdir(cur_dir) || mkdir(cur_dir)
    k_cur = k[set]
    r_cur = r[set]
    βoverlap_cur = βoverlap[set]
    for seed in 1:100
        try
            cd(cur_dir)
            one_simulation(k_cur, r_cur, seed = seed, path = path, βoverlap=βoverlap_cur,
                σg=σg, σe=σe, init_beta=init_beta, model=model, debias=debias)
        catch e
            bt = catch_backtrace()
            msg = sprint(showerror, e, bt)
            println("set $set sim $seed threw an error!")
            println(msg)
            continue
        end
    end
end

# set = parse(Int, ARGS[1])
# model = :polygenic
# run_simulation(set, model)

# set = 1
# model = :polygenic
# σg = 0.1
# σe = 0.9
# fdr=0.1
# init_beta = true
# debias = false
# path = 5:5:50
# βoverlap = 3
# k = 10
# r = 2
# cur_dir = pwd() * "/set$set"
# isdir(cur_dir) || mkdir(cur_dir)
# seed = 1
# cd(cur_dir)
{"hexsha": "387cae14f0e3d0c7dcab727332e4fcb8a961da98", "size": 14480, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "scripts/simulate.jl", "max_stars_repo_name": "biona001/Knockoffs.jl", "max_stars_repo_head_hexsha": "8c2d1de79e85704dd5d670e11341bbbe784986ad", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/simulate.jl", "max_issues_repo_name": "biona001/Knockoffs.jl", "max_issues_repo_head_hexsha": "8c2d1de79e85704dd5d670e11341bbbe784986ad", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-09-08T04:55:29.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-11T01:26:37.000Z", "max_forks_repo_path": "scripts/simulate.jl", "max_forks_repo_name": "biona001/Knockoffs.jl", "max_forks_repo_head_hexsha": "8c2d1de79e85704dd5d670e11341bbbe784986ad", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.2157434402, "max_line_length": 210, "alphanum_fraction": 0.6601519337, "num_tokens": 4741}
      subroutine perfc
!
!     to obtain the inviscid contour of the nozzle
!
      use kinddefine
      use gg, only:gam,g1,g2,g4,g5,g6,g7,g8,ga,qt
      use cline, only:axis,taxi,frip,zonk,seo,cse
      use coord, only:s,fs,waltan,sd,wmn,ttr,dmdx,spr,dpx,secd,xbin,xcin
     &,gma,gmb,gmc,gmd
      use work, only:wall,wax,way,wan,a,b,fclast
      use prop, only:xbl,conv
      use param, only:etad,rc,amach,bmach,cmach,emach,gmach,sf,wwo,wwop,
     &qm,cbet,xe,eta,epsi,bpsi,xo,yo,rrc,sdo,xb,xc,ah,se,tye,xa
      use troat
      use contr, only:itle,ie,lr,it,jb,jq,kat,kbl,king,ko,nocon,mc,ip,iq
     &,ise,jc,m,mp,mq,n,np,nf,nut
!
      implicit none
!
      interface
      function fmv(psi)
      use kinddefine
      implicit none
      real(kind=K8) :: fmv
      real(kind=K8), intent(in) :: psi
      end function fmv
!
      subroutine ofeld(a,b,c,nocon)
      use kinddefine
      implicit none
      integer(kind=K4),intent(out) :: nocon
      real(kind=K8),dimension(5),intent(in) :: a,b
      real(kind=K8),dimension(5),intent(out) :: c
      end subroutine ofeld
!
      subroutine scond(a,b,c,king)
      use kinddefine
      implicit none
      integer(kind=K4),intent(in) :: king
      real(kind=K8),dimension(150),intent(in) :: a,b
      real(kind=K8),dimension(150),intent(out) :: c
      end subroutine scond
!
      subroutine twixt(s,gma,gmb,gmc,gmd,xbl,kat,kbl)
      use kinddefine
      implicit none
      integer(kind=K4),intent(in) :: kat
      integer(kind=K4),intent(out) :: kbl
      real(kind=K8),intent(out) :: gma,gmb,gmc,gmd
      real(kind=K8),intent(in) :: xbl
      real(kind=K8),dimension(200),intent(in) :: s
      end subroutine twixt
      end interface
!
      integer(kind=K4) :: i,ib,ichara,icy,inc,iprnt,j,jj,k,kan,kc,kit,kt
      integer(kind=K4) :: kut,l,last,lastp,lin,line,ll,lp,lq,lt,nag,nk
      integer(kind=K4) :: nl,nn
      real(kind=K8),dimension(6,150) :: chara
      real(kind=K8),dimension(150) :: su
      real(kind=K8),dimension(200) :: wdx,wtan,scdf
      real(kind=K8),dimension(100) :: yi
      real(kind=K8) :: add,an,as,bdd,bs,bx,ci,cpsi,cs,cy,cyp
      real(kind=K8) :: del,dhp,dm,dmxx,dn,dpxx,dsx,dydx,em,f,fn,fsx,half
      real(kind=K8) :: hs,la,one,ps,psi,r,rl,s1,s2,s3,sa,sb,sc,sdx
      real(kind=K8) :: six,sne,sprx,ss,st,sum1
      real(kind=K8) :: sumax,t,t1,t2,t3,thr,tne,tt,two,wmnx,x,xbet,xinch
      real(kind=K8) :: xm,xmu,xmur,xs,xs2,xs3,y,ye,yinch,ypx,ys,zro
      character(len=4,kind=K3) :: ifr,iwl,lst,ibl
!     COMMON /GG/ GAM,GM,G1,G2,G3,G4,G5,G6,G7,G8,G9,GA,RGA,QT
!     COMMON /CLINE/ AXIS(5,150),TAXI(5,150),WIP,X1,FRIP,ZONK,SEO,CSE
!     COMMON /COORD/ S(200),FS(200),WALTAN(200),SD(200),WMN(200),TTR(200
!    1),DMDX(200),SPR(200),DPX(200),SECD(200),XBIN,XCIN,GMA,GMB,GMC,GMD
!     COMMON /WORK/ WALL(5,200),WAX(200),WAY(200),WAN(200),A(5,150),B(5,
!    1150),FCLAST(5,150)
!     COMMON /PROP/ AR,ZO,RO,VISC,VISM,SFOA,XBL,CONV
!     COMMON /PARAM/ ETAD,RC,AMACH,BMACH,CMACH,EMACH,GMACH,FRC,SF,WWO,WW
!    1OP,QM,WE,CBET,XE,ETA,EPSI,BPSI,XO,YO,RRC,SDO,XB,XC,AH,PP,SE,TYE,XA
!     COMMON /TROAT/ FC(6,51)
!     COMMON /CONTR/ ITLE(3),IE,LR,IT,JB,JQ,JX,KAT,KBL,KING,KO,LV,NOCON,
!    1IN,MC,MCP,IP,IQ,ISE,JC,M,MP,MQ,N,NP,NF,NUT
!     DIMENSION CHARA(6,150), SU(150), WDX(200), WTAN(200), SCDF(200), YI
!    1(100)
      data zro/0.0d+0/,one/1.d+0/,two/2.d+0/,six/6.d+0/,half/5.d-1/
      data ifr/'FIRS'/,iwl/'WALL'/,lst/'LAST'/,ibl/' '/,thr/3.d+0/
!
!     call orez (a,4*750+250)
      a(:,:)=0.0d0
      b(:,:)=0.0d0
      fclast(:,:)=0.0d0
      wall(:,:)=0.0d0
!
      chara(:,:)=0.0d0
      su(:)=0.0d0
      wdx(:)=0.0d0
      wtan(:)=0.0d0
      scdf(:)=0.0d0
      yi(:)=0.0d0
!
      cpsi=g2*datan(g4*cbet)-datan(cbet)
      if (jq.gt.0) goto 6
      if (lr.eq.0) goto 4
!
!     throat characteristic values
      sumax=(se/seo)**(ie+1)
      if (qm.eq.one) sumax=one
      lq=zonk*(lr-1)+1
      nl=n+lq-1
      do j=1,lq
      if (qm.ne.one) goto 1
      fc(1,j)=fc(1,j)*se+xo
      fc(2,j)=fc(2,j)*se
    1 fclast(1,j)=fc(1,j)
      fclast(2,j)=fc(2,j)
      fclast(3,j)=fc(3,j)
      fclast(4,j)=fc(4,j)
      fclast(5,j)=fc(5,j)
      if (mq.lt.0) goto 3
      if (j.gt.1) goto 2
      write (2,93) itle
      write (2,99) ibl
    2 xmu=conv*dasin(one/fclast(3,j))
      psi=conv*fclast(4,j)
      an=conv*fclast(5,j)
      xinch=sf*fclast(1,j)+frip
      yinch=sf*fclast(2,j)
      write (2,103) j,(fclast(k,j),k=1,3),xmu,psi,an,xinch,yinch
      if (mod(j,10).eq.0) write (2,98)
    3 su(j)=fc(6,j)/sumax
      enddo
    4 if (ise.eq.0) goto 8
!
!     initial characteristic values if non-radial flow
      do k=1,m
      a(2,k)=(k-1)*tye/(m-1)
      a(1,k)=a(2,k)*cbet+xe
      a(3,k)=cmach
      a(4,k)=cpsi
      a(5,k)=zro
      enddo
      goto 10
!
!     final characteristic values if radial flow
    6 nl=n+np-1
      fn=np-1
      do jj=1,np
      if (ie.eq.0) f=(jj-1)/fn
      if (ie.eq.1) f=two*dsin(half*eta*(jj-1)/fn)/se
      fclast(2,jj)=f*tye
      fclast(1,jj)=fclast(2,jj)*cbet+xc
      fclast(3,jj)=cmach
      fclast(4,jj)=cpsi
      fclast(5,jj)=zro
      su(jj)=f**(ie+1)
      enddo
!
!     initial characteristic values if radial flow
    8 em=eta/(m-1)
      do k=1,m
      t=(k-1)*em
      if (ip.eq.0) xm=fmv(epsi+t/qt)
      if (ip.ne.0) xm=fmv(bpsi-t/qt)
      r=((g6+g5*xm**2)**ga/xm)**qt
      xbet=dsqrt(xm**2-one)
      a(1,k)=r*dcos(t)
      a(2,k)=r*dsin(t)
      a(3,k)=xm
      a(4,k)=g2*datan(g4*xbet)-datan(xbet)
      a(5,k)=t
      enddo
      if (ie.eq.1 .and. ip.eq.0) a(5,1)=taxi(5,1)
      if (ie.eq.1 .and. ip.ne.0) a(5,1)=axis(5,1)
   10 do j=1,5
      wall(j,1)=a(j,m)
      enddo
      line=1
      if (mq.lt.0) goto 14
      if (ise.eq.1) goto 12
      if (jq.eq.0) write (2,91) itle
      if (jq.eq.1) write (2,94) itle
      goto 13
   12 write (2,102) itle
   13 write (2,106) line
   14 su(1)=zro
      if (ie.eq.0) bx=one/se
      nn=1
      do k=1,m
      do j=1,5
      b(j,k)=a(j,k)
      enddo
      enddo
      last=m-1
      goto 20
   16 last=m
      line=2
      if (ip.ne.0) goto 38
   17 do j=1,5
      b(j,1)=taxi(j,line)
      enddo
      do j=1,last
      k=j
      call ofeld(a(1,k),b(1,k),b(1,k+1),nocon)
      if (nocon.ne.0) goto 83
      enddo
   20 lastp=last+1
      if (line.lt.lastp) lp=line
      nk=1+lp/52
      la=conv*dasin(one/b(3,nn))
      iprnt=0
      ichara=0
      if (jc.eq.0) go to 21
      kc=iabs(jc)
      if (jc.gt.0 .and. jq.ne.0) go to 21
      if (jc.lt.0 .and. jq.eq.0) go to 21
      ichara=1
      if (kc.gt.100 .and. kc.lt.101+line) iprnt=1
      if (nn.eq.1 .and. mod(line-1,kc).eq.0) iprnt=1
      if (nn.gt.1 .and. mod(nn-1,kc).eq.0) iprnt=1
   21 do j=nn,lastp
      if (ie.eq.1) bx=two*b(2,j)/se**2
      xm=b(3,j)
      xmur=dasin(one/xm)
      xmu=conv*xmur
      psi=b(4,j)*conv
      an=b(5,j)*conv
      if (b(2,j).eq.zro) an=zro
      if (ip.eq.0.or.la.gt.45) goto 22
      s(j)=b(1,nn)-b(1,j) ! mass integration with respect to x
      dsx=one/dcos(b(5,j)-xmur)
      if (b(2,j).eq.zro) dsx=xm/dsqrt(xm**2-one)
      goto 23
   22 s(j)=b(2,j)-b(2,nn) ! mass integration with respect to y
      if (ip.eq.0) dsx=one/dsin(xmur+b(5,j))
      if (ip.ne.0) dsx=one/dsin(xmur-b(5,j))
      if (b(2,j).eq.zro) dsx=xm
   23 if (ichara.eq.0 .or. j.ne.line) goto 24
      chara(1,j)=b(1,j)
      chara(2,j)=b(2,j)
      chara(3,j)=xm
      chara(4,j)=xmu
      chara(5,j)=psi
      chara(6,j)=an
   24 fs(j)=dsx*bx/(g6+g5*xm**2)**ga
      if (mq.ge.0 .and. line.eq.1) goto 25
      if (iprnt.eq.0) goto 27
      if (j.gt.nn) goto 25
      if (ip.eq.0) write (2,104) itle
      if (ip.ne.0) write (2,105) itle
      write (2,106) line
   25 if ((nk.gt.1) .and. (mod(j,nk).eq.0)) goto 26
      xinch=sf*b(1,j)+frip
      yinch=sf*b(2,j)
      write (2,103) j,b(1,j),b(2,j),xm,xmu,psi,an,xinch,yinch
   26 if (mod(j,10*nk).eq.0) write (2,98)
   27 continue
      enddo
!
!     integration and interpolation for mass flow
      sa=zro
      sb=zro
      sc=zro
      sum1=su(nn)
      kan=(lastp-nn)/2
      do j=1,kan
      k=nn+2*j
      kt=k
      as=s(k-1)-s(k-2)
      bs=s(k)-s(k-1)
      cs=as+bs
      s1=(two-bs/as)*cs/six
      s3=(two-as/bs)*cs/six
      s2=cs-s1-s3
      add=s1*fs(k-2)+s2*fs(k-1)+s3*fs(k)
      sum1=add+sum1
      if (line.eq.1) goto 28
      del=one-sum1
      if (del) 30,29,28
   28 continue
      enddo
      if (line.eq.1) write (2,96) sum1
      if (line.eq.1) goto 16
      bs=s(k+1)-s(k)
      kt=k+1
      dn=two*del/bs
      sc=dn/(fs(k)+dsqrt(fs(k)**2+(fs(kt)-fs(k))*dn))
      sb=one-sc
      goto 34
   29 sc=one
      goto 34
   30 s2=bs*(two+cs/as)/six
      s3=bs*(two+as/cs)/six
      s1=bs-s2-s3
      bdd=s1*fs(k-2)+s2*fs(k-1)+s3*fs(k)
      if (bdd+del) 31,32,33
   31 dn=two*(add+del)/as
      sb=dn/(fs(k-2)+dsqrt(fs(k-2)**2+(fs(k-1)-fs(k-2))*dn))
      sa=one-sb
      go to 34
   32 sb=one
      go to 34
   33 dn=two*del/bs
      sc=one+dn/(fs(k)+dsqrt(fs(k)**2+(fs(k)-fs(k-1))*dn))
      sb=one-sc
   34 do j=1,5
      wall(j,line)=b(j,kt-2)*sa+b(j,kt-1)*sb+b(j,kt)*sc
      enddo
      if (iprnt.eq.1) write (2,107) (wall(j,line),j=1,3)
      last=kt
      if (n-line) 42,41,36
   36 line=line+1
      do k=1,5
      do l=1,150
      a(k,l)=b(k,l)
      enddo
      enddo
      if (ip.eq.0) go to 17
   38 do j=1,5
      b(j,1)=axis(j,line)
      enddo
      do j=1,last
      k=j
      call ofeld (b(1,k),a(1,k),b(1,k+1),nocon)
      if (nocon.ne.0) goto 83
      enddo
      goto 20
   41 if (ip.ne.0) goto 42
      if (lr.eq.0.or.it.ne.0) goto 49
   42 if (line.eq.nl-1) goto 48
      nn=nn+1
      line=line+1
      do k=1,5
      do l=1,150
      a(k,l)=b(k,l)
      enddo
      enddo
      do k=1,5
      do l=1,150
      b(k,l)=fclast(k,l)
      enddo
      enddo
      if ((lr.ne.0).and.(jq.eq.0)) goto 46
      do j=nn,last
      k=j
      call ofeld(b(1,k),a(1,k),b(1,k+1),nocon)
      if (nocon.ne.0) goto 83
      enddo
      goto 20
   46 do j=nn,last
      k=j
      call ofeld(a(1,k),b(1,k),b(1,k+1),nocon)
      if (nocon.ne.0) go to 83
      enddo
      goto 20
   48 if (ip.ne.0) goto 64
!
!     integration of slopes
   49 ib=1
      if (iabs(jb).gt.1) ib=2
      lt=0
      if (it.ne.0) lt=ib
      nut=(line-1)/ib+2-lt
      wall(1,line+1)=xo
      wall(5,line+1)=zro
      yi(nut)=wall(2,1)
      y=yi(nut)
      lin=2*((line-lt)/2)
      do j=2,lin,2
      i=nut-j
      ss=wall(1,j)-wall(1,j-1)
      tt=wall(1,j+1)-wall(1,j)
      st=ss+tt
      s1=ss*(two+tt/st)/six
      s2=ss*(two+st/tt)/six
      s3=ss-s1-s2
      t3=tt*(two+ss/st)/six
      t2=tt*(two+st/ss)/six
      t1=tt-t2-t3
      y=y+s1*dtan(wall(5,j-1))+s2*dtan(wall(5,j))+s3*dtan(wall(5,j+1))
      if (ib.eq.1) yi(i+1)=y
      y=y+t1*dtan(wall(5,j-1))+t2*dtan(wall(5,j))+t3*dtan(wall(5,j+1))
      if (ib.eq.1) yi(i)=y
      if (ib.eq.2) yi(i+j/2)=y
      enddo
      if (lr.ne.0.and.line.eq.lin) goto 51
      x=wall(1,line-lt)-xo
      yi(1)=yi(2)-x*(dtan(wall(5,line-lt))+half*x*sdo)/thr
   51 do l=2,nut
      jj=1+ib*(nut-l)
      wax(l)=wall(1,jj)
      way(l)=wall(2,jj)
      wmn(l)=wall(3,jj)
      wan(l)=conv*wall(5,jj)
      waltan(l)=dtan(wall(5,jj))
      enddo
      wax(1)=xo
      way(1)=yo
      wan(1)=zro
      wmn(1)=wwo/dsqrt(g7-g8*wwo**2)
      waltan(1)=zro
      if (nf.ge.0) goto 54
!
!     smooth upstream contour if desired
      call neo
      do j=1,nut
      waltan(j)=dtan(wan(j)/conv)
      enddo
   54 call scond (wax,waltan,secd,nut)
      secd(1)=sdo
      secd(nut)=zro
      ko=nut+mp
      if (mp.eq.0) goto 56
!
!     radial flow section coordinates
      sne=dsin(eta)
      tne=dtan(eta)
      dm=(amach-gmach)/mp
      do l=1,mp
      ll=nut+l
      wmn(ll)=gmach+l*dm
      rl=((g5*wmn(ll)**2+g6)**ga/wmn(ll))**qt
      wax(ll)=rl*cse
      way(ll)=rl*sne
      wan(ll)=etad
      waltan(ll)=tne
      secd(ll)=zro
      enddo
   56 if (mq.lt.0) goto 60
      if (jc.le.0) goto 58
      write (2,105) itle
      write (2,99) lst
      do k=1,lp,nk
      i=(k-1)/nk+1
      xinch=sf*chara(1,k)+frip
      yinch=sf*chara(2,k)
      write (2,103) k,(chara(j,k),j=1,6),xinch,yinch
      if (mod(i,10).eq.0) write (2,98)
      enddo
   58 if (ise.eq.0) write (2,91) itle
      if (ise.eq.1) write (2,102) itle
      write (2,84) rc,etad,amach,bmach,cmach,emach,mc,ah
      if (nocon.ne.0) goto 59
      write (2,100) iwl
      write (2,85) (k,wax(k),way(k),wmn(k),wan(k),waltan(k),secd(k),k=1,
     &nut)
      if ((lr.eq.0) .and. (n.lt.42)) goto 59
      if ((lr.ne.0) .and. (n+lr.lt.27)) goto 59
      nocon=1
      goto 58
   59 write (2,87)
      nocon=0
!
!     comparison of contour with parabola and hyperbola
   60 do j=1,nut
      xs=(wax(j)-xo)/yo
      xs2=xs**2
      xs3=xs**3
      ys=way(j)/yo
      ye=yi(j)/yo
      ps=one+half*xs2*rrc
      dhp=one+xs2*rrc
      hs=dsqrt(dhp)
      if (j.gt.1) goto 61
      if (mq.lt.0) goto 62
      write (2,88) j,xs,ys,ye,ps,hs
      goto 62
   61 ypx=waltan(j)/xs
      cy=(ps-ys)/xs3
      ci=(ps-ye)/xs3
      if (j.eq.2) icy=int(1.d+6*(dabs(cy)-dabs(ci)),K4)
      if (mq.lt.0) go to 63
      cyp=(rrc-ypx)/xs/thr
      write (2,88) j,xs,ys,ye,ps,hs,cy,ci,cyp
   62 if (mod(j,10).eq.0) write (2,98)
      enddo
   63 write (2,97) icy
      if (iq.gt.0) goto 70
      jq=1
      return
   64 line=nl
      do j=1,5
      wall(j,nl)=fclast(j,np)
      enddo
!
!     smooth downstream contour if desired
      if (nf.lt.0) call neo
      do j=1,nl
      wdx(j)=wall(1,j)
      wtan(j)=dtan(wall(5,j))
      enddo
      call scond (wdx,wtan,scdf,nl)
      scdf(1)=zro
      scdf(nl)=zro
      if (jc.ge.0) goto 68
      write (2,104) itle
      write (2,99) ifr
      do k=1,lp,nk
      i=(k-1)/nk+1
      xinch=sf*chara(1,k)+frip
      yinch=sf*chara(2,k)
      write (2,103) k,(chara(j,k),j=1,6),xinch,yinch
      if (mod(i,10).eq.0) write (2,98)
      enddo
   68 if (iq.lt.0) ko=1
      nag=ko-1
      king=line+nag
      do l=1,line
      wax(nag+l)=wall(1,l)
      way(nag+l)=wall(2,l)
      wmn(nag+l)=wall(3,l)
      wan(nag+l)=conv*wall(5,l)
      waltan(nag+l)=wtan(l)
      secd(nag+l)=scdf(l)
      enddo
      if (mq.lt.0) goto 71
      write (2,94) itle
      write (2,84) rc,etad,amach,bmach,cmach,emach,mc,ah
      write (2,100) iwl
      write (2,85) (k,wax(k),way(k),wmn(k),wan(k),waltan(k),secd(k),k=ko
     &,king)
      goto 71
   70 king=ko
!
!     application of scale factor to non-dimensional coordinates
   71 do k=1,king
      s(k)=sf*wax(k)+frip
      fs(k)=sf*way(k)
      ttr(k)=one+g8*wmn(k)**2
      spr(k)=one/ttr(k)**(one+g1)
      sd(k)=secd(k)/sf
      enddo
      if (ise.eq.1) xbin=zro
      if (ise.eq.0) xbin=xb*sf+frip
      xcin=xc*sf+frip
      call scond (s,wmn,dmdx,king)
      dmdx(1)=g7*wwop*wmn(1)**3/wwo**3/sf
      if (mp.eq.0 .or. iq.lt.0) go to 74
      do k=nut,ko
      dmdx(k)=wmn(k)*ttr(k)/(wmn(k)**2-one)/qt/sf/wax(k)
      enddo
      goto 75
   74 if (ise.eq.0) dmdx(ko)=amach*ttr(ko)/(amach**2-one)/qt/sf/xa
   75 if (iq.lt.1 .or. ise.eq.1) dmdx(king)=zro
      do k=1,king
      dpx(k)=-gam*wmn(k)*dmdx(k)*spr(k)/ttr(k)
      enddo
      jq=0
      kat=king
      if (iabs(mq).lt.2) goto 78
!
!     extension of parallel-flow contour
      kit=king+1
      kat=king+iabs(mq)
      kut=s(king)+half
      inc=s(king)-s(king-1)
      if (inc.lt.1) inc=1
      do k=kit,kat
      s(k)=kut+(k-king)*inc
      fs(k)=fs(king)
      wmn(k)=wmn(king)
      ttr(k)=ttr(king)
      spr(k)=spr(king)
      wan(k)=zro
      waltan(k)=zro
      dmdx(k)=zro
      dpx(k)=zro
      sd(k)=zro
      enddo
   78 if (xbl.eq.zro) goto 79
      if (s(king-1).lt.xbl) goto 79
!
!     interpolate for values at specified station
      call twixt (s,gma,gmb,gmc,gmd,xbl,king,kbl)
      goto 80
   79 kbl=kat+4
   80 if (jb.gt.0) return
      if (ise.eq.0) goto 81
      write (2,102) itle
      write (2,92) rc,se,xcin
      goto 82
   81 if (iq.gt.0) write (2,91) itle
      if (iq.le.0) write (2,95) itle,xbin,xcin,sf
      write (2,84) rc,etad,amach,bmach,cmach,emach,mc,ah
   82 write (2,89)
      write (2,90) (k,s(k),fs(k),waltan(k),sd(k),wmn(k),dmdx(k),spr(k),d
     &px(k),k=1,king)
      if (kbl.gt.kat) return
      j=kbl-1
      fsx=gma*fs(j-2)+gmb*fs(j-1)+gmc*fs(j)+gmd*fs(j+1)
      wmnx=gma*wmn(j-2)+gmb*wmn(j-1)+gmc*wmn(j)+gmd*wmn(j+1)
      dmxx=gma*dmdx(j-2)+gmb*dmdx(j-1)+gmc*dmdx(j)+gmd*dmdx(j+1)
      dydx=gma*waltan(j-2)+gmb*waltan(j-1)+gmc*waltan(j)+gmd*waltan(j+1)
      sdx=gma*sd(j-2)+gmb*sd(j-1)+gmc*sd(j)+gmd*sd(j+1)
      sprx=gma*spr(j-2)+gmb*spr(j-1)+gmc*spr(j)+gmd*spr(j+1)
      dpxx=gma*dpx(j-2)+gmb*dpx(j-1)+gmc*dpx(j)+gmd*dpx(j+1)
      write (2,101) xbl,fsx,dydx,sdx,wmnx,dmxx,sprx,dpxx
      return
   83 write (2,86) ip,nn,line,j
      return
!
   84 format (1x,' RC=',f11.6,3x,'ETAD=',f8.4,' DEG',3x,'AMACH=',f10.7,3
     &x,'BMACH=',f10.7,3x,'CMACH=',f10.7,3x,'EMACH=',f10.7,3x,A4,'H=',f1
     &1.7/)
   85 format (10(8x,i3,2x,1p6e15.7/))
   86 format ('0','OFELD,IP=',i3,', NN=',i3,', LINE=',i3,', POINT=',i3)
   87 format (1x,9x,'POINT X/YO',8x,'Y/YO',7x,'INT.Y/YO',7x,'PAR/YO',7x,
     &'HYP/YO C(Y)',11x,'C(YI)',10x,'C(YP)'/)
   88 format (1x,9x,i3,5f13.7,1p3e15.6)
   89 format (1x,9x,'POINT',7x,'X(IN)',9x,'Y(IN)',9x,'DY/DX',8x,'D2Y/DX2
     &',7x,'MACH NO.',7x,'DM/DX',9x,'PE/PO',11x,'DPR/DX'/)
   90 format (10(10x,i3,2x,0p6f14.7,1p2e16.5/))
   91 format (1x,3a4,' UPSTREAM CONTOUR'/)
   92 format (1x,' RC=',f11.7,', STREAMLINE RATIO=',f11.8,', TEST
     &CONE BEGINS AT',f12.7,' IN.'/)
   93 format (1x,3a4,' THROAT CHARACTERISTIC')
   94 format (1x,3a4,' DOWNSTREAM CONTOUR'/)
   95 format ('1',3a4,' INVISCID NOZZLE CONTOUR, RADIAL FLOW ENDS AT',f1
     &1.6,' IN., TEST CONE BEGINS AT',f11.6,' IN., SCALE FACTOR=',f9.4/)
   96 format (1x,8x,'MASS =',f13.10/)
   97 format (1x,9x,'ICY =',i13)
   98 format (1x)
   99 format (1x,8x,a4/8x,'POINT',8x,'X',14x,'Y',10x,'MACH NO. MACH
     & ANG.(D) PSI (D) FLOW ANG.(D) X(IN)',9x,'Y(IN)'/)
  100 format (1x,8x,a4/8x,'POINT',8x,'X',14x,'Y',10x,'MACH NO. FLOW
     & ANG.(D) WALTAN',9x,'SECDIF'/)
  101 format ('0',14x,6f14.7,1p2e16.5)
  102 format ('1',3a4,' INVISCID CONTOUR'/)
  103 format (1x,i10,2x,1p6e15.7,0p2f14.7)
  104 format (1x,3a4,' INTERMEDIATE LEFT CHARACTERISTIC'/)
  105 format (1x,3a4,' INTERMEDIATE RIGHT CHARACTERISTIC'/)
  106 format (1x,' CHARACT',i4/8x,'POINT',8x,'X',14x,'Y',10x,'MACH NO.
     & MACH ANG.(D) PSI (D) FLOW ANG.(D) X(IN)',9X,'Y(
     &IN)'/)
  107 format (1x,' CONTOUR ',1p3e15.7/)
      end subroutine perfc
{"hexsha": "c0e07885ad75bc2ccd20ad74ad27d5d86b82ee3a", "size": 19424, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "src/perfc.f", "max_stars_repo_name": "aldorona/contur", "max_stars_repo_head_hexsha": "d4197b55e28b20f905f9418f0473b2c39fadb0fd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-03-03T10:30:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-25T22:20:59.000Z", "max_issues_repo_path": "src/perfc.f", "max_issues_repo_name": "aldorona/contur", "max_issues_repo_head_hexsha": "d4197b55e28b20f905f9418f0473b2c39fadb0fd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-08-01T20:33:57.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-05T13:46:04.000Z", "max_forks_repo_path": "src/perfc.f", "max_forks_repo_name": "aldorona/contur", "max_forks_repo_head_hexsha": "d4197b55e28b20f905f9418f0473b2c39fadb0fd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-10-25T16:14:26.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-25T12:14:43.000Z", "avg_line_length": 30.588976378, "max_line_length": 75, "alphanum_fraction": 0.5066927512, "num_tokens": 8298}
variables P Q R S T U : Prop

/--------------------------------------------------------------------------
``exact``

If ``P`` is the current target and ``hp`` is a proof of ``P``,
then ``exact hp,`` closes the goal.

English translation:
This is exactly what we wanted to prove.
--------------------------------------------------------------------------/

theorem tautology (hp : P) : P :=
begin
  exact hp,
end

/--------------------------------------------------------------------------
``intro``

If the current target is an implication ``P → Q``, then ``intro hp,``
will produce a hypothesis ``hp : P`` and changes the target to ``Q``.

English translation:
To prove ``P → Q``, let us assume ``P`` and try to prove ``Q``.
--------------------------------------------------------------------------/

theorem tautology' : P → P :=
begin
  intro hp,
  exact hp,
end

/--------------------------------------------------------------------------
``apply``

If the current target is ``Q`` and ``h`` is a proof of ``P → Q``,
then ``apply h,`` changes target to ``P``.

English translation:
(Backward reasoning.) To prove ``Q``, since we know ``P → Q``,
it suffices to prove ``P``.
--------------------------------------------------------------------------/

theorem syllogism (hp : P) (h : P → Q) : Q :=
begin
  apply h,
  exact hp,
end

/--------------------------------------------------------------------------
Delete the ``sorry,`` below and replace them with valid proofs.
Don't forget the ``,`` at the end of each line.
--------------------------------------------------------------------------/

theorem ex1 (P Q R : Prop) (f : P → Q) (g : Q → R) : P → R :=
begin
  sorry,
end

-- In Lean, implications are "right associative",
-- which means that ``P → Q → R`` is the same as ``P → (Q → R).``
theorem ex2 (P Q : Prop): P → (Q → P) :=
begin
  sorry,
end

theorem ex3 (P Q : Prop) : ((Q → P) → (Q → P)) :=
begin
  sorry,
end

theorem ex7 (hpq : P → Q) (hqr : Q → R) (hqt : Q → T) (hst : S → T) (htu : T → U) : P → U :=
begin
  sorry,
end

-- Implication is not associative --
-- what happens if you try to prove the following?
theorem ex2' (P Q : Prop): (P → Q) → P :=
begin
  sorry,
end
{"author": "apurvanakade", "repo": "uwo2021-CUMC", "sha": "0be9402011feda35e510725449686c0af3c3761e", "save_path": "github-repos/lean/apurvanakade-uwo2021-CUMC", "path": "github-repos/lean/apurvanakade-uwo2021-CUMC/uwo2021-CUMC-0be9402011feda35e510725449686c0af3c3761e/src/implies.lean"}
module ModuleDoesntExport where

module A where
  postulate C : Set

open A using (B; module P) renaming (D to C)
{"hexsha": "0cc3aa5118fa01f6e1cecc78bd4ad00102d9af39", "size": 115, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "test/Succeed/ModuleDoesntExport.agda", "max_stars_repo_name": "cruhland/agda", "max_stars_repo_head_hexsha": "7f58030124fa99dfbf8db376659416f3ad8384de", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1989, "max_stars_repo_stars_event_min_datetime": "2015-01-09T23:51:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T18:20:48.000Z", "max_issues_repo_path": "test/Succeed/ModuleDoesntExport.agda", "max_issues_repo_name": "cruhland/agda", "max_issues_repo_head_hexsha": "7f58030124fa99dfbf8db376659416f3ad8384de", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4066, "max_issues_repo_issues_event_min_datetime": "2015-01-10T11:24:51.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T21:14:49.000Z", "max_forks_repo_path": "test/Succeed/ModuleDoesntExport.agda", "max_forks_repo_name": "cruhland/agda", "max_forks_repo_head_hexsha": "7f58030124fa99dfbf8db376659416f3ad8384de", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 371, "max_forks_repo_forks_event_min_datetime": "2015-01-03T14:04:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-30T19:00:30.000Z", "avg_line_length": 14.375, "max_line_length": 44, "alphanum_fraction": 0.7304347826, "num_tokens": 33}
import pytest
import os
import skimage.io
import glob
import numpy as np
from pathlib import Path
import zdo2021.main
import skimage.measure
from skimage.draw import polygon

# cd ZDO2021
# python -m pytest


def test_run_random():
    vdd = zdo2021.main.VarroaDetector()

    # Set the environment variable 'VARROA_DATA_PATH' in your operating system
    # to the path of the dataset. If it is not set, the test dataset in
    # tests/test_dataset is used.
    dataset_path = os.getenv('VARROA_DATA_PATH_', default=Path(__file__).parent / 'test_dataset/')
    # print(f'dataset_path = {dataset_path}')
    files = glob.glob(f'{dataset_path}/images/*.jpg')
    cislo_obrazku = np.random.randint(0, len(files))
    filename = files[cislo_obrazku]
    im = skimage.io.imread(filename)
    imgs = np.expand_dims(im, axis=0)
    # print(f"imgs.shape={imgs.shape}")
    prediction = vdd.predict(imgs)

    assert prediction.shape[0] == imgs.shape[0]

    # This will run everywhere except on GitHub (CI)
    if not os.getenv('CI'):
        import matplotlib.pyplot as plt
        plt.imshow(prediction[0])
        plt.show()

    import json
    with open(Path(dataset_path)/"annotations/instances_default.json", 'r') as file:
        ann = file.read()
    gt_ann = json.loads(ann)

    head, name = os.path.split(filename)
    ground_true_masks = prepare_ground_true_masks(gt_ann, name)
    ground_true_masks = merge_masks(ground_true_masks)
    assert f1score(ground_true_masks, prediction[0]) > 0.55


def f1score(gt_ann, prediction):
    if prediction.shape[0] != gt_ann.shape[0]:
        gt_ann = skimage.transform.rotate(gt_ann, -90, resize=True)
    sco = f1class(gt_ann, prediction)
    scb = f1class(1 - gt_ann, 1 - prediction)
    sc = (sco + scb) / 2
    return sc


def f1class(gt_ann, prediction):
    """
    Parameters
    ----------
    gt_ann : 2d array
    prediction : 2d array
    """
    tp = 0
    tn = 0
    fp = 0
    fn = 0
    obj = 1
    bg = 0

    for i in range(gt_ann.shape[0]):
        for j in range(gt_ann.shape[1]):
            t = gt_ann[i, j]
            p = prediction[i, j]
            if (t == p and t == obj):
                tp = tp + 1
            elif (t == p and t == bg):
                tn = tn + 1
            elif (t == obj and p == bg):
                fn = fn + 1
            elif (t == bg and p == obj):
                fp = fp + 1

    if (tp == 0):
        return 0
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    F1 = 2 * ((precision * recall) / (precision + recall))
    # print("TP: {}, TN: {}, FP: {}, FN: {}".format(tp, tn, fp, fn))
    # print("Precision: {}, Recall: {}, F1: {}".format(precision, recall, F1))
    return F1


def prepare_ground_true_masks(gt_ann, filname):
    # get image id, shape
    im_id = -1
    height = 0
    width = 0
    for im in gt_ann['images']:
        if (im['file_name'] == filname):
            im_id = im['id']
            height = im['height']
            width = im['width']
            break
    if (im_id == -1):
        # raise Exception('No image with name {}'.format(filname))
        print('No image with name {}'.format(filname))

    # get image annotations
    im_annotations = []
    for ann in gt_ann['annotations']:
        if (ann['image_id'] == im_id):
            im_annotations.append(ann)
    if (len(im_annotations) == 0):
        # raise Exception('No annotations for image with name {}'.format(filname))
        print('No annotations for image with name {}'.format(filname))
        masks = np.zeros((height, width, 1))
        return masks

    # mask for every object
    # bg = 0, obj = 1
    masks = np.zeros((height, width, len(im_annotations)))
    for i, ann in enumerate(im_annotations):
        seg = ann['segmentation']
        c = seg[0][0::2]
        r = seg[0][1::2]
        rr, cc = polygon(r, c)
        masks[rr, cc, i] = 1
    return masks


def merge_masks(masks):
    if len(masks.shape) < 3:
        return masks
    MASK = np.zeros(masks[:, :, 0].shape)
    for i in range(masks.shape[2]):
        MASK = np.add(MASK, masks[:, :, i])
    return MASK
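# The pixel-wise F1 in f1class above is computed with explicit Python loops.
# The same TP/FP/FN counts can be obtained with boolean NumPy reductions, which
# is much faster on large masks. The sketch below is an illustrative addition,
# not part of the original test module; the name f1class_vectorized is
# hypothetical and it assumes binary 0/1 masks like the ones built by
# prepare_ground_true_masks.
def f1class_vectorized(gt_ann, prediction, obj=1):
    import numpy as np
    gt_obj = (gt_ann == obj)
    pred_obj = (prediction == obj)
    tp = np.sum(gt_obj & pred_obj)     # object pixels predicted as object
    fp = np.sum(~gt_obj & pred_obj)    # background pixels predicted as object
    fn = np.sum(gt_obj & ~pred_obj)    # object pixels predicted as background
    if tp == 0:
        return 0
    precision = tp / (tp + fp)
    recall = tp / (tp + fn)
    return 2 * precision * recall / (precision + recall)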
{"hexsha": "049803bb238a6a5bc7c7ed739c02efadd6906bca", "size": 4100, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_zdo2021.py", "max_stars_repo_name": "zeleznyt/ZDO2021_StZe", "max_stars_repo_head_hexsha": "ed93f68c13a43eba90b9933f5b0bea689dc9f8eb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_zdo2021.py", "max_issues_repo_name": "zeleznyt/ZDO2021_StZe", "max_issues_repo_head_hexsha": "ed93f68c13a43eba90b9933f5b0bea689dc9f8eb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_zdo2021.py", "max_forks_repo_name": "zeleznyt/ZDO2021_StZe", "max_forks_repo_head_hexsha": "ed93f68c13a43eba90b9933f5b0bea689dc9f8eb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.625, "max_line_length": 98, "alphanum_fraction": 0.5802439024, "include": true, "reason": "import numpy", "num_tokens": 1161}
import bz2
import os.path
import re

import numpy as np
from torch.utils.data import DataLoader
from torchtext.data import get_tokenizer

# from torchnlp.encoders.text import StaticTokenizerEncoder, stack_and_pad_tensors, pad_tensor

##############################
#   read raw gros bizou2     #
##############################


def get_labels_and_texts(file):
    labels = []
    texts = []
    k = 0  # slice else it takes too long to run
    for line in bz2.BZ2File(file):
        if (
            k == 6990
        ):  # totally arbitrary stop point because my computer is smol bean :(
            break
        x = line.decode("utf-8")
        labels.append(int(x[9]) - 1)
        texts.append(x[10:].strip())
        k += 1
    return np.array(labels), texts


def normalize_texts(texts):
    NON_ALPHANUM = re.compile(r"[\W]")
    NON_ASCII = re.compile(r"[^a-z0-1\s]")
    normalized_texts = []
    for text in texts:
        lower = text.lower()
        no_punctuation = NON_ALPHANUM.sub(r" ", lower)
        no_non_ascii = NON_ASCII.sub(r"", no_punctuation)
        normalized_texts.append(no_non_ascii)
    return normalized_texts


def amzreview_dataset() -> (DataLoader, DataLoader):
    """
    reads files from /data/raw with appropriate method
    applies normalization and tokenization
    merges processed texts with their labels into torch.utils.data DataLoader
    objects for train and test sets
    """
    train_labels, train_texts = get_labels_and_texts("data/raw/train.ft.txt.bz2")
    test_labels, test_texts = get_labels_and_texts("data/raw/test.ft.txt.bz2")

    #######################
    # preprocessing
    # should be part of dataloader tbqh
    #######################
    train_texts = normalize_texts(train_texts)
    test_texts = normalize_texts(test_texts)

    ##########################
    # train/test split
    # - for debugging; now done in dataloader later on
    ##########################
    # from sklearn.model_selection import train_test_split
    # train_texts, val_texts, train_labels, val_labels = train_test_split(
    #     train_texts, train_labels, random_state=57643892, test_size=0.2)

    ##################
    # tokenize
    # (already normalized by hand above)
    ##################
    Tokenizer = get_tokenizer(
        None
    )  # maybe look into tokenization libraries (spacy, etc)

    # following prints are just for debugging
    print("len before tok:", len(train_texts))
    train_data = [
        [Tokenizer(text), train_labels[i]] for i, text in enumerate(train_texts)
    ]
    test_data = [[Tokenizer(text), test_labels[i]] for i, text in enumerate(test_texts)]
    print("len after tok:", len(train_data), "\n")

    ###################
    #   dataloader    #
    ###################
    train = DataLoader(train_data, shuffle=True, batch_size=3000)
    test = DataLoader(test_data, shuffle=True, batch_size=3000)

    return train, test


amzreview_dataset()
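# Note on the DataLoader construction above: each sample is [token_list, label]
# and the token lists have different lengths, so PyTorch's default collation may
# not be able to batch them directly. A hedged sketch of a custom collate_fn
# (an illustrative addition, not part of this module) that keeps the token lists
# as plain Python lists and only gathers the labels:
def collate_keep_tokens(batch):
    tokens = [sample[0] for sample in batch]   # list of un-padded token lists
    labels = [sample[1] for sample in batch]   # corresponding labels
    return tokens, labels

# Usage sketch (hypothetical; mirrors the DataLoader call above):
# train = DataLoader(train_data, shuffle=True, batch_size=3000,
#                    collate_fn=collate_keep_tokens)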
{"hexsha": "f8151d83b2f4e477f0c58774629e1c67119d4640", "size": 2949, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/data/load_bz2_nlpdata.py", "max_stars_repo_name": "samytessier/samy_mlops", "max_stars_repo_head_hexsha": "f52592d3b63d8fc11d0ea6cd2f51c80c4858ef4f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/data/load_bz2_nlpdata.py", "max_issues_repo_name": "samytessier/samy_mlops", "max_issues_repo_head_hexsha": "f52592d3b63d8fc11d0ea6cd2f51c80c4858ef4f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/data/load_bz2_nlpdata.py", "max_forks_repo_name": "samytessier/samy_mlops", "max_forks_repo_head_hexsha": "f52592d3b63d8fc11d0ea6cd2f51c80c4858ef4f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-20T00:56:36.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-20T00:56:36.000Z", "avg_line_length": 31.3723404255, "max_line_length": 94, "alphanum_fraction": 0.6147846728, "include": true, "reason": "import numpy", "num_tokens": 683}
# Test methods with long descriptive names can omit docstrings
# pylint: disable=missing-docstring

import unittest
import pickle

import numpy as np

from Orange.preprocess import Continuize, Normalize
from Orange.projection import PCA, SparsePCA, IncrementalPCA, TruncatedSVD
from Orange.data import Table


class TestPCA(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.ionosphere = Table("ionosphere")
        cls.iris = Table("iris")
        cls.zoo = Table("zoo")

    def test_pca(self):
        data = self.ionosphere
        self.__pca_test_helper(data, n_com=3, min_xpl_var=0.5)
        self.__pca_test_helper(data, n_com=10, min_xpl_var=0.7)
        self.__pca_test_helper(data, n_com=32, min_xpl_var=1)

    def __pca_test_helper(self, data, n_com, min_xpl_var):
        pca = PCA(n_components=n_com)
        pca_model = pca(data)
        pca_xpl_var = np.sum(pca_model.explained_variance_ratio_)
        self.assertGreaterEqual(pca_xpl_var + 1e-6, min_xpl_var)
        self.assertEqual(n_com, pca_model.n_components)
        self.assertEqual((n_com, data.X.shape[1]), pca_model.components_.shape)
        proj = np.dot(data.X - pca_model.mean_, pca_model.components_.T)
        np.testing.assert_almost_equal(pca_model(data).X, proj)

    def test_sparse_pca(self):
        data = self.ionosphere[:100]
        self.__sparse_pca_test_helper(data, n_com=3, max_err=1500)
        self.__sparse_pca_test_helper(data, n_com=10, max_err=1000)
        self.__sparse_pca_test_helper(data, n_com=32, max_err=500)

    def __sparse_pca_test_helper(self, data, n_com, max_err):
        sparse_pca = SparsePCA(n_components=n_com, ridge_alpha=0.001, random_state=0)
        pca_model = sparse_pca(data)
        self.assertEqual(n_com, pca_model.n_components)
        self.assertEqual((n_com, data.X.shape[1]), pca_model.components_.shape)
        self.assertLessEqual(pca_model.error_[-1], max_err)

    def test_randomized_pca(self):
        data = self.ionosphere
        self.__rnd_pca_test_helper(data, n_com=3, min_xpl_var=0.5)
        self.__rnd_pca_test_helper(data, n_com=10, min_xpl_var=0.7)
        self.__rnd_pca_test_helper(data, n_com=32, min_xpl_var=0.98)

    def __rnd_pca_test_helper(self, data, n_com, min_xpl_var):
        rnd_pca = PCA(n_components=n_com, svd_solver="randomized")
        pca_model = rnd_pca(data)
        pca_xpl_var = np.sum(pca_model.explained_variance_ratio_)
        self.assertGreaterEqual(pca_xpl_var, min_xpl_var)
        self.assertEqual(n_com, pca_model.n_components)
        self.assertEqual((n_com, data.X.shape[1]), pca_model.components_.shape)
        proj = np.dot(data.X - pca_model.mean_, pca_model.components_.T)
        np.testing.assert_almost_equal(pca_model(data).X, proj)

    def test_incremental_pca(self):
        data = self.ionosphere
        self.__ipca_test_helper(data, n_com=3, min_xpl_var=0.49)
        self.__ipca_test_helper(data, n_com=32, min_xpl_var=1)

    def __ipca_test_helper(self, data, n_com, min_xpl_var):
        pca = IncrementalPCA(n_components=n_com)
        pca_model = pca(data[::2])
        pca_xpl_var = np.sum(pca_model.explained_variance_ratio_)
        self.assertGreaterEqual(pca_xpl_var + 1e-6, min_xpl_var)
        self.assertEqual(n_com, pca_model.n_components)
        self.assertEqual((n_com, data.X.shape[1]), pca_model.components_.shape)
        proj = np.dot(data.X - pca_model.mean_, pca_model.components_.T)
        np.testing.assert_almost_equal(pca_model(data).X, proj)
        pc1_ipca = pca_model.components_[0]
        self.assertAlmostEqual(np.linalg.norm(pc1_ipca), 1)
        pc1_pca = PCA(n_components=n_com)(data).components_[0]
        self.assertAlmostEqual(np.linalg.norm(pc1_pca), 1)
        self.assertNotAlmostEqual(abs(pc1_ipca.dot(pc1_pca)), 1, 2)
        pc1_ipca = pca_model.partial_fit(data[1::2]).components_[0]
        self.assertAlmostEqual(abs(pc1_ipca.dot(pc1_pca)), 1, 4)

    def test_truncated_svd(self):
        data = self.ionosphere
        self.__truncated_svd_test_helper(data, n_components=3, min_variance=0.5)
        self.__truncated_svd_test_helper(data, n_components=10, min_variance=0.7)
        self.__truncated_svd_test_helper(data, n_components=31, min_variance=0.99)

    def __truncated_svd_test_helper(self, data, n_components, min_variance):
        model = TruncatedSVD(n_components=n_components)(data)
        svd_variance = np.sum(model.explained_variance_ratio_)
        self.assertGreaterEqual(svd_variance + 1e-6, min_variance)
        self.assertEqual(n_components, model.n_components)
        self.assertEqual((n_components, data.X.shape[1]), model.components_.shape)
        proj = np.dot(data.X, model.components_.T)
        np.testing.assert_almost_equal(model(data).X, proj)

    def test_compute_value(self):
        iris = self.iris
        pca = PCA(n_components=2)(iris)
        pca_iris = pca(iris)
        pca_iris2 = Table(pca_iris.domain, iris)
        np.testing.assert_almost_equal(pca_iris.X, pca_iris2.X)
        np.testing.assert_equal(pca_iris.Y, pca_iris2.Y)
        pca_iris3 = pickle.loads(pickle.dumps(pca_iris))
        np.testing.assert_almost_equal(pca_iris.X, pca_iris3.X)
        np.testing.assert_equal(pca_iris.Y, pca_iris3.Y)

    def test_transformed_domain_does_not_pickle_data(self):
        iris = self.iris
        pca = PCA(n_components=2)(iris)
        pca_iris = pca(iris)
        pca_iris2 = Table(pca_iris.domain, iris)
        pca_iris2 = pickle.loads(pickle.dumps(pca_iris))
        self.assertIsNone(pca_iris2.domain[0].compute_value.transformed)

    def test_chain(self):
        zoo_c = Continuize()(self.zoo)
        pca = PCA(n_components=3)(zoo_c)(self.zoo)
        pca2 = PCA(n_components=3)(zoo_c)(zoo_c)
        pp = [Continuize()]
        pca3 = PCA(n_components=3, preprocessors=pp)(self.zoo)(self.zoo)
        np.testing.assert_almost_equal(pca.X, pca2.X)
        np.testing.assert_almost_equal(pca.X, pca3.X)

    def test_PCA_scorer(self):
        data = self.iris
        pca = PCA(preprocessors=[Normalize()])
        pca.component = 1
        scores = pca.score_data(data)
        self.assertEqual(scores.shape[1], len(data.domain.attributes))
        self.assertEqual(
            ["petal length", "petal width"],
            sorted(
                [data.domain.attributes[i].name for i in np.argsort(scores[0])[-2:]]
            ),
        )
        self.assertEqual(
            [round(s, 4) for s in scores[0]], [0.5224, 0.2634, 0.5813, 0.5656]
        )

    def test_PCA_scorer_component(self):
        pca = PCA()
        for i in range(1, len(self.zoo.domain.attributes) + 1):
            pca.component = i
            scores = pca.score_data(self.zoo)
            self.assertEqual(
                scores.shape, (pca.component, len(self.zoo.domain.attributes))
            )

    def test_PCA_scorer_all_components(self):
        n_attr = len(self.iris.domain.attributes)
        pca = PCA()
        scores = pca.score_data(self.iris)
        self.assertEqual(scores.shape, (n_attr, n_attr))

    def test_max_components(self):
        d = np.random.RandomState(0).rand(20, 20)
        data = Table(d)
        pca = PCA()(data)
        self.assertEqual(len(pca.explained_variance_ratio_), 20)
        pca = PCA(n_components=10)(data)
        self.assertEqual(len(pca.explained_variance_ratio_), 10)
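# The PCA helpers above repeatedly verify the identity
# "projection == (X - mean_) @ components_.T". A minimal standalone sketch of
# that identity using scikit-learn's PCA is given below; it is an illustrative
# addition, not part of the Orange test suite, and assumes numpy and
# scikit-learn are available.
def _pca_projection_identity_sketch():
    import numpy as np
    from sklearn.decomposition import PCA as SkPCA

    X = np.random.RandomState(0).rand(50, 4)
    pca = SkPCA(n_components=2).fit(X)
    # transform() centers by the fitted mean_ and projects onto components_
    proj = np.dot(X - pca.mean_, pca.components_.T)
    np.testing.assert_almost_equal(pca.transform(X), proj)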
{"hexsha": "a8db0e9fdb1eadfc7ede8934bfebeb44383daae8", "size": 7356, "ext": "py", "lang": "Python", "max_stars_repo_path": "orange3/Orange/tests/test_pca.py", "max_stars_repo_name": "rgschmitz1/BioDepot-workflow-builder", "max_stars_repo_head_hexsha": "f74d904eeaf91ec52ec9b703d9fb38e9064e5a66", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 54, "max_stars_repo_stars_event_min_datetime": "2017-01-08T17:21:49.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-02T08:46:07.000Z", "max_issues_repo_path": "orange3/Orange/tests/test_pca.py", "max_issues_repo_name": "Synthia-3/BioDepot-workflow-builder", "max_issues_repo_head_hexsha": "4ee93abe2d79465755e82a145af3b6a6e1e79fd4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 22, "max_issues_repo_issues_event_min_datetime": "2017-03-28T06:03:14.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-28T05:43:55.000Z", "max_forks_repo_path": "orange3/Orange/tests/test_pca.py", "max_forks_repo_name": "Synthia-3/BioDepot-workflow-builder", "max_forks_repo_head_hexsha": "4ee93abe2d79465755e82a145af3b6a6e1e79fd4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 21, "max_forks_repo_forks_event_min_datetime": "2017-01-26T21:12:09.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-31T21:34:59.000Z", "avg_line_length": 43.2705882353, "max_line_length": 85, "alphanum_fraction": 0.6768624252, "include": true, "reason": "import numpy", "num_tokens": 1948}
""" This file tests the `generate_frames` method. """ import sys sys.path.insert(0, '../') import audiosegment import math import numpy as np import unittest class TestGenerateFrames(unittest.TestCase): """ Test the generate_frames_* methods. """ def test_reconstruction_mono(self): """ Test that we can put the original segment back together via the frames. """ before = audiosegment.from_file("furelise.wav") nchannels = before.channels bps = before.sample_width hz = before.frame_rate duration_s = before.duration_seconds results = [s for s, _ in before.generate_frames_as_segments(1000, zero_pad=False)] after = results[0].reduce(results[1:]) self.assertEqual(after.channels, nchannels, "Got {} channels, expected {}.".format(after.channels, nchannels)) self.assertEqual(after.sample_width, bps, "Got {} sample width, expected {}.".format(after.sample_width, bps)) self.assertEqual(after.frame_rate, hz, "Got {} frame rate, expected {}.".format(after.frame_rate, hz)) self.assertEqual(after.duration_seconds, duration_s, "Got {} duration seconds, expected {}.".format(after.duration_seconds, duration_s)) beforearr = before.to_numpy_array() afterarr = after.to_numpy_array() self.assertTrue(np.allclose(beforearr, afterarr), "Segments differ in data") def test_reconstruction_stereo(self): """ """ before = audiosegment.from_file("stereo_furelise.wav") nchannels = before.channels bps = before.sample_width hz = before.frame_rate duration_s = before.duration_seconds results = [s for s, _ in before.generate_frames_as_segments(1000, zero_pad=False)] after = results[0].reduce(results[1:]) self.assertEqual(after.channels, nchannels, "Got {} channels, expected {}.".format(after.channels, nchannels)) self.assertEqual(after.sample_width, bps, "Got {} sample width, expected {}.".format(after.sample_width, bps)) self.assertEqual(after.frame_rate, hz, "Got {} frame rate, expected {}.".format(after.frame_rate, hz)) self.assertEqual(after.duration_seconds, duration_s, "Got {} duration seconds, expected {}.".format(after.duration_seconds, duration_s)) beforearr = before.to_numpy_array() afterarr = after.to_numpy_array() self.assertTrue(np.allclose(beforearr, afterarr), "Segments differ in data") if __name__ == "__main__": unittest.main()
{"hexsha": "f4e63eaeddf8453a858962912a0f84957c6c06e9", "size": 2549, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/frames.py", "max_stars_repo_name": "MaxStrange/AudioSegment", "max_stars_repo_head_hexsha": "9e54dd575b879711021e1536b66d2b4f48965d0a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 79, "max_stars_repo_stars_event_min_datetime": "2018-02-11T18:46:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T03:15:56.000Z", "max_issues_repo_path": "tests/frames.py", "max_issues_repo_name": "MaxStrange/AudioSegment", "max_issues_repo_head_hexsha": "9e54dd575b879711021e1536b66d2b4f48965d0a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2019-01-28T22:54:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-18T01:17:48.000Z", "max_forks_repo_path": "tests/frames.py", "max_forks_repo_name": "MaxStrange/AudioSegment", "max_forks_repo_head_hexsha": "9e54dd575b879711021e1536b66d2b4f48965d0a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2018-02-14T22:49:59.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-23T03:15:59.000Z", "avg_line_length": 38.0447761194, "max_line_length": 144, "alphanum_fraction": 0.6783052177, "include": true, "reason": "import numpy", "num_tokens": 556}
# -*- coding: utf-8 -*-
"""
Created on Dec 14 2020
@author: Yi-Hui (Sophia) Chou

Updated on May 10 2021
@author: I-Chun (Bronwin) Chen
"""
import sys
sys.path.append('../../CP')

import os
import json
import time
import tqdm
import torch
import pickle
import argparse
import numpy as np
import torch.nn as nn
import utils_bestloss as utils

from pathlib import Path
from model import SAN
from train import training, valid
from pop_dataset import PopDataset
from torch.utils.data import DataLoader

def get_args():
    parser = argparse.ArgumentParser(description='Argument Parser for sequence-level tasks')

    ### mode ###
    parser.add_argument('--task', choices=['composer', 'emotion'], required=True)

    ### path setup ###
    parser.add_argument('--input', type=str, default='../../../data/CP',
                        help='Path to input numpy folder for composer dataset')
    parser.add_argument('--dict', type=str, default='../../../BERT/dict/CP.pkl')
    parser.add_argument('--output', type=str, help='Used for output directory name', required=True)

    ### parameter setting ###
    parser.add_argument('--train-batch', default=16, type=int)
    parser.add_argument('--dev-batch', default=8, type=int)
    parser.add_argument('--cuda', default=0, type=int, help='Specify cuda number')
    parser.add_argument('--epoch', default=1000, type=int, help='number of training epochs')
    parser.add_argument('--lr', default=1e-2, type=float, help="learning rate")

    args = parser.parse_args()

    # learning rate used in paper
    # if args.lr == 0:
    #     if args.task == "composer":
    #         args.lr = 1e-2
    #     elif args.task == "emotion":
    #         args.lr = 5e-2

    if args.task == "composer":
        args.num_of_class = 8
    elif args.task == "emotion":
        args.num_of_class = 4

    return args

def main():
    args = get_args()
    cuda_num = args.cuda
    cuda_str = 'cuda:'+str(cuda_num)
    device = torch.device(cuda_str if torch.cuda.is_available() else 'cpu')

    inputs = args.input
    exp_name = args.output
    exp_dir = os.path.join('./experiments', exp_name)
    target_jsonpath = exp_dir

    num_of_class = args.num_of_class
    train_epochs = args.epoch
    lr = args.lr
    patience = 20

    if not os.path.exists(exp_dir):
        Path(exp_dir).mkdir(parents = True, exist_ok = True)

    print("loading dictionary...")
    with open(args.dict, 'rb') as f:
        e2w, w2e = pickle.load(f)

    print("\nloading data...")
    if args.task == "composer":
        X_train = torch.tensor(np.load(inputs + "/composer_cp_train.npy", allow_pickle=True), dtype=torch.long)
        X_val = torch.tensor(np.load(inputs + "/composer_cp_valid.npy", allow_pickle=True), dtype=torch.long)
        y_train = torch.tensor(np.load(inputs+ "/composer_cp_train_ans.npy", allow_pickle=True), dtype=torch.long)
        y_val = torch.tensor(np.load(inputs+ "/composer_cp_valid_ans.npy", allow_pickle=True), dtype=torch.long)
    elif args.task == "emotion":
        X_train = torch.tensor(np.load(inputs + "/emopia_cp_train.npy", allow_pickle=True), dtype=torch.long)
        X_val = torch.tensor(np.load(inputs + "/emopia_cp_valid.npy", allow_pickle=True), dtype=torch.long)
        y_train = torch.tensor(np.load(inputs+ "/emopia_cp_train_ans.npy", allow_pickle=True), dtype=torch.long)
        y_val = torch.tensor(np.load(inputs+ "/emopia_cp_valid_ans.npy", allow_pickle=True), dtype=torch.long)

    trainset = PopDataset(X=X_train, y=y_train)
    validset = PopDataset(X=X_val, y=y_val)
    train_loader = DataLoader(trainset, batch_size = args.train_batch, shuffle = True)
    print(" len of train_loader", len(train_loader))
    valid_loader = DataLoader(validset, batch_size = args.dev_batch, shuffle = True)
    print(" len of valid_loader", len(valid_loader))

    print("\n initializing model...")
    model = SAN(num_of_dim=num_of_class, e2w=e2w, vocab_size=len(e2w), embedding_size=768, r=4)
    model.cuda(cuda_num)

    optimizer = torch.optim.SGD(
        model.parameters(),
        lr=lr,
        momentum=0.9
    )

    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer,
        factor=0.3, # follow openunmix
        patience=80,
        cooldown=10
    )

    es = utils.EarlyStopping(patience= patience)

    t = tqdm.trange(1, train_epochs +1, disable = False)
    train_losses, train_accs = [], []
    valid_losses, valid_accs = [], []
    train_times = []
    best_epoch = 0
    stop_t = 0

    print(" start training...")
    for epoch in t:
        # break
        t.set_description("Training Epoch")
        end = time.time()
        train_loss, train_acc = training(model, device, train_loader, optimizer, args.train_batch, num_of_class)
        valid_loss, valid_acc = valid(model, device, valid_loader, args.dev_batch, num_of_class)

        scheduler.step(valid_loss)
        train_losses.append(train_loss.item())
        valid_losses.append(valid_loss.item())
        train_accs.append(train_acc.item())
        valid_accs.append(valid_acc.item())

        t.set_postfix(
            train_loss=train_loss.item(), val_loss=valid_loss.item()
        )

        stop = es.step(valid_loss.item())

        if valid_loss.item() == es.best:
            best_epoch = epoch

        utils.save_checkpoint({
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_loss': es.best,
                'optimizer': optimizer.state_dict(),
                'scheduler': scheduler.state_dict()
            },
            is_best=valid_loss == es.best,
            path=exp_dir,
            target='SAN_' + args.task
        )

        # save params
        params = {
            'epochs_trained': epoch,
            'best_loss': es.best,
            'best_epoch': best_epoch,
            'train_loss_history': train_losses,
            'valid_loss_history': valid_losses,
            'train_acc_history': train_accs,
            'valid_acc_history': valid_accs,
            'train_time_history': train_times,
            'num_bad_epochs': es.num_bad_epochs,
        }

        with open(os.path.join(target_jsonpath, 'SAN_' + args.task + '.json'), 'w') as outfile:
            outfile.write(json.dumps(params, indent=4, sort_keys=True))

        train_times.append(time.time() - end)

        if stop:
            print("Apply Early Stopping and retrain")
            # break
            stop_t += 1
            if stop_t >= 5:
                break
            lr = lr*0.2
            optimizer = torch.optim.SGD(
                model.parameters(),
                lr=lr,
                momentum=0.9
            )
            scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
                optimizer,
                factor=0.3, # follow openunmix
                patience=80,
                cooldown=10
            )
            es = utils.EarlyStopping(patience= patience, best_loss = es.best)

if __name__ == "__main__":
    main()
{"hexsha": "bc60b7ed64018978354989ed1f5b0ba0a2a82e47", "size": 7239, "ext": "py", "lang": "Python", "max_stars_repo_path": "baseline/CP/sequence-level/main.py", "max_stars_repo_name": "atosystem/MIDI-BERT", "max_stars_repo_head_hexsha": "61f7efb3be85a2a847e6585237036e052235a6a0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 109, "max_stars_repo_stars_event_min_datetime": "2021-07-09T07:38:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T07:28:07.000Z", "max_issues_repo_path": "baseline/CP/sequence-level/main.py", "max_issues_repo_name": "atosystem/MIDI-BERT", "max_issues_repo_head_hexsha": "61f7efb3be85a2a847e6585237036e052235a6a0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-11-18T10:03:44.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-23T07:15:52.000Z", "max_forks_repo_path": "baseline/CP/sequence-level/main.py", "max_forks_repo_name": "atosystem/MIDI-BERT", "max_forks_repo_head_hexsha": "61f7efb3be85a2a847e6585237036e052235a6a0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2021-07-13T01:40:25.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-06T09:54:26.000Z", "avg_line_length": 34.8028846154, "max_line_length": 128, "alphanum_fraction": 0.5970437906, "include": true, "reason": "import numpy", "num_tokens": 1659}
"""Implementation of the series A279125 from OEIS. The entry a(n) is decided by checking if n's binary value has any overlapping with previous i=1, 2, 3, ..., n-1, also in binary. I.e., if n's binary value has ones in places where any of the i's have ones, a(n) is the lowest integer that has not yet been picked, i.e. a(n) > a(i) ∀ i ∈ {1, 2, 3, ..., n-1}. """ import matplotlib.pyplot as plt import numpy as np # Length of the series size = 3000 # The series with the two first entries A279125 = [0, 0] # A look up table with binary numbers sorted after what entry they gave to the series look_up = [np.zeros((2, 2))] npad_left = ((0, 0), (1, 0)) npad_bottom = ((0, 1), (0, 0)) for n in range(3, size): print(f"{round(n / size * 100, 1)}%", end="\r") # Make a list of the digits of the binary number bin_list = [int(x) for x in "{:b}".format(n)] not_filled = True # Variable that decides what should be the entry of the series numb = 1 # For deciding quickly if the binary number is on the form n = 2**i where i ∈\mathbb{Z} rest = bin_list[1:] if not any([int(x) for x in rest]): # If the number is from a power of two, it goes here, placing a '0' in the series A279125.append(0) look_up[0] = np.pad( look_up[0], pad_width=npad_left, mode="constant", constant_values=0 ) look_up[0] = np.pad( look_up[0], pad_width=npad_bottom, mode="constant", constant_values=0 ) look_up[0][:, -1] = bin_list else: while not_filled: try: # If the series do not yet have the entry 'numb'... look_up[numb] except Exception: # ... then this should be added to the look-up table # and the series should get 'numb' added to it new_entry = np.zeros((1, len(bin_list))) new_entry[0, :] = bin_list look_up.append(new_entry) A279125.append(numb) not_filled = False else: # If 'numb' is used before, we must check if the new number in binary has any overlapping '1's. if len(bin_list) > len(look_up[numb][0, :]): diff = len(bin_list) - len(look_up[numb][0, :]) new_npad_left = ((0, 0), (diff, 0)) look_up[numb] = np.pad( look_up[numb], pad_width=new_npad_left, mode="constant", constant_values=0, ) cols = [index for index, value in enumerate(bin_list) if value] for col in cols: if any([x for x in look_up[numb][:, col]]): # If there is overlapping '1's, we can't use 'numb', # and we break the loop and go to the next possibility break if col == cols[-1]: # If there are no overlapping '1's, we add the number in binary # to the look-up table of 'numb' and append 'numb' to the series look_up[numb] = np.pad( look_up[0], pad_width=npad_bottom, mode="constant", constant_values=0, ) look_up[numb][-1, :] = bin_list A279125.append(numb) not_filled = False numb += 1 # The list is plotted with a black background and white data points to resemble snow plt.figure().set_facecolor("black") plt.subplot("111", facecolor="black").tick_params(axis="x", colors="white") plt.subplot("111", facecolor="black").tick_params(axis="y", colors="white") plt.subplot("111", facecolor="black").spines["top"].set_color("white") plt.subplot("111", facecolor="black").spines["bottom"].set_color("white") plt.subplot("111", facecolor="black").spines["left"].set_color("white") plt.subplot("111", facecolor="black").spines["right"].set_color("white") plt.scatter(list(range(len(A279125))), A279125, s=0.1, color="k", marker=",") # Remove labels, axes etc. 
spines = ["top", "right", "left", "bottom"] for sp in spines: plt.gca().spines[sp].set_visible(False) plt.tick_params(axis="y", which="both", left=False, right=False, labelleft=False) plt.tick_params(axis="x", which="both", bottom=False, top=False, labelbottom=False) plt.gca().set_xticklabels([]) plt.gca().set_yticklabels([]) plt.savefig( "snowy_hills.png", bbox_inches="tight", format="png", dpi=1200, transparent=True ) plt.show()
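The greedy rule described in the docstring above admits a much shorter reference implementation, useful for sanity-checking the look-up-table bookkeeping on small sizes. The sketch below is an illustrative addition, not part of the original script: it relies on the fact that testing bit overlap against every member of a value class is equivalent to testing against the bitwise OR of that class.

def a279125_reference(terms):
    """Return [a(1), ..., a(terms)] under the overlap rule described above."""
    masks = []  # masks[k] = bitwise OR of all n already assigned the value k
    seq = []
    for n in range(1, terms + 1):
        k = 0
        # pick the smallest value k whose members share no '1' bits with n
        while k < len(masks) and masks[k] & n:
            k += 1
        if k == len(masks):
            masks.append(0)
        masks[k] |= n
        seq.append(k)
    return seq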
{"hexsha": "a22caf8ad6cfd9b7e985683bbcd2d2b6e56a9edc", "size": 4713, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/maths_snack/scripts/snowy.py", "max_stars_repo_name": "engeir/maths_snack", "max_stars_repo_head_hexsha": "1b35c7dfc3b5ac3d105226db2be9ce6434da0a11", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/maths_snack/scripts/snowy.py", "max_issues_repo_name": "engeir/maths_snack", "max_issues_repo_head_hexsha": "1b35c7dfc3b5ac3d105226db2be9ce6434da0a11", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-12T01:08:36.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T01:08:36.000Z", "max_forks_repo_path": "src/maths_snack/scripts/snowy.py", "max_forks_repo_name": "engeir/maths_snack", "max_forks_repo_head_hexsha": "1b35c7dfc3b5ac3d105226db2be9ce6434da0a11", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.6388888889, "max_line_length": 111, "alphanum_fraction": 0.5573944409, "include": true, "reason": "import numpy", "num_tokens": 1198}
[STATEMENT] lemma noDA[rule_format]: "noDenyAll xs \<longrightarrow> s \<in> set xs \<longrightarrow> \<not> member DenyAll s" [PROOF STATE] proof (prove) goal (1 subgoal): 1. noDenyAll xs \<longrightarrow> s \<in> set xs \<longrightarrow> \<not> member DenyAll s [PROOF STEP] by (induct xs, simp_all)
{"llama_tokens": 110, "file": "UPF_Firewall_FWNormalisation_NormalisationGenericProofs", "length": 1}
# Copyright 2012 Mehmet Ali ANIL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
For a network, every single node is taken, and its mask and state vector are
clipped. They are subjected to a boolean operation from the numpy library.
Depending on whether the result of that operation outweighs the shortened
(clipped) state vector, the node outputs 1 or 0. These values are gathered up
and output as a new state vector.

The boolean operation can be numpy.logical_or, numpy.logical_and or
numpy.logical_xor (numpy provides no logical_xnor; an XNOR can be obtained by
negating logical_xor), or any other function that, given two ndarrays,
returns values that can be counted.
"""

from boolfuncs import xor_masking, and_masking, or_masking, xor_masking_C, and_masking_C, or_masking_C, advance_C
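A minimal sketch of the per-node update described in the docstring above (a hypothetical helper for illustration, not the actual xor_masking/and_masking/or_masking implementations; the thresholding rule is an interpretation of the wording above):

import numpy as np

def generic_masking(state, mask, bool_func=np.logical_xor):
    """Combine the clipped state and mask with a boolean operation and output
    1 if the number of True entries exceeds that of the clipped state vector."""
    combined = bool_func(state, mask)
    # assumption: 'greater than the shortened state vector' is read as a
    # comparison between the counts of True entries
    return 1 if combined.sum() > np.asarray(state).sum() else 0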
{"hexsha": "f26235ec5f3d3bd394cc4ea7cabaf60bec4a5992", "size": 1273, "ext": "py", "lang": "Python", "max_stars_repo_path": "kreveik/network/boolfuncs/__init__.py", "max_stars_repo_name": "kreveik/Kreveik", "max_stars_repo_head_hexsha": "5144c555c526f68560d891e39c401053c5286359", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "kreveik/network/boolfuncs/__init__.py", "max_issues_repo_name": "kreveik/Kreveik", "max_issues_repo_head_hexsha": "5144c555c526f68560d891e39c401053c5286359", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kreveik/network/boolfuncs/__init__.py", "max_forks_repo_name": "kreveik/Kreveik", "max_forks_repo_head_hexsha": "5144c555c526f68560d891e39c401053c5286359", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-01-21T05:32:02.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-21T05:32:02.000Z", "avg_line_length": 35.3611111111, "max_line_length": 113, "alphanum_fraction": 0.7368421053, "include": true, "reason": "from numpy", "num_tokens": 296}
[STATEMENT] lemma (in vfsequence) vfsequence_vcons[intro, simp]: "vfsequence (xs #\<^sub>\<circ> x)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. vfsequence (xs #\<^sub>\<circ> x) [PROOF STEP] proof(intro vfsequenceI) [PROOF STATE] proof (state) goal (2 subgoals): 1. vsv (xs #\<^sub>\<circ> x) 2. \<D>\<^sub>\<circ> (xs #\<^sub>\<circ> x) \<in>\<^sub>\<circ> \<omega> [PROOF STEP] from vfsequence_vdomain_in_omega vsv_vcard_vdomain [PROOF STATE] proof (chain) picking this: \<D>\<^sub>\<circ> xs \<in>\<^sub>\<circ> \<omega> vcard (\<D>\<^sub>\<circ> xs) = vcard xs [PROOF STEP] have "vcard xs = \<D>\<^sub>\<circ> xs" [PROOF STATE] proof (prove) using this: \<D>\<^sub>\<circ> xs \<in>\<^sub>\<circ> \<omega> vcard (\<D>\<^sub>\<circ> xs) = vcard xs goal (1 subgoal): 1. vcard xs = \<D>\<^sub>\<circ> xs [PROOF STEP] by (simp add: vcard_veqpoll) [PROOF STATE] proof (state) this: vcard xs = \<D>\<^sub>\<circ> xs goal (2 subgoals): 1. vsv (xs #\<^sub>\<circ> x) 2. \<D>\<^sub>\<circ> (xs #\<^sub>\<circ> x) \<in>\<^sub>\<circ> \<omega> [PROOF STEP] show "vsv (xs #\<^sub>\<circ> x)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. vsv (xs #\<^sub>\<circ> x) [PROOF STEP] proof(intro vsvI) [PROOF STATE] proof (state) goal (2 subgoals): 1. vbrelation (xs #\<^sub>\<circ> x) 2. \<And>a b c. \<lbrakk>\<langle>a, b\<rangle> \<in>\<^sub>\<circ> xs #\<^sub>\<circ> x; \<langle>a, c\<rangle> \<in>\<^sub>\<circ> xs #\<^sub>\<circ> x\<rbrakk> \<Longrightarrow> b = c [PROOF STEP] fix a b c [PROOF STATE] proof (state) goal (2 subgoals): 1. vbrelation (xs #\<^sub>\<circ> x) 2. \<And>a b c. \<lbrakk>\<langle>a, b\<rangle> \<in>\<^sub>\<circ> xs #\<^sub>\<circ> x; \<langle>a, c\<rangle> \<in>\<^sub>\<circ> xs #\<^sub>\<circ> x\<rbrakk> \<Longrightarrow> b = c [PROOF STEP] assume ab: "\<langle>a, b\<rangle> \<in>\<^sub>\<circ> xs #\<^sub>\<circ> x" and ac: "\<langle>a, c\<rangle> \<in>\<^sub>\<circ> xs #\<^sub>\<circ> x" [PROOF STATE] proof (state) this: \<langle>a, b\<rangle> \<in>\<^sub>\<circ> xs #\<^sub>\<circ> x \<langle>a, c\<rangle> \<in>\<^sub>\<circ> xs #\<^sub>\<circ> x goal (2 subgoals): 1. vbrelation (xs #\<^sub>\<circ> x) 2. \<And>a b c. \<lbrakk>\<langle>a, b\<rangle> \<in>\<^sub>\<circ> xs #\<^sub>\<circ> x; \<langle>a, c\<rangle> \<in>\<^sub>\<circ> xs #\<^sub>\<circ> x\<rbrakk> \<Longrightarrow> b = c [PROOF STEP] then [PROOF STATE] proof (chain) picking this: \<langle>a, b\<rangle> \<in>\<^sub>\<circ> xs #\<^sub>\<circ> x \<langle>a, c\<rangle> \<in>\<^sub>\<circ> xs #\<^sub>\<circ> x [PROOF STEP] consider (dom) "a \<in>\<^sub>\<circ> \<D>\<^sub>\<circ> xs" | (ndom) "a = vcard xs" [PROOF STATE] proof (prove) using this: \<langle>a, b\<rangle> \<in>\<^sub>\<circ> xs #\<^sub>\<circ> x \<langle>a, c\<rangle> \<in>\<^sub>\<circ> xs #\<^sub>\<circ> x goal (1 subgoal): 1. \<lbrakk>a \<in>\<^sub>\<circ> \<D>\<^sub>\<circ> xs \<Longrightarrow> thesis; a = vcard xs \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis [PROOF STEP] unfolding vcons_def [PROOF STATE] proof (prove) using this: \<langle>a, b\<rangle> \<in>\<^sub>\<circ> vinsert \<langle>vcard xs, x\<rangle> xs \<langle>a, c\<rangle> \<in>\<^sub>\<circ> vinsert \<langle>vcard xs, x\<rangle> xs goal (1 subgoal): 1. 
\<lbrakk>a \<in>\<^sub>\<circ> \<D>\<^sub>\<circ> xs \<Longrightarrow> thesis; a = vcard xs \<Longrightarrow> thesis\<rbrakk> \<Longrightarrow> thesis [PROOF STEP] by auto [PROOF STATE] proof (state) this: \<lbrakk>a \<in>\<^sub>\<circ> \<D>\<^sub>\<circ> xs \<Longrightarrow> ?thesis; a = vcard xs \<Longrightarrow> ?thesis\<rbrakk> \<Longrightarrow> ?thesis goal (2 subgoals): 1. vbrelation (xs #\<^sub>\<circ> x) 2. \<And>a b c. \<lbrakk>\<langle>a, b\<rangle> \<in>\<^sub>\<circ> xs #\<^sub>\<circ> x; \<langle>a, c\<rangle> \<in>\<^sub>\<circ> xs #\<^sub>\<circ> x\<rbrakk> \<Longrightarrow> b = c [PROOF STEP] then [PROOF STATE] proof (chain) picking this: \<lbrakk>a \<in>\<^sub>\<circ> \<D>\<^sub>\<circ> xs \<Longrightarrow> ?thesis; a = vcard xs \<Longrightarrow> ?thesis\<rbrakk> \<Longrightarrow> ?thesis [PROOF STEP] show "b = c" [PROOF STATE] proof (prove) using this: \<lbrakk>a \<in>\<^sub>\<circ> \<D>\<^sub>\<circ> xs \<Longrightarrow> ?thesis; a = vcard xs \<Longrightarrow> ?thesis\<rbrakk> \<Longrightarrow> ?thesis goal (1 subgoal): 1. b = c [PROOF STEP] proof cases [PROOF STATE] proof (state) goal (2 subgoals): 1. a \<in>\<^sub>\<circ> \<D>\<^sub>\<circ> xs \<Longrightarrow> b = c 2. a = vcard xs \<Longrightarrow> b = c [PROOF STEP] case dom [PROOF STATE] proof (state) this: a \<in>\<^sub>\<circ> \<D>\<^sub>\<circ> xs goal (2 subgoals): 1. a \<in>\<^sub>\<circ> \<D>\<^sub>\<circ> xs \<Longrightarrow> b = c 2. a = vcard xs \<Longrightarrow> b = c [PROOF STEP] with ab [PROOF STATE] proof (chain) picking this: \<langle>a, b\<rangle> \<in>\<^sub>\<circ> xs #\<^sub>\<circ> x a \<in>\<^sub>\<circ> \<D>\<^sub>\<circ> xs [PROOF STEP] have "\<langle>a, b\<rangle> \<in>\<^sub>\<circ> xs" [PROOF STATE] proof (prove) using this: \<langle>a, b\<rangle> \<in>\<^sub>\<circ> xs #\<^sub>\<circ> x a \<in>\<^sub>\<circ> \<D>\<^sub>\<circ> xs goal (1 subgoal): 1. \<langle>a, b\<rangle> \<in>\<^sub>\<circ> xs [PROOF STEP] unfolding vcons_def [PROOF STATE] proof (prove) using this: \<langle>a, b\<rangle> \<in>\<^sub>\<circ> vinsert \<langle>vcard xs, x\<rangle> xs a \<in>\<^sub>\<circ> \<D>\<^sub>\<circ> xs goal (1 subgoal): 1. \<langle>a, b\<rangle> \<in>\<^sub>\<circ> xs [PROOF STEP] by (auto simp: \<open>vcard xs = \<D>\<^sub>\<circ> xs\<close>) [PROOF STATE] proof (state) this: \<langle>a, b\<rangle> \<in>\<^sub>\<circ> xs goal (2 subgoals): 1. a \<in>\<^sub>\<circ> \<D>\<^sub>\<circ> xs \<Longrightarrow> b = c 2. a = vcard xs \<Longrightarrow> b = c [PROOF STEP] moreover [PROOF STATE] proof (state) this: \<langle>a, b\<rangle> \<in>\<^sub>\<circ> xs goal (2 subgoals): 1. a \<in>\<^sub>\<circ> \<D>\<^sub>\<circ> xs \<Longrightarrow> b = c 2. a = vcard xs \<Longrightarrow> b = c [PROOF STEP] from dom ac [PROOF STATE] proof (chain) picking this: a \<in>\<^sub>\<circ> \<D>\<^sub>\<circ> xs \<langle>a, c\<rangle> \<in>\<^sub>\<circ> xs #\<^sub>\<circ> x [PROOF STEP] have "\<langle>a, c\<rangle> \<in>\<^sub>\<circ> xs" [PROOF STATE] proof (prove) using this: a \<in>\<^sub>\<circ> \<D>\<^sub>\<circ> xs \<langle>a, c\<rangle> \<in>\<^sub>\<circ> xs #\<^sub>\<circ> x goal (1 subgoal): 1. \<langle>a, c\<rangle> \<in>\<^sub>\<circ> xs [PROOF STEP] unfolding vcons_def [PROOF STATE] proof (prove) using this: a \<in>\<^sub>\<circ> \<D>\<^sub>\<circ> xs \<langle>a, c\<rangle> \<in>\<^sub>\<circ> vinsert \<langle>vcard xs, x\<rangle> xs goal (1 subgoal): 1. 
\<langle>a, c\<rangle> \<in>\<^sub>\<circ> xs [PROOF STEP] by (auto simp: \<open>vcard xs = \<D>\<^sub>\<circ> xs\<close>) [PROOF STATE] proof (state) this: \<langle>a, c\<rangle> \<in>\<^sub>\<circ> xs goal (2 subgoals): 1. a \<in>\<^sub>\<circ> \<D>\<^sub>\<circ> xs \<Longrightarrow> b = c 2. a = vcard xs \<Longrightarrow> b = c [PROOF STEP] ultimately [PROOF STATE] proof (chain) picking this: \<langle>a, b\<rangle> \<in>\<^sub>\<circ> xs \<langle>a, c\<rangle> \<in>\<^sub>\<circ> xs [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: \<langle>a, b\<rangle> \<in>\<^sub>\<circ> xs \<langle>a, c\<rangle> \<in>\<^sub>\<circ> xs goal (1 subgoal): 1. b = c [PROOF STEP] using vsv [PROOF STATE] proof (prove) using this: \<langle>a, b\<rangle> \<in>\<^sub>\<circ> xs \<langle>a, c\<rangle> \<in>\<^sub>\<circ> xs \<lbrakk>\<langle>?a, ?b\<rangle> \<in>\<^sub>\<circ> xs; \<langle>?a, ?c\<rangle> \<in>\<^sub>\<circ> xs\<rbrakk> \<Longrightarrow> ?b = ?c goal (1 subgoal): 1. b = c [PROOF STEP] by simp [PROOF STATE] proof (state) this: b = c goal (1 subgoal): 1. a = vcard xs \<Longrightarrow> b = c [PROOF STEP] next [PROOF STATE] proof (state) goal (1 subgoal): 1. a = vcard xs \<Longrightarrow> b = c [PROOF STEP] case ndom [PROOF STATE] proof (state) this: a = vcard xs goal (1 subgoal): 1. a = vcard xs \<Longrightarrow> b = c [PROOF STEP] from ab [PROOF STATE] proof (chain) picking this: \<langle>a, b\<rangle> \<in>\<^sub>\<circ> xs #\<^sub>\<circ> x [PROOF STEP] have "\<langle>a, b\<rangle> = \<langle>vcard xs, x\<rangle>" [PROOF STATE] proof (prove) using this: \<langle>a, b\<rangle> \<in>\<^sub>\<circ> xs #\<^sub>\<circ> x goal (1 subgoal): 1. \<langle>a, b\<rangle> = \<langle>vcard xs, x\<rangle> [PROOF STEP] unfolding ndom vcons_def [PROOF STATE] proof (prove) using this: \<langle>vcard xs, b\<rangle> \<in>\<^sub>\<circ> vinsert \<langle>vcard xs, x\<rangle> xs goal (1 subgoal): 1. \<langle>vcard xs, b\<rangle> = \<langle>vcard xs, x\<rangle> [PROOF STEP] using \<open>vcard xs = \<D>\<^sub>\<circ> xs\<close> mem_not_refl [PROOF STATE] proof (prove) using this: \<langle>vcard xs, b\<rangle> \<in>\<^sub>\<circ> vinsert \<langle>vcard xs, x\<rangle> xs vcard xs = \<D>\<^sub>\<circ> xs ?i \<notin>\<^sub>\<circ> ?i goal (1 subgoal): 1. \<langle>vcard xs, b\<rangle> = \<langle>vcard xs, x\<rangle> [PROOF STEP] by blast [PROOF STATE] proof (state) this: \<langle>a, b\<rangle> = \<langle>vcard xs, x\<rangle> goal (1 subgoal): 1. a = vcard xs \<Longrightarrow> b = c [PROOF STEP] moreover [PROOF STATE] proof (state) this: \<langle>a, b\<rangle> = \<langle>vcard xs, x\<rangle> goal (1 subgoal): 1. a = vcard xs \<Longrightarrow> b = c [PROOF STEP] from ac [PROOF STATE] proof (chain) picking this: \<langle>a, c\<rangle> \<in>\<^sub>\<circ> xs #\<^sub>\<circ> x [PROOF STEP] have "\<langle>a, c\<rangle> = \<langle>vcard xs, x\<rangle>" [PROOF STATE] proof (prove) using this: \<langle>a, c\<rangle> \<in>\<^sub>\<circ> xs #\<^sub>\<circ> x goal (1 subgoal): 1. \<langle>a, c\<rangle> = \<langle>vcard xs, x\<rangle> [PROOF STEP] unfolding ndom vcons_def [PROOF STATE] proof (prove) using this: \<langle>vcard xs, c\<rangle> \<in>\<^sub>\<circ> vinsert \<langle>vcard xs, x\<rangle> xs goal (1 subgoal): 1. 
\<langle>vcard xs, c\<rangle> = \<langle>vcard xs, x\<rangle> [PROOF STEP] using \<open>vcard xs = \<D>\<^sub>\<circ> xs\<close> mem_not_refl [PROOF STATE] proof (prove) using this: \<langle>vcard xs, c\<rangle> \<in>\<^sub>\<circ> vinsert \<langle>vcard xs, x\<rangle> xs vcard xs = \<D>\<^sub>\<circ> xs ?i \<notin>\<^sub>\<circ> ?i goal (1 subgoal): 1. \<langle>vcard xs, c\<rangle> = \<langle>vcard xs, x\<rangle> [PROOF STEP] by blast [PROOF STATE] proof (state) this: \<langle>a, c\<rangle> = \<langle>vcard xs, x\<rangle> goal (1 subgoal): 1. a = vcard xs \<Longrightarrow> b = c [PROOF STEP] ultimately [PROOF STATE] proof (chain) picking this: \<langle>a, b\<rangle> = \<langle>vcard xs, x\<rangle> \<langle>a, c\<rangle> = \<langle>vcard xs, x\<rangle> [PROOF STEP] show ?thesis [PROOF STATE] proof (prove) using this: \<langle>a, b\<rangle> = \<langle>vcard xs, x\<rangle> \<langle>a, c\<rangle> = \<langle>vcard xs, x\<rangle> goal (1 subgoal): 1. b = c [PROOF STEP] by simp [PROOF STATE] proof (state) this: b = c goal: No subgoals! [PROOF STEP] qed [PROOF STATE] proof (state) this: b = c goal (1 subgoal): 1. vbrelation (xs #\<^sub>\<circ> x) [PROOF STEP] next [PROOF STATE] proof (state) goal (1 subgoal): 1. vbrelation (xs #\<^sub>\<circ> x) [PROOF STEP] show "vbrelation (xs #\<^sub>\<circ> x)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. vbrelation (xs #\<^sub>\<circ> x) [PROOF STEP] unfolding vcons_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. vbrelation (vinsert \<langle>vcard xs, x\<rangle> xs) [PROOF STEP] using vbrelation_vinsertI [PROOF STATE] proof (prove) using this: vbrelation (vinsert \<langle>?a, ?b\<rangle> xs) goal (1 subgoal): 1. vbrelation (vinsert \<langle>vcard xs, x\<rangle> xs) [PROOF STEP] by auto [PROOF STATE] proof (state) this: vbrelation (xs #\<^sub>\<circ> x) goal: No subgoals! [PROOF STEP] qed [PROOF STATE] proof (state) this: vsv (xs #\<^sub>\<circ> x) goal (1 subgoal): 1. \<D>\<^sub>\<circ> (xs #\<^sub>\<circ> x) \<in>\<^sub>\<circ> \<omega> [PROOF STEP] show "\<D>\<^sub>\<circ> (xs #\<^sub>\<circ> x) \<in>\<^sub>\<circ> \<omega>" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<D>\<^sub>\<circ> (xs #\<^sub>\<circ> x) \<in>\<^sub>\<circ> \<omega> [PROOF STEP] unfolding vcons_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<D>\<^sub>\<circ> (vinsert \<langle>vcard xs, x\<rangle> xs) \<in>\<^sub>\<circ> \<omega> [PROOF STEP] using succ_in_omega [PROOF STATE] proof (prove) using this: ?n \<in>\<^sub>\<circ> \<omega> \<Longrightarrow> ZFC_in_HOL.succ ?n \<in>\<^sub>\<circ> \<omega> goal (1 subgoal): 1. \<D>\<^sub>\<circ> (vinsert \<langle>vcard xs, x\<rangle> xs) \<in>\<^sub>\<circ> \<omega> [PROOF STEP] by (auto simp: vfsequence_vdomain_in_omega succ_def \<open>vcard xs = \<D>\<^sub>\<circ> xs\<close>) [PROOF STATE] proof (state) this: \<D>\<^sub>\<circ> (xs #\<^sub>\<circ> x) \<in>\<^sub>\<circ> \<omega> goal: No subgoals! [PROOF STEP] qed
{"llama_tokens": 5299, "file": "CZH_Foundations_czh_sets_CZH_Sets_FSequences", "length": 57}
# coding=utf-8 # fft & low-pass filtering import os import numpy as np import matplotlib.pyplot as plt from scipy import signal data_dir = '/home/murphyhuang/dev/mldata/en_ch_translate_output_ut_analy/recurret_conduct' def frequency_analy(): record_storing_path = os.path.join(data_dir, 'ut_0509_recurrent_1024.npy') recurrent_record = np.load(record_storing_path) for dim_index in range(recurrent_record.shape[3]): dim_test = recurrent_record[:, 0, 2, dim_index] dim_freq = np.fft.fft(dim_test, dim_test.shape[0]) dim_freq_abs = np.abs(dim_freq) plt.plot(dim_test[:32]) plt.show() def euclidean_distance_analy(): record_storing_path = os.path.join(data_dir, 'ut_0509_recurrent_1024.npy') recurrent_record = np.squeeze(np.load(record_storing_path)) word_index = 12 recurrent_word_record = recurrent_record[:, word_index, :] word_representation_norms = np.linalg.norm(recurrent_word_record, axis=1) word_representation_subtract = np.concatenate((np.zeros((1, recurrent_word_record.shape[1])), recurrent_word_record)) word_representation_subtract = recurrent_word_record - word_representation_subtract[:-1, :] word_representation_subtract = word_representation_subtract[1:, :] word_representation_euclidean_distance = np.linalg.norm(word_representation_subtract, axis=1) word_representation_norms = word_representation_norms.T word_representation_euclidean_distance = word_representation_euclidean_distance.T ax_1 = plt.subplot(2, 1, 1) ax_1.plot(word_representation_norms[1:128]) ax_1.set_title('Norm of Word Representation through Cycles') ax_2 = plt.subplot(2, 1, 2) ax_2.plot(word_representation_euclidean_distance[1:128]) ax_2.set_title('Euclidean Distance between Two Adjacent Cycles') plt.show() def main(): euclidean_distance_analy() if __name__ == '__main__': main()
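As a side note, the zero-padding and subtraction used in euclidean_distance_analy to obtain distances between adjacent cycles can be written more compactly with np.diff. A minimal equivalent sketch, assuming recurrent_word_record has shape (num_cycles, num_dims):

adjacent_cycle_distances = np.linalg.norm(np.diff(recurrent_word_record, axis=0), axis=1)

np.diff(a, axis=0) returns a[1:] - a[:-1], i.e. the change in the word representation between consecutive cycles, and the row-wise norm gives the same Euclidean distances as above.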
{"hexsha": "4c6199258a61887768005e956e5ce64ddea5424d", "size": 1854, "ext": "py", "lang": "Python", "max_stars_repo_path": "usr_util/recurrent_dynamic_analy.py", "max_stars_repo_name": "EstelleHuang666/tensor2tensor", "max_stars_repo_head_hexsha": "7cebe23da824e87ff2fe37cb5e506f3afaca799b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "usr_util/recurrent_dynamic_analy.py", "max_issues_repo_name": "EstelleHuang666/tensor2tensor", "max_issues_repo_head_hexsha": "7cebe23da824e87ff2fe37cb5e506f3afaca799b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "usr_util/recurrent_dynamic_analy.py", "max_forks_repo_name": "EstelleHuang666/tensor2tensor", "max_forks_repo_head_hexsha": "7cebe23da824e87ff2fe37cb5e506f3afaca799b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.3333333333, "max_line_length": 119, "alphanum_fraction": 0.783171521, "include": true, "reason": "import numpy,from scipy", "num_tokens": 469}
import sys sys.path.append('/home/zankov/dev/miqsar') import os import pickle import joblib import pkg_resources import numpy as np import pandas as pd from itertools import groupby from sklearn.pipeline import Pipeline from CGRtools import RDFRead, RDFWrite from CIMtools.preprocessing import Fragmentor, CGR, EquationTransformer, SolventVectorizer from miqssr.descriptor_calculation.pmapper_3d import calc_pmapper_descriptors from miqssr.conformer_generation.gen_conformers import gen_confs fragmentor_path = pkg_resources.resource_filename(__name__, '.') os.environ['PATH'] += ':{}'.format(fragmentor_path) def read_pkl(fname): with open(fname, 'rb') as f: while True: try: yield pickle.load(f) except EOFError: break def calc_3d_pmapper(conf_files, path='.', ncpu=10): for conf in conf_files: dsc_file = calc_pmapper_descriptors(conf, path=path, ncpu=ncpu, col_clean=None, del_undef=True) with open(dsc_file, 'rb') as inp: data = joblib.load(inp) if 'mol_title' not in data.columns: data = data.reset_index() data['mol_id'] = data['mol_id'].str.lower() out_fname = dsc_file.replace('_proc.pkl', '.csv') data.to_csv(out_fname, index=False) return out_fname def calc_2d_isida(fname, path='.'): reacts = RDFRead(fname, remap=False).read() for reaction in reacts: reaction.standardize() reaction.kekule() reaction.implicify_hydrogens() reaction.thiele() reaction.clean2d() frag = Pipeline( [('CGR', CGR()), ('frg', Fragmentor(fragment_type=9, max_length=5, useformalcharge=True, version='2017.x'))]) res = frag.fit_transform(reacts) res['react_id'] = [i.meta['ID'] for i in reacts] # out_fname = os.path.join(path, '2DDescrISIDA_cgr-data_0.csv') res.to_csv(out_fname, index=False) import shutil del frag frg_files = [i for i in os.listdir() if i.startswith('frg')] for file in frg_files: if os.path.isfile(file): os.remove(file) else: shutil.rmtree(file) return out_fname def create_catalyst_input_file(input_fname=None): data = RDFRead(input_fname, remap=False).read() groups = [] for k, g in groupby(data, lambda x: x.meta['CATALYST_SMILES']): groups.append(list(g)) # smiles = [i[0].meta['CATALYST_SMILES'] for i in groups] act = [np.mean([float(i.meta['SELECTIVITY']) for i in x]) for x in groups] # res = [] ids = {} for i in range(len(smiles)): res.append({'SMILES': smiles[i], 'MOL_ID': i, 'ACT': act[i]}) ids[i] = [x.meta['ID'] for x in groups[i]] # out_fname = 'catalyst_data.smi' res = pd.DataFrame(res) res.to_csv(out_fname, index=False, header=False) return out_fname, ids def calc_descriptors(input_fname=None, nconfs=5, energy=10, ncpu=5, path='.'): cat_data_file, cat_ids = create_catalyst_input_file(input_fname) conf_files = gen_confs(cat_data_file, ncpu=ncpu, nconfs_list=[nconfs], stereo=False, energy=energy, path=path) os.remove(cat_data_file) react_out_fname = calc_2d_isida(input_fname, path=path) cat_out_fname = calc_3d_pmapper(conf_files, ncpu=ncpu, path=path) # reacts = pd.read_csv(react_out_fname, index_col='react_id') catalysts = pd.read_csv(cat_out_fname, index_col='mol_id').sort_index() # res = [] for i in catalysts.index.unique(): for j in cat_ids[i]: cat = catalysts.loc[i:i] react = pd.concat([reacts.loc[j:j]] * len(cat)) react_cat = pd.concat([react, cat.set_index(react.index)], axis=1) res.append(react_cat) out_fname = os.path.join(path, 'PhFprPmapper_concat-data_{}.csv'.format(nconfs)) pd.concat(res).to_csv(out_fname) return out_fname
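A hypothetical usage sketch of the module (the RDF file name and the output folder are placeholders, not taken from the code above): it generates catalyst conformers, computes the ISIDA reaction descriptors and the 3D pmapper catalyst descriptors, and writes the concatenated table under path.

if __name__ == '__main__':
    out_csv = calc_descriptors(input_fname='reactions.rdf', nconfs=5,
                               energy=10, ncpu=4, path='descriptors_out')
    print(out_csv)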
{"hexsha": "556f91c97cea33c54aa6ef98cd9da8ef580fe901", "size": 3887, "ext": "py", "lang": "Python", "max_stars_repo_path": "miqssr/utils.py", "max_stars_repo_name": "dzankov/3D-MIL-QSSR", "max_stars_repo_head_hexsha": "a66dd78412188d43843cb253736af63f9318d8c8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "miqssr/utils.py", "max_issues_repo_name": "dzankov/3D-MIL-QSSR", "max_issues_repo_head_hexsha": "a66dd78412188d43843cb253736af63f9318d8c8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "miqssr/utils.py", "max_forks_repo_name": "dzankov/3D-MIL-QSSR", "max_forks_repo_head_hexsha": "a66dd78412188d43843cb253736af63f9318d8c8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.3916666667, "max_line_length": 117, "alphanum_fraction": 0.6586056084, "include": true, "reason": "import numpy", "num_tokens": 1033}
%!TEX root = ../dissertation_vkslm.tex

\chapter{Handwritten Signature Verification}
\label{ch:sig}

In this chapter, we briefly review some essential concepts related to Handwritten Signature Verification, including definitions of notation and terminology used in the following chapters. First, we give an introduction and a general overview of the handwritten signature biometry; afterwards, we discuss how an Automatic Handwritten Signature Verification system works; and finally, we give a brief overview of the state of the art on offline signature synthesis based on online data.

\section{Handwritten Signature: a behavioral multimodal biometry}

The term ``Biometrics'' is derived from the Greek word ``bio-metriks'', in which ``bio'' means ``life'' and ``metrics'' means ``to measure''. Biometrics refers to the measurement and analysis of biological or behavioral characteristics peculiar to an individual. Biometric systems are a constantly growing technology \cite{jain2004biometrics} and have been introduced as forms of identification and access control. Biometric identifiers are measurable characteristics used to distinguish and describe individuals \cite{jain2000biometric}. Biometric systems are often categorized as physiological or behavioral \cite{ross2008introduction}. The physiological category is characterized by measurements of the body, such as fingerprint, face, DNA, iris/retina pattern, and body scent. On the other hand, behavioral biometrics are acquired individual traits and are related to the pattern of behavior of a person. They include typing rhythm, temperament, voice, and handwritten signatures \cite{jain2016}.

Most biometric identifiers require a special type of device for security and control of human identity. However, handwritten signature based biometric systems can be used with no sensor other than a pen and a piece of paper. According to \cite{pal2014signature}, handwritten signatures can be considered the most legally and socially accepted attributes for person identification. Moreover, the challenge that comes with signature-based authentication is the need for highly accurate results to avoid false authorization or rejection.

Handwritten signature authentication is based on signature verification systems. Whether or not a given signature belongs to the claimed person is decided by a signature verification system, which ultimately strives to learn the manner in which an individual makes use of their muscular memory (hands, fingers, and wrist) to reproduce a signature \cite{gupta1997review}.

A generic handwritten signature-based biometric system is shown in Figure \ref{fig_ahsv-overview}. Once the user {\boldm $Y$} deposits the signature, a sensor digitalizes the sample. Later, a feature matrix {\boldm $X$} is built with the information extracted from the input sample. Next, the system typically operates in two stages: enrollment {\boldm $X_{E}$} and recognition {\boldm $X_{R}$}. The former builds a system database {\boldm $D$} where the users store their reference signatures as a set of templates, whereas the latter is used to recognize, identify or verify the identity of a user, who typically claims to be one of the registered users. Then, a score {\boldm $S$} is obtained according to the similarity of the questioned sample to the claimed template. Finally, the system accepts or rejects the questioned sample.
\begin{figure}[!htb]
\centering
\includegraphics[width=\textwidth]{biometry-overview}
\caption{Overview of a typical handwritten signature based system. Figure adapted from \cite{jain2016}.}
\label{fig_ahsv-overview}
\end{figure}

As Figure \ref{fig_ahsv-overview} shows, the signature acquisition sensor can be either an optical scanner or an acquisition device such as a digitizing tablet. These two different acquisition tools characterize the two classes of signatures, namely static and dynamic. In the static modality, also referred to as offline, an optical scanner is used to obtain the signature directly from the pen on the paper, and only the digital image of the signature is available, see Figure \ref{fig:acquisition} (a). In the dynamic mode, also called online, signatures are acquired through a graphic tablet or a pen-sensitive computer display, see Figure \ref{fig:acquisition} (b). In this mode, data is stored during the writing process and consists of a temporal sequence of the two-dimensional coordinates $(x, y)$ of consecutive points.

\begin{figure}[!htpb]
\centering
\subfloat[]{\includegraphics[width=3.2in]{signature.PNG}}
\hspace*{0.5in} % separation between the subfigures
\subfloat[] {\includegraphics[width=2.7in]{stu500.jpg}}
\caption{Different signature acquisition methods. (a) a signature scanned from paper and (b) digitizing tablet Wacom STU-500 \cite{wacom2016}.}
\label{fig:acquisition}
\end{figure}

Specifically, the online modality does not convey information about the overall shape of the signature, the width of the strokes, and the texture of the ink on the paper \cite{diaz2014generation}. The offline representation, in turn, loses all dynamic information about the manner in which the signature was produced during the acquisition process. As a result, features such as pen trajectory, which can be easily computed in the online domain, can only be inferred from a static image \cite{nel2005estimating}. In Figure \ref{fig:offon} we can see a grayscale offline signature example and the respective online signature plot.

\begin{figure}[!htb]
\centering
\includegraphics[width=3.8in]{offon}
\caption{An offline and a matching online signature sample. Figure extracted from \cite{sigcomp2009}.}
\label{fig:offon}
\end{figure}

Once the signature sample is acquired, during the enrollment phase, the system tries to create the subject identity based on behavioral features in the signature. Because of the way we sign, however, this is a subtle task. The rapid movement behind signature creation is determined by a motor program stored in the brain of each signer and applied to tools such as the pen and the paper \cite{pirlo2014advances}. According to \cite{plamondon1989automatic}, a wide variety of human and social aspects might affect the way we produce our handwritten signature: it might be influenced by country, age, time, habits, and psychological or emotional state. The variability introduced in the signing process must be taken into account in the signature authentication process.

In fact, the unpredictable intra-personal variability, i.e., the dissimilarity among signatures executed by the same writer, is a crucial challenge of signature-based biometric systems. This variability can be attributed to several sources of noise ($\eta$) that distort the measured trait. According to \cite{jain2016}, and as shown in Figure \ref{fig_ahsv-overview}, the intra-personal variability affecting the measured sample {\boldm $M$} can be characterized by several variables.
These variables include sensor limitations such as resolution or sample rate; biological aging effects or cognitive-motor impairments; user interaction with the sensor; environment changes like background noise; and other factors arising as a consequence of the individual's mood, hurry, or unwillingness to cooperate. The intra-personal variability effect is illustrated in Figure \ref{fig:intraclass}.

\begin{figure}[!h]
\centering
\includegraphics[width=5.2in]{superimposed}
\caption{Superimposed genuine signatures of the same writer. A high intra-personal variability can be noticed. Extracted from \cite{hafemann2015offline}.}
\label{fig:intraclass}
\end{figure}

Another challenge faced by signature-based biometric systems is the unpredictable inter-personal variability, i.e., the similarity between signatures executed by different writers. In a signature-based system, inter-personal variability is mainly attributed to frauds perpetrated by malicious people faking the identity of signers. Figure \ref{fig_forgeries} illustrates a visual comparison between genuine signatures and forgeries.

\begin{figure}[!htb]
\centering
\includegraphics[width=\textwidth]{forgeries}
\caption[The first column of signatures are genuine references, the following three samples are questioned signatures. How many forgeries would you be able to detect? Signature images extracted from \cite{mcyt-100}.]{The first column of signatures are genuine references; the following three samples are questioned signatures. How many forgeries would you be able to detect? \protect\footnotemark Signature images extracted from \cite{mcyt-100}.}
\label{fig_forgeries}
\end{figure}

In the field of signature verification, forgeries are usually classified into two types \cite{impedovo2008state}.

\begin{itemize}
\item The first one is the random forgery, produced when an impostor who has no information about the person or the shape of the original signature tries to verify the identity of a signer using his own signature instead of the genuine one. The forger does not attempt to simulate or trace a genuine signature.
\item The second type is the skilled forgery, in which the forger practices and tries to imitate as closely as possible the static and dynamic information of the genuine signature model. The forger has access to both the user's name and signature and tries to reproduce it with a similar intra-class variability.
\end{itemize}

\section{Automatic Handwritten Signature Verification}

An Automatic Handwritten Signature Verification System (AHSVS) is conceptually a pattern recognition application. Pattern recognition is one of the most important and active fields of research. During the past few decades, there has been a considerable growth of interest in pattern recognition problems, and in the last few years many methods have been developed in this area, in particular for handwriting recognition and signature verification \cite{book}.

Like any pattern recognition system, an AHSVS has three phases: \begin{inlinelist} \item data acquisition and pre-processing \item feature extraction \item classification \end{inlinelist} \cite{impedovo2008state}. In the first step, the signatures are acquired and preprocessed; the main goal here is to convert them into a format suitable for the modeling process, correcting geometric distortions and removing noise related to the signature acquisition sensor. Afterwards, features are extracted and stored in a knowledge database.
In the classification step, the extracted features are used to distinguish between genuine and forged signatures. The signature verification task is therefore, in essence, a binary classification problem, in which the system's prediction for the input signature sample is either genuine or fraudulent.

Verification errors occurring in an AHSVS are usually categorized into two types \cite{fairhurst1997signature}. On the one hand, a genuine signer may be rejected by the system as a potential impostor (e.g., when the signer carelessly executes his/her signature), resulting in what is denoted a Type-1 error or False Rejection. On the other hand, a skilled forger might be able to produce a sample which would be accepted as genuine, resulting in what is called a Type-2 error or False Acceptance.

The performance of signature verification systems can be improved by increasing the number of samples in the training dataset. However, the amount of data available for each user is often insufficient in real applications. Even if there is a significant number of users enrolled in the system, a classifier needs to perform well for a new user, for whom only a small set of samples is available.

\section{Off-line Signature Synthesis Using On-line Samples}

According to \cite{guest2013assessment}, although online signature samples can be effectively stored as a time series for use in any form of AHSVS, there may be situations where the signature needs to be represented as an image. One possible scenario is human visualization. For instance, in the context of a legal document, it may be required to have an image-based representation of a signature that was captured in a biometric system and stored as a time series. There may also be a case where the modality of the signature used for training the user model differs from the signature domain that is being questioned. Although the test sample is a genuine signature, it might not be possible to prove this with either an offline or an online signature verification system alone. Hence, converting either the training or the questioned data into an image reproducing the original static signature would be useful for the development of an integrated version of offline and online signature verification systems, overcoming the dynamic vs. static dichotomy and unifying the signature biometry \cite{chapter}.

The works proposed in the literature to generate signature images from online signatures generally apply different transforms to the dynamic information, taking into account the kinematics and the timing order in which the traces were registered; the samples of the new specimen are then interpolated in order to create new images.

The accuracy of a series of interpolation methods for recreating a signature image from the online dynamic data was investigated in \cite{guest2013assessment}. They experimented with what they refer to as linear interpolation, nearest-neighbor interpolation, cubic spline interpolation, and piecewise cubic Hermite interpolation. They also experimented with several interpolated line widths, varying according to the pen pressure/force. In that work, the authors measured the performance of the synthetic samples using three statistics: the mean Euclidean distance between the synthetic sample and the real one, the maximum distance in pixels between the two images, and the percentage of pixels in the recreated image.
They found that the linear technique with variable width produced the best accuracy, with a mean pixel distance of 3.047, a percent direct match of 67.147, and a maximum distance of 27.416.

In \cite{ferrer2013realistic}, another method to generate synthetic offline signature databases starting from synthetic dynamic data is described. They convert the dynamic information to offline data using what they refer to as an ink deposition model. In short, they first create an 8-connected sequence from the online signature, and then model the pen ballpoint with a 2D Gaussian function.

In \cite{diaz2014generation}, this approach was used to generate synthetic samples based on real online data. They proposed an experimental protocol to assess whether the synthetic signatures are close to the real ones and to measure whether it would be feasible to use these synthetic signatures to enlarge the training data of offline signature verification systems. They observed that the synthetic samples perform close to real signatures; however, when used to synthetically enlarge the training dataset, the synthetic samples achieved 18.70\% EER, against 17.68\% for the real samples, in the skilled forgery scenario. Moreover, in \cite{galbally2015line} they also experimented with their synthesis approach to improve the performance of dynamic signature verifiers.
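To make the rendering strategy discussed above concrete, the following is a minimal sketch (written in Python with matplotlib; the array names, the pressure-to-width mapping, and the pen-up convention are illustrative assumptions, not the exact procedure of the cited works) of drawing an online signature, given as coordinate and pressure sequences, as a static image with linearly interpolated, pressure-modulated strokes.

import numpy as np
import matplotlib.pyplot as plt

def render_offline(x, y, p, width_scale=3.0, fname='synthetic_signature.png'):
    """Draw an online signature (x, y, pen pressure p in [0, 1]) as a static image."""
    fig, ax = plt.subplots(figsize=(4, 2))
    for i in range(len(x) - 1):
        # linear interpolation between consecutive samples, with the stroke
        # width modulated by the local pen pressure
        if p[i] > 0:  # assumption: zero pressure is treated as a pen-up movement
            ax.plot(x[i:i + 2], y[i:i + 2], color='k',
                    linewidth=0.5 + width_scale * p[i],
                    solid_capstyle='round')
    ax.axis('off')
    fig.savefig(fname, dpi=300, bbox_inches='tight')
    plt.close(fig)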
{"hexsha": "86b146553e08848cd7e4039c36d0cfe95ffc1a64", "size": 15269, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "conteudo/ch3.tex", "max_stars_repo_name": "victormelo/dissertation", "max_stars_repo_head_hexsha": "942bd6e57796d760e152dbfcc31745950dc3fd32", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-19T15:39:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-19T15:39:48.000Z", "max_issues_repo_path": "conteudo/ch3.tex", "max_issues_repo_name": "victormelo/dissertation", "max_issues_repo_head_hexsha": "942bd6e57796d760e152dbfcc31745950dc3fd32", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "conteudo/ch3.tex", "max_forks_repo_name": "victormelo/dissertation", "max_forks_repo_head_hexsha": "942bd6e57796d760e152dbfcc31745950dc3fd32", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-02-19T15:39:50.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-19T15:39:50.000Z", "avg_line_length": 131.6293103448, "max_line_length": 1182, "alphanum_fraction": 0.8143296876, "num_tokens": 3128}
! ! CalculiX - A 3-dimensional finite element program ! Copyright (C) 1998-2020 Guido Dhondt ! ! This program is free software; you can redistribute it and/or ! modify it under the terms of the GNU General Public License as ! published by the Free Software Foundation(version 2); ! ! ! This program is distributed in the hope that it will be useful, ! but WITHOUT ANY WARRANTY; without even the implied warranty of ! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ! GNU General Public License for more details. ! ! You should have received a copy of the GNU General Public License ! along with this program; if not, write to the Free Software ! Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ! subroutine calcspringforc(imat,elcon,nelcon,ncmat_,ntmat_,t1l, & kode,plicon,nplicon,npmat_,senergy,nener,fk,val) ! ! calculates the spring forc and the spring energy (node-to-face penalty) ! implicit none ! integer i,imat,ncmat_,ntmat_,kode,niso,id,nplicon(0:ntmat_,*), & npmat_,nelcon(2,*),nener ! real*8 t1l,elcon(0:ncmat_,ntmat_,*),elconloc(21),plconloc(802), & xk,fk,val,xiso(200),yiso(200),plicon(0:2*npmat_,ntmat_,*), & senergy ! ! ! ! interpolating the material data ! call materialdata_sp(elcon,nelcon,imat,ntmat_,i,t1l, & elconloc,kode,plicon,nplicon,npmat_,plconloc,ncmat_) ! ! calculating the spring force and the spring constant ! if(kode.eq.2)then xk=elconloc(1) fk=xk*val if(nener.eq.1) then senergy=fk*val/2.d0 endif else niso=int(plconloc(801)) do i=1,niso xiso(i)=plconloc(2*i-1) yiso(i)=plconloc(2*i) enddo call ident(xiso,val,niso,id) if(id.eq.0) then xk=0.d0 fk=yiso(1) if(nener.eq.1) then senergy=fk*val endif elseif(id.eq.niso) then xk=0.d0 fk=yiso(niso) if(nener.eq.1) then senergy=yiso(1)*xiso(1) do i=2,niso senergy=senergy+(xiso(i)-xiso(i-1))* & (yiso(i)+yiso(i-1))/2.d0 enddo senergy=senergy+(val-xiso(niso))*yiso(niso) endif else xk=(yiso(id+1)-yiso(id))/(xiso(id+1)-xiso(id)) fk=yiso(id)+xk*(val-xiso(id)) if(nener.eq.1) then senergy=yiso(1)*xiso(1) do i=2, id senergy=senergy+(xiso(i)-xiso(i-1))* & (yiso(i)+yiso(i-1))/2.d0 enddo senergy=senergy+(val-xiso(id))*(fk+yiso(id))/2.d0 endif endif endif ! return end
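For readers less familiar with Fortran, the tabulated (piecewise-linear) branch above can be summarised by the following sketch. It is written in Python with simplified names, mirrors the interval search performed by ident and the trapezoidal accumulation of the spring energy, and is only an illustration of the logic, not part of CalculiX.

import bisect

def spring_force_energy(xiso, yiso, val):
    """Force and stored energy of a piecewise-linear spring defined by the
    elongation/force table (xiso, yiso), evaluated at elongation val."""
    n = len(xiso)
    idx = bisect.bisect_right(xiso, val)   # number of table points <= val
    if idx == 0:
        # below the table: constant first force
        fk = yiso[0]
        energy = fk * val
    elif idx == n:
        # above the table: constant last force, full trapezoidal integral
        fk = yiso[-1]
        energy = yiso[0] * xiso[0]
        for i in range(1, n):
            energy += (xiso[i] - xiso[i - 1]) * (yiso[i] + yiso[i - 1]) / 2.0
        energy += (val - xiso[-1]) * yiso[-1]
    else:
        # inside the table: linear interpolation in the current interval
        xk = (yiso[idx] - yiso[idx - 1]) / (xiso[idx] - xiso[idx - 1])
        fk = yiso[idx - 1] + xk * (val - xiso[idx - 1])
        energy = yiso[0] * xiso[0]
        for i in range(1, idx):
            energy += (xiso[i] - xiso[i - 1]) * (yiso[i] + yiso[i - 1]) / 2.0
        energy += (val - xiso[idx - 1]) * (fk + yiso[idx - 1]) / 2.0
    return fk, energy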
{"hexsha": "ddd733a4afc3f007e81fc818d0144ef0d929adb6", "size": 2859, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "ccx_prool/CalculiX/ccx_2.17/src/calcspringforc.f", "max_stars_repo_name": "alleindrach/calculix-desktop", "max_stars_repo_head_hexsha": "2cb2c434b536eb668ff88bdf82538d22f4f0f711", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ccx_prool/CalculiX/ccx_2.17/src/calcspringforc.f", "max_issues_repo_name": "alleindrach/calculix-desktop", "max_issues_repo_head_hexsha": "2cb2c434b536eb668ff88bdf82538d22f4f0f711", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ccx_prool/CalculiX/ccx_2.17/src/calcspringforc.f", "max_forks_repo_name": "alleindrach/calculix-desktop", "max_forks_repo_head_hexsha": "2cb2c434b536eb668ff88bdf82538d22f4f0f711", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.1235955056, "max_line_length": 77, "alphanum_fraction": 0.555089192, "num_tokens": 863}
# -*- coding: utf-8 -*- """ The following module provides the framework for setting parameter objects - both parameters to be used in the model itself, as well as references to evidence that can subsequently be used to determine these parameter distributions. Created on Thu Nov 12 13:45:23 2015 @author: JTrauer """ import os import sys import numpy from scipy.stats import beta, gamma, norm, truncnorm import matplotlib.pyplot as pyplot class AllEvidence(type): def __iter__(evidencepiece): return iter(evidencepiece.evidence_register) class Evidence: """ Object to summarise evidence for use in parameter estimation """ __metaclass__ = AllEvidence evidence_register = [] def __init__(self, source, parameter, point_estimate, confidence_interval, evidence_fullname, explanation_evidence, reference): self.evidence_register.append(self) self.estimate = point_estimate self.interval = confidence_interval self.name = source self.fullname = evidence_fullname self.reference = reference self.explanation = explanation_evidence if len(confidence_interval) == 2: self.interval_text = ('(' + str(self.interval[0]) + ' - ' + str(self.interval[1]) + ')') elif len(confidence_interval) < 2: self.interval_text = 'No confidence interval available from study' self.text = {'Title': self.fullname, 'Reference': self.reference, 'Point estimate': str(self.estimate), 'Confidence interval': self.interval_text, 'Explanation': self.explanation} self.attributes_ordered = ['Title', 'Point estimate', 'Confidence interval', 'Explanation'] def open_pdf(self): current_dir = os.path.dirname(__file__) location = os.path.join(current_dir, '..', 'evidence', self.name + '.pdf') os.startfile(location) #______________________________________________________________________________ class AllParameters(type): def __iter__(parameterinstance): return iter(parameterinstance.parameter_register) class Parameter: """" Initialises parameters with distributions prior to model runs """ __metaclass__ = AllParameters parameter_register = [] def __init__(self, name, parameter_name, parameter_type, distribution, prior_estimate, spread, limits, model_implementation): self.parameter_register.append(self) self.name = name self.parameter_name = parameter_name self.parameter_type = parameter_type self.model_implementation = model_implementation available_types = ['proportion', 'rate', 'timeperiod', 'multiplier'] assert self.parameter_type in available_types if self.parameter_type == 'proportion': assert len(self.model_implementation) == 2 self.implementation_description = ('Numerator is ' + str(self.model_implementation[0]) + ' and denominator is ' + str(self.model_implementation[1]) + '\n\n') elif self.parameter_type == 'rate': assert len(self.model_implementation) == 2 self.implementation_description = ('From compartment ' + self.model_implementation[0] + ' to compartment ' + self.model_implementation[1] + '\n\n') elif self.parameter_type == 'timeperiod': assert len(self.model_implementation) == 1 self.implementation_description = ('Time spent in ' + self.model_implementation[0]) elif self.parameter_type == 'multiplier': assert len(self.model_implementation) == 1 self.implementation_description = ('Parameter to be multiplied is ' + self.model_implementation[0]) self.distribution = distribution available_distributions = ['beta_symmetric_params2', 'beta_full_range', 'gamma', 'normal_unlimited', 'normal_positive', 'normal_truncated'] assert distribution in available_distributions, \ 'Distribution not available' self.prior_estimate = prior_estimate if 
len(spread) == 0: if self.parameter_type == 'proportion': if self.prior_estimate < 0.5: self.spread = prior_estimate / 2. else: self.spread = (1 - prior_estimate) / 2. else: self.spread = prior_estimate / 2. elif len(spread) == 1: self.spread = spread[0] elif len(spread) == 2: self.spread = (spread[1] - spread[0]) / 4. self.limits = limits assert len(self.limits) <= 2, 'Too many limits provided' if len(self.limits) == 0: self.limit_text = 'No additional limits applied' elif len(self.limits) == 1: self.limit_text = ('One additional limit set at ' + str(self.limits[0])) elif len(self.limits) == 2: self.limit_text = ('Two additional limits set at ' + str(self.limits[0]) + ' and ' + str(self.limits[1])) self.text = {'Title': self.parameter_name, 'Type': self.parameter_type, 'Estimate': str(self.prior_estimate), 'Spread': str(self.spread), 'Limits': str(self.limit_text), 'Implementation': str(self.implementation_description), 'Distribution': str(self.distribution)} self.attributes_ordered = ['Title', 'Type', 'Distribution', 'Estimate', 'Spread', 'Limits', 'Implementation'] if self.distribution == 'beta_symmetric_params2': self.xvalues = numpy.arange(0., 1., 1e-3) self.x_max_forgraph = self.prior_estimate * 2. elif self.distribution == 'beta_full_range': self.xvalues = numpy.arange(0., 1., 1e-3) self.x_max_forgraph = 1. elif self.distribution == 'gamma': self.xvalues = numpy.arange(0., self.prior_estimate * 3., self.prior_estimate / 1e2) self.x_max_forgraph = self.prior_estimate * 3. elif self.distribution == 'normal_unlimited': self.xvalues = numpy.arange(0., self.prior_estimate * 5, 1e-3) self.x_max_forgraph = self.prior_estimate * 2. elif self.distribution == 'normal_positive': self.xvalues = numpy.arange(0., self.prior_estimate * 5, 1e-3) self.x_max_forgraph = self.prior_estimate * 2. elif self.distribution == 'normal_truncated': assert isinstance(self.limits, list), 'List of two limits required' assert len(self.limits) == 2, 'List of two limits required' self.xvalues = numpy.arange(0., self.prior_estimate * 5, 1e-3) self.x_max_forgraph = self.prior_estimate * 2. 
def calculate_prior(self): self.initiate_distributions() if self.distribution == 'beta_symmetric_params2': self.beta_symmetric_params2() elif self.distribution == 'beta_full_range': self.beta_full_range() elif self.distribution == 'gamma': self.gamma() elif self.distribution == 'normal_unlimited': self.normal_unlimited() elif self.distribution == 'normal_positive': self.normal_positive() elif self.distribution == 'normal_truncated': self.normal_truncated() def initiate_distributions(self): self.prior_pdf = [0] * len(self.xvalues) self.prior_cdf = [0] * len(self.xvalues) def beta_symmetric_params2(self): assert self.prior_estimate > self.spread and (1 - self.prior_estimate) > self.spread, \ 'Values outside the range of zero to one will result from entered spread and ' + \ 'prior estimate' self.distribution_description = ('Symmetric beta distribution with ' + 'alpha parameter = beta parameter = 2') self.lower_limit = self.prior_estimate - self.spread self.upper_limit = self.prior_estimate + self.spread self.beta_param_alpha = 2 self.beta_param_beta = 2 for i in range(len(self.xvalues)): self.prior_pdf[i] \ = self.beta_symmetric_params2_pdf(self.xvalues[i]) self.prior_cdf[i] \ = self.beta_symmetric_params2_cdf(self.xvalues[i]) def beta_symmetric_params2_pdf(self, xvalue): transformed_value = (xvalue - self.lower_limit) \ / (self.upper_limit - self.lower_limit) if transformed_value > 0 and transformed_value < 1: beta_symmetric_params2_pdf = beta.pdf(transformed_value, self.beta_param_alpha, self.beta_param_beta) else: beta_symmetric_params2_pdf = 0 return beta_symmetric_params2_pdf def beta_symmetric_params2_cdf(self, xvalue): transformed_value = (xvalue - self.lower_limit) \ / (self.upper_limit - self.lower_limit) if transformed_value > 0 and transformed_value < 1: beta_symmetric_params2_cdf = beta.cdf(transformed_value, self.beta_param_alpha, self.beta_param_beta) elif transformed_value >= 1: beta_symmetric_params2_cdf = 1 else: beta_symmetric_params2_cdf = 0 return beta_symmetric_params2_cdf def beta_full_range(self): self.distribution_description = ('Beta distribution with parameters ' + 'determined from expectation and spread values') self.lower_limit = 0 self.upper_limit = 1 self.beta_param_alpha \ = - self.prior_estimate \ * (self.spread ** 2 + self.prior_estimate ** 2 - self.prior_estimate) \ / (self.spread ** 2) self.beta_param_beta \ = (self.spread ** 2 + self.prior_estimate ** 2 - self.prior_estimate) \ * (self.prior_estimate - 1) / (self.spread ** 2) for i in range(len(self.xvalues)): self.prior_pdf[i] = self.beta_full_range_pdf(self.xvalues[i]) self.prior_cdf[i] = self.beta_full_range_cdf(self.xvalues[i]) def beta_full_range_pdf(self, xvalue): return beta.pdf(xvalue, self.beta_param_alpha, self.beta_param_beta) def beta_full_range_cdf(self, xvalue): return beta.cdf(xvalue, self.beta_param_alpha, self.beta_param_beta) def beta_full_range_ppf(self, xvalue): return beta.ppf(xvalue, self.beta_param_alpha, self.beta_param_beta) def gamma(self): self.distribution_description = ('Gamma distribution with parameters ' + 'determined from expectation and spread values') self.lower_limit = 0 self.upper_limit = 'No upper limit' self.gamma_shape \ = self.prior_estimate ** 2 / self.spread ** 2 # Where self.spread is the standard deviation self.gamma_scale \ = self.spread ** 2 / self.prior_estimate for i in range(len(self.xvalues)): self.prior_pdf[i] = self.gamma_pdf(self.xvalues[i]) self.prior_cdf[i] = self.gamma_cdf(self.xvalues[i]) def gamma_pdf(self, xvalue): return gamma.pdf(xvalue, 
self.gamma_shape, 0, self.gamma_scale) def gamma_cdf(self, xvalue): return gamma.cdf(xvalue, self.gamma_shape, 0, self.gamma_scale) def gamma_ppf(self, xvalue): return gamma.ppf(xvalue, self.gamma_shape, 0, self.gamma_scale) def normal_unlimited(self): self.distribution_description = ('Normal distribution (not ' + 'truncated)') self.lower_limit = 'No lower limit' self.upper_limit = 'No upper limit' for i in range(len(self.xvalues)): self.prior_ppf[i] = self.normal_unlimited_pdf(self.xvalues[i]) self.prior_cdf[i] = self.normal_unlimited_cdf(self.xvalues[i]) def normal_unlimited_pdf(self, xvalue): return norm.pdf(xvalue, self.prior_estimate, self.spread) def normal_unlimited_cdf(self, xvalue): return norm.cdf(xvalue, self.prior_estimate, self.spread) def normal_unlimited_ppf(self, xvalue): return norm.ppf(xvalue, self.prior_estimate, self.spread) def normal_positive(self): self.distribution_description = ('Normal distribution truncated ' + 'at zero only') self.lower_limit = 0 self.upper_limit = 'No upper limit' for i in range(len(self.xvalues)): self.prior_pdf[i] = self.normal_positive_pdf(self.xvalues[i]) self.prior_cdf[i] = self.normal_positive_cdf(self.xvalues[i]) def normal_positive_pdf(self, xvalue): return truncnorm.pdf(xvalue, - self.prior_estimate / self.spread, 1e10, loc=self.prior_estimate, scale=self.spread) def normal_positive_cdf(self, xvalue): return truncnorm.cdf(xvalue, - self.prior_estimate / self.spread, 1e10, loc=self.prior_estimate, scale=self.spread) def normal_truncated(self): self.distribution_description = ('Normal distribution truncated at ' + 'defined points') self.lower_limit = self.limits[0] self.upper_limit = self.limits[1] for i in range(len(self.xvalues)): self.prior_pdf[i] = self.normal_truncated_pdf(self.xvalues[i]) self.prior_cdf[i] = self.normal_truncated_cdf(self.xvalues[i]) def normal_truncated_pdf(self, xvalue): return truncnorm.pdf(xvalue, (self.lower_limit - self.prior_estimate) / self.spread, (self.upper_limit - self.prior_estimate) / self.spread, loc=self.prior_estimate, scale=self.spread) def normal_truncated_cdf(self, xvalue): return truncnorm.cdf(xvalue, (self.lower_limit - self.prior_estimate) / self.spread, (self.upper_limit - self.prior_estimate) / self.spread, loc=self.prior_estimate, scale=self.spread) def normal_truncated_ppf(self, xvalue): return truncnorm.ppf(xvalue, (self.lower_limit - self.prior_estimate) / self.spread, (self.upper_limit - self.prior_estimate) / self.spread, loc=self.prior_estimate, scale=self.spread) def pdf(self, xvalue): self.calculate_prior() if self.distribution == 'gamma': return self.gamma_pdf(xvalue) elif self.distribution == 'beta_full_range': return self.beta_full_range_pdf(xvalue) elif self.distribution == 'beta_symmetric_params2': return self.beta_symmetric_params2_pdf(xvalue) elif self.distribution == 'normal_unlimited': return self.normal_unlimited_pdf(xvalue) elif self.distribution == 'normal_positive': return self.normal_positive_pdf(xvalue) elif self.distribution == 'normal_truncated': return self.normal_truncated_pdf(xvalue) def cdf(self, xvalue): self.calculate_prior() if self.distribution == 'gamma': return self.gamma_cdf(xvalue) elif self.distribution == 'beta_full_range': return self.beta_full_range_cdf(xvalue) elif self.distribution == 'beta_symmetric_params2': return self.beta_symmetric_params2_cdf(xvalue) elif self.distribution == 'normal_unlimited': return self.normal_unlimited_cdf(xvalue) elif self.distribution == 'normal_positive': return self.normal_positive_cdf(xvalue) elif self.distribution 
== 'normal_truncated': return self.normal_truncated_cdf(xvalue) def ppf(self, xvalue): self.calculate_prior() if self.distribution == 'gamma': return self.gamma_ppf(xvalue) elif self.distribution == 'beta_full_range': return self.beta_full_range_ppf(xvalue) elif self.distribution == 'beta_symmetric_params2': return self.beta_symmetric_params2_ppf(xvalue) elif self.distribution == 'normal_unlimited': return self.normal_unlimited_ppf(xvalue) elif self.distribution == 'normal_positive': return self.normal_positive_ppf(xvalue) elif self.distribution == 'normal_truncated': return self.normal_truncated_ppf(xvalue) def graph_prior(self): self.calculate_prior() pyplot.plot(self.xvalues, self.prior_pdf, 'r-', label='PDF') pyplot.plot(self.xvalues, self.prior_cdf, 'b-', label='CDF') pyplot.xlim(0., self.x_max_forgraph) pyplot.xlabel('Parameter value') pyplot.ylabel('Probability density') pyplot.legend() module_dir = os.path.dirname(__file__) os.chdir(os.path.join(module_dir, '..', 'graphs')) pyplot.savefig((self.name + '.jpg')) pyplot.close()
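A minimal usage sketch for the Parameter class above. The import path, parameter names and numeric values are hypothetical, chosen only to exercise the symmetric-beta and gamma branches; graph_prior additionally assumes a ../graphs directory exists next to the module.

# Hypothetical usage sketch; the import path and all values are illustrative only.
from parameter import Parameter   # assumption: this module is importable as `parameter`

# A proportion parameter with a symmetric beta prior centred on 0.7 +/- 0.1.
prop_detect = Parameter(
    name='prop_detect',
    parameter_name='Proportion of cases detected',
    parameter_type='proportion',
    distribution='beta_symmetric_params2',
    prior_estimate=0.7,
    spread=[0.1],
    limits=[],
    model_implementation=['detected cases', 'all incident cases'])

# A time-period parameter with a gamma prior determined by its mean and spread.
time_active = Parameter(
    name='time_active',
    parameter_name='Time spent actively infectious',
    parameter_type='timeperiod',
    distribution='gamma',
    prior_estimate=3.,
    spread=[1.],
    limits=[],
    model_implementation=['active disease compartment'])

for param in (prop_detect, time_active):
    param.calculate_prior()
    print(param.name, param.distribution_description, param.pdf(0.5))
    param.graph_prior()   # assumes a ../graphs directory exists next to the module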
{"hexsha": "ed1d9a8a64b69fc2cfeb2708c0bf02c5681a23b2", "size": 17846, "ext": "py", "lang": "Python", "max_stars_repo_path": "autumn/settings/parameter.py", "max_stars_repo_name": "monash-emu/Legacy-AuTuMN", "max_stars_repo_head_hexsha": "513bc14b4ea8c29c5983cc90fb94284e6a003515", "max_stars_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "autumn/settings/parameter.py", "max_issues_repo_name": "monash-emu/Legacy-AuTuMN", "max_issues_repo_head_hexsha": "513bc14b4ea8c29c5983cc90fb94284e6a003515", "max_issues_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "autumn/settings/parameter.py", "max_forks_repo_name": "monash-emu/Legacy-AuTuMN", "max_forks_repo_head_hexsha": "513bc14b4ea8c29c5983cc90fb94284e6a003515", "max_forks_repo_licenses": ["BSD-2-Clause-FreeBSD"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.2331606218, "max_line_length": 107, "alphanum_fraction": 0.6034405469, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3767}
# -*- coding: utf-8 -*- """Functionality built on top of LocaleDB data.""" import numpy as np import psycopg2 import psycopg2.extras import sys from numpy import linalg __all__ = ['LocaleDB'] # ---------------------------------------------------------------------------------------------------------------------- class UnknownLocaleError(Exception): pass class UnknownDiseaseError: pass class ObjectStateError(Exception): pass # ---------------------------------------------------------------------------------------------------------------------- class Result(object): def __init__(self, res=None, ok=True, err_msg=None): self.ok = ok self.err_msg = err_msg self.res = res # ---------------------------------------------------------------------------------------------------------------------- class LocaleDB(object): CURSOR_NAME_PREFIX = 'localedb-py' def __init__(self, pg_host='localhost', pg_port='5433', pg_usr='postgres', pg_pwd='sa', pg_db='localedb'): self.pg_host = pg_host self.pg_port = pg_port self.pg_usr = pg_usr self.pg_pwd = pg_pwd self.pg_db = pg_db self.locale_id = None self.disease_id = None self.locale_fips = None # not None for the US only (per the main.locale table) self.conn = self._get_new_conn() self.cursor_num = -1 def _exec(self, qry, vars=None, do_get=True, itersize=2000): with self.conn.cursor() as c: if itersize > 0: c.itersize = itersize c.execute(qry, vars) if do_get: return c.fetchall() def _get_next_cursor_name(self): self.cursor_num += 1 return f'{self.CURSOR_NAME_PREFIX}-{self.cursor_num}' def _get_id(self, tbl, col='rowid', where_sql=None, where_vars=None): return self._get_num(conn, tbl, 'rowid', where_sql, where_vars) def _get_row_cnt(self, tbl, where_sql=None, where_vars=None): return self._get_num(tbl, 'COUNT(*)', where_sql, where_vars) def _get_new_conn(self, cursor_factory=psycopg2.extras.NamedTupleCursor): return psycopg2.connect(host=self.pg_host, port=self.pg_port, user=self.pg_usr, password=self.pg_pwd, database=self.pg_db, cursor_factory=cursor_factory) def _get_num(self, tbl, col, where_sql=None, where_vars=None): where_sql = '' if where_sql is None else f' WHERE {where_sql}' with self.conn.cursor() as c: c.execute(f'SELECT {col} FROM {tbl}{where_sql};', where_vars) row = c.fetchone() return row[0] if row else None def _req_disease(self): if self.disease_id is None: raise ObjectStateError('No disease has been set') def _req_locale(self, do_req_us=False): if self.locale_id is None: raise ObjectStateError('No locale has been set') if do_req_us and self.locale_iso_num != 840: raise ObjectStateError('A U.S. 
locale is required') def _set_pop_view_household(self, fips): self._exec(f""" DROP VIEW IF EXISTS pop_person_view; CREATE OR REPLACE TEMP VIEW pop_person_view AS SELECT p.* FROM pop.person AS p INNER JOIN pop.household AS h ON p.household_id = h.id WHERE h.stcotrbg LIKE '{fips}%'; """) def _set_pop_view_household_geo(self, fips, geo_tbl): return self._exec(f""" DROP VIEW IF EXISTS pop_person_view; CREATE OR REPLACE TEMP VIEW pop_person_view AS SELECT p.*, g.gid AS household_geo_id FROM pop.person AS p INNER JOIN pop.household AS h ON p.household_id = h.id INNER JOIN geo.{geo_tbl} AS g ON ST_Contains(g.geom, h.coords) WHERE h.stcotrbg LIKE '{fips}%'; """) def get_dis_dyn_norm(self, conf, dead, do_inc_delta=False): self._req_disease() and self._req_locale() # res = self.get_dis_dyn_delta(conf, dead) # if not res.ok: # return res # delta = res.res delta = self.get_dis_dyn_delta_by_day(conf, dead) # norm1 = np.sum(arr1 ** 2) # norm2 = np.sum(arr2 ** 2) # norm = np.sum((arr1 - arr2) ** 2) return { 'conf': linalg.norm(delta['conf']), 'dead': linalg.norm(delta['dead']), 'delta': None if not do_inc_delta else delta } def _get_dis_dyn_comp_stats_x(self, x, vals, day_from=1, day_to=sys.maxsize, itersize=2000): Y_obs = np.array(self._get_dis_dyn_by_day_x(x, day_from, day_to, itersize)).flatten() Y_hat = np.array(vals) if Y_obs.size != Y_hat.size: raise ValueError(f'The sizes of the observed ({Y_obs.size}) and predicted ({Y_hat.size}) time series do not match.') # Corr: corr = np.corrcoef(Y_obs, Y_hat)[0,1] if np.isnan(corr): corr = 0.0 # MAE: mae = np.absolute(Y_obs - Y_hat).mean() # RMSE: rmse = np.linalg.norm(Y_obs - Y_hat) / np.sqrt(len(Y_obs)) # SRMSE: ybar = Y_obs.mean() srmse = rmse / ybar # R2: u = np.sum((Y_hat - Y_obs)**2) v = np.sum((Y_obs - ybar)**2) r2 = 1.0 - u / v return { 'corr': corr, 'mae': mae, 'rmse': rmse, 'srmse': srmse, 'r2': r2 } def get_dis_dyn_comp_stats(self, conf, dead, day_from=1, day_to=sys.maxsize, itersize=2000): return { 'conf': self.get_dis_dyn_comp_stats_conf(conf, day_from, day_to, itersize), 'dead': self.get_dis_dyn_comp_stats_dead(dead, day_from, day_to, itersize) } def get_dis_dyn_comp_stats_conf(self, vals, day_from=1, day_to=sys.maxsize, itersize=2000): return self._get_dis_dyn_comp_stats_x('n_conf', vals, day_from, day_to, itersize) def get_dis_dyn_comp_stats_dead(self, vals, day_from=1, day_to=sys.maxsize, itersize=2000): return self._get_dis_dyn_comp_stats_x('n_dead', vals, day_from, day_to, itersize) def _get_dis_dyn_by_day_x(self, x, day_from=1, day_to=sys.maxsize, itersize=2000): self._req_disease() and self._req_locale() if day_from > day_to: raise ValueError('Incorrect day range') res = {} return np.array( self._exec( f'SELECT {x} FROM dis.dyn WHERE disease_id = %s AND locale_id = %s AND day_i BETWEEN %s AND %s ORDER BY day_i;', [self.disease_id, self.locale_id, day_from, day_to], itersize ) ) def get_dis_dyn_by_day_conf(self, day_from=1, day_to=sys.maxsize, itersize=2000): return self._get_dis_dyn_by_day_x('n_conf', day_from, day_to, itersize) def get_dis_dyn_by_day_dead(self, day_from=1, day_to=sys.maxsize, itersize=2000): return self._get_dis_dyn_by_day_x('n_dead', day_from, day_to, itersize) def get_dis_dyn_by_day(self, do_get_conf=False, do_get_dead=False, day_from=1, day_to=sys.maxsize, itersize=2000): self._req_disease() and self._req_locale() if day_from > day_to: raise ValueError('Incorrect day range') res = {} if do_get_conf: res['conf'] = np.array( self._exec( 'SELECT n_conf FROM dis.dyn WHERE disease_id = %s AND locale_id = %s AND day_i 
BETWEEN %s AND %s ORDER BY day_i;', [self.disease_id, self.locale_id, day_from, day_to], itersize ) ) if do_get_dead: res['dead'] = np.array( self._exec( 'SELECT n_dead FROM dis.dyn WHERE disease_id = %s AND locale_id = %s AND day_i BETWEEN %s AND %s ORDER BY day_i;', [self.disease_id, self.locale_id, day_from, day_to], itersize ) ) return res def get_dis_dyn_delta_by_day(self, conf=None, dead=None, day_from=1, day_to=sys.maxsize, itersize=2000): self._req_disease() and self._req_locale() if day_from > day_to: raise ValueError('Incorrect day range') res = {} if conf: conf_obs = np.array( self._exec( 'SELECT n_conf FROM dis.dyn WHERE disease_id = %s AND locale_id = %s AND day_i BETWEEN %s AND %s ORDER BY day_i;', [self.disease_id, self.locale_id, day_from, day_to], itersize ) ) if len(conf_obs) != len(conf): raise ValueError('The sizes of the confirmed cases time series provided is incongruent with the observed one; the database may not contain enough data or the date range is incorrect.') res['conf'] = conf_obs - np.ndarray(conf) if dead: dead_obs = np.array( self._exec( 'SELECT n_dead FROM dis.dyn WHERE disease_id = %s AND locale_id = %s AND day_i BETWEEN %s AND %s ORDER BY day_i;', [self.disease_id, self.locale_id, day_from, day_to], itersize ) ) if len(dead_obs) != len(dead): raise ValueError('The sizes of the dead cases time series provided is incongruent with the observed one; the database may not contain enough data or the date range is incorrect.') res['dead'] = dead_obs - np.ndarray(dead) return res def get_locale_inf(self): pass # self._check_locale() # inf = self._exec(f'SELECT iso2, iso3 FROM main.locale WHERE id = ?;', [self.locale_id])[0] # return f'{inf.iso2} {inf.iso3}' def get_geo_counties(self, st_fips): return self._exec(f"SELECT gid, statefp10, countyfp10, geoid10, name10, namelsad10 FROM geo.co WHERE statefp10 = %s ORDER BY geoid10;", [st_fips]) def get_geo_states(self): return self._exec('SELECT gid, statefp10, geoid10, stusps10, name10 FROM geo.st ORDER BY geoid10;') def get_pop_size(self): self._req_locale() return self._exec('SELECT pop FROM main.locale WHERE id = %s;', [self.locale_id])[0].pop def get_pop_size_synth(self): """Get the size of the U.S. synthetic population that is currently loaded into the database. The U.S. only restriction stems from the fact that currently no other country is covered. This method is most useful for states and counties because synthetic population data is loaded on a per state basis. Consequently, unless all the states are loaded, the entire U.S. synthetic population size will be artifically low. Returns: int: -1 for non-US locale; non-negative integer for US locales. 
""" if not self.is_locale_us(): return -1 # return self._get_row_cnt('pop_person_view') # return self._exec_get('WITH h AS (SELECT id FROM pop.household WHERE stcotrbg LIKE %s) SELECT COUNT(*) FROM pop.person p WHERE p.household_id IN (SELECT id FROM h);', [f'{self.locale_fips}%'])[0][0] # return self._exec_get('SELECT COUNT(*) FROM pop.person AS p INNER JOIN pop.household AS h ON p.household_id = h.id WHERE h.stcotrbg LIKE %s;', [f'{self.locale_fips}%'])[0][0] if self.locale_fips is None: # entire US return self._exec('SELECT COUNT(*) FROM pop.person;')[0][0] elif len(self.locale_fips) == 2: # US state return self._exec('SELECT COUNT(*) FROM pop.person p INNER JOIN pop.household h ON p.household_id = h.id INNER JOIN main.locale l ON h.st_id = l.id WHERE l.fips = %s;', [self.locale_fips])[0][0] elif len(self.locale_fips) == 5: # US county return self._exec('SELECT COUNT(*) FROM pop.person p INNER JOIN pop.household h ON p.household_id = h.id INNER JOIN main.locale l ON h.co_id = l.id WHERE l.fips = %s;', [self.locale_fips])[0][0] else: raise ValueError('Incorrect FIPS code: {self.locale_fips}') def get_synth_pop(self, cols=['age'], limit=0, itersize=2000): self._req_locale(True) limit = f'LIMIT {limit}' if limit > 0 else '' if len(self.locale_fips) == 2: # US state locale_id_col = 'st_id' elif len(self.locale_fips) == 5: # US county locale_id_col = 'co_id' return self._exec( f''' SELECT {",".join(cols)} FROM pop.person p INNER JOIN pop.household h ON p.household_id = h.id INNER JOIN main.locale l ON h.{locale_id_col} = l.id WHERE l.id = %s ORDER BY p.id {limit}; ''', [self.locale_id], itersize ) def is_locale_us(self): return self.locale_id is not None and self.locale_iso_num == 840 def set_disease(self, name): self.disease_id = self._get_num('dis.disease', 'id', 'name = %s', [name]) if self.disease_id is None: raise UnknownDiseaseError(f'Disease not found: {name}') return self def set_locale_by_name(self, admin0, admin1=None, admin2=None): with self.conn.cursor() as c: c.execute('SELECT id, iso_num, fips FROM main.locale WHERE admin0 = %s AND admin1 IS NOT DISTINCT FROM %s AND admin2 IS NOT DISTINCT FROM %s;', [admin0, admin1, admin2]) if c.rowcount == 0: raise UnknownLocaleError(f'No locale found with the following name: {admin0}, {admin1}, {admin2}') r = c.fetchone() self.locale_id = r.id self.locale_iso_num = r.iso_num self.locale_fips = r.fips # if self.locale_fips is not None: # self._set_pop_view_household(self.locale_fips) # self._set_pop_view_household_geo(self.locale_fips, 'st') return self def set_locale_by_us_fips(self, fips=None): with self.conn.cursor() as c: c.execute('SELECT id FROM main.locale WHERE iso_num = %s AND fips IS NOT DISTINCT FROM %s;', [840, fips]) if c.rowcount == 0: raise UnknownLocaleError(f'No U.S. 
locale found with the following FIPS code: {fips}') self.locale_id = c.fetchone().id self.locale_iso_num = 840 self.locale_fips = fips # self._set_pop_view_household(fips) # self._set_pop_view_household_geo(fips, 'st') return self # ---------------------------------------------------------------------------------------------------------------------- if __name__ == '__main__': import time def disp_locale_inf(db): t0 = time.perf_counter() print(f'id: {db.locale_id} iso_num: {db.locale_iso_num} fips: {db.locale_fips} pop: {db.get_pop_size()} pop-synth: {db.get_pop_size_synth()} ({time.perf_counter() - t0:.0f} s)', flush=True) def disp_locale_dis_dyn_by_day(db): conf = db.get_dis_dyn_by_day_conf() print(f"{db.locale_id}: n={conf.size}; {conf.flatten().tolist()[:48]}") def disp_synth_pop(db): c = db.get_synth_pop(['sex', 'age', 'WIDTH_BUCKET(age::INTEGER,ARRAY[18,60]) AS age_grp', 'income', 'CASE WHEN school_id IS NULL THEN 0 ELSE 1 END AS is_student', 'CASE WHEN workplace_id IS NULL THEN 0 ELSE 1 END is_worker'], limit=4) print(np.array(c).tolist()) db = LocaleDB() db.set_disease('COVID-19') # Test basic population and synthetic population queries: db.set_locale_by_name('China') ; disp_locale_inf(db) db.set_locale_by_name('Italy') ; disp_locale_inf(db) db.set_locale_by_name('US') ; disp_locale_inf(db) db.set_locale_by_name('US', 'Alaska') ; disp_locale_inf(db) db.set_locale_by_us_fips('02') ; disp_locale_inf(db) db.set_locale_by_name('US', 'Alaska', 'Anchorage') ; disp_locale_inf(db) db.set_locale_by_us_fips('02020') ; disp_locale_inf(db) db.set_locale_by_name('US', 'Pennsylvania') ; disp_locale_inf(db) db.set_locale_by_us_fips('42') ; disp_locale_inf(db) db.set_locale_by_name('US', 'Pennsylvania', 'Allegheny') ; disp_locale_inf(db) db.set_locale_by_us_fips('42003') ; disp_locale_inf(db) # Test disease dynamics queries: db.set_locale_by_name('China') ; disp_locale_dis_dyn_by_day(db) db.set_locale_by_name('Italy') ; disp_locale_dis_dyn_by_day(db) db.set_locale_by_name('US') ; disp_locale_dis_dyn_by_day(db) db.set_locale_by_name('US', 'Alaska') ; disp_locale_dis_dyn_by_day(db) db.set_locale_by_name('US', 'Alaska', 'Anchorage') ; disp_locale_dis_dyn_by_day(db) db.set_locale_by_name('US', 'Pennsylvania') ; disp_locale_dis_dyn_by_day(db) db.set_locale_by_name('US', 'Pennsylvania', 'Allegheny') ; disp_locale_dis_dyn_by_day(db) # Test disease dynamics comparison: db.set_locale_by_name('US') print(db.get_dis_dyn_comp_stats_conf([1, 1, 2, 2, 6], day_to=5)) # Test synthetic population retrieval queries: db.set_locale_by_name('US', 'Pennsylvania') ; disp_synth_pop(db) db.set_locale_by_name('US', 'Pennsylvania', 'Allegheny') ; disp_synth_pop(db) db.set_locale_by_name('US', 'Pennsylvania', 'Adams') ; disp_synth_pop(db)
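The fit statistics computed in _get_dis_dyn_comp_stats_x can be checked without a database connection; the short sketch below repeats the same formulas (Pearson correlation, MAE, RMSE, scaled RMSE and R squared) on invented observed and predicted series.

# Standalone check of the fit statistics used in _get_dis_dyn_comp_stats_x;
# the observed and predicted series below are made up for illustration.
import numpy as np

def comp_stats(y_obs, y_hat):
    y_obs = np.asarray(y_obs, dtype=float)
    y_hat = np.asarray(y_hat, dtype=float)
    if y_obs.size != y_hat.size:
        raise ValueError('Series sizes do not match.')
    corr = np.corrcoef(y_obs, y_hat)[0, 1]                        # Pearson correlation
    corr = 0.0 if np.isnan(corr) else corr
    mae = np.abs(y_obs - y_hat).mean()                            # mean absolute error
    rmse = np.linalg.norm(y_obs - y_hat) / np.sqrt(len(y_obs))    # root mean squared error
    srmse = rmse / y_obs.mean()                                   # RMSE scaled by the observed mean
    r2 = 1.0 - np.sum((y_hat - y_obs) ** 2) / np.sum((y_obs - y_obs.mean()) ** 2)
    return {'corr': corr, 'mae': mae, 'rmse': rmse, 'srmse': srmse, 'r2': r2}

print(comp_stats([1, 1, 2, 2, 6], [1, 2, 2, 3, 5]))   # invented series, illustration only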
{"hexsha": "8cd2b26ef62005a171f4d27e4e42241af3fe8512", "size": 17516, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/localedb/localedb.py", "max_stars_repo_name": "scotthaleen/localedb", "max_stars_repo_head_hexsha": "328102eaea717db63437acf6049a5df012b76cdb", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/localedb/localedb.py", "max_issues_repo_name": "scotthaleen/localedb", "max_issues_repo_head_hexsha": "328102eaea717db63437acf6049a5df012b76cdb", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/localedb/localedb.py", "max_forks_repo_name": "scotthaleen/localedb", "max_forks_repo_head_hexsha": "328102eaea717db63437acf6049a5df012b76cdb", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.1443298969, "max_line_length": 242, "alphanum_fraction": 0.5934574104, "include": true, "reason": "import numpy,from numpy", "num_tokens": 4405}
import os import json from pkg_resources import resource_filename import numpy as np from astrometry.util.fits import fits_table from mappings import petal_id_to_gfa_num class PetalMetrology(object): def __init__(self, fids, gfa_trans): self.fids = fids I = np.flatnonzero(fids.gif_num == 1) assert(len(I) == 1) self.gif1 = fids[I[0]] I = np.flatnonzero(fids.gif_num == 2) assert(len(I) == 1) self.gif2 = fids[I[0]] self.fifs = fids[fids.is_fif] G1,G2 = self.gif1, self.gif2 Ti = gfa_trans v1 = np.array([np.mean(G1.x), np.mean(G1.y)]) v2 = np.array([np.mean(G2.x), np.mean(G2.y)]) vc = (v1 + v2) / 2. dv = v2 - v1 p1 = np.array([np.mean(Ti.gif_1_mm_x), np.mean(Ti.gif_1_mm_y)]) p2 = np.array([np.mean(Ti.gif_2_mm_x), np.mean(Ti.gif_2_mm_y)]) pc = (p1 + p2) / 2. dp = p2 - p1 th1 = np.arctan2(dv[1], dv[0]) th2 = np.arctan2(dp[1], dp[0]) dth = th2 - th1 R = np.array([[np.cos(dth), np.sin(dth)],[-np.sin(dth), np.cos(dth)]]) S = np.sqrt(np.sum(dv**2)) / np.sqrt(np.sum(dp**2)) M = np.zeros((2,3), np.float32) M[:2,:2] = R * S M[:,2] = vc MI = np.zeros((2,3), np.float32) MI[:2,:2] = R.T / S MI[:,2] = pc self.pc = pc self.vc = vc self.M = M self.MI = MI for k in ['pix_x_coeffs', 'pix_y_coeffs', 'mm_x_coeffs', 'mm_y_coeffs']: setattr(self, k, gfa_trans.get(k)) self.gfa = gfa_trans # GFA CCD bounds w,h = 2048, 1032 self.ccdw, self.ccdh = w,h self.ccdbpx = np.array([0.5, 0.5, w+0.5, w+0.5, 0.5]) self.ccdbpy = np.array([0.5, h+0.5, h+0.5, 0.5, 0.5]) self.ccdbx,self.ccdby = self.gfa_pix_to_focal_mm(self.ccdbpx, self.ccdbpy) def gfa_mm_to_focal_mm(self, gfax, gfay): gfax = gfax.ravel() gfay = gfay.ravel() N = len(gfax) v = np.zeros((3,N)) v[0,:] = gfax - self.pc[0] v[1,:] = gfay - self.pc[1] v[2,:] = 1. xy = np.matmul(self.M, v) return xy[0,:], xy[1,:] def focal_mm_to_gfa_mm(self, x, y): x = x.ravel() y = y.ravel() N = len(x) v = np.zeros((3,N)) v[0,:] = x - self.vc[0] v[1,:] = y - self.vc[1] v[2,:] = 1. 
xy = np.matmul(self.MI, v) return xy[0,:], xy[1,:] def gfa_mm_to_gfa_pix(self, x, y): cox = self.pix_x_coeffs coy = self.pix_y_coeffs return (cox[0] + cox[1] * x + cox[2] * y, coy[0] + coy[1] * x + coy[2] * y) def gfa_pix_to_gfa_mm(self, x, y): cox = self.mm_x_coeffs coy = self.mm_y_coeffs return (cox[0] + cox[1] * x + cox[2] * y, coy[0] + coy[1] * x + coy[2] * y) def focal_mm_to_gfa_pix(self, x, y): gx,gy = self.focal_mm_to_gfa_mm(x, y) return self.gfa_mm_to_gfa_pix(gx, gy) def gfa_pix_to_focal_mm(self, x, y): gx,gy = self.gfa_pix_to_gfa_mm(x, y) return self.gfa_mm_to_focal_mm(gx, gy) def get_petal(petal_id): gfa_num = petal_id_to_gfa_num[petal_id] # datadir = resource_filename('desi_commish', 'data') fn = os.path.join(datadir, 'petal-metrology-json', 'petal%i.json' % petal_id) J = json.load(open(fn)) Fids = fits_table() Fids.name = [] Fids.petal_id = [] Fids.device_loc = [] Fids.xyz = np.zeros((len(J),4,3), np.float32) for i,(k,v) in enumerate(J.items()): Fids.name.append(k) Fids.petal_id.append(v['petal_id']) Fids.device_loc.append(v['device_loc']) for ipin in range(4): vv = v['pinhole%i' % (ipin+1)] Fids.xyz[i, ipin, 0] = vv['x'] Fids.xyz[i, ipin, 1] = vv['y'] Fids.xyz[i, ipin, 2] = vv['z'] Fids.to_np_arrays() Fids.x = Fids.xyz[:,:,0] Fids.y = Fids.xyz[:,:,1] Fids.z = Fids.xyz[:,:,2] ## MAGIC numbers 541,542 are from DESI-0530 table "Positioners and Fiducial Locations" Fids.gif_num = np.array([{541:1, 542:2}.get(d,0) for d in Fids.device_loc]) Fids.is_gif = np.array([d in [541, 542] for d in Fids.device_loc]) Fids.is_fif = np.logical_not(Fids.is_gif) fn = os.path.join(datadir, 'gfa-metrology-transforms.fits') T = fits_table(fn) I = np.flatnonzero(T.gfa_num == gfa_num) assert(len(I) == 1) Ti = T[I[0]] petal = PetalMetrology(Fids, Ti) return petal
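The GFA-to-focal-plane mapping built in PetalMetrology.__init__ is a two-point similarity transform: rotation, scale and translation are fixed by the two GIF fiducials. The sketch below shows the same construction on invented coordinates, written with the standard counter-clockwise rotation convention; the helper names are hypothetical and not part of the module above.

# Toy illustration of the two-point similarity fit used in PetalMetrology.__init__;
# all coordinates here are invented.
import numpy as np

def fit_two_point_similarity(p1, p2, v1, v2):
    """Return a 2x3 matrix M and the source midpoint pc mapping (p1, p2) onto (v1, v2)."""
    p1, p2, v1, v2 = map(np.asarray, (p1, p2, v1, v2))
    dp, dv = p2 - p1, v2 - v1
    dth = np.arctan2(dv[1], dv[0]) - np.arctan2(dp[1], dp[0])   # rotation angle
    scale = np.hypot(*dv) / np.hypot(*dp)                        # relative scale
    rot = np.array([[np.cos(dth), -np.sin(dth)],
                    [np.sin(dth),  np.cos(dth)]])
    M = np.zeros((2, 3))
    M[:, :2] = scale * rot
    M[:, 2] = (v1 + v2) / 2.0          # translation: source midpoint goes to target midpoint
    return M, (p1 + p2) / 2.0

def apply_similarity(M, pc, x, y):
    """Apply the fitted transform to 1-D coordinate arrays."""
    v = np.vstack([np.asarray(x) - pc[0], np.asarray(y) - pc[1], np.ones(len(x))])
    out = M @ v
    return out[0], out[1]

# 90 degree rotation, scale 2, offset to (10, 11); (1, 0) lands on v2 = (10, 12) up to rounding.
M, pc = fit_two_point_similarity([0, 0], [1, 0], [10, 10], [10, 12])
print(apply_similarity(M, pc, np.array([0.5, 1.0]), np.array([0.0, 0.0])))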
{"hexsha": "6da73accb4e60c4568cd6ba3b8139e4f054f6760", "size": 4559, "ext": "py", "lang": "Python", "max_stars_repo_path": "petal_metrology.py", "max_stars_repo_name": "dstndstn/desi-commish", "max_stars_repo_head_hexsha": "71d95c0e20a1a730dbd75bdd1731c9baace6a0ed", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "petal_metrology.py", "max_issues_repo_name": "dstndstn/desi-commish", "max_issues_repo_head_hexsha": "71d95c0e20a1a730dbd75bdd1731c9baace6a0ed", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "petal_metrology.py", "max_forks_repo_name": "dstndstn/desi-commish", "max_forks_repo_head_hexsha": "71d95c0e20a1a730dbd75bdd1731c9baace6a0ed", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.4413793103, "max_line_length": 90, "alphanum_fraction": 0.5292827374, "include": true, "reason": "import numpy", "num_tokens": 1593}
module CUTENSOR using ..APIUtils using ..CUDA using ..CUDA: CUstream, cudaDataType using ..CUDA: libcutensor, @retry_reclaim using CEnum: @cenum const cudaDataType_t = cudaDataType # core library include("libcutensor_common.jl") include("error.jl") include("libcutensor.jl") # low-level wrappers include("tensor.jl") include("wrappers.jl") # high-level integrations include("interfaces.jl") # cache for created, but unused handles const idle_handles = HandleCache{CuContext,Base.RefValue{cutensorHandle_t}}() function handle() cuda = CUDA.active_state() # every task maintains library state per device LibraryState = @NamedTuple{handle::Base.RefValue{cutensorHandle_t}} states = get!(task_local_storage(), :CUTENSOR) do Dict{CuContext,LibraryState}() end::Dict{CuContext,LibraryState} # get library state @noinline function new_state(cuda) new_handle = pop!(idle_handles, cuda.context) do handle = Ref{cutensorHandle_t}() cutensorInit(handle) handle end finalizer(current_task()) do task push!(idle_handles, cuda.context, new_handle) do # CUTENSOR doesn't need to actively destroy its handle end end (; handle=new_handle) end state = get!(states, cuda.context) do new_state(cuda) end return state.handle end end
{"hexsha": "1b92ab717a684932f320c2927a5e58f65833f7d4", "size": 1405, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "lib/cutensor/CUTENSOR.jl", "max_stars_repo_name": "eschnett/CUDA.jl", "max_stars_repo_head_hexsha": "717a0d55cdbe80cd1d135cf8710cb1263cf8829d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-27T19:05:16.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-27T19:05:16.000Z", "max_issues_repo_path": "lib/cutensor/CUTENSOR.jl", "max_issues_repo_name": "eschnett/CUDA.jl", "max_issues_repo_head_hexsha": "717a0d55cdbe80cd1d135cf8710cb1263cf8829d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/cutensor/CUTENSOR.jl", "max_forks_repo_name": "eschnett/CUDA.jl", "max_forks_repo_head_hexsha": "717a0d55cdbe80cd1d135cf8710cb1263cf8829d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-01-24T14:38:02.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-24T14:38:02.000Z", "avg_line_length": 22.6612903226, "max_line_length": 77, "alphanum_fraction": 0.6790035587, "num_tokens": 346}
"""Define trials for the experiment.""" import numpy as np def gen_trial(rng, nsamples): """Generate trials for a participant. A trial consists out of `nsamples` samples, which are digits between 1 and 9 that are of two colors - here indicated by the sign (negative or positive). Each trial contains exactly ``nsamples/2`` samples of each color. Parameters ---------- rng : np.random.Generator The random number generator object based on which to generate the trials. nsamples : int The number of digits shown per trial. Returns ------- color_samples : np.ndarray, shape(nsamples,) Samples for this trial. """ # Digits from 1 to 9 digits = np.arange(1, 10) # Half of samples are red, other half of samples are blue # all drawn from uniform distribution samples = rng.choice(digits, nsamples, replace=True) colors = rng.choice([-1, 1] * int(nsamples / 2), nsamples, replace=False) # Negative samples are red, positive samples are blue, see: get_digit_stims color_samples = samples * colors return color_samples def gen_trials(n_trials, nsamples, prop_regen=0, seed=None): """Generate multiple trials. Parameters ---------- n_trials : int The number of trials to generate. nsamples : int The number of digits shown per trial. prop_regen : float between 0 and 1 The proportion of trials to regenerate. Will select the given proportion based on trials sorted by difficulty difference between single and dual stream. This parameter defaults to 0 (do not generate any trials), but could be used to help avoid trials that are very different between single and dual stream conditions in terms of difficulty (based on expected value difference between options). seed : int | None The seed for the random number generator. Returns ------- trials : np.ndarray, shape(n_trials, nsamples) The generated trials, with nsamples samples each. """ assert prop_regen >= 0 and prop_regen <= 1, "`prop_regen` must be between 0 and 1." rng = np.random.default_rng(seed) trials = np.nan * np.zeros((n_trials, nsamples)) for itrial in range(n_trials): trials[itrial, ...] = gen_trial(rng, nsamples) # Re-generate a proportion of trials where difficulty difference # between single and dual stream tasks is highest difficulties_diffs = calc_trial_difficulty_diffs(trials) idxs_descending = np.argsort(difficulties_diffs)[::-1] n_regen = int(np.round(n_trials * prop_regen)) idxs_regen = idxs_descending[0:n_regen] for idx in idxs_regen: trials[idx, ...] = gen_trial(rng, nsamples) return trials def calc_trial_difficulty_diffs(trials): """Calculate difficulty diffference of each trial between single and dual stream. Parameters ---------- trials : np.ndarray, shape(n_trials, nsamples) The trials to calculate difficulty differences for. Returns ------- difficulties_diffs : np.ndarray, shape(n_trials,) The difficulty differences of the trials. """ midpoint = 5 difficulties_diffs = np.nan * np.zeros(trials.shape[0]) for itrial, trial in enumerate(trials): digits = np.abs(trial) colors = np.sign(trial) # Calc difficulty of single/dual task by means of "expected value difference" ev_diff_single = np.abs(midpoint - digits.mean()) ev_diff_dual = np.abs(digits[colors < 0].mean() - digits[colors > 0].mean()) difficulties_diffs[itrial] = np.abs(ev_diff_single - ev_diff_dual) return difficulties_diffs def evaluate_trial_correct(trial, choice, stream): """Evaluate whether a choice was correct for a trial, given a task type (stream). 
In case the trial does not have an objectively correct choice (ambiguous trials), the correctness will be determined randomly with a draw from a uniform distribution. If the choice is "n/a", the correctness will be "n/a" as well; but the ambiguity of the trial (True/False) will still be determined. Parameters ---------- trial : np.ndarray The samples in this trial, negative 1 to 9 and positive 1 to 9. the sign of each sample determines which of two "colors" in the stream it belonged to. Negative samples are red, positive samples are blue. choice : {"lower", "higher", "blue", "red", "n/a"} The choice the participant made. lower/higher relate to single stream, blue/red relate to dual stream. Choices that are "n/a" are due to a slow response of participants and will result in a correctness of "n/a". stream : {"single", "dual"} The task (stream) that the trial and choice are from. Returns ------- correct : bool | "n/a" Whether or not the choice in this trial was correct, given the stream. (Determined randomly if ambiguous is True) ambiguous : bool Whether or not the trial was ambiguous (not possible to objectively determine correctness). """ set_correct_na = False if choice == "n/a": # Set choice to an arbitrary but valid value to evaluate `ambiguous` variable # but set `correct` to "n/a" later set_correct_na = True choice = {"single": "lower", "dual": "red"}[stream] # arbitrary digits = np.abs(trial) # correct True/False is determined randomly for ambiguous trials ambiguous = True rng = np.random.default_rng() correct = rng.choice([True, False]) if stream == "single": assert choice in ["lower", "higher"] midpoint = 5 # Can only evaluate correctness for non-ambiguous trials if digits.mean() != midpoint: ambiguous = False correct = False correct_higher = (digits.mean() > midpoint) and (choice == "higher") correct_lower = (digits.mean() < midpoint) and (choice == "lower") if correct_higher or correct_lower: correct = True else: assert stream == "dual" assert choice in ["blue", "red"] colors = np.sign(trial) mean_red = digits[colors < 0].mean() mean_blue = digits[colors > 0].mean() # Can only evaluate correctness for non-ambiguous trials if mean_red != mean_blue: ambiguous = False correct = False correct_red = (mean_red > mean_blue) and (choice == "red") correct_blue = (mean_red < mean_blue) and (choice == "blue") if correct_red or correct_blue: correct = True if set_correct_na: correct = "n/a" return correct, ambiguous
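A hypothetical driver for the trial helpers above; the trial counts, seed and choices are arbitrary and only meant to show the calling conventions.

# Hypothetical driver; numbers, seed and choices are arbitrary.
trials = gen_trials(n_trials=5, nsamples=10, prop_regen=0.2, seed=42)
print("trial shape:", trials.shape)                        # (5, 10)
print("difficulty gaps:", calc_trial_difficulty_diffs(trials))

# Score one response per stream for the first trial.
for stream, choice in [("single", "higher"), ("dual", "red")]:
    correct, ambiguous = evaluate_trial_correct(trials[0], choice, stream)
    print(stream, choice, "correct:", correct, "ambiguous:", ambiguous)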
{"hexsha": "65ad98adcb0657be0cc077db4fd552860a6a26f6", "size": 6798, "ext": "py", "lang": "Python", "max_stars_repo_path": "ecomp_experiment/define_trials.py", "max_stars_repo_name": "sappelhoff/ecomp_experiment", "max_stars_repo_head_hexsha": "6f5ae81d6fd1fe55b876d84badc0f5bccd8ced03", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ecomp_experiment/define_trials.py", "max_issues_repo_name": "sappelhoff/ecomp_experiment", "max_issues_repo_head_hexsha": "6f5ae81d6fd1fe55b876d84badc0f5bccd8ced03", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ecomp_experiment/define_trials.py", "max_forks_repo_name": "sappelhoff/ecomp_experiment", "max_forks_repo_head_hexsha": "6f5ae81d6fd1fe55b876d84badc0f5bccd8ced03", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.1595744681, "max_line_length": 88, "alphanum_fraction": 0.6500441306, "include": true, "reason": "import numpy", "num_tokens": 1566}
import glob import os import mmcv import numpy as np from tqdm import tqdm from mmhuman3d.core.conventions.keypoints_mapping import convert_kps from mmhuman3d.data.data_structures.human_data import HumanData from .base_converter import BaseModeConverter from .builder import DATA_CONVERTERS @DATA_CONVERTERS.register_module() class PosetrackConverter(BaseModeConverter): """PoseTrack18 dataset `Posetrack: A benchmark for human pose estimation and tracking' CVPR'2018 More details can be found in the `paper <https://arxiv.org/abs/1710.10000>`_ . """ ACCEPTED_MODES = ['val', 'train'] def convert_by_mode(self, dataset_path: str, out_path: str, mode: str) -> dict: """ Args: dataset_path (str): Path to directory where raw images and annotations are stored. out_path (str): Path to directory to save preprocessed npz file mode (str): Mode in accepted modes Returns: dict: A dict containing keys image_path, bbox_xywh, keypoints2d, keypoints2d_mask stored in HumanData() format """ # use HumanData to store all data human_data = HumanData() # structs we use image_path_, bbox_xywh_, keypoints2d_ = [], [], [] # training mode ann_folder = os.path.join( dataset_path, 'posetrack_data/annotations/{}/*.json'.format(mode)) ann_files = sorted(glob.glob(ann_folder)) for ann_file in tqdm(ann_files): json_data = mmcv.load(ann_file) counter = 0 for im, ann in zip(json_data['images'], json_data['annotations']): # sample every 10 image and check image is labelled if counter % 10 != 0 and not im['is_labeled']: continue keypoints2d = np.array(ann['keypoints']).reshape(17, 3) keypoints2d[keypoints2d[:, 2] > 0, 2] = 1 # check if all major body joints are annotated if sum(keypoints2d[5:, 2] > 0) < 12: continue image_path = im['file_name'] image_abs_path = os.path.join(dataset_path, image_path) if not os.path.exists(image_abs_path): print('{} does not exist!'.format(image_abs_path)) continue counter += 1 bbox_xywh = np.array(ann['bbox']) # store data image_path_.append(image_path) keypoints2d_.append(keypoints2d) bbox_xywh_.append(bbox_xywh) # convert keypoints bbox_xywh_ = np.array(bbox_xywh_).reshape((-1, 4)) bbox_xywh_ = np.hstack([bbox_xywh_, np.ones([bbox_xywh_.shape[0], 1])]) keypoints2d_ = np.array(keypoints2d_).reshape((-1, 17, 3)) keypoints2d_, mask = convert_kps(keypoints2d_, 'posetrack', 'human_data') human_data['image_path'] = image_path_ human_data['bbox_xywh'] = bbox_xywh_ human_data['keypoints2d_mask'] = mask human_data['keypoints2d'] = keypoints2d_ human_data['config'] = 'posetrack' human_data.compress_keypoints_by_mask() # store the data struct if not os.path.isdir(out_path): os.makedirs(out_path) out_file = os.path.join(out_path, 'posetrack_{}.npz'.format(mode)) human_data.dump(out_file)
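The per-person filter in the conversion loop above keeps an annotation only when all 12 major body joints (indices 5 to 16 in the 17-keypoint PoseTrack layout) are labelled. The toy check below uses invented keypoints to show that rule in isolation.

# Toy check of the visibility filter used above; the keypoint values are invented.
import numpy as np

keypoints2d = np.zeros((17, 3))
keypoints2d[5:, :2] = np.random.rand(12, 2) * 100    # fake pixel coordinates
keypoints2d[5:, 2] = 1                               # mark the 12 body joints as visible
keypoints2d[7, 2] = 0                                # drop one annotation

keep = np.sum(keypoints2d[5:, 2] > 0) >= 12          # same rule as the converter
print(keep)   # False: one of the 12 major joints is missing, so this person is skipped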
{"hexsha": "56a3d6e725197ec22b9abcb00d4cb7c9fe7eb250", "size": 3514, "ext": "py", "lang": "Python", "max_stars_repo_path": "mmhuman3d/data/data_converters/posetrack.py", "max_stars_repo_name": "ttxskk/mmhuman3d", "max_stars_repo_head_hexsha": "f6d39e24a2d5cc216448fc3bd82832ff45eee436", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-03T04:17:52.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-03T04:17:52.000Z", "max_issues_repo_path": "mmhuman3d/data/data_converters/posetrack.py", "max_issues_repo_name": "wmj142326/mmhuman3d", "max_issues_repo_head_hexsha": "f107203714f9627a9308d4515d35ab8fbd0074a4", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mmhuman3d/data/data_converters/posetrack.py", "max_forks_repo_name": "wmj142326/mmhuman3d", "max_forks_repo_head_hexsha": "f107203714f9627a9308d4515d35ab8fbd0074a4", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.9894736842, "max_line_length": 79, "alphanum_fraction": 0.5953329539, "include": true, "reason": "import numpy", "num_tokens": 832}
\documentclass[12pt]{article}
%\usepackage{fullpage}
%\usepackage[top=1in, bottom=1in, left=1in, left=1in, right=1in]{geometry}
\usepackage[margin=1in, paperwidth=8.5in, paperheight=11in]{geometry}
\usepackage{graphicx}
\usepackage{subcaption}
\usepackage{listings}
\usepackage{color}

\definecolor{dkgreen}{rgb}{0,0.6,0}
\definecolor{gray}{rgb}{0.5,0.5,0.5}
\definecolor{mauve}{rgb}{0.58,0,0.82}

\lstset{frame=tb,
  language=Java,
  aboveskip=3mm,
  belowskip=3mm,
  showstringspaces=false,
  columns=flexible,
  basicstyle={\small\ttfamily},
  numbers=none,
  numberstyle=\tiny\color{gray},
  keywordstyle=\color{blue},
  commentstyle=\color{dkgreen},
  stringstyle=\color{mauve},
  breaklines=true,
  breakatwhitespace=true,
  tabsize=3
}

\begin{document}

\title{UT Machine Learning: HomeWork 2}
\author{Mohamad amin Katebsaber}
\date{\today}
\maketitle

\section{Problem 5 summary}

Figure \ref{fig:mean_nvp} illustrates the distribution of the NVP feature, which serves as the regression target in our problem set. According to this plot, one can deduce that most of the data falls roughly into two clusters. This observation suggests that the whole dataset can be visualised meaningfully with a PCA projection.

\begin{figure}[h!]
\centering
\begin{subfigure}[b]{0.7\linewidth}
\includegraphics[width=\linewidth]{./plots/mean_NVP.png}
\end{subfigure}
\caption{Distribution of NVP field}
\label{fig:mean_nvp}
\end{figure}

Figure \ref{fig:pca} shows the data plotted along the two computed PCA components. The plot supports our initial hypothesis that the data is clustered into two groups; consequently, the problem could also be addressed in a classification framework, but as the question demands, we continue in a regression framework. As the question states, we have to use multivariate linear regression, Ridge and LASSO. Results from applying the aforementioned algorithms (with $\alpha$ parameters of $100$ for Ridge and $0.001$ for LASSO) on the whole dataset are shown in the following block:

\begin{lstlisting}
Multivariate linear regression model
Number of used coefficients: 219
R squared Score: -2.858706063278099e+21
Mean Absolute Error: 1055112737.1771085
Mean Squared Error: 5.7555691297568686e+20
Root Mean Squared Error: 23990767244.414818

Ridge regression model
Number of used coefficients: 219
R squared test Score: 0.5793774720087499
Mean Absolute Error: 0.24082586437752515
Mean Squared Error: 0.08468593775642133
Root Mean Squared Error: 0.2910084839938886

Lasso regression model
Number of used coefficients: 63
R squared test Score: 0.6744580726645737
Mean Absolute Error: 0.18791821976331105
Mean Squared Error: 0.0655429073832366
Root Mean Squared Error: 0.25601349062742107
\end{lstlisting}

\begin{figure}[h!]
\centering
\begin{subfigure}[b]{0.7\linewidth}
\includegraphics[width=\linewidth]{./plots/c1-c2.png}
\end{subfigure}
\caption{PCA with two components over data}
\label{fig:pca}
\end{figure}

The next block of results shows the outputs of applying the algorithms to the initial 50 samples of the dataset.

\begin{lstlisting}
Multivariate linear regression model
Number of used coefficients: 125
R squared Score: -0.0865482085770819
Mean Absolute Error: 0.3661537717841558
Mean Squared Error: 0.2187599280531736
Root Mean Squared Error: 0.46771778676160436

Ridge regression model
Number of used coefficients: 120
R squared test Score: 0.09748593118386495
Mean Absolute Error: 0.39903892985285944
Mean Squared Error: 0.18170745780322944
Root Mean Squared Error: 0.42627157752215833

Lasso regression model
Number of used coefficients: 51
R squared test Score: 0.1530951544250253
Mean Absolute Error: 0.3099673082411051
Mean Squared Error: 0.17051138791944567
Root Mean Squared Error: 0.41293024582784643
\end{lstlisting}

A glance at the results (the whole dataset and the initial 50 samples) demonstrates the superiority of LASSO over Ridge and multivariate regression, since its R squared score is the highest of the three models. A closer look shows, moreover, that the LASSO model achieved this result while utilizing only approximately $40\%$ of the coefficients used by the other models (compare the number of used coefficients in the result boxes).

Adding feature products (P1*P2, P1*P3, P1*P4, etc.) as new features increased the R squared score by about $14\%$ when training on the whole dataset, as shown in the following box.

\begin{lstlisting}
Multivariate linear regression model
Number of used coefficients: 12347
R squared Score: -8.213881354439473e+18
Mean Absolute Error: 435603297.01152885
Mean Squared Error: 1.6537398708591306e+18
Root Mean Squared Error: 1285978176.6651917

Ridge regression model
Number of used coefficients: 12109
R squared test Score: 0.7152238542745143
Mean Absolute Error: 0.18155617142118535
Mean Squared Error: 0.05733533832958122
Root Mean Squared Error: 0.239447986689346

Lasso regression model
Number of used coefficients: 282
R squared test Score: 0.7982564852964698
Mean Absolute Error: 0.1384875055133704
Mean Squared Error: 0.040617983089341905
Root Mean Squared Error: 0.20153903614273316
\end{lstlisting}

Figure \ref{fig:benchmark} illustrates the overall performance of the models.

\begin{figure}[h!]
\centering
\begin{subfigure}[b]{0.4\linewidth}
\includegraphics[width=\linewidth]{./plots/actual_predicted_difference.png}
\caption{Prediction using models trained on the whole dataset}
\end{subfigure}
\begin{subfigure}[b]{0.4\linewidth}
\includegraphics[width=\linewidth]{./plots/actual_predicted_difference_50.png}
\caption{Prediction using models trained on the initial 50 records}
\end{subfigure}
\caption{Model benchmarks: the x axis is the sample identifier of the record in the dataset and the y axis shows the real and predicted values for that record. Note that NVP values are normalized between 0 and 1.}
\label{fig:benchmark}
\end{figure}

\end{document}
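A sketch of the model comparison the report describes, using scikit-learn with the stated regularisation strengths (alpha = 100 for Ridge, 0.001 for LASSO). The data below is a synthetic stand-in generated with make_regression; the real NVP feature matrix and target are not reproduced here.

# Sketch of the Ridge/Lasso comparison described in the report; X and y are a
# synthetic stand-in, not the actual NVP data.
import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from sklearn.model_selection import train_test_split

X, y = make_regression(n_samples=500, n_features=219, noise=5.0, random_state=0)  # stand-in data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

for name, model in [('OLS', LinearRegression()),
                    ('Ridge', Ridge(alpha=100)),
                    ('Lasso', Lasso(alpha=0.001))]:
    model.fit(X_train, y_train)
    pred = model.predict(X_test)
    n_used = np.sum(model.coef_ != 0)          # Lasso zeroes out most coefficients
    print(name,
          'coefficients used:', n_used,
          'R2:', r2_score(y_test, pred),
          'MAE:', mean_absolute_error(y_test, pred),
          'RMSE:', np.sqrt(mean_squared_error(y_test, pred)))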
{"hexsha": "2c0ff4c8a7a3529938fdcbc4b777eb56c7d3305d", "size": 5890, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "HW2/P5/Summary/details.tex", "max_stars_repo_name": "katebsaber96/UT-Machine-Learning-2019", "max_stars_repo_head_hexsha": "e6330266a7927c00024f2d1c862bfe52d2656ff4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "HW2/P5/Summary/details.tex", "max_issues_repo_name": "katebsaber96/UT-Machine-Learning-2019", "max_issues_repo_head_hexsha": "e6330266a7927c00024f2d1c862bfe52d2656ff4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "HW2/P5/Summary/details.tex", "max_forks_repo_name": "katebsaber96/UT-Machine-Learning-2019", "max_forks_repo_head_hexsha": "e6330266a7927c00024f2d1c862bfe52d2656ff4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.0595238095, "max_line_length": 419, "alphanum_fraction": 0.7889643463, "num_tokens": 1702}
# Copyright (c) 2012-2014, Max Zwiessele
# Licensed under the BSD 3-clause license (see LICENSE.txt)

import numpy as np


class StochasticStorage(object):
    '''
    This is a container for holding the stochastic parameters,
    such as subset indices or step length and so on.
    '''
    def __init__(self, model):
        """
        Initialize this stochastic container using the given model
        """

    def do_stochastics(self):
        """
        Update the internal state to the next batch of the
        stochastic descent algorithm.
        """
        pass

    def reset(self):
        """
        Reset the state of this stochastics generator.
        """


class SparseGPMissing(StochasticStorage):
    def __init__(self, model, batchsize=1):
        """
        Here we want to loop over all dimensions every time.
        Thus, we can just make sure the loop goes over self.d every time.
        """
        self.d = range(model.Y_normalized.shape[1])


class SparseGPStochastics(StochasticStorage):
    """
    For the sparse GP we need to store the dimension we are in,
    and the indices corresponding to those
    """
    def __init__(self, model, batchsize=1):
        self.batchsize = batchsize
        self.output_dim = model.Y.shape[1]
        self.reset()
        self.do_stochastics()

    def do_stochastics(self):
        if self.batchsize == 1:
            # Cycle deterministically through the output dimensions, one per iteration.
            self.current_dim = (self.current_dim + 1) % self.output_dim
            self.d = [self.current_dim]
        else:
            # Draw a random subset of output dimensions without replacement.
            self.d = np.random.choice(self.output_dim, size=self.batchsize, replace=False)

    def reset(self):
        self.current_dim = -1
        self.d = None
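A toy illustration of how SparseGPStochastics cycles through output dimensions (batchsize 1) or draws random dimension subsets (batchsize greater than 1). The model stub is hypothetical and only provides the Y attribute that the class reads.

# Toy illustration; the model stub is hypothetical and only carries Y's shape.
import numpy as np

class _FakeModel:
    Y = np.zeros((10, 4))    # 10 data points, 4 output dimensions

stoch = SparseGPStochastics(_FakeModel(), batchsize=1)
for _ in range(6):
    print(stoch.d)           # cycles [0], [1], [2], [3], [0], [1]
    stoch.do_stochastics()

batch = SparseGPStochastics(_FakeModel(), batchsize=2)
print(batch.d)               # two distinct dimensions drawn without replacement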
{"hexsha": "dc71d53934dc41daa27b12ee1115155fc44475e3", "size": 1685, "ext": "py", "lang": "Python", "max_stars_repo_path": "GPy/inference/optimization/stochastics.py", "max_stars_repo_name": "strongh/GPy", "max_stars_repo_head_hexsha": "775ce9e64c1e8f472083b8f2430134047d97b2fa", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2015-08-06T13:47:10.000Z", "max_stars_repo_stars_event_max_datetime": "2015-08-06T13:47:10.000Z", "max_issues_repo_path": "GPy/inference/optimization/stochastics.py", "max_issues_repo_name": "strongh/GPy", "max_issues_repo_head_hexsha": "775ce9e64c1e8f472083b8f2430134047d97b2fa", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "GPy/inference/optimization/stochastics.py", "max_forks_repo_name": "strongh/GPy", "max_forks_repo_head_hexsha": "775ce9e64c1e8f472083b8f2430134047d97b2fa", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-09T01:31:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-09T01:31:17.000Z", "avg_line_length": 29.5614035088, "max_line_length": 90, "alphanum_fraction": 0.6189910979, "include": true, "reason": "import numpy", "num_tokens": 382}
[STATEMENT] lemma steps_z_beta_complete': "A \<turnstile> \<langle>l, Z\<rangle> \<leadsto>* \<langle>l',Z'\<rangle> \<Longrightarrow> valid_abstraction A X k \<Longrightarrow> Z \<subseteq> V \<Longrightarrow> Z' \<noteq> {} \<Longrightarrow> \<exists> Z''. A \<turnstile> \<langle>l, Z\<rangle> \<leadsto>\<^sub>\<beta>* \<langle>l',Z''\<rangle> \<and> Z'' \<noteq> {}" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>A \<turnstile> \<langle>l, Z\<rangle> \<leadsto>* \<langle>l', Z'\<rangle>; valid_abstraction A X (\<lambda>x. real (k x)); Z \<subseteq> V; Z' \<noteq> {}\<rbrakk> \<Longrightarrow> \<exists>Z''. A \<turnstile> \<langle>l, Z\<rangle> \<leadsto>\<^sub>\<beta>* \<langle>l', Z''\<rangle> \<and> Z'' \<noteq> {} [PROOF STEP] using steps_z_beta_complete [PROOF STATE] proof (prove) using this: \<lbrakk>?A \<turnstile> \<langle>?l, ?Z\<rangle> \<leadsto>* \<langle>?l', ?Z'\<rangle>; valid_abstraction ?A X (\<lambda>x. real (k x)); ?Z \<subseteq> V\<rbrakk> \<Longrightarrow> \<exists>Z''. ?A \<turnstile> \<langle>?l, ?Z\<rangle> \<leadsto>\<^sub>\<beta>* \<langle>?l', Z''\<rangle> \<and> ?Z' \<subseteq> Z'' goal (1 subgoal): 1. \<lbrakk>A \<turnstile> \<langle>l, Z\<rangle> \<leadsto>* \<langle>l', Z'\<rangle>; valid_abstraction A X (\<lambda>x. real (k x)); Z \<subseteq> V; Z' \<noteq> {}\<rbrakk> \<Longrightarrow> \<exists>Z''. A \<turnstile> \<langle>l, Z\<rangle> \<leadsto>\<^sub>\<beta>* \<langle>l', Z''\<rangle> \<and> Z'' \<noteq> {} [PROOF STEP] by fast
{"llama_tokens": 597, "file": "Timed_Automata_Approx_Beta", "length": 2}
[STATEMENT] lemma fwi_len: "\<exists> ys. set ys \<subseteq> set xs \<union> {k} \<and> len (fwi m n k n n) i j xs = len m i j ys" if "i \<le> n" "j \<le> n" "k \<le> n" "m k k \<ge> 0" "set xs \<subseteq> {0..n}" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<exists>ys. set ys \<subseteq> set xs \<union> {k} \<and> len (fwi m n k n n) i j xs = len m i j ys [PROOF STEP] using that [PROOF STATE] proof (prove) using this: i \<le> n j \<le> n k \<le> n (0::'a) \<le> m k k set xs \<subseteq> {0..n} goal (1 subgoal): 1. \<exists>ys. set ys \<subseteq> set xs \<union> {k} \<and> len (fwi m n k n n) i j xs = len m i j ys [PROOF STEP] proof (induction xs arbitrary: i) [PROOF STATE] proof (state) goal (2 subgoals): 1. \<And>i. \<lbrakk>i \<le> n; j \<le> n; k \<le> n; (0::'a) \<le> m k k; set [] \<subseteq> {0..n}\<rbrakk> \<Longrightarrow> \<exists>ys. set ys \<subseteq> set [] \<union> {k} \<and> len (fwi m n k n n) i j [] = len m i j ys 2. \<And>a xs i. \<lbrakk>\<And>i. \<lbrakk>i \<le> n; j \<le> n; k \<le> n; (0::'a) \<le> m k k; set xs \<subseteq> {0..n}\<rbrakk> \<Longrightarrow> \<exists>ys. set ys \<subseteq> set xs \<union> {k} \<and> len (fwi m n k n n) i j xs = len m i j ys; i \<le> n; j \<le> n; k \<le> n; (0::'a) \<le> m k k; set (a # xs) \<subseteq> {0..n}\<rbrakk> \<Longrightarrow> \<exists>ys. set ys \<subseteq> set (a # xs) \<union> {k} \<and> len (fwi m n k n n) i j (a # xs) = len m i j ys [PROOF STEP] case Nil [PROOF STATE] proof (state) this: i \<le> n j \<le> n k \<le> n (0::'a) \<le> m k k set [] \<subseteq> {0..n} goal (2 subgoals): 1. \<And>i. \<lbrakk>i \<le> n; j \<le> n; k \<le> n; (0::'a) \<le> m k k; set [] \<subseteq> {0..n}\<rbrakk> \<Longrightarrow> \<exists>ys. set ys \<subseteq> set [] \<union> {k} \<and> len (fwi m n k n n) i j [] = len m i j ys 2. \<And>a xs i. \<lbrakk>\<And>i. \<lbrakk>i \<le> n; j \<le> n; k \<le> n; (0::'a) \<le> m k k; set xs \<subseteq> {0..n}\<rbrakk> \<Longrightarrow> \<exists>ys. set ys \<subseteq> set xs \<union> {k} \<and> len (fwi m n k n n) i j xs = len m i j ys; i \<le> n; j \<le> n; k \<le> n; (0::'a) \<le> m k k; set (a # xs) \<subseteq> {0..n}\<rbrakk> \<Longrightarrow> \<exists>ys. set ys \<subseteq> set (a # xs) \<union> {k} \<and> len (fwi m n k n n) i j (a # xs) = len m i j ys [PROOF STEP] then [PROOF STATE] proof (chain) picking this: i \<le> n j \<le> n k \<le> n (0::'a) \<le> m k k set [] \<subseteq> {0..n} [PROOF STEP] show ?case [PROOF STATE] proof (prove) using this: i \<le> n j \<le> n k \<le> n (0::'a) \<le> m k k set [] \<subseteq> {0..n} goal (1 subgoal): 1. \<exists>ys. set ys \<subseteq> set [] \<union> {k} \<and> len (fwi m n k n n) i j [] = len m i j ys [PROOF STEP] apply (simp add: fwi_step') [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>i \<le> n; j \<le> n; k \<le> n; (0::'a) \<le> m k k\<rbrakk> \<Longrightarrow> \<exists>ys. set ys \<subseteq> {k} \<and> min (m i j) (m i k + m k j) = len m i j ys [PROOF STEP] unfolding min_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>i \<le> n; j \<le> n; k \<le> n; (0::'a) \<le> m k k\<rbrakk> \<Longrightarrow> \<exists>ys. set ys \<subseteq> {k} \<and> (if m i j \<le> m i k + m k j then m i j else m i k + m k j) = len m i j ys [PROOF STEP] apply (clarsimp; safe) [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<lbrakk>i \<le> n; j \<le> n; k \<le> n; (0::'a) \<le> m k k; m i j \<le> m i k + m k j\<rbrakk> \<Longrightarrow> \<exists>ys. set ys \<subseteq> {k} \<and> m i j = len m i j ys 2. 
\<lbrakk>i \<le> n; j \<le> n; k \<le> n; (0::'a) \<le> m k k; \<not> m i j \<le> m i k + m k j\<rbrakk> \<Longrightarrow> \<exists>ys. set ys \<subseteq> {k} \<and> m i k + m k j = len m i j ys [PROOF STEP] apply (rule exI[where x = "[]"]; simp) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>i \<le> n; j \<le> n; k \<le> n; (0::'a) \<le> m k k; \<not> m i j \<le> m i k + m k j\<rbrakk> \<Longrightarrow> \<exists>ys. set ys \<subseteq> {k} \<and> m i k + m k j = len m i j ys [PROOF STEP] by (rule exI[where x = "[k]"]; simp) [PROOF STATE] proof (state) this: \<exists>ys. set ys \<subseteq> set [] \<union> {k} \<and> len (fwi m n k n n) i j [] = len m i j ys goal (1 subgoal): 1. \<And>a xs i. \<lbrakk>\<And>i. \<lbrakk>i \<le> n; j \<le> n; k \<le> n; (0::'a) \<le> m k k; set xs \<subseteq> {0..n}\<rbrakk> \<Longrightarrow> \<exists>ys. set ys \<subseteq> set xs \<union> {k} \<and> len (fwi m n k n n) i j xs = len m i j ys; i \<le> n; j \<le> n; k \<le> n; (0::'a) \<le> m k k; set (a # xs) \<subseteq> {0..n}\<rbrakk> \<Longrightarrow> \<exists>ys. set ys \<subseteq> set (a # xs) \<union> {k} \<and> len (fwi m n k n n) i j (a # xs) = len m i j ys [PROOF STEP] next [PROOF STATE] proof (state) goal (1 subgoal): 1. \<And>a xs i. \<lbrakk>\<And>i. \<lbrakk>i \<le> n; j \<le> n; k \<le> n; (0::'a) \<le> m k k; set xs \<subseteq> {0..n}\<rbrakk> \<Longrightarrow> \<exists>ys. set ys \<subseteq> set xs \<union> {k} \<and> len (fwi m n k n n) i j xs = len m i j ys; i \<le> n; j \<le> n; k \<le> n; (0::'a) \<le> m k k; set (a # xs) \<subseteq> {0..n}\<rbrakk> \<Longrightarrow> \<exists>ys. set ys \<subseteq> set (a # xs) \<union> {k} \<and> len (fwi m n k n n) i j (a # xs) = len m i j ys [PROOF STEP] case (Cons x xs) [PROOF STATE] proof (state) this: \<lbrakk>?i1 \<le> n; j \<le> n; k \<le> n; (0::'a) \<le> m k k; set xs \<subseteq> {0..n}\<rbrakk> \<Longrightarrow> \<exists>ys. set ys \<subseteq> set xs \<union> {k} \<and> len (fwi m n k n n) ?i1 j xs = len m ?i1 j ys i \<le> n j \<le> n k \<le> n (0::'a) \<le> m k k set (x # xs) \<subseteq> {0..n} goal (1 subgoal): 1. \<And>a xs i. \<lbrakk>\<And>i. \<lbrakk>i \<le> n; j \<le> n; k \<le> n; (0::'a) \<le> m k k; set xs \<subseteq> {0..n}\<rbrakk> \<Longrightarrow> \<exists>ys. set ys \<subseteq> set xs \<union> {k} \<and> len (fwi m n k n n) i j xs = len m i j ys; i \<le> n; j \<le> n; k \<le> n; (0::'a) \<le> m k k; set (a # xs) \<subseteq> {0..n}\<rbrakk> \<Longrightarrow> \<exists>ys. set ys \<subseteq> set (a # xs) \<union> {k} \<and> len (fwi m n k n n) i j (a # xs) = len m i j ys [PROOF STEP] then [PROOF STATE] proof (chain) picking this: \<lbrakk>?i1 \<le> n; j \<le> n; k \<le> n; (0::'a) \<le> m k k; set xs \<subseteq> {0..n}\<rbrakk> \<Longrightarrow> \<exists>ys. set ys \<subseteq> set xs \<union> {k} \<and> len (fwi m n k n n) ?i1 j xs = len m ?i1 j ys i \<le> n j \<le> n k \<le> n (0::'a) \<le> m k k set (x # xs) \<subseteq> {0..n} [PROOF STEP] obtain ys where "set ys \<subseteq> set xs \<union> {k}" "len (fwi m n k n n) x j xs = len m x j ys" [PROOF STATE] proof (prove) using this: \<lbrakk>?i1 \<le> n; j \<le> n; k \<le> n; (0::'a) \<le> m k k; set xs \<subseteq> {0..n}\<rbrakk> \<Longrightarrow> \<exists>ys. set ys \<subseteq> set xs \<union> {k} \<and> len (fwi m n k n n) ?i1 j xs = len m ?i1 j ys i \<le> n j \<le> n k \<le> n (0::'a) \<le> m k k set (x # xs) \<subseteq> {0..n} goal (1 subgoal): 1. (\<And>ys. 
\<lbrakk>set ys \<subseteq> set xs \<union> {k}; len (fwi m n k n n) x j xs = len m x j ys\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis [PROOF STEP] by force [PROOF STATE] proof (state) this: set ys \<subseteq> set xs \<union> {k} len (fwi m n k n n) x j xs = len m x j ys goal (1 subgoal): 1. \<And>a xs i. \<lbrakk>\<And>i. \<lbrakk>i \<le> n; j \<le> n; k \<le> n; (0::'a) \<le> m k k; set xs \<subseteq> {0..n}\<rbrakk> \<Longrightarrow> \<exists>ys. set ys \<subseteq> set xs \<union> {k} \<and> len (fwi m n k n n) i j xs = len m i j ys; i \<le> n; j \<le> n; k \<le> n; (0::'a) \<le> m k k; set (a # xs) \<subseteq> {0..n}\<rbrakk> \<Longrightarrow> \<exists>ys. set ys \<subseteq> set (a # xs) \<union> {k} \<and> len (fwi m n k n n) i j (a # xs) = len m i j ys [PROOF STEP] with Cons.prems [PROOF STATE] proof (chain) picking this: i \<le> n j \<le> n k \<le> n (0::'a) \<le> m k k set (x # xs) \<subseteq> {0..n} set ys \<subseteq> set xs \<union> {k} len (fwi m n k n n) x j xs = len m x j ys [PROOF STEP] show ?case [PROOF STATE] proof (prove) using this: i \<le> n j \<le> n k \<le> n (0::'a) \<le> m k k set (x # xs) \<subseteq> {0..n} set ys \<subseteq> set xs \<union> {k} len (fwi m n k n n) x j xs = len m x j ys goal (1 subgoal): 1. \<exists>ys. set ys \<subseteq> set (x # xs) \<union> {k} \<and> len (fwi m n k n n) i j (x # xs) = len m i j ys [PROOF STEP] apply (simp add: fwi_step') [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>i \<le> n; j \<le> n; k \<le> n; (0::'a) \<le> m k k; x \<le> n \<and> set xs \<subseteq> {0..n}; set ys \<subseteq> insert k (set xs); len (fwi m n k n n) x j xs = len m x j ys\<rbrakk> \<Longrightarrow> \<exists>ysa. set ysa \<subseteq> insert k (insert x (set xs)) \<and> min (m i x) (m i k + m k x) + len m x j ys = len m i j ysa [PROOF STEP] unfolding min_def [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>i \<le> n; j \<le> n; k \<le> n; (0::'a) \<le> m k k; x \<le> n \<and> set xs \<subseteq> {0..n}; set ys \<subseteq> insert k (set xs); len (fwi m n k n n) x j xs = len m x j ys\<rbrakk> \<Longrightarrow> \<exists>ysa. set ysa \<subseteq> insert k (insert x (set xs)) \<and> (if m i x \<le> m i k + m k x then m i x else m i k + m k x) + len m x j ys = len m i j ysa [PROOF STEP] apply (clarsimp; safe) [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<lbrakk>i \<le> n; j \<le> n; k \<le> n; (0::'a) \<le> m k k; set ys \<subseteq> insert k (set xs); len (fwi m n k n n) x j xs = len m x j ys; x \<le> n; set xs \<subseteq> {0..n}; m i x \<le> m i k + m k x\<rbrakk> \<Longrightarrow> \<exists>ysa. set ysa \<subseteq> insert k (insert x (set xs)) \<and> m i x + len m x j ys = len m i j ysa 2. \<lbrakk>i \<le> n; j \<le> n; k \<le> n; (0::'a) \<le> m k k; set ys \<subseteq> insert k (set xs); len (fwi m n k n n) x j xs = len m x j ys; x \<le> n; set xs \<subseteq> {0..n}; \<not> m i x \<le> m i k + m k x\<rbrakk> \<Longrightarrow> \<exists>ysa. set ysa \<subseteq> insert k (insert x (set xs)) \<and> m i k + m k x + len m x j ys = len m i j ysa [PROOF STEP] apply (rule exI[where x = "x # ys"]; auto; fail) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>i \<le> n; j \<le> n; k \<le> n; (0::'a) \<le> m k k; set ys \<subseteq> insert k (set xs); len (fwi m n k n n) x j xs = len m x j ys; x \<le> n; set xs \<subseteq> {0..n}; \<not> m i x \<le> m i k + m k x\<rbrakk> \<Longrightarrow> \<exists>ysa. 
set ysa \<subseteq> insert k (insert x (set xs)) \<and> m i k + m k x + len m x j ys = len m i j ysa [PROOF STEP] by (rule exI[where x = "k # x # ys"]; auto simp: add.assoc) [PROOF STATE] proof (state) this: \<exists>ys. set ys \<subseteq> set (x # xs) \<union> {k} \<and> len (fwi m n k n n) i j (x # xs) = len m i j ys goal: No subgoals! [PROOF STEP] qed
{"llama_tokens": 4938, "file": "Floyd_Warshall_Floyd_Warshall", "length": 23}
import numpy as np import tensorflow as tf from baselines.a2c.utils import conv, conv_without_bias, fc, conv_to_fc, batch_to_seq, seq_to_batch, lstm, lnlstm, mse, cat_entropy from baselines.common.distributions import make_pdtype def nature_cnn_h3(unscaled_images, first_layer_mode='', trainable=True, conv1_fn=lambda x: x): """ CNN from Nature paper. """ scaled_images = tf.cast(unscaled_images, tf.float32) / 255. activ = tf.nn.relu print("scaled_images: {}".format(scaled_images)) if first_layer_mode == 'Share': assert False # input_activations = [] # for start in range(3): # input_images = scaled_images[..., start:start+2] # assert input_images.get_shape()[-1] == 2 # Should be a pair of frames. # h = activ(conv(input_images, 'c1_2_frame_input', nf=32, rf=8, stride=4, init_scale=np.sqrt(2), reuse=start!=0, trainable=trainable)) # input_activations.append(h) # assert len(input_activations) == 3 # Should have 3 pairs of frames. # h = (1. / 3.) * tf.add_n(input_activations, name='c1') # Average the activations of the three pairs of frames. elif first_layer_mode == '2Frame': input_images = scaled_images[..., -2:] assert input_images.get_shape()[-1] == 2 # Should be a pair of frames. h = activ(conv(input_images, 'c1_2_frame_input', nf=32, rf=8, stride=4, init_scale=np.sqrt(2), trainable=trainable)) else: assert False # scaled_images = scaled_images[..., -2:] # print("scaled_images: {}".format(scaled_images)) # h = activ(conv(scaled_images, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2), trainable=trainable)) # print('scaled_images: {}'.format(scaled_images.get_shape())) h = conv1_fn(h) h2 = activ(conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2), trainable=trainable)) _h3 = conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2), trainable=trainable) h3 = activ(_h3) return h3, _h3 def nature_cnn(unscaled_images, first_layer_mode='', trainable=True): h3, _h3 = nature_cnn_h3(unscaled_images, first_layer_mode, trainable) h3 = conv_to_fc(h3) return tf.nn.relu(fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2), trainable=trainable)) class LnLstmPolicy(object): def __init__(self, sess, ob_space, ac_space, nbatch, nsteps, nlstm=256, reuse=False): nenv = nbatch // nsteps nh, nw, nc = ob_space.shape ob_shape = (nbatch, nh, nw, nc) nact = ac_space.n X = tf.placeholder(tf.uint8, ob_shape) #obs M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1) S = tf.placeholder(tf.float32, [nenv, nlstm*2]) #states with tf.variable_scope("model", reuse=reuse): h = nature_cnn(X) xs = batch_to_seq(h, nenv, nsteps) ms = batch_to_seq(M, nenv, nsteps) h5, snew = lnlstm(xs, ms, S, 'lstm1', nh=nlstm) h5 = seq_to_batch(h5) pi = fc(h5, 'pi', nact) vf = fc(h5, 'v', 1) self.pdtype = make_pdtype(ac_space) self.pd = self.pdtype.pdfromflat(pi) v0 = vf[:, 0] a0 = self.pd.sample() neglogp0 = self.pd.neglogp(a0) self.initial_state = np.zeros((nenv, nlstm*2), dtype=np.float32) def step(ob, state, mask): return sess.run([a0, v0, snew, neglogp0], {X:ob, S:state, M:mask}) def value(ob, state, mask): return sess.run(v0, {X:ob, S:state, M:mask}) self.X = X self.M = M self.S = S self.pi = pi self.vf = vf self.step = step self.value = value class LstmPolicy(object): def __init__(self, sess, ob_space, ac_space, nbatch, nsteps, nlstm=256, reuse=False): nenv = nbatch // nsteps nh, nw, nc = ob_space.shape ob_shape = (nbatch, nh, nw, nc) nact = ac_space.n X = tf.placeholder(tf.uint8, ob_shape) #obs M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1) S = tf.placeholder(tf.float32, [nenv, nlstm*2]) #states with 
tf.variable_scope("model", reuse=reuse): h = nature_cnn(X) xs = batch_to_seq(h, nenv, nsteps) ms = batch_to_seq(M, nenv, nsteps) h5, snew = lstm(xs, ms, S, 'lstm1', nh=nlstm) h5 = seq_to_batch(h5) pi = fc(h5, 'pi', nact) vf = fc(h5, 'v', 1) self.pdtype = make_pdtype(ac_space) self.pd = self.pdtype.pdfromflat(pi) v0 = vf[:, 0] a0 = self.pd.sample() neglogp0 = self.pd.neglogp(a0) self.initial_state = np.zeros((nenv, nlstm*2), dtype=np.float32) def step(ob, state, mask): return sess.run([a0, v0, snew, neglogp0], {X:ob, S:state, M:mask}) def value(ob, state, mask): return sess.run(v0, {X:ob, S:state, M:mask}) self.X = X self.M = M self.S = S self.pi = pi self.vf = vf self.step = step self.value = value class CnnPolicy(object): def __init__(self, sess, X, ob_space, ac_space, nbatch, nsteps, reuse=False, hparams=None): #pylint: disable=W0613 assert hparams != None nh, nw, nc = ob_space.shape ob_shape = (nbatch, nh, nw, nc) nact = ac_space.n self.X = X scope = hparams.get('_policy_scope', 'model') with tf.variable_scope(scope, reuse=reuse): # if '_teacher_h' in hparams: # h = hparams['_teacher_h'] # else: # h = nature_cnn(X, first_layer_mode=hparams['first_layer_mode'], trainable=hparams['base_trainable']) # for i in range(hparams['fc_depth']): # h = tf.nn.relu(fc(h, 'additional_fc{}'.format(i), nh=512, init_scale=np.sqrt(2))) h = nature_cnn(X, first_layer_mode=hparams['first_layer_mode'], trainable=hparams['base_trainable']) self.original_h = h if hparams['use_extra_path']: with tf.variable_scope('model_extrapath', reuse=reuse): notransfer_h = nature_cnn(X, first_layer_mode=hparams['first_layer_mode'], trainable=hparams['base_trainable']) print("notransfer_h: {}".format(notransfer_h)) print('original h: {}'.format(h)) concatenated_h = tf.concat([h, notransfer_h], axis=-1) print('concatenated_h: {}'.format(concatenated_h)) h = tf.nn.relu(fc(concatenated_h, 'extra_path_fc', nh=512, init_scale=np.sqrt(2), trainable=True)) print('final h: {}'.format(h)) self.h = h init_scales = [0.01, 1] if hparams.get('init_pi_v_zero'): init_scales = [0 for _ in init_scales] pi = fc(h, 'pi', nact, init_scale=init_scales[0]) vf = fc(h, 'v', 1, init_scale=init_scales[1])[:,0] self.pdtype = make_pdtype(ac_space) self.pd = self.pdtype.pdfromflat(pi) a0 = self.pd.sample() neglogp0 = self.pd.neglogp(a0) self.initial_state = None def step(ob, *_args, **_kwargs): a, v, neglogp = sess.run([a0, vf, neglogp0], {X:ob}) return a, v, neglogp def value(ob, *_args, **_kwargs): return sess.run(vf, {X:ob}) # copy_weights_op = None # def setup_copy_weights(): # nonlocal copy_weights_op # assert hparams.get('_src_scope') # layer_names = ['c1_2_frame_input', 'c2', 'c3', 'fc1', 'pi', 'v'] # layer_types = ['w', 'b'] # assert hparams['fc_depth'] == 0 # src_scope = hparams['_src_scope'] # assign_ops = [] # for layer_name in layer_names: # for layer_type in layer_types: # var_name = layer_name + '/' + layer_type + ':0' # dst_var_name = scope + '/' + var_name # src_var_name = src_scope + '/' + var_name # dst_var = None # src_var = None # for v in tf.global_variables(): # if v.name == dst_var_name: dst_var = v # if v.name == src_var_name: src_var = v # assert dst_var is not None # assert src_var is not None # assign_op = tf.assign(ref=dst_var, value=src_var) # assign_ops.append(assign_op) # copy_weights_op = tf.group(*assign_ops) # def copy_weights(): # sess.run(copy_weights_op) # self.copy_weights = copy_weights # self.setup_copy_weights = setup_copy_weights self.X = X self.pi = pi self.vf = vf self.step = step self.value = value class 
CnnAttentionPolicy(object): def _init_attention_20(self, conv1): if not self.hparams.get('attention_20'): return conv1 print("INIT ATTENTION 20!!") attention_logits, attention_selector = conv_without_bias(conv1, 'attention_logits_20', nf=1, rf=1, stride=1, trainable=True) attention_logits_20 = attention_logits orig_attention_shape = attention_logits.get_shape() attention_logits = tf.reshape(attention_logits, (self.nbatch, -1)) attention_weights = tf.nn.softmax(attention_logits / self.hparams['attention_temperature']) attention_weights = tf.reshape(attention_weights, orig_attention_shape) attention_weights_20 = attention_weights # self.context_20 = tf.reduce_sum(tf.reduce_sum(self.attention_weights_20 * conv1, axis=1), axis=1) # assert(len(self.context_20.shape) == 2) # assert(self.context_20.shape[-1] == 32) if self.reuse: tf.summary.image('predicted_attention_20', attention_weights_20, max_outputs=1) if self.hparams['predict_attention_only']: return conv1 if self.hparams['attention_skip']: branch1 = conv1 * attention_weights * 400.0 branch2 = conv1 return branch1 * self.hparams['attention_skip_c'] + (1. - self.hparams['attention_skip_c']) * branch2 return attention_logits_20, attention_weights_20, conv1 * attention_weights * 400.0 def _init_attention_20_main_branch(self, conv1): self.attention_logits_20, self.attention_weights_20, attention_weights = self._init_attention_20(conv1) return attention_weights def _init_attention_20_extrapath(self, conv1): self.extrapath_attention_logits_20, self.extrapath_attention_weights_20, attention_weights = self._init_attention_20(conv1) return attention_weights # def _init_attention(self, h3): # trainable = True # if self.hparams.get('attention_frozen'): trainable = False # init_scale = 0.0 if self.hparams.get('init_attention_zero') else 1.0 # attention_logits, attention_selector = conv_without_bias(h3, 'attention_logits', nf=1, rf=1, stride=1, init_scale=init_scale, trainable=trainable) # if self.hparams['attention_cosine_distance']: # attention_selector_norm = tf.norm(tf.reshape(attention_selector, [-1])) # # Was getting nan issues with tf.norm, manually implementing it seemed to work though? 
# h3_norm = tf.sqrt(tf.reduce_sum(tf.square(h3), axis=-1, keep_dims=True) + 1e-7) # attention_logits /= (attention_selector_norm * h3_norm + 1e-5) # self.attention_logits = attention_logits # orig_attention_shape = attention_logits.get_shape() # attention_logits = tf.reshape(attention_logits, (self.nbatch, -1)) # attention_weights = tf.nn.softmax(attention_logits / self.hparams['attention_temperature']) # attention_weights = tf.reshape(attention_weights, orig_attention_shape) # self.attention_weights = attention_weights # if self.reuse: # tf.summary.image('predicted_attention', self.attention_weights, max_outputs=1) # def _get_noisy_pi_and_vf(self, reuse, init_scales): # random_dim = 64 if self.hparams.get('dropout_more_random') else 1 # random_in = tf.truncated_normal(shape=[80, 7, 7, random_dim]) # stddev = tf.sqrt((1.0 - self.attention_weights) / (self.attention_weights + 1e-6) + 1e-7) # stddev *= self.hparams['_dropout_strength'] # noise = self.h3 * (random_in * stddev) # if reuse: # tf.summary.scalar('noise_min', tf.reduce_min(tf.abs(noise))) # tf.summary.scalar('noise_max', tf.reduce_max(tf.abs(noise))) # tf.summary.scalar('noise_uncentered_mean', tf.reduce_mean(tf.abs(noise))) # tf.summary.scalar('h3_min', tf.reduce_min(tf.abs(self.h3))) # tf.summary.scalar('h3_max', tf.reduce_max(tf.abs(self.h3))) # tf.summary.scalar('h3_uncentered_mean', tf.reduce_mean(tf.abs(self.h3))) # h3_noise = self.h3 + noise # h3_noise = conv_to_fc(h3_noise) # h_noise = tf.nn.relu(fc(h3_noise, 'fc1', nh=512, init_scale=np.sqrt(2), trainable=True, reuse=True)) # pi_noise = fc(h_noise, 'pi', self.nact, init_scale=init_scales[0], reuse=True) # vf_noise = fc(h_noise, 'v', 1, init_scale=init_scales[1], reuse=True)[:,0] # return pi_noise, vf_noise def __init__(self, sess, X, ob_space, ac_space, nbatch, nsteps, reuse=False, hparams=None): #pylint: disable=W0613 assert hparams != None nh, nw, nc = ob_space.shape ob_shape = (nbatch, nh, nw, nc) nact = ac_space.n self.X = X self.nbatch = nbatch self.reuse = reuse self.hparams = hparams self.nact = nact scope = hparams.get('_policy_scope', 'model') with tf.variable_scope(scope, reuse=reuse): # if '_teacher_h3' in hparams: # h3 = self.hparams['_teacher_h3'] # else: # h3, _h3 = nature_cnn_h3(X, # first_layer_mode=hparams['first_layer_mode'], # trainable=hparams['base_trainable'], # conv1_fn=lambda x: self._init_attention_20(x)) h3, _h3 = nature_cnn_h3(X, first_layer_mode=hparams['first_layer_mode'], trainable=hparams['base_trainable'], conv1_fn=lambda x: self._init_attention_20_main_branch(x)) # self.flow_base = h3 # self.attention_weights = tf.ones(shape=[nbatch, 7, 7, 1], dtype=tf.float32) / 49.0 # if '_attention_truth' in hparams: # self.attention_weights = tf.reshape(self.hparams['_attention_truth'], [nbatch, 7, 7, 1]) # else: # self._init_attention(_h3) # attention_weighted_vectors = tf.reduce_sum(h3 * self.attention_weights, axis=[1,2]) # # Sanity check hparams. 
# attention_modes = ['use_global_vecs', 'attention_weighted_fc'] # mode_count = sum(hparams.get(mode) is True for mode in attention_modes) # assert mode_count <= 1 # if hparams.get('use_global_vecs'): # global_vecs = tf.nn.relu(conv(h3, 'global_vec_conv', nf=4, rf=1, stride=1, trainable=True)) # global_vecs = conv_to_fc(global_vecs) # h = tf.concat([attention_weighted_vectors, global_vecs], -1) # assert len(h.get_shape()) == 2 # assert h.get_shape()[-1] == 260 # for i in range(hparams['fc_depth']): # h = tf.nn.relu(fc(h, 'additional_fc{}'.format(i), nh=260, init_scale=np.sqrt(2))) # elif hparams.get('attention_weighted_fc'): # # attention_entropy = cat_entropy(tf.reshape(self.attention_logits, [-1, 49])) # # max_entropy = np.log(49) # # entropy_percentage = attention_entropy / max_entropy # # attention_scaling_factor = 49.0 * entropy_percentage # # if reuse: # # tf.summary.scalar('max_attention_scaling_factor', tf.reduce_max(attention_scaling_factor)) # # tf.summary.scalar('min_attention_scaling_factor', tf.reduce_min(attention_scaling_factor)) # # tf.summary.scalar('mean_attention_scaling_factor', tf.reduce_mean(attention_scaling_factor)) # # attention_scaling_factor = tf.expand_dims(tf.expand_dims(tf.expand_dims(attention_scaling_factor, axis=1), axis=1), axis=1) # # attention_scaling_factor = tf.stop_gradient(attention_scaling_factor) # # Intentionally not using the attention weighted vectors in this path. # h3 = h3 * self.attention_weights * 49.0 # Multiply so that we don't change the scale too much, attention=1/49 if uniform # self.h3 = h3 # h3_flat = conv_to_fc(h3) # h = tf.nn.relu(fc(h3_flat, 'fc1', nh=512, init_scale=np.sqrt(2), trainable=True)) # if hparams.get('add_global_vecs'): # h = tf.concat([h, attention_weighted_vectors], axis=-1) # assert h.get_shape()[-1] == 512 + 64 # #for i in range(hparams['fc_depth']): # # h = tf.nn.relu(fc(h, 'additional_fc{}'.format(i), nh=512, init_scale=np.sqrt(2))) # elif hparams.get('gaussian_attention'): # # Gaussian dropout + reparamaterization trick from vae's. 
# if self.hparams.get('fixed_dropout_noise'): # random_in = hparams['_env_random'] # else: # random_dim = 64 if hparams.get('dropout_more_random') else 1 # random_in = tf.truncated_normal(shape=[7, 7, random_dim]) # stddev = tf.sqrt((1.0 - self.attention_weights) / (self.attention_weights + 1e-6) + 1e-7) # stddev *= hparams['_dropout_strength'] # noise = h3 * (random_in * stddev) # if reuse: # tf.summary.scalar('noise_min', tf.reduce_min(tf.abs(noise))) # tf.summary.scalar('noise_max', tf.reduce_max(tf.abs(noise))) # tf.summary.scalar('noise_uncentered_mean', tf.reduce_mean(tf.abs(noise))) # tf.summary.scalar('h3_min', tf.reduce_min(tf.abs(h3))) # tf.summary.scalar('h3_max', tf.reduce_max(tf.abs(h3))) # tf.summary.scalar('h3_uncentered_mean', tf.reduce_mean(tf.abs(h3))) # h3 = h3 + noise # h3 = conv_to_fc(h3) # h = tf.nn.relu(fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2), trainable=True)) # for i in range(hparams['fc_depth']): # h = tf.nn.relu(fc(h, 'additional_fc{}'.format(i), nh=512, init_scale=np.sqrt(2))) # else: # h3 = conv_to_fc(h3) # h = tf.nn.relu(fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2), trainable=True)) # h = tf.concat([attention_weighted_vectors, h], -1) # assert len(h.get_shape()) == 2 # assert h.get_shape()[-1] == (512+64) # for i in range(hparams['fc_depth']): # h = tf.nn.relu(fc(h, 'additional_fc{}'.format(i), nh=(512+64), init_scale=np.sqrt(2))) h3 = conv_to_fc(h3) h = tf.nn.relu(fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2), trainable=True)) self.original_h = h if hparams['use_extra_path']: with tf.variable_scope('model_extrapath', reuse=reuse): h3, _h3 = nature_cnn_h3(X, first_layer_mode=hparams['first_layer_mode'], trainable=hparams['base_trainable'], conv1_fn=lambda x: self._init_attention_20_extrapath(x)) h3 = conv_to_fc(h3) notransfer_h = tf.nn.relu(fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2), trainable=True)) print("notransfer_h: {}".format(notransfer_h)) print('original h: {}'.format(h)) concatenated_h = tf.concat([h, notransfer_h], axis=-1) print('concatenated_h: {}'.format(concatenated_h)) h = tf.nn.relu(fc(concatenated_h, 'extra_path_fc', nh=512, init_scale=np.sqrt(2), trainable=True)) print('final h: {}'.format(h)) self.h = h if hparams['context_20']: h = tf.concat([h, self.context_20], axis=-1) init_scales = [0.01, 1] # if hparams.get('init_pi_v_zero'): # init_scales = [0 for _ in init_scales] pi = fc(h, 'pi', nact, init_scale=init_scales[0]) vf = fc(h, 'v', 1, init_scale=init_scales[1])[:,0] if reuse: tf.summary.scalar('vf', tf.reduce_mean(vf)) # if hparams.get('gaussian_attention') and hparams.get('attention_weighted_fc') and reuse: # self.pi_noises = [] # self.vf_noises = [] # for i in range(hparams.get("num_dropout_models")): # pi_noise, vf_noise = self._get_noisy_pi_and_vf(reuse, init_scales) # self.pi_noises.append(pi_noise) # self.vf_noises.append(vf_noise) # self.pi_noise = pi_noise # self.vf_noise = vf_noise # pi_target = tf.stop_gradient(tf.nn.softmax(pi)) # vf_target = tf.stop_gradient(vf) # noise_pi_loss = tf.nn.softmax_cross_entropy_with_logits(logits=pi_noise, labels=pi_target) # noise_vf_loss = hparams['_vf_coef'] * mse(vf_noise, vf_target) # noise_loss = tf.reduce_mean(noise_pi_loss+noise_vf_loss) * hparams['noise_loss_c'] # if reuse: # tf.summary.scalar('noise_pi_loss', tf.reduce_mean(noise_pi_loss)) # tf.summary.scalar('noise_vf_loss', tf.reduce_mean(noise_vf_loss)) # tf.summary.scalar('noise_loss', noise_loss) # self.noise_loss = noise_loss self.pdtype = make_pdtype(ac_space) self.pd = self.pdtype.pdfromflat(pi) a0 = self.pd.sample() neglogp0 
= self.pd.neglogp(a0) self.initial_state = None # reshaped_attn = tf.reshape(self.attention_weights_20, [-1, 400]) def step(ob, *_args, **_kwargs): feed_dict = {X:ob} if '_dropout_strength' in _kwargs: (k, v) = _kwargs['_dropout_strength'] feed_dict[k] = v ops = [a0, vf, neglogp0] results = sess.run(ops, feed_dict) return results[0], results[1], results[2] def value(ob, *_args, **_kwargs): feed_dict = {X:ob} if '_dropout_strength' in _kwargs: (k, v) = _kwargs['_dropout_strength'] feed_dict[k] = v return sess.run(vf, feed_dict) # copy_weights_op = None # def setup_copy_weights(): # nonlocal copy_weights_op # assert hparams.get('_src_scope') # layer_names = ['c1_2_frame_input', 'c2', 'c3', 'fc1', 'pi', 'v', 'attention_logits'] # layer_types = ['w', 'b'] # assert hparams['fc_depth'] == 0 # src_scope = hparams['_src_scope'] # assign_ops = [] # for layer_name in layer_names: # for layer_type in layer_types: # if layer_name == 'attention_logits' and layer_type == 'b': continue # var_name = layer_name + '/' + layer_type + ':0' # dst_var_name = scope + '/' + var_name # src_var_name = src_scope + '/' + var_name # dst_var = None # src_var = None # for v in tf.global_variables(): # if v.name == dst_var_name: dst_var = v # if v.name == src_var_name: src_var = v # assert dst_var is not None # assert src_var is not None # assign_op = tf.assign(ref=dst_var, value=src_var) # assign_ops.append(assign_op) # copy_weights_op = tf.group(*assign_ops) # def copy_weights(): # sess.run(copy_weights_op) # self.copy_weights = copy_weights # self.setup_copy_weights = setup_copy_weights self.X = X self.pi = pi self.vf = vf self.step = step self.value = value class MlpPolicy(object): def __init__(self, sess, ob_space, ac_space, nbatch, nsteps, reuse=False): #pylint: disable=W0613 ob_shape = (nbatch,) + ob_space.shape actdim = ac_space.shape[0] X = tf.placeholder(tf.float32, ob_shape, name='Ob') #obs with tf.variable_scope("model", reuse=reuse): activ = tf.tanh h1 = activ(fc(X, 'pi_fc1', nh=64, init_scale=np.sqrt(2))) h2 = activ(fc(h1, 'pi_fc2', nh=64, init_scale=np.sqrt(2))) pi = fc(h2, 'pi', actdim, init_scale=0.01) h1 = activ(fc(X, 'vf_fc1', nh=64, init_scale=np.sqrt(2))) h2 = activ(fc(h1, 'vf_fc2', nh=64, init_scale=np.sqrt(2))) vf = fc(h2, 'vf', 1)[:,0] logstd = tf.get_variable(name="logstd", shape=[1, actdim], initializer=tf.zeros_initializer()) pdparam = tf.concat([pi, pi * 0.0 + logstd], axis=1) self.pdtype = make_pdtype(ac_space) self.pd = self.pdtype.pdfromflat(pdparam) a0 = self.pd.sample() neglogp0 = self.pd.neglogp(a0) self.initial_state = None def step(ob, *_args, **_kwargs): a, v, neglogp = sess.run([a0, vf, neglogp0], {X:ob}) return a, v, self.initial_state, neglogp def value(ob, *_args, **_kwargs): return sess.run(vf, {X:ob}) self.X = X self.pi = pi self.vf = vf self.step = step self.value = value
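An aside on the constants this file hard-codes (the 20x20 grid behind attention_20 and the 7x7, 49-cell grid used for h3): they follow from the Nature-CNN filter sizes and strides, assuming the usual 84x84 Atari observations. That input size is my assumption, it is not fixed anywhere in this file, and the arithmetic below is only a sketch of the 'valid'-padding output sizes.

# Sketch only: derives the 20x20 (c1) and 7x7 (c3) grids, assuming 84x84 inputs.
def conv_out(size, rf, stride):
    # Output size of a 'valid' convolution: floor((size - rf) / stride) + 1.
    return (size - rf) // stride + 1

s = 84                    # assumed Atari frame height/width
s = conv_out(s, 8, 4)     # c1 -> 20, the grid used by attention_20 (20 * 20 = 400)
s = conv_out(s, 4, 2)     # c2 -> 9
s = conv_out(s, 3, 1)     # c3 -> 7, the grid used by the h3 attention (7 * 7 = 49)
print(s)                  # 7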
{"hexsha": "3f3b13026aef3feda3e82d72e19e0bdbc3b58e92", "size": 26321, "ext": "py", "lang": "Python", "max_stars_repo_path": "baselines/a2c/policies.py", "max_stars_repo_name": "vik-goel/MOREL", "max_stars_repo_head_hexsha": "55c8bb81b25de7c2dfba451db61564c352cdb5e4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 47, "max_stars_repo_stars_event_min_datetime": "2018-11-27T03:04:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-05T15:35:02.000Z", "max_issues_repo_path": "baselines/a2c/policies.py", "max_issues_repo_name": "vik-goel/MOREL", "max_issues_repo_head_hexsha": "55c8bb81b25de7c2dfba451db61564c352cdb5e4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2019-03-01T17:48:22.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-13T12:04:36.000Z", "max_forks_repo_path": "baselines/a2c/policies.py", "max_forks_repo_name": "vik-goel/MOREL", "max_forks_repo_head_hexsha": "55c8bb81b25de7c2dfba451db61564c352cdb5e4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2019-04-15T21:21:27.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-01T13:10:33.000Z", "avg_line_length": 42.1810897436, "max_line_length": 156, "alphanum_fraction": 0.5704570495, "include": true, "reason": "import numpy", "num_tokens": 6930}
r""" Check for pynormaliz """ from . import PythonModule from .join_feature import JoinFeature class PyNormaliz(JoinFeature): r""" A :class:`sage.features.Feature` describing the presence of the Python package ``PyNormaliz``. EXAMPLES:: sage: from sage.features.normaliz import PyNormaliz sage: PyNormaliz().is_present() # optional - pynormaliz FeatureTestResult('pynormaliz', True) """ def __init__(self): r""" TESTS:: sage: from sage.features.normaliz import PyNormaliz sage: isinstance(PyNormaliz(), PyNormaliz) True """ JoinFeature.__init__(self, 'pynormaliz', [PythonModule('PyNormaliz', spkg="pynormaliz")])
{"hexsha": "ceb67875b018e5ca24caf27e80eb22891fff91c2", "size": 783, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/sage/features/normaliz.py", "max_stars_repo_name": "kliem/sage-test-27122", "max_stars_repo_head_hexsha": "cc60cfebc4576fed8b01f0fc487271bdee3cefed", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/sage/features/normaliz.py", "max_issues_repo_name": "kliem/sage-test-27122", "max_issues_repo_head_hexsha": "cc60cfebc4576fed8b01f0fc487271bdee3cefed", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/sage/features/normaliz.py", "max_forks_repo_name": "kliem/sage-test-27122", "max_forks_repo_head_hexsha": "cc60cfebc4576fed8b01f0fc487271bdee3cefed", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-07-23T10:29:56.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-23T10:29:56.000Z", "avg_line_length": 27.0, "max_line_length": 82, "alphanum_fraction": 0.6028097063, "include": true, "reason": "from sage", "num_tokens": 186}
      program prog
      integer vals
      dimension vals(2)
      dimension abc(4)
      write(6, *) vals(1)
      write(6, *) abc(3)
      end
{"hexsha": "1365fe5d9eb4a6db83e9fb6885c0ac85817feb77", "size": 146, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "fable/test/valid/combine_decl_1.f", "max_stars_repo_name": "rimmartin/cctbx_project", "max_stars_repo_head_hexsha": "644090f9432d9afc22cfb542fc3ab78ca8e15e5d", "max_stars_repo_licenses": ["BSD-3-Clause-LBNL"], "max_stars_count": 155, "max_stars_repo_stars_event_min_datetime": "2016-11-23T12:52:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T15:35:44.000Z", "max_issues_repo_path": "fable/test/valid/combine_decl_1.f", "max_issues_repo_name": "rimmartin/cctbx_project", "max_issues_repo_head_hexsha": "644090f9432d9afc22cfb542fc3ab78ca8e15e5d", "max_issues_repo_licenses": ["BSD-3-Clause-LBNL"], "max_issues_count": 590, "max_issues_repo_issues_event_min_datetime": "2016-12-10T11:31:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T23:10:09.000Z", "max_forks_repo_path": "fable/test/valid/combine_decl_1.f", "max_forks_repo_name": "rimmartin/cctbx_project", "max_forks_repo_head_hexsha": "644090f9432d9afc22cfb542fc3ab78ca8e15e5d", "max_forks_repo_licenses": ["BSD-3-Clause-LBNL"], "max_forks_count": 115, "max_forks_repo_forks_event_min_datetime": "2016-11-15T08:17:28.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-09T15:30:14.000Z", "avg_line_length": 18.25, "max_line_length": 25, "alphanum_fraction": 0.5, "num_tokens": 43}
#!/usr/bin/env python # -*- coding: utf-8 -*- import random import jams import numpy as np import copy import datetime import pandas as pd import soundfile as psf import librosa.util import os from .utils import find_files_in_dirs from .utils import read_audio PERC_VOICE_SET = ['kicks', 'snares', 'snares_rim', 'crashes', 'rides', 'open_hats', 'closed_hats', 'low_toms', 'mid_toms', 'high_toms', 'congas_bongos', 'claps', 'bells', 'claves'] REDUCED_PERC_VOICE_SET = ['kicks', 'snares', 'closed_hats'] DEFAULT_MIXING_COEFS = [1.0,] * len(PERC_VOICE_SET) def select_audio_files_from_directory(audio_directory): audio_files = [] list_of_list_of_files = [] for perc_voice in PERC_VOICE_SET: list_of_list_of_files.append(find_files_in_dirs([os.path.join(audio_directory, perc_voice)])) audio_files.append(random.choice(list_of_list_of_files[-1])) return audio_files, list_of_list_of_files def select_audio_files_from_filelist(list_of_list_of_files): return [random.choice(l) for l in list_of_list_of_files] def log_to_linear_amp(x, arange=(-48., 0.)): """ Convert a 0-1 log-scaled signal (whose 0 and 1 levels are defined by `arange`) to linear scale. Parameters ---------- x : np.array Input signal that ranges from 0. to 1. arange : tuple[float] The range of the input in dB Returns ------- x_linear : np.array Linear-scaled x Examples -------- >>> log_to_linear_amp(np.array([1.])) array([ 1.]) >>> log_to_linear_amp(np.array([0.5]), arange=(-6., 0.)) array([ 0.70794578]) >>> log_to_linear_amp(np.array([0.]), arange=(-6., 0.)) array([ 0.]) >>> log_to_linear_amp(0., arange=(-6., 0.)) 0.0 """ x_linear = x * (arange[1] - arange[0]) + arange[0] x_linear = (10.0**(x_linear/20.)) * (x > 0.) # make sure 0 is still 0 return x_linear def velocity_to_amp(v, arange=(-60, 0)): """ Convert a 0-1 velocity signal (whose 0 and 1 levels are defined by `arange`) to linear scale. Parameters ---------- x : np.array Input signal that ranges from 0. to 1. arange : tuple[float] The range of the input in dB Returns ------- x_linear : np.array Linear-scaled x Examples -------- >>> velocity_to_amp(np.array([1.])) array([ 1.]) >>> velocity_to_amp(np.array([1/127.]), arange=(-60., 0.)) array([ 0.001]) >>> velocity_to_amp(np.array([0.]), arange=(-6., 0.)) array([ 0.]) >>> velocity_to_amp(0., arange=(-6., 0.)) 0.0 """ r = 10**((arange[1] - arange[0]) / 20.) b = (127. / (126 * (r ** 0.5))) - (1 / 126.) m = (1 - b) / 127.0 return (((127 * m * v) + b) ** 2) * (v > 0.) def _write_audio(path, y, sample_rate, norm=True): """ Write audio file to disk. Parameters ---------- path : str File path to write audio file. Extension dictates format. y : np.array Audio signal array sample_rate : int norm : bool Peak-normalize `y` before writing to disk. 
Returns ------- None """ if norm: y /= np.max(np.abs(y)) psf.write(path, y, int(sample_rate)) def _dict_of_array_to_dict_of_list(d): new_dict = {} for k, v in d.items(): if isinstance(v, np.ndarray): new_dict[k] = v.tolist() else: new_dict[k] = v return new_dict def _dict_of_list_to_dict_of_array(d): new_dict = {} for k, v in d.items(): if isinstance(v, list): new_dict[k] = np.array(v) else: new_dict[k] = v return new_dict def _repeat_annotations(ann, repetitions): frames = [ann.data] for i in range(1, repetitions): frame = copy.deepcopy(ann.data) frame.time += datetime.timedelta(seconds=(ann.duration * i)) frames.append(frame) frame = pd.DataFrame(pd.concat(frames, ignore_index=True)) ann.data = jams.JamsFrame.from_dataframe(frame) ann.duration *= repetitions return ann def _rotate_annotations(ann, rotation_sec): dur = datetime.timedelta(seconds=ann.duration) ann.data.time += datetime.timedelta(seconds=rotation_sec) ann.data.ix[ann.data.time >= dur, 'time'] -= dur ann.data = jams.JamsFrame.from_dataframe(ann.data.sort_values('time').reset_index(drop=True)) return ann def _trim_annotations(ann, min_sec, max_sec): ann.data.time -= datetime.timedelta(seconds=min_sec) max_sec -= min_sec ann.data = ann.data.ix[(ann.data.time >= datetime.timedelta(seconds=0)) & (ann.data.time <= datetime.timedelta(seconds=max_sec))] dur = max(max_sec - min_sec, ann.data.time.max().total_seconds()) ann.duration = dur ann.data = jams.JamsFrame.from_dataframe(ann.data) return ann class PercussionSynthesizer(object): """ Synthesize percussive audio from annotations Attributes ---------- midi_file_path : str The path to the MIDI file """ def __init__(self, perc_voice_set=PERC_VOICE_SET): self._sample_rate = 44100. self._min_amplitude = -60. self._mixing_coeffs = DEFAULT_MIXING_COEFS self._audio_files = None self._jam = None self._db = None self._downbeats = None self._beats = None self._onsets = None self._has_large_vocab = True self._perc_voice_set = perc_voice_set @property def num_active_voices(self): return len([len(o) > 0 for o in self.get_onsets()]) @property def time_signature(self): raise NotImplementedError() @property def ts_num(self): raise NotImplementedError() @property def ts_denom(self): raise NotImplementedError() @property def tempo(self): raise NotImplementedError() @property def has_large_vocab(self): return self._has_large_vocab @property def perc_voice_set(self): return self._perc_voice_set @property def sample_rate(self): """ Sampling rate. Read-only. Default is 44100. """ return self._sample_rate @property def mixing_coeffs(self): """ Mixing coefficients that determine the relative level for each synthesized pattern. Read-only. Default is [1./num_patterns, 1./num_pattern, ... ]. """ if self._mixing_coeffs is None: return np.ones(self.num_active_voices) / self.num_active_voices else: return self._mixing_coeffs def get_onsets(self, velocity=100): """ Get onset times and amplitudes for percussion voices Returns ------- onsets : list[list] """ onsets = [(v, []) for v in self.perc_voice_set] for k,voice_onset_times in enumerate(self._onsets): for onset_time in voice_onset_times: onsets[k][1].append(dict(time=onset_time, velocity=velocity/127.)) return onsets def get_downbeats(self): return self._downbeats def get_beats(self): return self._beats @property def audio_files(self): """ The audio files used to synthesized the patterns in their respective order by pattern. Read-only. Default is None. """ return self._audio_files @property def jam(self): """ The JAMS annotations for the current pattern. 
Read-only. Default is None. """ return self._jam @property def duration(self): return self._duration @property def min_amplitude(self): """ The minimum amplitude of an onset in dB. Read-only. Default is -48. """ return self._min_amplitude @staticmethod def from_onsets(duration, onset_activations_array=None, onset_times_list=None, sampling_interval=None, has_beats=True, perc_voice_set=PERC_VOICE_SET, **kwargs): """ Synthesize from either an array of onset activations or a list of onset times. Last two rows are downbeat and beat if `has_beats` is defined. Parameters ---------- onset_activations_array : np.array onset_times_list : list[np.array] sampling_interval : float **kwargs : dict Additional keywors are passed to the peak picking algorithm Returns ------- PercussionSynthesizer """ if onset_activations_array is None and onset_times_list is None: raise Exception("Must define either onset_activations_array or onset_times_list") if sampling_interval is None: sampling_interval = (int(round(0.01 * 22050)) / float(22050)) if onset_activations_array is not None: onset_times_list = [] for i in range(onset_activations_array.shape[1]): peaks = librosa.util.peak_pick(onset_activations_array[:, i], **kwargs) onset_times_list.append(peaks * sampling_interval) perc_loop_synth = PercussionSynthesizer(perc_voice_set=perc_voice_set) if has_beats: perc_loop_synth._downbeats = onset_times_list[-2] perc_loop_synth._beats = onset_times_list[-1] perc_loop_synth._onsets = onset_times_list[:-2] else: perc_loop_synth._onsets = onset_times_list perc_loop_synth._duration = duration return perc_loop_synth @staticmethod def from_jams(jams_file_path=None, jam=None): """ Load a pattern from a JAMS file and instantiate the PercussionLoopSynthesizer Parameters ---------- jams_file_path : str Path to jams file jam : JAM jam. Either `jams_file_path` or `jam` must be defined. Returns ------- rhythm_synth : RhythmSynthesizer """ if jams_file_path is not None: jam = jams.load(jams_file_path) elif jam is not None: pass else: raise Exception('Either `jams_file_path` or `jam` must be defined.') num_patterns = len([ann for ann in jam.search(namespace='onset') if not ann.sandbox.base_pattern]) sample_rate = jam.sandbox.sample_rate min_amplitude = jam.sandbox.min_amplitude mixing_coeffs = [] audio_files = [] for i in range(num_patterns): onset_ann = jam.search(pattern_index=i)[0] mixing_coeffs.append(onset_ann.sandbox.mixing_coeff) audio_files.append(onset_ann.sandbox.audio_source) perc_loop_synth = PercussionSynthesizer() perc_loop_synth._min_amplitude = min_amplitude perc_loop_synth._has_large_vocab = jam.sandbox.has_large_vocab perc_loop_synth._sample_rate = sample_rate perc_loop_synth._jam = jam perc_loop_synth._mixing_coeffs = mixing_coeffs perc_loop_synth._audio_files = audio_files perc_loop_synth._perc_voice_set = jam.sandbox.perc_voice_set perc_loop_synth._duration = jam.file_metadata.duration return perc_loop_synth def generate_jam(self, output_jams_file=None, audio_files=None, audio_directory=None, mixing_coeffs=None, sample_rate=None, min_amplitude=None, duration_sec=None, additional_onset_sandbox_info=None, additional_global_sandbox_info=None): """ Parameters ---------- output_jams_file : str If not None, write jams file to `output_jams_file`. audio_files : list[str] A list of the audio_files to use for rendering the rhythm patterns. These should be ordered according to the pattern (e.g., if the patterns were constructed to be from low frequency to high frequency, a bass drum might be first in the list). 
If not None, overwrite self.audio_files. Default is None. audio_directory : str If audio_files is None, then this must be set. It will randomly sample from subdirectories in PERC_VOICE_SET Default is None mixing_coeffs : np.array The coefficients specifying the mixing levels for the patterns. If not None, overwrite self.mixing_coeffs. Default is None. sample_rate : float Sampling rate of output. If not None, overwrite self.sample_rate. Default is None. min_amplitude : float The minimum amplitude to render for the softest sound above 0 in log amplitude. If not None, overwrite self.min_amplitude. Default is None. duration_sec : float If not None, extend the rhythm pattern to the target duration. Default is None. additional_onset_sandbox_info : list[dict] If not None, then save this additional info in the sandbox for the corresponding onset annotation. Default is None. additional_global_sandbox_info: dict If not None, then save this additional info in the top-level sandbox of the JAM. Default is None. Returns ------- jam : jams.JAM """ if mixing_coeffs is not None: self._mixing_coeffs = mixing_coeffs if audio_files is None and audio_directory is None: raise Exception("Either `audio_files` or `audio_directory` must be defined.") if audio_files is None: audio_files = select_audio_files_from_directory(audio_directory)[0] self._audio_files = audio_files assert (len(self.audio_files) == len(self.perc_voice_set)) if sample_rate is not None: self._sample_rate = sample_rate if min_amplitude is not None: self._min_amplitude = min_amplitude # make JAM structure jam = jams.JAMS() jam.file_metadata.duration = self.duration jam.sandbox.sample_rate = self.sample_rate # jam.sandbox.time_signature = (self.ts_num, self.ts_denom) jam.sandbox.min_amplitude = self.min_amplitude jam.sandbox.has_large_vocab = self.has_large_vocab jam.sandbox.perc_voice_set = self.perc_voice_set if additional_global_sandbox_info is not None: jam.sandbox.update(**additional_global_sandbox_info) if self.get_beats() is not None: # write beat positions to jams base_pattern = False beat_ann = jams.Annotation(namespace='beat', time=0, duration=jam.file_metadata.duration) beat_ann.sandbox = jams.Sandbox(base_pattern=base_pattern) for k, t in enumerate(self.get_beats()): beat_ann.append(time=t, duration=0.0, value=k) jam.annotations.append(beat_ann) # write tempo to jams # tempo_ann = jams.Annotation(namespace='tempo', time=0, duration=jam.file_metadata.duration) # tempo_ann.append(time=0, duration=jam.file_metadata.duration, value=self.tempo, confidence=1.0) # jam.annotations.append(tempo_ann) # write onsets for each rhythm pattern onsets = self.get_onsets() for i in range(len(self.perc_voice_set)): base_pattern = False onsets_ann = jams.Annotation(namespace='onset', time=0, duration=jam.file_metadata.duration) onsets_ann.sandbox = jams.Sandbox(pattern_index=i, perc_voice=self.perc_voice_set[i], audio_source=self.audio_files[i], mixing_coeff=self.mixing_coeffs[i], base_pattern=base_pattern) if additional_onset_sandbox_info is not None: onsets_ann.sandbox.update(**additional_onset_sandbox_info[i]) # write onsets for onset in onsets[i][1]: onsets_ann.append(time=onset['time'], value=velocity_to_amp(onset['velocity'], (self.min_amplitude, 0.)), duration=0) jam.annotations.append(onsets_ann) self._jam = jam if output_jams_file is not None: jam.save(output_jams_file) return jam def synthesize(self, output_file, **kwargs): """ Synthesize the patterns from self.jam. If self.jam does not exist, pass in the arguments to `generate_jam`. 
Parameters ---------- output_file : str The path where the rendered and mixed output signal will be written. If None, no file will be written. kwargs : additional keyword arguments Additional keyword arguments to pass to `generate_jam` if self.jam is None. Returns ------- rhythm_audio : np.array The mixed output_signal unmixed_rhythm_audio: np.array An MxN array where M is the number of rhythm patterns and N is the length of the measure in samples See Also -------- generate_jam """ if self.jam is None or len(kwargs) > 0: self.generate_jam(**kwargs) onset_anns = [ann for ann in self.jam.search(namespace='onset') if not ann.sandbox.base_pattern] unmixed_rhythm_audio = np.zeros([len(self.perc_voice_set), int(np.ceil(self.duration * self.sample_rate))]) for k, onset_ann in enumerate(onset_anns): # load audio file sample, _ = read_audio(self.audio_files[k], self.sample_rate, mono=True) sample_length = sample.shape[0] # render samples at onsets for j in range(len(onset_ann.data.index)): start_idx = int(np.round(onset_ann.data.ix[j, 'time'].total_seconds() * self.sample_rate)) start_idx = min(start_idx, unmixed_rhythm_audio.shape[1]-1) stop_idx = start_idx + sample_length stop_idx = min(stop_idx, unmixed_rhythm_audio.shape[1]-1) unmixed_rhythm_audio[k, start_idx:stop_idx] = onset_ann.data.ix[j, 'value'] * sample[:(stop_idx - start_idx)] rhythm_audio = np.dot(self.mixing_coeffs, unmixed_rhythm_audio) if output_file is not None: _write_audio(output_file, rhythm_audio, int(self.sample_rate)) return rhythm_audio, unmixed_rhythm_audio
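The velocity_to_amp curve defined earlier in this file is constructed so that, over a given dB range, MIDI velocity 1 lands exactly on the floor amplitude and velocity 127 lands on full scale, with a squared-linear ramp in between. A small self-contained check of those endpoints follows; the function body is copied from above, the test values are mine.

# Endpoint check for the squared-linear velocity-to-amplitude curve.
import numpy as np

def velocity_to_amp(v, arange=(-60., 0.)):
    r = 10 ** ((arange[1] - arange[0]) / 20.)    # linear dynamic range, 1000 for 60 dB
    b = (127. / (126 * (r ** 0.5))) - (1 / 126.)
    m = (1 - b) / 127.0
    return (((127 * m * v) + b) ** 2) * (v > 0.)

v = np.array([0., 1., 64., 127.]) / 127.         # velocities on the 0-1 scale used by get_onsets
print(velocity_to_amp(v))                         # approx. [0.    0.001 0.266 1.   ]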
{"hexsha": "a56456ef43f4db949cea4c69b26353fada3ce8a5", "size": 19148, "ext": "py", "lang": "Python", "max_stars_repo_path": "large_vocab_adt_dafx2018/percussion_synth.py", "max_stars_repo_name": "mcartwright/dafx2018_adt", "max_stars_repo_head_hexsha": "057ac6b1e39cd0c80554d52535cc9d88b6316c74", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-02-28T05:43:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-02T17:05:13.000Z", "max_issues_repo_path": "large_vocab_adt_dafx2018/percussion_synth.py", "max_issues_repo_name": "mcartwright/dafx2018_adt", "max_issues_repo_head_hexsha": "057ac6b1e39cd0c80554d52535cc9d88b6316c74", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "large_vocab_adt_dafx2018/percussion_synth.py", "max_forks_repo_name": "mcartwright/dafx2018_adt", "max_forks_repo_head_hexsha": "057ac6b1e39cd0c80554d52535cc9d88b6316c74", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-07T17:10:40.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-07T17:10:40.000Z", "avg_line_length": 32.9569707401, "max_line_length": 120, "alphanum_fraction": 0.6001148945, "include": true, "reason": "import numpy", "num_tokens": 4334}
# A 5x5 bingo sheet of random string entries.
struct Bingo
    table::Matrix{String}
end

Bingo() = Bingo([cast() for _ in 1:5, _ in 1:5])

# Draw one random 10-letter cell entry.
cast() = string(rand('a':'z', 10)...)

Base.setindex!(b::Bingo, v::String, inds...) = b.table[inds...] = v
Base.getindex(b::Bingo, inds...) = b.table[inds...]
Base.lastindex(b::Bingo) = lastindex(b.table)

save(b::Bingo) = save("bingo.pdf", b)

# Render the sheet to `filename` with Luxor: a 5x5 grid of tiles, a free centre
# square, and the letters P, I, Qu, I, L across the top.
function save(filename, b::Bingo)
    name, ext = splitext(filename)
    Luxor.Drawing(595, 708, filename)
    Luxor.origin()
    tiles = Tiler(600, 650, 5, 5)
    translate(0, 30)
    background("#6a6703")
    for (pos, n) in tiles
        sethue("white")
        box(pos, tiles.tilewidth, tiles.tileheight, :fillpreserve)
        sethue("black")
        strokepath()
        fontface("Monaco")
        fsize = 18
        fontsize(fsize)
        lines = textlines(b[n], 120; rightgutter=3)
        if length(lines) > 5
            # Entry does not fit at 18 pt; drop to 12 pt and re-wrap.
            fsize = 12
            fontsize(fsize)
            lines = textlines(b[n], 120; rightgutter=3)
        end
        if n == 13
            # Centre tile is the free square.
            sethue("red")
            fontsize(22)
            fontface("Helvetica Bold")
            # settext("<b>FREE</b>", pos - (0, 40), halign="center", markup=true)
            text("FREE", pos - (0, 40), halign=:center)
            fontface("Monaco")
            fontsize(fsize)
            textbox(lines, pos - (0, 32), leading=20, alignment=:center)
            sethue("black")
        else
            textbox(lines, pos - (0, 50), leading=20, alignment=:center)
        end
    end
    sethue("black")
    fontsize(35)
    text("P", Point(-245, -360), valign = :top)
    text("I", Point(-130, -360), valign = :top)
    text("Qu", Point(-20, -360), valign = :top)
    text("I", Point(105, -360), valign = :top)
    text("L", Point(220, -360), valign = :top)
    Luxor.finish()
end

# Generate a fresh sheet, save it to bingo.pdf, and optionally open a preview.
function generate(; preview=true)
    b = Bingo()
    save(b)
    if preview
        Luxor.preview()
    end
end

function Base.show(io::IO, b::Bingo)
    println(io, "Bingo sheet:")
    print(io, b[1])
    for each in b[2:end]
        println(io)
        print(io, each)
    end
end
{"hexsha": "9281e086a41a193a5d19f0f50baaa5e2939acd02", "size": 2079, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/bingo.jl", "max_stars_repo_name": "Roger-luo/PinGo.jl", "max_stars_repo_head_hexsha": "a382ac8c9856614f5f3e3af5293582d0cd438ca7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-06-26T18:35:34.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-26T19:49:30.000Z", "max_issues_repo_path": "src/bingo.jl", "max_issues_repo_name": "Roger-luo/PinGo.jl", "max_issues_repo_head_hexsha": "a382ac8c9856614f5f3e3af5293582d0cd438ca7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2020-06-26T20:05:07.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-26T20:26:43.000Z", "max_forks_repo_path": "src/bingo.jl", "max_forks_repo_name": "Roger-luo/PinGo.jl", "max_forks_repo_head_hexsha": "a382ac8c9856614f5f3e3af5293582d0cd438ca7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-06-26T21:07:17.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-26T21:07:17.000Z", "avg_line_length": 26.3164556962, "max_line_length": 81, "alphanum_fraction": 0.5387205387, "num_tokens": 624}
From MatchingLogic Require Export
  Syntax
  IndexManipulation
  Semantics
  ProofSystem
  StringSignature
  wftactics
  DerivedOperators_Syntax
  DerivedOperators_Semantics
  NamedAxioms
  ProofInfo.

Module Notations.
  Export
    MatchingLogic.Syntax.Notations
    MatchingLogic.Substitution.Notations
    MatchingLogic.DerivedOperators_Syntax.Notations
    MatchingLogic.ApplicationContext.Notations
    MatchingLogic.ProofInfo.Notations.

  Export Syntax.BoundVarSugar.
End Notations.
{"author": "harp-project", "repo": "AML-Formalization", "sha": "ee6fd737632e1bb2737b22cbbbca3b8a3e68f89d", "save_path": "github-repos/coq/harp-project-AML-Formalization", "path": "github-repos/coq/harp-project-AML-Formalization/AML-Formalization-ee6fd737632e1bb2737b22cbbbca3b8a3e68f89d/matching-logic/src/Logic.v"}
# Copyright (c) 2020, NVIDIA CORPORATION. import itertools import warnings import numpy as np import pandas as pd import cudf from cudf import _lib as libcudf from cudf._lib.join import compute_result_col_names from cudf.core.dtypes import CategoricalDtype class Merge(object): def __init__( self, lhs, rhs, on, left_on, right_on, left_index, right_index, how, sort, lsuffix, rsuffix, method, indicator, suffixes, ): """ Manage the merging of two Frames. Parameters ---------- lhs : Series or DataFrame The left operand of the merge rhs : Series or DataFrame The right operand of the merge on : string or list like A set of key columns in the left and right operands elements must be common to both frames left_on : string or list like A set of key columns in the left operand. Must be specified with right_on or right_index concurrently right_on : string or list like A set of key columns in the right operand. Must be specified with left_on or left_index concurrently left_index : bool Boolean flag indicating the left index column or columns are to be used as join keys in order. right_index : bool Boolean flag indicating the right index column or coumns are to be used as join keys in order. how : string The type of join. Possible values are 'inner', 'outer', 'left', 'leftsemi' and 'leftanti' sort : bool Boolean flag indicating if the output Frame is to be sorted on the output's join keys, in left to right order. lsuffix : string The suffix to be appended to left hand column names that are found to exist in the right frame, but are not specified as join keys themselves. rsuffix : string The suffix to be appended to right hand column names that are found to exist in the left frame, but are not specified as join keys themselves. suffixes : list like Left and right suffixes specified together, unpacked into lsuffix and rsuffix. """ self.lhs = lhs self.rhs = rhs self.left_index = left_index self.right_index = right_index self.method = method self.sort = sort # check that the merge is valid self.validate_merge_cfg( lhs, rhs, on, left_on, right_on, left_index, right_index, how, lsuffix, rsuffix, suffixes, ) self.how = how self.preprocess_merge_params( on, left_on, right_on, lsuffix, rsuffix, suffixes ) def perform_merge(self): """ Call libcudf to perform a merge between the operands. If necessary, cast the input key columns to compatible types. Potentially also cast the output back to categorical. """ output_dtypes = self.compute_output_dtypes() self.typecast_input_to_libcudf() libcudf_result = libcudf.join.join( self.lhs, self.rhs, self.how, self.method, left_on=self.left_on, right_on=self.right_on, left_index=self.left_index, right_index=self.right_index, ) result = self.out_class._from_table(libcudf_result) result = self.typecast_libcudf_to_output(result, output_dtypes) if isinstance(result, cudf.Index): return result else: return result[ compute_result_col_names(self.lhs, self.rhs, self.how) ] def preprocess_merge_params( self, on, left_on, right_on, lsuffix, rsuffix, suffixes ): """ Translate a valid configuration of user input parameters into the subset of input configurations handled by the cython layer. Apply suffixes to columns. 
""" self.out_class = cudf.DataFrame if isinstance(self.lhs, cudf.MultiIndex) or isinstance( self.rhs, cudf.MultiIndex ): self.out_class = cudf.MultiIndex elif isinstance(self.lhs, cudf.Index): self.out_class = self.lhs.__class__ if on: on = [on] if isinstance(on, str) else list(on) left_on = right_on = on else: if left_on: left_on = ( [left_on] if isinstance(left_on, str) else list(left_on) ) if right_on: right_on = ( [right_on] if isinstance(right_on, str) else list(right_on) ) same_named_columns = set(self.lhs._data.keys()) & set( self.rhs._data.keys() ) if not (left_on or right_on) and not ( self.left_index and self.right_index ): left_on = right_on = list(same_named_columns) no_suffix_cols = [] if left_on and right_on: no_suffix_cols = [ left_name for left_name, right_name in zip(left_on, right_on) if left_name == right_name and left_name in same_named_columns ] if suffixes: lsuffix, rsuffix = suffixes for name in same_named_columns: if name not in no_suffix_cols: self.lhs.rename( {name: f"{name}{lsuffix}"}, inplace=True, axis=1 ) self.rhs.rename( {name: f"{name}{rsuffix}"}, inplace=True, axis=1 ) if left_on and name in left_on: left_on[left_on.index(name)] = f"{name}{lsuffix}" if right_on and name in right_on: right_on[right_on.index(name)] = f"{name}{rsuffix}" self.left_on = left_on if left_on is not None else [] self.right_on = right_on if right_on is not None else [] self.lsuffix = lsuffix self.rsuffix = rsuffix @staticmethod def validate_merge_cfg( lhs, rhs, on, left_on, right_on, left_index, right_index, how, lsuffix, rsuffix, suffixes, ): """ Error for various invalid combinations of merge input parameters """ # must actually support the requested merge type if how not in {"left", "inner", "outer", "leftanti", "leftsemi"}: raise NotImplementedError(f"{how} merge not supported yet") # Passing 'on' with 'left_on' or 'right_on' is ambiguous if on and (left_on or right_on): raise ValueError( 'Can only pass argument "on" OR "left_on" ' 'and "right_on", not a combination of both.' 
) # Can't merge on unnamed Series if (isinstance(lhs, cudf.Series) and not lhs.name) or ( isinstance(rhs, cudf.Series) and not rhs.name ): raise ValueError("Can not merge on unnamed Series") # Keys need to be in their corresponding operands if on: if isinstance(on, str): on_keys = [on] elif isinstance(on, tuple): on_keys = list(on) else: on_keys = on for key in on_keys: if not (key in lhs._data.keys() and key in rhs._data.keys()): raise KeyError(f"on key {on} not in both operands") elif left_on and right_on: left_on_keys = ( [left_on] if not isinstance(left_on, list) else left_on ) right_on_keys = ( [right_on] if not isinstance(right_on, list) else right_on ) for key in left_on_keys: if key not in lhs._data.keys(): raise KeyError(f'Key "{key}" not in left operand') for key in right_on_keys: if key not in rhs._data.keys(): raise KeyError(f'Key "{key}" not in right operand') # Require same total number of columns to join on in both operands len_left_on = 0 len_right_on = 0 if left_on: len_left_on += ( len(left_on) if pd.api.types.is_list_like(left_on) else 1 ) if right_on: len_right_on += ( len(right_on) if pd.api.types.is_list_like(right_on) else 1 ) if not (len_left_on + left_index * lhs._num_indices) == ( len_right_on + right_index * rhs._num_indices ): raise ValueError( "Merge operands must have same number of join key columns" ) # If nothing specified, must have common cols to use implicitly same_named_columns = set(lhs._data.keys()) & set(rhs._data.keys()) if ( not (left_index or right_index) and not (left_on or right_on) and len(same_named_columns) == 0 ): raise ValueError("No common columns to perform merge on") if suffixes: lsuffix, rsuffix = suffixes for name in same_named_columns: if name == left_on == right_on: continue elif left_on and right_on: if (name in left_on and name in right_on) and ( left_on.index(name) == right_on.index(name) ): continue else: if not (lsuffix or rsuffix): raise ValueError( "there are overlapping columns but " "lsuffix and rsuffix are not defined" ) def typecast_input_to_libcudf(self): """ Check each pair of join keys in the left and right hand operands and apply casting rules to match their types before passing the result to libcudf. """ lhs_keys, rhs_keys, lhs_cols, rhs_cols = [], [], [], [] if self.left_index: lhs_keys.append(self.lhs.index._data.keys()) lhs_cols.append(self.lhs.index) if self.right_index: rhs_keys.append(self.rhs.index._data.keys()) rhs_cols.append(self.rhs.index) if self.left_on: lhs_keys.append(self.left_on) lhs_cols.append(self.lhs) if self.right_on: rhs_keys.append(self.right_on) rhs_cols.append(self.rhs) for l_key_grp, r_key_grp, l_col_grp, r_col_grp in zip( lhs_keys, rhs_keys, lhs_cols, rhs_cols ): for l_key, r_key in zip(l_key_grp, r_key_grp): to_dtype = self.input_to_libcudf_casting_rules( l_col_grp._data[l_key], r_col_grp._data[r_key], self.how ) l_col_grp._data[l_key] = l_col_grp._data[l_key].astype( to_dtype ) r_col_grp._data[r_key] = r_col_grp._data[r_key].astype( to_dtype ) def input_to_libcudf_casting_rules(self, lcol, rcol, how): """ Determine what dtype the left and right hand input columns must be cast to for a libcudf join to proceed. 
""" cast_warn = ( "can't safely cast column from {} with type" " {} to {}, upcasting to {}" ) ctgry_err = ( "can't implicitly cast column {0} to categories" " from {1} during {1} join" ) dtype_l = lcol.dtype dtype_r = rcol.dtype libcudf_join_type = None if pd.api.types.is_dtype_equal(dtype_l, dtype_r): # if categorical and equal, children passed to libcudf libcudf_join_type = dtype_l elif isinstance(dtype_l, CategoricalDtype) and isinstance( dtype_r, CategoricalDtype ): # categories are not equal libcudf_join_type = np.dtype("O") elif how == "left": check_col = rcol.fillna(0) if not check_col.can_cast_safely(dtype_l): libcudf_join_type = self.input_to_libcudf_casting_rules( lcol, rcol, "inner" ) warnings.warn( cast_warn.format( "right", dtype_r, dtype_l, libcudf_join_type ) ) else: libcudf_join_type = dtype_l elif how == "right": check_col = lcol.fillna(0) if not check_col.can_cast_safely(dtype_r): libcudf_join_type = self.input_to_libcudf_casting_rules( lcol, rcol, "inner" ) warnings.warn( cast_warn.format( "left", dtype_l, dtype_r, libcudf_join_type ) ) else: libcudf_join_type = dtype_r elif isinstance(dtype_l, CategoricalDtype): if how == "right": raise ValueError(ctgry_err.format(rcol, "right")) libcudf_join_type = lcol.cat().categories.dtype elif isinstance(dtype_r, CategoricalDtype): if how == "left": raise ValueError(ctgry_err.format(lcol, "left")) libcudf_join_type = rcol.cat().categories.dtype elif how in {"inner", "outer"}: if (np.issubdtype(dtype_l, np.number)) and ( np.issubdtype(dtype_r, np.number) ): if dtype_l.kind == dtype_r.kind: # both ints or both floats libcudf_join_type = max(dtype_l, dtype_r) else: libcudf_join_type = np.find_common_type( [], [dtype_l, dtype_r] ) elif np.issubdtype(dtype_l, np.datetime64) and np.issubdtype( dtype_r, np.datetime64 ): libcudf_join_type = max(dtype_l, dtype_r) return libcudf_join_type def libcudf_to_output_casting_rules(self, lcol, rcol, how): """ Determine what dtype an output merge key column should be cast to after it has been processed by libcudf. Determine if a column should be promoted to a categorical datatype. """ dtype_l = lcol.dtype dtype_r = rcol.dtype merge_return_type = None # we currently only need to do this for categorical variables if isinstance(dtype_l, CategoricalDtype) and isinstance( dtype_r, CategoricalDtype ): if pd.api.types.is_dtype_equal(dtype_l, dtype_r): if how in {"inner", "left"}: merge_return_type = dtype_l elif how == "outer" and not ( dtype_l.ordered or dtype_r.ordered ): new_cats = cudf.concat( dtype_l.categories, dtype_r.categories ).unique() merge_return_type = cudf.core.dtypes.CategoricalDtype( categories=new_cats ) else: merge_return_type = "category" return merge_return_type def compute_output_dtypes(self): """ Determine what datatypes should be applied to the result of a libcudf join, baesd on the original left and right frames. 
""" index_dtypes = {} l_data_join_cols = {} r_data_join_cols = {} data_dtypes = { name: col.dtype for name, col in itertools.chain( self.lhs._data.items(), self.rhs._data.items() ) } if self.left_index and self.right_index: l_idx_join_cols = list(self.lhs.index._data.values()) r_idx_join_cols = list(self.rhs.index._data.values()) elif self.left_on and self.right_index: # Keep the orignal dtypes in the LEFT index if possible # should trigger a bunch of no-ops l_idx_join_cols = list(self.lhs.index._data.values()) r_idx_join_cols = list(self.lhs.index._data.values()) for i, name in enumerate(self.left_on): l_data_join_cols[name] = self.lhs._data[name] r_data_join_cols[name] = list(self.rhs.index._data.values())[i] elif self.left_index and self.right_on: # see above l_idx_join_cols = list(self.rhs.index._data.values()) r_idx_join_cols = list(self.rhs.index._data.values()) for i, name in enumerate(self.right_on): l_data_join_cols[name] = list(self.lhs.index._data.values())[i] r_data_join_cols[name] = self.rhs._data[name] if self.left_on and self.right_on: l_data_join_cols = self.lhs._data r_data_join_cols = self.rhs._data if self.left_index or self.right_index: for i in range(len(self.lhs.index._data.items())): index_dtypes[i] = self.libcudf_to_output_casting_rules( l_idx_join_cols[i], r_idx_join_cols[i], self.how ) for name in itertools.chain(self.left_on, self.right_on): if name in self.left_on and name in self.right_on: data_dtypes[name] = self.libcudf_to_output_casting_rules( l_data_join_cols[name], r_data_join_cols[name], self.how ) return (index_dtypes, data_dtypes) def typecast_libcudf_to_output(self, output, output_dtypes): """ Apply precomputed output index and data column data types to the output of a libcudf join. """ index_dtypes, data_dtypes = output_dtypes if output._index and len(index_dtypes) > 0: for index_dtype, index_col_lbl, index_col in zip( index_dtypes.values(), output._index._data.keys(), output._index._data.values(), ): if index_dtype: output._index._data[ index_col_lbl ] = self._build_output_col(index_col, index_dtype) for data_col_lbl, data_col in output._data.items(): data_dtype = data_dtypes[data_col_lbl] if data_dtype: output._data[data_col_lbl] = self._build_output_col( data_col, data_dtype ) return output def _build_output_col(self, col, dtype): if isinstance( dtype, (cudf.core.dtypes.CategoricalDtype, pd.CategoricalDtype) ): outcol = cudf.core.column.build_categorical_column( categories=dtype.categories, codes=col.set_mask(None), mask=col.base_mask, ordered=dtype.ordered, ) else: outcol = col.astype(dtype) return outcol
{"hexsha": "0aadcf875cb647b8e11674403fb9c7759ffa1b41", "size": 19468, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/cudf/cudf/core/join/join.py", "max_stars_repo_name": "isVoid/cudf", "max_stars_repo_head_hexsha": "1a3b3f217be93a55b47af3a9d0da29f0fcb7c7e9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-07-08T21:46:50.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-08T21:46:50.000Z", "max_issues_repo_path": "python/cudf/cudf/core/join/join.py", "max_issues_repo_name": "isVoid/cudf", "max_issues_repo_head_hexsha": "1a3b3f217be93a55b47af3a9d0da29f0fcb7c7e9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-02-01T14:20:00.000Z", "max_issues_repo_issues_event_max_datetime": "2020-02-01T14:20:00.000Z", "max_forks_repo_path": "python/cudf/cudf/core/join/join.py", "max_forks_repo_name": "isVoid/cudf", "max_forks_repo_head_hexsha": "1a3b3f217be93a55b47af3a9d0da29f0fcb7c7e9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.2532588454, "max_line_length": 79, "alphanum_fraction": 0.5483357304, "include": true, "reason": "import numpy", "num_tokens": 4186}
function nlp3(oct::Bool = false) m = JuMP.Model() @variable(m, 0 <= x[1:10]) for i = 1:8 JuMP.set_lower_bound(x[[1, 2, 4, 6, 7, 8, 9, 10][i]], [1,1,1,85,90,3,1.2,145][i]) end for i = 1:10 JuMP.set_upper_bound(x[i], [2000, 16000, 120, 5000, 2000, 93, 95, 12, 4, 162][i]) JuMP.set_start_value(x[i], [1724.90452208, 16000, 98.0900813608, 3049.1211364, 1995.02326433, 90.718089075, 94.2274481766, 10.432474977, 2.59051951438, 149.682344530][i]) end @constraint(m, e1, x[1] - 1.22*x[4] + x[5] == 0) @constraint(m, e2, x[9] + 0.222*x[10] == 35.82) @constraint(m, e3, 3*x[7] - x[10] == 133) if !oct @NLconstraint(m, e4, x[7] - 1.098*x[8] + 0.038*(x[8]^2) - 0.325*(x[6] - 89) == 86.35) @NLconstraint(m, e5, x[4]*x[9]*x[6] + 1000*x[3]*x[6] - 98000*x[3] == 0) @NLconstraint(m, e6, x[2] + x[5] - x[1]*x[8] == 0) @NLconstraint(m, e7, 1.12*x[1] + 0.13167*x[8]*x[1] - 0.00667*(x[8]^2)*x[1] - x[4] >= 0) @NLobjective(m, Min, 5.04*x[1] + 0.035*x[2] + 10*x[3] + 3.36*x[5] - 0.063*x[4]*x[7]) return m else @variable(m, obj) @objective(m, Min, obj) gm = GlobalModel(model = m, name = "nlp3") add_nonlinear_constraint(gm, :(x -> x[7] - 1.098*x[8] + 0.038*(x[8]^2) - 0.325*(x[6] - 89) - 86.35), vars = [x[6], x[7], x[8]], name = "e4", equality=true) add_nonlinear_constraint(gm, :(x -> x[4]*x[9]*x[6] + 1000*x[3]*x[6] - 98000*x[3]), vars = [x[3], x[4], x[6], x[9]], name = "e5", equality = true) add_nonlinear_constraint(gm, :(x -> x[2] + x[5] - x[1]*x[8]), vars = [x[1], x[2], x[5], x[8]], name = "e6", equality = true) add_nonlinear_constraint(gm, :(x -> 1.12*x[1] + 0.13167*x[8]*x[1] - 0.00667*(x[8]^2)*x[1] - x[4]), vars = [x[1], x[4], x[8]], name = "e7") add_nonlinear_constraint(gm, :(x -> 5.04*x[1] + 0.035*x[2] + 10*x[3] + 3.36*x[5] - 0.063*x[4]*x[7]), vars = [x[1], x[2], x[3], x[4], x[5], x[7]], dependent_var = obj, name = "obj") return gm end end
{"hexsha": "973a558ad1b7f87491d956b5ccc06958af7f3052", "size": 2470, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "data/baron/nlp3.jl", "max_stars_repo_name": "1ozturkbe/OCTHaGOn.jl", "max_stars_repo_head_hexsha": "222a73c8da834c8e4114b6f29492d8ab917f6722", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data/baron/nlp3.jl", "max_issues_repo_name": "1ozturkbe/OCTHaGOn.jl", "max_issues_repo_head_hexsha": "222a73c8da834c8e4114b6f29492d8ab917f6722", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2022-02-13T15:33:17.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-26T15:45:11.000Z", "max_forks_repo_path": "data/baron/nlp3.jl", "max_forks_repo_name": "1ozturkbe/OCTHaGOn.jl", "max_forks_repo_head_hexsha": "222a73c8da834c8e4114b6f29492d8ab917f6722", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 53.6956521739, "max_line_length": 109, "alphanum_fraction": 0.4174089069, "num_tokens": 1018}
module Subst

-- Substitution inside untyped lambda calculus terms.

import Term

%default total
%access public export


shift : (cutoff : Nat) -> (distance : Nat) -> Nat -> Nat
shift Z distance k = distance+k
shift (S c) distance Z = Z
shift (S c) distance (S k) = S $ shift c distance k


-- Shift the free variables of a term at or above the cutoff by `distance`.
shiftTerm : (cutoff : Nat) -> (distance : Nat) -> Term -> Term
shiftTerm c d (TVar i) = TVar (shift c d i)
shiftTerm c d (TAbs e) = TAbs $ shiftTerm (S c) d e
shiftTerm c d (TApp e1 e2) = let e1' = shiftTerm c d e1
                                 e2' = shiftTerm c d e2
                             in TApp e1' e2'
shiftTerm c d (TRec e1 e2 e3) = let e1' = shiftTerm c d e1
                                    e2' = shiftTerm c d e2
                                    e3' = shiftTerm c d e3
                                in TRec e1' e2' e3'
shiftTerm c d TZero = TZero
shiftTerm c d (TSucc e) = TSucc $ shiftTerm c d e
shiftTerm c d (TPred e) = TPred $ shiftTerm c d e
shiftTerm c d (TIfz e1 e2 e3) = let e1' = shiftTerm c d e1
                                    e2' = shiftTerm c d e2
                                    e3' = shiftTerm c d e3
                                in TIfz e1' e2' e3'


subst_var : Term ->      -- Term that is substituted in.
            (i : Nat) -> -- The i-th variable (Var i) is substituted for.
            (j : Nat) -> -- Substitution takes place inside the term (Var j).
            Term
subst_var ts Z Z = ts
subst_var ts Z (S j) = TVar j
subst_var ts (S i) Z = TVar Z
subst_var ts (S i) (S j) = shiftTerm Z (S Z) $ subst_var ts i j


subst : Term ->      -- Term that is substituted in.
        (i : Nat) -> -- The i-th variable (Var i) is substituted for.
        Term ->      -- Substitution takes place inside this term.
        Term
subst ts i (TVar j) = subst_var ts i j
subst ts i (TAbs e) = TAbs $ subst ts (S i) e
subst ts i (TApp e1 e2) = TApp (subst ts i e1) (subst ts i e2)
subst ts i (TRec e1 e2 e3) = TRec (subst ts i e1) (subst ts i e2) (subst ts i e3)
subst _ _ TZero = TZero
subst ts i (TSucc e) = TSucc (subst ts i e)
subst ts i (TPred e) = TPred (subst ts i e)
subst ts i (TIfz e1 e2 e3) = TIfz (subst ts i e1) (subst ts i e2) (subst ts i e3)
{"hexsha": "9bfba2eb8cfef41af74ce7466dee3bcc6d85f0c6", "size": 2294, "ext": "idr", "lang": "Idris", "max_stars_repo_path": "totality/src/Subst.idr", "max_stars_repo_name": "normanrink/PCF", "max_stars_repo_head_hexsha": "6b817263fc7d64f0ed0e5535261814d572292192", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "totality/src/Subst.idr", "max_issues_repo_name": "normanrink/PCF", "max_issues_repo_head_hexsha": "6b817263fc7d64f0ed0e5535261814d572292192", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "totality/src/Subst.idr", "max_forks_repo_name": "normanrink/PCF", "max_forks_repo_head_hexsha": "6b817263fc7d64f0ed0e5535261814d572292192", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.3095238095, "max_line_length": 78, "alphanum_fraction": 0.5265911072, "num_tokens": 756}
"""Utilities for training a GP fed from the MEGNet Concatenation layer for a pretrained model.""" from pathlib import Path from typing import Dict, Iterator, List, Optional, Tuple, Union import numpy as np import tensorflow as tf import tensorflow.python.util.deprecation as deprecation import tensorflow_probability as tfp from tqdm import tqdm from ..datalib.metrics import MetricAnalyser deprecation._PRINT_DEPRECATION_WARNINGS = False tfd = tfp.distributions tfk = tfp.math.psd_kernels def convert_index_points(array: np.ndarray) -> tf.Tensor: """Convert an array into a tensor appropriate for GP index points. Args: array (:obj:`np.ndarray`): The array to extend. Returns: tensor (:obj:`tf.Tensor`): The converted Tensor. """ return tf.constant(array, dtype=tf.float64) def get_default_kernel() -> tfp.math.psd_kernels.ExponentiatedQuadratic: """Get a default kernel.""" amplitude = tf.Variable(1.0, trainable=True, dtype=tf.float64, name="amplitude") length_scale = tf.Variable( 1.0, trainable=True, dtype=tf.float64, name="length_scale" ) return tfk.ExponentiatedQuadratic( amplitude=amplitude, length_scale=length_scale, ) class GPTrainer(tf.Module): """Class for training hyperparameters for GP kernels. Args: observation_index_points (:obj:`tf.Tensor`): The observed index points (`x` values). observations (:obj:`tf.Tensor`): The observed samples (`y` values). checkpoint_dir (str or :obj:`Path`, optional): The directory to check for checkpoints and to save checkpoints to. kernel: The kernel to use. Must be instantiated with trainable parameters. Defaults to a radial basis function. Attributes: observation_index_points (:obj:`tf.Tensor`): The observed index points (`x` values). observations (:obj:`tf.Tensor`): The observed samples (`y` values). checkpoint_dir (str or :obj:`Path`, optional): The directory to check for checkpoints and to save checkpoints to. trainable_vars: The trainable variables (parameters) of the kernel as a dictionary. The keys are the variables' names, stripped of the colon. kernel (:obj:`tf.Tensor`): The kernel to use for the Gaussian process. optimizer (:obj:`Optimizer`): The optimizer to use for determining :attr:`trainable_vars`. training_steps (:obj:`tf.Tensor`): The current number of training epochs executed. loss (:obj:`tf.Tensor`): The current loss on the training data (A negative log likelihood). metrics (dict): Contains metric names and values. Default to `np.nan` when uncalculated. ckpt (:obj:`Checkpoint`, optional): A tensorflow training checkpoint. Defaults to `None` if `checkpoint_dir` is not passed. ckpt_manager (:obj:`CheckpointManager`, optional): A checkpoint manager, used to save :attr:`ckpt` to file. Defaults to `None` if `checkpoint_dir` is not passed. gp_prior (:obj:`GaussianProcess`): A Gaussian process using :attr:`kernel` and using :attr:`observation_index_points` as indices. 
""" def __init__( self, observation_index_points: tf.Tensor, observations: tf.Tensor, checkpoint_dir: Optional[Union[str, Path]] = None, kernel: Optional[tfp.math.psd_kernels.PositiveSemidefiniteKernel] = None, ): """Initialze attributes, kernel, optimizer and checkpoint manager.""" self.observation_index_points = tf.Variable( observation_index_points, dtype=tf.float64, trainable=False, name="observation_index_points", ) self.observations = tf.Variable( observations, dtype=tf.float64, trainable=False, name="observations", ) if kernel is None: self.kernel: tfp.math.psd_kernels.PositiveSemidefiniteKernel = ( get_default_kernel() ) else: self.kernel = kernel self.trainable_vars: Dict[str, tf.Variable] = { var.name.strip(":"): var for var in self.kernel.trainable_variables } self.optimizer = tf.optimizers.Adam() self.training_steps = tf.Variable( 0, dtype=tf.int32, trainable=False, name="training_steps" ) self.loss = tf.Variable( np.nan, dtype=tf.float64, trainable=False, name="training_nll", ) self.metrics = { "nll": tf.Variable( np.nan, dtype=tf.float64, trainable=False, name="validation_nll", ), "mae": tf.Variable( np.nan, dtype=tf.float64, trainable=False, name="validation_mae", ), "sharpness": tf.Variable( np.nan, dtype=tf.float64, trainable=False, name="validation_sharpness", ), "variation": tf.Variable( np.nan, dtype=tf.float64, trainable=False, name="validation_coeff_variance", ), "calibration_err": tf.Variable( np.nan, dtype=tf.float64, trainable=False, name="validation_calibration_error", ), } if checkpoint_dir: self.ckpt = tf.train.Checkpoint( step=self.training_steps, loss=self.loss, val_nll=self.metrics["nll"], val_mae=self.metrics["mae"], val_sharpness=self.metrics["sharpness"], val_coeff_var=self.metrics["variation"], val_cal_err=self.metrics["calibration_err"], **self.trainable_vars, ) self.ckpt_manager = tf.train.CheckpointManager( self.ckpt, checkpoint_dir, max_to_keep=1, step_counter=self.training_steps, ) self.ckpt.restore(self.ckpt_manager.latest_checkpoint) if self.ckpt_manager.latest_checkpoint: print(f"Restored from {self.ckpt_manager.latest_checkpoint}") else: print("No checkpoints found.") else: self.ckpt = None self.ckpt_manager = None self.gp_prior = tfd.GaussianProcess(self.kernel, self.observation_index_points) @staticmethod def load_model(model_dir: str): """Load a `GPTrainer` model from a file. Args: model_dir (str): The directory to import the model from. Returns: The model as a TensorFlow AutoTrackable object. """ return tf.saved_model.load(model_dir) def get_model( self, index_points: tf.Tensor ) -> tfp.python.distributions.GaussianProcessRegressionModel: """Get a regression model for a set of index points. Args: index_points (:obj:`tf.Tensor`): The index points to fit regression model. Returns: gprm (:obj:`GaussianProcessRegressionModel`): The regression model. """ return tfd.GaussianProcessRegressionModel( kernel=self.kernel, index_points=index_points, observation_index_points=self.observation_index_points, observations=self.observations, ) @tf.function(input_signature=[tf.TensorSpec(None, tf.float64)]) def predict(self, points: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]: """Predict targets and the standard deviation of the distribution. Args: points (:obj:`tf.Tensor`): The points (`x` values) to make predictions with. Returns: mean (:obj:`tf.Tensor`): The mean of the distribution at each point. stddev (:obj:`tf.Tensor`): The standard deviation of the distribution at each point. 
""" gprm = self.get_model(points) return gprm.mean(), gprm.stddev() def train_model( self, val_points: tf.Tensor, val_obs: tf.Tensor, epochs: int = 1000, patience: Optional[int] = None, save_dir: Optional[Union[str, Path]] = None, metrics: List[str] = [], ) -> Iterator[Dict[str, float]]: """Optimize model parameters. Args: val_points (:obj:`tf.Tensor`): The validation points. val_obs (:obj:`tf.Tensor`): The validation targets. epochs (int): The number of training epochs. patience (int, optional): The number of epochs after which to stop training if no improvement is seen on the loss of the validation data. save_dir (str or :obj:`Path`, optional): Where to save the model. metrics (list of str): A list of valid metrics to calculate. Possible valid metrics are given in :class:`GPMetrics`. Yields: metrics (dict of str: float): A dictionary of the metrics after the last training epoch. """ best_val_nll: float = self.metrics["nll"].numpy() if np.isnan(best_val_nll): # Set to infinity so < logic works best_val_nll = np.inf if (self.ckpt_manager or patience) and "nll" not in metrics: # We need to track NLL for these to work metrics.append("nll") steps_since_improvement: int = 1 gp_metrics = MetricAnalyser(val_points, val_obs, self.get_model(val_points)) for i in tqdm(range(epochs), "Training epochs"): self.loss.assign(self.optimize_cycle()) self.training_steps.assign_add(1) # * Determine and assign metrics if gp_metrics.REQUIRES_MEAN.intersection(metrics): gp_metrics.update_mean() if gp_metrics.REQUIRES_STDDEV.intersection(metrics): gp_metrics.update_stddevs() try: metric_dict: Dict[str, float] = { metric: getattr(gp_metrics, metric) for metric in metrics } except AttributeError as e: raise ValueError(f"Invalid metric: {e}") for metric, value in metric_dict.items(): self.metrics[metric].assign(value) metric_dict["loss"] = self.loss.numpy() yield metric_dict if patience or self.ckpt_manager: if self.metrics["nll"] < best_val_nll: best_val_nll = self.metrics["nll"].numpy() steps_since_improvement = 1 if self.ckpt_manager: self.ckpt_manager.save(self.training_steps) else: steps_since_improvement += 1 if patience and steps_since_improvement >= patience: print( "Patience exceeded: " f"{steps_since_improvement} steps since NLL improvement." ) break if save_dir: tf.saved_model.save(self, save_dir) @tf.function def optimize_cycle(self) -> tf.Tensor: """Perform one training step. Returns: loss (:obj:`Tensor`): A Tensor containing the negative log probability loss. """ with tf.GradientTape() as tape: loss = -self.gp_prior.log_prob(self.observations) grads = tape.gradient(loss, self.trainable_variables) self.optimizer.apply_gradients(zip(grads, self.trainable_variables)) return loss
{"hexsha": "5ef231fca4cf3dd232cd2b514d15c15f0f6344eb", "size": 11965, "ext": "py", "lang": "Python", "max_stars_repo_path": "unlockgnn/gp/gp_trainer.py", "max_stars_repo_name": "CalvinCYY/unlockGNN", "max_stars_repo_head_hexsha": "f620f12f2ba823b64895e6c7a6d435320223eb06", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "unlockgnn/gp/gp_trainer.py", "max_issues_repo_name": "CalvinCYY/unlockGNN", "max_issues_repo_head_hexsha": "f620f12f2ba823b64895e6c7a6d435320223eb06", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "unlockgnn/gp/gp_trainer.py", "max_forks_repo_name": "CalvinCYY/unlockGNN", "max_forks_repo_head_hexsha": "f620f12f2ba823b64895e6c7a6d435320223eb06", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.0391566265, "max_line_length": 97, "alphanum_fraction": 0.5865440869, "include": true, "reason": "import numpy", "num_tokens": 2413}
import numpy as np import matplotlib.pyplot as plt def get_data(num_x, num_k, x_mean, y_mean, sigma): num = int(num_k * num_x) X = np.zeros(num) Y = np.zeros(num) for i in range(num_k): for j in range(num_x): X[i * num_x + j] = np.random.normal(x_mean[i], sigma[i]) Y[i * num_x + j] = np.random.normal(y_mean[i], sigma[i]) X = X[:, np.newaxis] Y = Y[:, np.newaxis] return X, Y def get_blob(num_x, num_k, miu, sigma): S = np.mat(sigma[0]) R = np.linalg.cholesky(S) x = np.dot(np.random.randn(num_x, 2), R) + np.ones((num_x, 1)).dot(miu[0]) for i in range(num_k - 1): S = np.mat(sigma[i + 1]) R = np.linalg.cholesky(S) x = np.append(x, np.dot(np.random.randn(num_x, 2), R) + np.ones((num_x, 1)).dot(miu[i + 1]), axis=0) return np.array(x[:, 0]), np.array(x[:, 1])
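

# Hedged usage sketch: draw two correlated 2-D Gaussian blobs with get_blob.
# The means and covariances below are made-up illustration values (one 1x2 mean
# row and one 2x2 positive-definite covariance per mixture component).
if __name__ == "__main__":
    miu = [np.array([[0.0, 0.0]]), np.array([[4.0, 4.0]])]
    sigma = [np.array([[1.0, 0.5], [0.5, 1.0]]),
             np.array([[0.5, 0.0], [0.0, 0.5]])]
    x, y = get_blob(200, 2, miu, sigma)  # 200 samples per component, 400 points total
    plt.scatter(np.asarray(x).ravel(), np.asarray(y).ravel(), s=5)
    plt.show()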
{"hexsha": "952d360bde73de606c9e272689888d02b0c3380b", "size": 866, "ext": "py", "lang": "Python", "max_stars_repo_path": "GMM/makedata.py", "max_stars_repo_name": "MartrixG/machine-learning", "max_stars_repo_head_hexsha": "10f1b4aa2c723023a0b6d5c7904654b13f1bdaa8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "GMM/makedata.py", "max_issues_repo_name": "MartrixG/machine-learning", "max_issues_repo_head_hexsha": "10f1b4aa2c723023a0b6d5c7904654b13f1bdaa8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "GMM/makedata.py", "max_forks_repo_name": "MartrixG/machine-learning", "max_forks_repo_head_hexsha": "10f1b4aa2c723023a0b6d5c7904654b13f1bdaa8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.652173913, "max_line_length": 108, "alphanum_fraction": 0.5612009238, "include": true, "reason": "import numpy", "num_tokens": 287}
% Codes for CVPR-15 work `Face Alignment by Coarse-to-Fine Shape Searching'
% Any question please contact Shizhan Zhu: zhshzhutah2@gmail.com
% Released on July 25, 2015

function T = getTransViaRotateGivenCenter(theta_vector,center,rotatorLength)
%T = getTransViaRotateGivenCenter(theta_vector,center,rotatorLength)
% T: m*1 t_concord
% theta_vector: m*1 in degree! Make sure abs(theta)<180
% center: 1*2 indicates the x-y coordinates of rotation center
% rotatorLength: scalar, length of the rotator. Set this variable to
% avoid numerical problems.

warning('This function might get non-affine tform');

assert(size(theta_vector,1)==1 || size(theta_vector,2)==1);
assert(size(center,1)==1 || size(center,2)==1);
assert(length(center)==2);
assert(length(rotatorLength)==1);
theta_vector = theta_vector(:);
assert(all(abs(theta_vector) < 180));

pose_src = zeros(length(theta_vector),4);
pose_src(:,[1 3]) = repmat(center(:)',length(theta_vector),1);
pose_src(:,2) = center(1) + rotatorLength * cosd(theta_vector);
pose_src(:,4) = center(2) + rotatorLength * sind(theta_vector);
pose_dst = [center(1) center(1)+rotatorLength center(2) center(2)];

T = getTransToSpecific(pose_src,pose_dst);

end
{"author": "zhusz", "repo": "CVPR15-CFSS", "sha": "11b8d0b28a4a3e954741a4dae2f114df7b644d4e", "save_path": "github-repos/MATLAB/zhusz-CVPR15-CFSS", "path": "github-repos/MATLAB/zhusz-CVPR15-CFSS/CVPR15-CFSS-11b8d0b28a4a3e954741a4dae2f114df7b644d4e/codes_release/trans/getTransViaRotateGivenCenter.m"}
// ========================================================================== // SeqAn - The Library for Sequence Analysis // ========================================================================== // // Copyright (c) 2006-2018, Knut Reinert, FU Berlin // Copyright (c) 2016-2018, Knut Reinert & MPI Molekulare Genetik // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of Knut Reinert or the FU Berlin nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY // OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
// // ========================================================================== #include <benchmark/benchmark.h> #include <cstring> #include <sstream> #include <string_view> #include <seqan3/std/charconv> // ----------------------------------------------------------------------------- // from_char for integral types // ----------------------------------------------------------------------------- char const * str = "122"; template <typename arithmetic_type> static void from_char(benchmark::State & state) { arithmetic_type val{}; size_t sum{}; for (auto _ : state) { std::from_chars(&str[0], &str[0] + sizeof(str), val); sum += val; } // prevent complete optimisation [[maybe_unused]] volatile auto fin = sum; } BENCHMARK_TEMPLATE(from_char, int8_t); BENCHMARK_TEMPLATE(from_char, uint8_t); BENCHMARK_TEMPLATE(from_char, int16_t); BENCHMARK_TEMPLATE(from_char, uint16_t); BENCHMARK_TEMPLATE(from_char, int32_t); BENCHMARK_TEMPLATE(from_char, uint32_t); BENCHMARK_TEMPLATE(from_char, int64_t); BENCHMARK_TEMPLATE(from_char, uint64_t); template <typename arithmetic_type> static void from_stream(benchmark::State & state) { arithmetic_type val{}; size_t sum{}; for (auto _ : state) { std::stringstream ss; ss << str; ss >> val; sum += val; } // prevent complete optimisation [[maybe_unused]] volatile auto fin = sum; } BENCHMARK_TEMPLATE(from_stream, int8_t); BENCHMARK_TEMPLATE(from_stream, uint8_t); BENCHMARK_TEMPLATE(from_stream, int16_t); BENCHMARK_TEMPLATE(from_stream, uint16_t); BENCHMARK_TEMPLATE(from_stream, int32_t); BENCHMARK_TEMPLATE(from_stream, uint32_t); BENCHMARK_TEMPLATE(from_stream, int64_t); BENCHMARK_TEMPLATE(from_stream, uint64_t); template <typename arithmetic_type> static void from_atol(benchmark::State & state) { arithmetic_type val{}; size_t sum{}; for (auto _ : state) { val = atol(str); sum += val; } // prevent complete optimisation [[maybe_unused]] volatile auto fin = sum; } BENCHMARK_TEMPLATE(from_atol, int8_t); BENCHMARK_TEMPLATE(from_atol, uint8_t); BENCHMARK_TEMPLATE(from_atol, int16_t); BENCHMARK_TEMPLATE(from_atol, uint16_t); BENCHMARK_TEMPLATE(from_atol, int32_t); BENCHMARK_TEMPLATE(from_atol, uint32_t); BENCHMARK_TEMPLATE(from_atol, int64_t); BENCHMARK_TEMPLATE(from_atol, uint64_t); #if __has_include(<boost/spirit/include/qi.hpp>) #include <boost/spirit/include/qi.hpp> template <typename arithmetic_type> static void from_boost(benchmark::State & state) { arithmetic_type val{}; size_t sum{}; for (auto _ : state) { auto it = &str[0]; boost::spirit::qi::phrase_parse(it, &str[0] + sizeof(str), boost::spirit::qi::int_, boost::spirit::ascii::space, val); sum += val; } // prevent complete optimisation [[maybe_unused]] volatile auto fin = sum; } BENCHMARK_TEMPLATE(from_boost, int8_t); BENCHMARK_TEMPLATE(from_boost, uint8_t); BENCHMARK_TEMPLATE(from_boost, int16_t); BENCHMARK_TEMPLATE(from_boost, uint16_t); BENCHMARK_TEMPLATE(from_boost, int32_t); BENCHMARK_TEMPLATE(from_boost, uint32_t); BENCHMARK_TEMPLATE(from_boost, int64_t); BENCHMARK_TEMPLATE(from_boost, uint64_t); #endif // __has_include(<boost/spirit/include/qi.hpp>) // ----------------------------------------------------------------------------- // from_char for floating point types // ----------------------------------------------------------------------------- char const * str_float = "122.45e-2"; template <typename arithmetic_type> static void from_chars_to_float(benchmark::State & state) { arithmetic_type val{}; size_t sum{}; for (auto _ : state) { std::from_chars(&str_float[0], &str_float[0] + sizeof(str_float), val); sum 
+= val; } // prevent complete optimisation [[maybe_unused]] volatile auto fin = sum; } template <typename arithmetic_type> static void from_stream_to_float(benchmark::State & state) { arithmetic_type val{}; size_t sum{}; for (auto _ : state) { std::stringstream ss; ss << str_float; ss >> val; sum += val; } // prevent complete optimisation [[maybe_unused]] volatile auto fin = sum; } BENCHMARK_TEMPLATE(from_chars_to_float, float); BENCHMARK_TEMPLATE(from_chars_to_float, double); BENCHMARK_TEMPLATE(from_stream_to_float, float); BENCHMARK_TEMPLATE(from_stream_to_float, double); BENCHMARK_MAIN();
{"hexsha": "b33f3ed16a2a19b01c743824acbd9048bd714811", "size": 6429, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/performance/std/charconv_from_chars_benchmark.cpp", "max_stars_repo_name": "FirstLoveLife/seqan3", "max_stars_repo_head_hexsha": "ac2e983e0a576515c13ebb2c851c43c1eba1ece1", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/performance/std/charconv_from_chars_benchmark.cpp", "max_issues_repo_name": "FirstLoveLife/seqan3", "max_issues_repo_head_hexsha": "ac2e983e0a576515c13ebb2c851c43c1eba1ece1", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/performance/std/charconv_from_chars_benchmark.cpp", "max_forks_repo_name": "FirstLoveLife/seqan3", "max_forks_repo_head_hexsha": "ac2e983e0a576515c13ebb2c851c43c1eba1ece1", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.6699507389, "max_line_length": 99, "alphanum_fraction": 0.6546896874, "num_tokens": 1394}
import os from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier from sklearn.model_selection import RandomizedSearchCV from sklearn.metrics import confusion_matrix from sklearn.pipeline import Pipeline from sklearn.model_selection import train_test_split import numpy as np import pandas as pd import pickle CURR_PATH = os.path.dirname(os.path.realpath(__file__)) def get_random_grid() -> dict: # Parameter values to use in Randomized Search CV return { "n_estimators": [300, 450, 600, 900, 1200], "min_samples_split": [3, 5, 9, 15], "min_samples_leaf": [1, 3], "max_features": ["sqrt"], "max_depth": [25, 50, 100], } def get_pipeline() -> Pipeline: """ Get TF-IDF to Random Forest pipeline. NOTE: Use of 10 fold cross validation along with basic search of parameter space. :return: a sklearn pipeline """ vectorizer = TfidfVectorizer(analyzer="word", stop_words="english", strip_accents="ascii", ngram_range=(1,2)) classifier = RandomizedSearchCV( estimator=RandomForestClassifier(random_state=42), param_distributions=get_random_grid(), n_iter=10, # 10 parameter random samples from grid cv=10, # 10 fold cross validation verbose=2, random_state=42, n_jobs=-1) pipe = Pipeline([('tfidf', vectorizer), ('random_forest', classifier)]) return pipe class SocClassifier: """ The SOC Occupation Classifier class. For creation of models and basic analysis.""" def __init__(self, y_var): """Initialize model and fit based on chosen variable for classification.""" self.y_var = y_var self.data = pd.read_csv(os.path.join(CURR_PATH, '../data/modified_data.csv')) self.model = get_pipeline() self.fit() def fit(self): """Fit model using pipeline.""" self.model.fit(self.data['job_title'], self.data[self.y_var]) def accuracy(self): """Return the accuracy of prediction.""" prediction = self.model.predict(self.data['job_title']) return sum(self.data[self.y_var] == prediction) / len(prediction) def confusion_matrix(self) -> pd.DataFrame: """Return the confusion matrix.""" prediction = self.model.predict(self.data['job_title']) conf_mat = confusion_matrix(self.data[self.y_var], prediction) return pd.DataFrame(conf_mat) def best_params_(self) -> dict: """Return best parameters from RandomizedSearchCV.""" return self.model['random_forest'].best_params_ def pickle_pipeline(self, filename): """Pickle the model for storage.""" pickle.dump(self.model, open(filename, 'wb'))
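

# Hedged usage sketch: "soc_major_group" is a placeholder for whichever label
# column data/modified_data.csv actually provides, and the pickle filename is
# likewise hypothetical. The CSV must exist next to this module for the fit in
# __init__ to run.
if __name__ == "__main__":
    clf = SocClassifier(y_var="soc_major_group")
    print(f"training accuracy: {clf.accuracy():.3f}")
    print(clf.best_params_())
    clf.pickle_pipeline("soc_rf_pipeline.pkl")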
{"hexsha": "3622fed4029894a0a8877cb4100297b0f91cae85", "size": 2782, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/soc_classifier.py", "max_stars_repo_name": "caseyhartnett/SOC_classifier", "max_stars_repo_head_hexsha": "cc293dabf35d58c1f62ba04f163c9b199483dc77", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-16T10:00:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-16T10:00:57.000Z", "max_issues_repo_path": "src/soc_classifier.py", "max_issues_repo_name": "caseyhartnett/SOC_classifier", "max_issues_repo_head_hexsha": "cc293dabf35d58c1f62ba04f163c9b199483dc77", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/soc_classifier.py", "max_forks_repo_name": "caseyhartnett/SOC_classifier", "max_forks_repo_head_hexsha": "cc293dabf35d58c1f62ba04f163c9b199483dc77", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.0933333333, "max_line_length": 113, "alphanum_fraction": 0.6800862689, "include": true, "reason": "import numpy", "num_tokens": 621}
[STATEMENT] lemma lunstream_simps: "g s = Done \<Longrightarrow> lunstream s = LNil" "g s = Skip s' \<Longrightarrow> lunstream s = lunstream s'" "g s = Yield x s' \<Longrightarrow> lunstream s = LCons x (lunstream s')" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (g s = Done \<Longrightarrow> local.lunstream s = LNil) &&& (g s = Skip s' \<Longrightarrow> local.lunstream s = local.lunstream s') &&& (g s = Yield x s' \<Longrightarrow> local.lunstream s = LCons x (local.lunstream s')) [PROOF STEP] by(simp_all add: lunstream.simps)
{"llama_tokens": 201, "file": "Stream_Fusion_Code_Stream_Fusion_LList", "length": 1}
!* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. !* !* Licensed under the Apache License, Version 2.0 (the "License"); !* you may not use this file except in compliance with the License. !* You may obtain a copy of the License at !* !* http://www.apache.org/licenses/LICENSE-2.0 !* !* Unless required by applicable law or agreed to in writing, software !* distributed under the License is distributed on an "AS IS" BASIS, !* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. !* See the License for the specific language governing permissions and !* limitations under the License. subroutine testCase8_icv use omp_lib !$omp parallel !$omp task !$omp parallel PRINT *, omp_get_max_threads() !$omp end parallel !$omp end task !$omp end parallel end subroutine program fortran_omp_task call testcase8_icv print *, "PASS" end program fortran_omp_task
{"hexsha": "1b2e9138e705bb96b38f9beb8d0aedd01501f673", "size": 958, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "test/mp_correct/src/src/nv2041349.f90", "max_stars_repo_name": "kammerdienerb/flang", "max_stars_repo_head_hexsha": "8cc4a02b94713750f09fe6b756d33daced0b4a74", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-12-11T17:43:58.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-11T17:43:58.000Z", "max_issues_repo_path": "test/mp_correct/src/src/nv2041349.f90", "max_issues_repo_name": "kammerdienerb/flang", "max_issues_repo_head_hexsha": "8cc4a02b94713750f09fe6b756d33daced0b4a74", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-12-29T21:15:40.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-15T11:21:10.000Z", "max_forks_repo_path": "test/mp_correct/src/src/nv2041349.f90", "max_forks_repo_name": "kammerdienerb/flang", "max_forks_repo_head_hexsha": "8cc4a02b94713750f09fe6b756d33daced0b4a74", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2019-12-21T06:35:35.000Z", "max_forks_repo_forks_event_max_datetime": "2020-06-07T23:18:58.000Z", "avg_line_length": 33.0344827586, "max_line_length": 75, "alphanum_fraction": 0.7056367432, "num_tokens": 222}
from base.base_predictor import BasePredictor import os import numpy as np class WavClassifyPredictor(BasePredictor): def __init__(self, model, data, config): super(WavClassifyPredictor, self).__init__(model, data, config) def predict(self): class_list = ['baby cry', 'siren', 'etc'] self.model.load_weights(self.config.predictor.predict_model) self.model.compile(optimizer=self.config.model.optimizer, loss='categorical_crossentropy', metrics=['acc']) print("-----------predict Result-------------------") predict_result = self.model.predict(x = self.data) print(predict_result) print(class_list[np.argmax(predict_result)])
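

# Hedged usage sketch: the tiny network below is only a stand-in to show the
# call pattern; the real project supplies an architecture matching the saved
# weights, and the checkpoint path, input shape and config layout here are
# assumptions.
if __name__ == "__main__":
    from types import SimpleNamespace
    import tensorflow as tf

    model = tf.keras.Sequential([
        tf.keras.layers.Flatten(input_shape=(128, 128, 1)),
        tf.keras.layers.Dense(3, activation="softmax"),
    ])
    config = SimpleNamespace(
        predictor=SimpleNamespace(predict_model="experiments/best_model.h5"),
        model=SimpleNamespace(optimizer="adam"),
    )
    features = np.random.rand(1, 128, 128, 1)  # stand-in for a preprocessed spectrogram batch
    WavClassifyPredictor(model=model, data=features, config=config).predict()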
{"hexsha": "a5fa182ba1afcbd579273178c8cc196632666ac0", "size": 757, "ext": "py", "lang": "Python", "max_stars_repo_path": "predictors/wav_classify_predictor.py", "max_stars_repo_name": "dizwe/VibrationFromWarningForHearingImpaired_ML", "max_stars_repo_head_hexsha": "d585fbf63fb47d27f35ca4025fdb952e4bd50574", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-10T06:17:37.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-10T06:17:37.000Z", "max_issues_repo_path": "predictors/wav_classify_predictor.py", "max_issues_repo_name": "dizwe/VibrationFromWarningForHearingImpaired_ML", "max_issues_repo_head_hexsha": "d585fbf63fb47d27f35ca4025fdb952e4bd50574", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-09-25T22:07:43.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T01:58:28.000Z", "max_forks_repo_path": "predictors/wav_classify_predictor.py", "max_forks_repo_name": "dizwe/BabyCry_Siren_classify", "max_forks_repo_head_hexsha": "d585fbf63fb47d27f35ca4025fdb952e4bd50574", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.0476190476, "max_line_length": 71, "alphanum_fraction": 0.6287978864, "include": true, "reason": "import numpy", "num_tokens": 152}
from bs4 import BeautifulSoup as BS from selenium import webdriver from functools import reduce import pandas as pd import time import matplotlib.pyplot as plt from selenium.webdriver.firefox.options import Options import numpy as np options = Options() options.add_argument('--headless') def render_page(url): driver = webdriver.Firefox(options=options) driver.get(url) time.sleep(1) r = driver.page_source driver.quit() return r class WUscrape: def __init__(self, city, year): self.city = city self.year = year def city_find(self): if self.city == 'Zagreb': string = 'LDZA' elif self.city == 'Osijek': string = 'LDOS' elif self.city == 'Split': string = 'LDSP' elif self.city == 'Rijeka': string = 'LDRI' elif self.city == 'Dubrovnik': string = 'LDDU' else: print('City is not in the database!') return return string def scrape(self, city_string): page = 'https://www.wunderground.com/history/daily/' + city_string + '/date/' temp = np.array([]) year_str = str(self.year) for m in range(12): m += 1 m_str = f'-{m}-' d31 = [1, 3, 5, 7, 8, 10, 12] d30 = [4, 6, 9, 11] t_month = [] d = 0 if m in d31: while d < 31: d += 1 d_str = f'{d}' url = page + year_str + m_str + d_str r = render_page(url) try: soup = BS(r, "html.parser") container = soup.find('table', class_='mat-table cdk-table mat-sort ng-star-inserted') container = container.find_all('span', class_='test-true wu-unit wu-unit-temperature is-degree-visible ng-star-inserted') for i in range(len(container)): if i%2 != 0: check = container[i].find('span', class_='wu-value wu-value-to') t_month.append((float(check.text) - 32) * 5 / 9) print(f'Done for: month {m} - day {d}') #print(f'Number of elements: {len(t_month)}') except: d = d-1 print(f'Reruning for: month {m} - day {d+1}') temp = np.append(temp, t_month) #plt.plot(temp) #plt.show() elif m in d30: while d < 30: d += 1 d_str = f'{d}' url = page + year_str + m_str + d_str r = render_page(url) try: soup = BS(r, "html.parser") container = soup.find('table', class_='mat-table cdk-table mat-sort ng-star-inserted') container = container.find_all('span', class_='test-true wu-unit wu-unit-temperature is-degree-visible ng-star-inserted') for i in range(len(container)): check = container[i].find('span', class_='wu-value wu-value-to') t_month.append((float(check.text) - 32) * 5 / 9) print(f'Done for: month {m} - day {d}') except: d = d-1 print(f'Reruning for: month {m} - day {d}') temp = np.append(temp, t_month) #plt.plot(temp) #plt.show() elif m == 2: if self.year % 4 == 0: while d < 29: d += 1 d_str = f'{d}' url = page + year_str + m_str + d_str r = render_page(url) try: soup = BS(r, "html.parser") container = soup.find('table', class_='mat-table cdk-table mat-sort ng-star-inserted') container = container.find_all('span', class_='test-true wu-unit wu-unit-temperature is-degree-visible ng-star-inserted') for i in range(len(container)): check = container[i].find('span', class_='wu-value wu-value-to') t_month.append((float(check.text) - 32) * 5 / 9) print(f'Done for: month {m} - day {d}') except: d = d-1 print(f'Reruning for: month {m} - day {d}') temp = np.append(temp, t_month) #plt.plot(temp) #plt.show() else: while d < 28: d += 1 d_str = f'{d}' url = page + year_str + m_str + d_str r = render_page(url) try: soup = BS(r, "html.parser") container = soup.find('table', class_='mat-table cdk-table mat-sort ng-star-inserted') container = container.find_all('span', class_='test-true wu-unit wu-unit-temperature is-degree-visible ng-star-inserted') for i in range(len(container)): check = container[i].find('span', 
class_='wu-value wu-value-to') t_month.append((float(check.text) - 32) * 5 / 9) print(f'Done for: month {m} - day {d}') except: d = d-1 print(f'Reruning for: month {m} - day {d}') temp = np.append(temp, t_month) #plt.plot(temp) #plt.show() np.savetxt('data/raw/' + self.city + year_str + '.csv', temp, delimiter=",") return temp
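

# Hedged usage sketch: running this requires Firefox plus geckodriver on the
# PATH, network access to wunderground.com, and an existing data/raw/ directory
# for the CSV that scrape() writes.
if __name__ == "__main__":
    scraper = WUscrape(city="Zagreb", year=2020)
    station = scraper.city_find()    # resolves Zagreb to the 'LDZA' station code
    temps = scraper.scrape(station)  # temperatures in deg C, also saved to data/raw/Zagreb2020.csv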
{"hexsha": "8065f94061a3ad7bdb27fd35494de9ff83f61655", "size": 6293, "ext": "py", "lang": "Python", "max_stars_repo_path": "data/WScrape.py", "max_stars_repo_name": "sanjin94/HTool-dev", "max_stars_repo_head_hexsha": "b4a13d2af82c12ae21337ac2313d8e732891a7cd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data/WScrape.py", "max_issues_repo_name": "sanjin94/HTool-dev", "max_issues_repo_head_hexsha": "b4a13d2af82c12ae21337ac2313d8e732891a7cd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data/WScrape.py", "max_forks_repo_name": "sanjin94/HTool-dev", "max_forks_repo_head_hexsha": "b4a13d2af82c12ae21337ac2313d8e732891a7cd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.3719512195, "max_line_length": 149, "alphanum_fraction": 0.4222151597, "include": true, "reason": "import numpy", "num_tokens": 1329}
import os import argparse import scipy.misc import scipy import torch import torchvision import numpy as np import _pickle as cp import os.path as osp import torch.nn as nn import torch.optim as optim import torch.utils.data as data import torch.multiprocessing as mp import torch.backends.cudnn as cudnn import torch.nn.init as init from dataloader import* from utils.utils import * from utils.test_utils import * from model.model import GlyphAdaptor ImageFile.LOAD_TRUNCATED_IMAGES = True def worker_init_fn(worker_id): np.random.seed(np.random.get_state()[1][0] + worker_id) def main(): parser = argparse.ArgumentParser(description='GlyphAdaptor') ## data setting parser.add_argument('--root', default='./data',type=str) parser.add_argument('--load_height', default=32, type=int) parser.add_argument('--evalset', default='FontSynth', type=str,help='FontSynth,Omniglot') parser.add_argument("--val", dest="val", action = 'store_true') parser.add_argument("--visualize", dest="visualize", action = 'store_true') parser.add_argument("--cross", action = 'store_true',help="use training font as reference font") parser.add_argument('--lang', default="EN", type=str,help='language for testing in Google1k ') # model params parser.add_argument("--gpus", dest="gpu", default="0", type=str) parser.add_argument('--d_model', default=360, type=int, help='dimension of transformer') parser.add_argument('--num_workers', default=4, type=int) ## model setting parser.add_argument('--alphabet', default='/ abcdefghijklmnopqrstuvwxyz-', type=str) parser.add_argument('--pretrained_name', default='att4_omni', type=str) parser.add_argument("--TPS", dest="TPS", action = 'store_true') ## output setting parser.add_argument('--model_folder', default='', type=str) args = parser.parse_args() if osp.exists(args.model_folder) == False: raise Exception('directory for pretrained model is not found') result_dir = osp.join('./results') if osp.exists(result_dir) == False: os.mkdir(result_dir) args.nClasses = len(args.alphabet) os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu device = torch.device("cuda:0") torch.set_default_tensor_type('torch.FloatTensor') ###### setup fonts and alphabet converter ###### converter = strLabelConverter(args.alphabet) if args.evalset =='FontSynth': testfonts = select_test_font(args.evalset,args.root) elif args.evalset == 'Omniglot': testfonts = np.arange(0,20) else: raise Exception('test dataset is not recognized') args.alphabet_gt=select_alphabet(args.lang) converter_gt = strLabelConverter(args.alphabet_gt) if args.cross: ref_fonts = [] ref_list = open(osp.join(args.root,'gt','ref_10_random_fonts.txt'),'r').readlines() for line in ref_list: ref_fonts.append(line.split('/')[-1].replace('.ttf','').replace('\n','')) else: ref_fonts= [''] ###### setup model ###### net = GlyphAdaptor(args) net = torch.nn.DataParallel(net).to(device) if args.pretrained_name: resume_file = osp.join(args.model_folder,args.pretrained_name+'.pth') checkpoint = torch.load(resume_file) net.load_state_dict(checkpoint['model_state_dict'],strict=True) else: raise Exception('checkpoint not specified') total_acc,total_cer,total_wer = 0,0,0 ###### start testing ###### print('======== testing starts ========') cudnn.benchmark = True net.eval() for font in ref_fonts: args.ref_font = font if args.cross: print('======== Using %s as reference font========'%args.ref_font) total_correct,cers,wers,counter = 0,0,0,0 for test_font in testfonts: args.fontname = test_font if args.evalset == 'FontSynth': testset = TestLoader(args,aug= False) elif args.evalset == 
'Omniglot': testset = Omniglot(args,args.root,alpha_ind = test_font ,background=False,size = 500) seq_sampler = data.SequentialSampler(testset) test_loader = data.DataLoader(testset, 1, num_workers=args.num_workers, sampler = seq_sampler,pin_memory=True, collate_fn=text_collate_eval,drop_last=False,worker_init_fn=worker_init_fn) cer_font,acc_font,wer_font,font_logs = [],[],[],[] for index, sample in enumerate(test_loader): imgs, char_seg_label, labels, length, alphabet= sample if args.evalset != 'Omniglot': args.alphabet = alphabet converter = strLabelConverter(args.alphabet) imgs = torch.transpose(imgs.unsqueeze(1),2,3) imgs = imgs.float().to(device) labels = labels.long().to(device) #[batch*len] preds,sims,final_conv = net(imgs,char_seg_label,length.long(),char_seg_label) #[batch,len,classes] correct,cer,wer,pred_str,gt_str = lex_free_acc(preds,labels,converter,'ctc',converter_gt = converter_gt) counter += 1 if args.visualize: print(pred_str[0],'---',gt_str[0]) cer_font.append(cer) wer_font.append(wer) acc_font.append(correct) total_correct += correct cers += cer wers += wer font_summary = 'font: %s CER: %.1f WER: %.1f' %(test_font,np.mean(cer_font)*100,np.mean(cer_font)*100) print(font_summary) font_logs.append(font_summary) total_acc += total_correct*1.0/counter total_cer += cers*1.0 / counter total_wer += wers*1.0 / counter if not args.cross: args.ref_font = 'itself' total_summary = 'ref_font=%s cer = %.1f wer = %.1f accuracy=%.1f total_samples= %d'%(args.ref_font,total_cer*100,total_wer*100,total_acc*100,counter) print('----------') print(total_summary) print('----------') if args.cross: result_file= open(osp.join(result_dir,args.ref_font +'_cross_' + args.evalset+'_acc.txt'),'w') else: result_file= open(osp.join(result_dir,args.ref_font +'_' + args.evalset+'_acc.txt'),'w') for font_summary in font_logs: result_file.write(font_summary) result_file.write('======================================================') result_file.write(total_summary) result_file.close() ref_total_cer = total_cer / len(ref_fonts) ref_total_wer = total_wer / len(ref_fonts) ref_total_acc = total_acc / len(ref_fonts) if args.cross: print('======================================================') print('Total results over all reference fonts') print('cer = %.1f wer = %.1f accuracy=%.1f total_ref_fonts=%d '%(ref_total_cer*100,ref_total_wer*100,ref_total_acc*100,len(ref_fonts))) print('======================================================') if __name__ == '__main__': main()
{"hexsha": "6eab01d2030e03b06d77bffa3f8dee4f9c2fd055", "size": 6431, "ext": "py", "lang": "Python", "max_stars_repo_path": "test.py", "max_stars_repo_name": "Chuhanxx/FontAdaptor", "max_stars_repo_head_hexsha": "dd086ce705216a92babd03d9113d5392bd992a4c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 116, "max_stars_repo_stars_event_min_datetime": "2020-08-20T10:03:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-24T06:01:47.000Z", "max_issues_repo_path": "test.py", "max_issues_repo_name": "Chuhanxx/FontAdaptor", "max_issues_repo_head_hexsha": "dd086ce705216a92babd03d9113d5392bd992a4c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2020-10-14T09:27:34.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:47:11.000Z", "max_forks_repo_path": "test.py", "max_forks_repo_name": "Chuhanxx/FontAdaptor", "max_forks_repo_head_hexsha": "dd086ce705216a92babd03d9113d5392bd992a4c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2020-08-31T05:32:31.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-24T14:42:12.000Z", "avg_line_length": 31.9950248756, "max_line_length": 151, "alphanum_fraction": 0.6885398849, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1628}
import pandas as pd # 데이터프레임 import numpy as np # 행렬처리 from tkinter import filedialog from tkinter import messagebox import tkinter as tk import tkinter.ttk as ttk from winreg import * import os def central_box(root): # Gets the requested values of the height and widht. windowWidth = root.winfo_reqwidth() windowHeight = root.winfo_reqheight() # Gets both half the screen width/height and window width/height positionRight = int(root.winfo_screenwidth() / 2 - windowWidth / 2) positionDown = int(root.winfo_screenheight() / 2 - windowHeight / 2) # Positions the window in the center of the page. root.geometry("+{}+{}".format(positionRight, positionDown)) return root def make_quota(filename, levels, level, num, grouping, condition_name, filtering=None): # Load Data df = pd.read_excel("{}".format(filename), sheet_name=1) if filtering==None: df = df[df.구분=='광역시도'] elif filtering[0]=='세종특별자치시': df = df[df.구분==level] df = df.drop(levels, axis=1) # 필터링 if filtering!=None: if (filtering[1]=='구 지역') | (filtering[0]=='세종특별자치시'): df = df[df.광역시도==filtering[0]].sum() df = pd.DataFrame(df).T df['광역시도'] = filtering[0] df['시군구'] = filtering[1] else: df = df[(df.광역시도==filtering[0])&(df.시군구==filtering[1])] df = df.rename(columns={'광역시도': '전체'}) df = df.drop('구분', axis=1) # 그룹화 if grouping: # 그룹화 리스트 gp_name_lst = [('경기도','인천광역시'),('대전광역시','세종특별자치시','충청남도','충청북도'), ('광주광역시','전라남도','전라북도'),('대구광역시','경상북도'), ('부산광역시','울산광역시','경상남도'),('강원도','제주특별자치도')] # 그룹화할 이름을 반복문으로 처리 for gp_names in gp_name_lst: # 충청남도와 세종특별자치시만 추출후 합계 new = df[df.광역시도.isin(gp_names)].sum(axis=0) # 충남/세종 합계 새로운 행으로 추가 df = df.append(new, ignore_index=True) # 이름 변경 df.iloc[-1,0] = '광역시도' df.iloc[-1,1] = '/'.join(gp_names) # /으로 지역들을 묶어줌 # 충남, 세종 제외 df = df[~df.광역시도.isin(gp_names)] elif (level=='광역시도') and (filtering==None): # 그룹화할 이름 gp_names =['충청남도','세종특별자치시'] # 충청남도와 세종특별자치시만 추출후 합계 new = df[df.광역시도.isin(gp_names)].sum(axis=0) # 충남/세종 합계 새로운 행으로 추가 df = df.append(new, ignore_index=True) # 이름 변경 df.iloc[-1,0] = '광역시도' df.iloc[-1,1] = '충청남도/세종특별자치시' # 충남, 세종 제외 df = df[~df.광역시도.isin(gp_names)] # Define Features male_cols = ['남 19-29세', '남 30대', '남 40대', '남 50대', '남 60세 이상'] female_cols = ['여 19-29세', '여 30대', '여 40대', '여 50대', '여 60세이상'] total_cols = male_cols + female_cols # Total Population try: total_pop = df[total_cols].sum().sum() except: messagebox.showerror("메세지 박스","해당 파일의 기준 변수명이 다릅니다.") exit() # 2단계 반올림 전 before_df = df.copy() before_df[total_cols] = (df[total_cols] / total_pop) * num # 각 셀값을 전체 인구로 나누고 정해진 값으로 곱ㅎ before_df['남 합계'] = before_df[male_cols].sum(axis=1) before_df['여 합계'] = before_df[female_cols].sum(axis=1) before_df['총계'] = before_df[['남 합계' ,'여 합계']].sum(axis=1) # 2단계 남여 각각 합계의 반올림 before_sex_sum = before_df[['남 합계' ,'여 합계']].sum().round() # 3단계 반올림 후 after_df = df.copy() after_df[total_cols] = (df[total_cols] / total_pop) * num # 각 셀값을 전체 인구로 나누고 정해진 값으로 곱ㅎ after_df[total_cols] = after_df[total_cols].astype(float).round().astype(int) # 각 셀을 반올림 after_df['남 합계'] = after_df[male_cols].sum(axis=1) after_df['여 합계'] = after_df[female_cols].sum(axis=1) after_df['총계'] = after_df[['남 합계' ,'여 합계']].sum(axis=1) # 3단계 남여 각각 합계의 반올림 after_sex_sum = after_df[['남 합계' ,'여 합계']].sum() # 2,3단계 남여 합계의 차이 ''' 차이는 세 가지 경우로 나뉜다: 남여 각각 차이가 1. 0이거나 / 2. 0보다 크거나 / 3. 0보다 작거나 1. 0인 경우는 추가적인 일 없이 표 완성 2. 만약 차이가 0보다 큰 경우 : xx.5 보다 작고 xx.5에 가장 가까운 값인 반올림 값에 + 1 - Why? 반올림하여 내림이 된 값 중 가장 올림에 가까운 값에 1을 더하는 것이 이상적이기 때문 ex) 2.49999 -> round(2.49999) + 1 3. 만약 차이가 0보다 작은 경우 : xx.5 이상이고 xx.5에 가장 가까운 값인 반올림 값에 - 1 - Why? 
반올림하여 올림이 된 값 중 가장 내림에 가까운 값에 1을 빼는 것이 이상적이기 때문 ex) 2.50001 -> round(2.50001) - 1 ''' sex_diff = before_sex_sum - after_sex_sum # 성별 합계 차이를 매꾸는 단계 sex_cols_lst = [male_cols, female_cols] sex_idx = ['남 합계' ,'여 합계'] for i in range(len(sex_idx)): if sex_diff.loc[sex_idx[i]] > 0: # 차이가 0보다 큰 경우 ''' 1. 2단계 반올림 전 값을 모두 내림 한 후 0.5를 더 한다. 2. 1번에서 한 값과 2단계 반올림 전 값을 뺀다. 3. 음수로 나오는 값은 모두 1로 변환. 1이 가장 큰 값이기 때문. ex) 13.45 -> 13 으로 내림 후 (13 + 0.5) - 13.45 = 0.05 ''' temp = (before_df[sex_cols_lst[i]].astype(int) + 0.5) - before_df[sex_cols_lst[i]] # 1,2번 temp = temp[temp >0].fillna(1) # 3번 v = 1 elif sex_diff.loc[sex_idx[i]] < 0: # 차이가 0보다 작은 경우 ''' 1. 2단계 반올림 전 값을 모두 내림 한 후 0.5를 더 한다. 2. 1번에서 한 값과 2단계 반올림 전 값을 뺀다. 3. 음수로 나오는 값은 모두 1로 변환. 1이 가장 큰 값이기 때문. ex) 13.54 -> 13 으로 내림 후 13.54 - (13 + 0.5) = 0.04 ''' temp = before_df[sex_cols_lst[i]] - (before_df[sex_cols_lst[i]].astype(int) + 0.5) # 1,2번 temp = temp[temp >0].fillna(1) # 3번에 해당 v = -1 else: # 차이가 0인 경우는 이후 과정 생략하고 그냥 통과 continue # 실제합계와의 차이: 절대값을 통해서 음수를 변환하고 정수로 타입을 변환 cnt = int(abs(sex_diff.loc[sex_idx[i]])) row_col = np.unravel_index(np.argsort(temp.values.ravel())[:cnt], temp.shape) rows = row_col[0] cols = row_col[1] # 각 (행,열) 좌표값에 v를 더함 for r in range(len(rows)): temp = after_df[sex_cols_lst[i]].copy() temp.iloc[rows[r] ,cols[r]] = temp.iloc[rows[r] ,cols[r]] + v after_df[sex_cols_lst[i]] = temp print() # 부족한 부분이 채워졌으면 합계 계산 after_df['남 합계'] = after_df[male_cols].sum(axis=1) after_df['여 합계'] = after_df[female_cols].sum(axis=1) after_df['총계'] = after_df[['남 합계' ,'여 합계']].sum(axis=1) final_sex_sum = after_df[['남 합계' ,'여 합계']].sum() # 다운로드 폴더 경로 찾기 with OpenKey(HKEY_CURRENT_USER, 'SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders') as key: Downloads = QueryValueEx(key, '{374DE290-123F-4565-9164-39C4925E467B}')[0] # 완료 메세지 if final_sex_sum.sum() != num: messagebox.showerror("메세지 상자","합계가 0이 아닙니다. 
문제를 확인해주세요.") else: save_name = filename.split('/')[-1] file_path = '{}/{}{}'.format(Downloads, condition_name, save_name) if filtering==None: messagebox.showinfo("메세지 상자", "다운로드 폴더에 저장되었습니다.") after_df.to_excel(file_path, index=False, encoding='cp949') else: if os.path.isfile(file_path): saved_df = pd.read_excel(file_path) after_df = pd.concat([saved_df,after_df], axis=0) after_df.to_excel(file_path, index=False, encoding='cp949') if __name__=='__main__': # 변경할 파일 이름을 입력받기 위한 코드 upload = tk.Tk() upload = central_box(upload) # 파일 선택하기 upload.filename = filedialog.askopenfilename(initialdir="/", title="Select file", filetypes=(("excel files", "*.xlsx"), ("all files", "*.*"))) filename = upload.filename # 업로드 종료 upload.destroy() starting=True # 계속 실행 반복 while starting: # 버튼 만들기 def start_click(): # 전역 변수 설정하기 global starting starting = True start_check.destroy() def quit_click(): # 전역 변수 설정하기 global starting starting = False start_check.destroy() # start_check 생성 start_check = tk.Tk() start_check = central_box(start_check) # 시작 여부 btn1 = ttk.Button(start_check, text='시작', command=start_click) btn2 = ttk.Button(start_check, text='종료', command=quit_click) btn1.pack() btn2.pack() start_check.mainloop() if not(starting): exit() def simple(): # 전역 변수 설정하기 global work work = True work_check.destroy() def multiplt(): # 전역 변수 설정하기 global work work = False work_check.destroy() # work_check 생성 work_check = tk.Tk() work_check = central_box(work_check) # 작업 종류 btn3 = ttk.Button(work_check, text='단순작업', command=simple) btn4 = ttk.Button(work_check, text='쿼터표 불러오기', command=multiplt) btn3.pack() btn4.pack() work_check.mainloop() if work: # 구분 / 크기를 입력받기 위한 코드 root = tk.Tk() root = central_box(root) # 구분과 크기 입력 창 만들기 ttk.Label(root, text="구분과 크기를 정하세요").grid(column=0, row=0) # 구분 리스트 levels = ['광역시도', '시군구', '읍면동'] level_lst_Chosen = ttk.Combobox(root, width=12, values=levels) level_lst_Chosen.grid(column=0, row=1) # 크기 정하기 var = tk.IntVar().set(1000) cnt = ttk.Entry(root, width=10, text=var) cnt.grid(column=1, row=1) # 버튼 만들기 def level_num_click(): # 전역 변수 설정하기 global level global num level = level_lst_Chosen.get() if not (isinstance(int(cnt.get()), int)): messagebox.showerror("메세지박스", "크기는 숫자만 입력하세요.") exit() num = int(cnt.get()) root.destroy() action = ttk.Button(root, text="시작", command=level_num_click) action.grid(column=2, row=1) # 윈도우 창 실행하기 root.mainloop() # 그룹화 확인 버튼 if level == '광역시도': # root 생성 gp_check = tk.Tk() gp_check = central_box(gp_check) # 그룹화 확인 문구 ttk.Label(gp_check, text="그룹화 여부").grid(column=0, row=0) # 그룹화 여부 radVar = tk.BooleanVar() r1 = ttk.Radiobutton(gp_check, text="Yes", variable=radVar, value=True) r1.grid(column=0, row=1) r2 = ttk.Radiobutton(gp_check, text="No", variable=radVar, value=False) r2.grid(column=1, row=1) # 버튼 만들기 def gp_click(): # 전역 변수 설정하기 global grouping grouping = radVar.get() gp_check.destroy() # 버튼 생성 action = ttk.Button(gp_check, text="시작", command=gp_click) action.grid(column=2, row=1) gp_check.mainloop() else: grouping = False # 저장할 파일명 if grouping: group_name = '그룹' else: group_name = '' condition_name = '[{}_{}_{}]'.format(level, num, grouping) # levels에서 선택된 level은 제외 levels.remove(level) # 실행 make_quota(filename, levels, level, num, grouping, condition_name) else: # 필터링할 파일 이름을 입력받기 위한 코드 filtering_upload = tk.Tk() filtering_upload = central_box(filtering_upload) # 파일 선택하기 filtering_upload.filename = filedialog.askopenfilename(initialdir="/", title="Select file", filetypes=(("excel files", "*.xlsx"), ("all files", "*.*"))) filtering_filename = 
filtering_upload.filename # 업로드 종료 filtering_upload.destroy() filtering_df = pd.read_excel("{}".format(filtering_filename)) for col in ['전체','시군구','쿼터 합계']: if col not in filtering_df: print('컬럼명이 잘못되었습니다.') print("['전체','시군구','쿼터합계']로 입력해주세요") exit() # 초기값 levels = ['읍면동'] level = '시군구' grouping = False # 저장할 파일명 condition_name = '[{}개_쿼터표]'.format(filtering_df.shape[0]) # 진행정도 process = tk.Tk() process = central_box(process) progress = ttk.Progressbar(process, orient='horizontal', length=286, mode='determinate') progress.pack() progress['maximum'] = filtering_df.shape[0] progress['value'] = 0 # 실행 for i in range(filtering_df.shape[0]): gwang = filtering_df.iloc[i,0] sigoongoo = filtering_df.iloc[i,1] num = filtering_df.iloc[i,2] filtering = [gwang, sigoongoo] make_quota(filename, levels, level, num, grouping, condition_name, filtering) progress['value'] += 1 progress.update() progress.mainloop() messagebox.showinfo("메세지 상자", "다운로드 폴더에 저장되었습니다.")
{"hexsha": "fd045bb4d81fe19b5ce54322ac92baced1b1845b", "size": 13181, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "DataNetworkAnalysis/QuotaSampling", "max_stars_repo_head_hexsha": "7661ac1040d3af39530f067ab2c27e54f79f49db", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.py", "max_issues_repo_name": "DataNetworkAnalysis/QuotaSampling", "max_issues_repo_head_hexsha": "7661ac1040d3af39530f067ab2c27e54f79f49db", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "DataNetworkAnalysis/QuotaSampling", "max_forks_repo_head_hexsha": "7661ac1040d3af39530f067ab2c27e54f79f49db", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-04-15T07:58:35.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-15T07:58:35.000Z", "avg_line_length": 33.7109974425, "max_line_length": 117, "alphanum_fraction": 0.5229497003, "include": true, "reason": "import numpy", "num_tokens": 4633}
## ObjectiveFunc.py -- Perform Gradient Estimation and Evaluation for a Given Function
##
## Copyright (C) 2018, IBM Corp
## PaiShun Ting <paishun@umich.edu>
## Pin-Yu Chen <Pin-Yu.Chen@ibm.com>
## Sijia Liu <sijia.liu@ibm.com>
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
##    http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.

import numpy as np
import Utils as util

np.random.seed(2018)


class OBJFUNC:
    def __init__(self, MGR, model, origImgs, origLabels):
        self.const = MGR.parSet['const']
        self.model = model
        self.origImgs = origImgs
        self.origImgsAT = np.arctanh(origImgs * 1.9999999)
        self.origLabels = origLabels
        self.nFunc = origImgs.shape[0]
        self.imageSize = np.size(origImgs) / self.nFunc
        self.query_count = 0
        self.Loss_L2 = 1e10
        self.Loss_Attack = 1e10
        self.Loss_Overall = self.Loss_L2 + self.const * self.Loss_Attack
        if MGR.parSet['rv_dist'] == 'UnitBall':
            self.RV_Gen = self.Draw_UnitBall
        elif MGR.parSet['rv_dist'] == 'UnitSphere':
            self.RV_Gen = self.Draw_UnitSphere
        else:
            print('Please specify a valid distribution for random perturbation')

    def Draw_UnitBall(self):
        sample = np.random.uniform(-1.0, 1.0, size=self.origImgs[0].shape)
        return sample / np.linalg.norm(sample.flatten())

    def Draw_UnitSphere(self):
        sample = np.random.normal(0.0, 1.0, size=self.origImgs[0].shape)
        return sample / np.linalg.norm(sample.flatten())

    def evaluate(self, delImgAT, randBatchIdx, addQueryCount=True):
        if randBatchIdx.size == 0:
            randBatchIdx = np.arange(0, self.nFunc)
        batchSize = randBatchIdx.size
        origLabels_Batched = self.origLabels[randBatchIdx]
        delImgsAT = np.repeat(np.expand_dims(delImgAT, axis=0), self.nFunc, axis=0)
        advImgs = np.tanh(self.origImgsAT + delImgsAT) / 2.0
        advImgs_Batched = advImgs[randBatchIdx]
        if addQueryCount:
            self.query_count += batchSize
        Score_AdvImgs_Batched = self.model.model.predict(advImgs_Batched)
        Score_TargetLab = np.maximum(1e-20, np.sum(origLabels_Batched * Score_AdvImgs_Batched, 1))
        Score_NonTargetLab = np.maximum(1e-20, np.amax((1 - origLabels_Batched) * Score_AdvImgs_Batched - (origLabels_Batched * 10000), 1))
        self.Loss_Attack = np.amax(np.maximum(0.0, -np.log(Score_NonTargetLab) + np.log(Score_TargetLab)))
        self.Loss_L2 = self.imageSize * np.mean(np.square(advImgs - self.origImgs) / 2.0)
        self.Loss_Overall = self.Loss_L2 + self.const * self.Loss_Attack
        return self.Loss_Overall

    def gradient_estimation(self, delImgAT, mu, q, randBatchIdx=np.array([])):
        f = self.evaluate(delImgAT, randBatchIdx)
        grad_avg = np.zeros(delImgAT.shape)
        for q_idx in range(q):
            u_rand = self.RV_Gen()
            f_perturb = self.evaluate(delImgAT + mu * u_rand, randBatchIdx)
            grad_avg += (f_perturb - f) * u_rand
        return (delImgAT.size / mu) * (grad_avg / q)

    def print_current_loss(self):
        print('Loss_Overall: ', self.Loss_Overall, ' Loss_L2: ', self.Loss_L2, ' Loss_Attack: ', self.Loss_Attack)
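# --- Illustrative sketch (an addition for clarity, not part of the original IBM module) ---
# gradient_estimation above implements the usual zeroth-order estimator
#     ghat = (d / mu) * (1 / q) * sum_i (f(x + mu * u_i) - f(x)) * u_i,
# where each u_i is a random unit direction. The standalone demo below applies the same
# formula to f(x) = ||x||^2 (true gradient 2x), so the behaviour can be checked without
# a model; the helper name zo_gradient is hypothetical and exists only in this demo.
if __name__ == "__main__":
    def zo_gradient(f, x, mu=1e-3, q=200, rng=np.random.default_rng(0)):
        d = x.size
        fx = f(x)
        grad = np.zeros_like(x)
        for _ in range(q):
            u = rng.normal(size=x.shape)
            u /= np.linalg.norm(u)  # draw a direction on the unit sphere
            grad += (f(x + mu * u) - fx) * u
        return (d / mu) * grad / q

    x0 = np.array([1.0, -2.0, 0.5])
    est = zo_gradient(lambda v: np.sum(v ** 2), x0)
    print("estimated:", est, "analytic:", 2 * x0)  # the two should roughly agree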
{"hexsha": "90094df65c7b7674d55ab7e82b3e8d34930a6698", "size": 3818, "ext": "py", "lang": "Python", "max_stars_repo_path": "optimization_methods/ObjectiveFunc.py", "max_stars_repo_name": "zkbfdzp/IBM7", "max_stars_repo_head_hexsha": "48fc2b74c666c1a2acb9303c825236da8b513f63", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2019-01-01T02:19:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T08:55:06.000Z", "max_issues_repo_path": "optimization_methods/ObjectiveFunc.py", "max_issues_repo_name": "zkbfdzp/IBM7", "max_issues_repo_head_hexsha": "48fc2b74c666c1a2acb9303c825236da8b513f63", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "optimization_methods/ObjectiveFunc.py", "max_forks_repo_name": "zkbfdzp/IBM7", "max_forks_repo_head_hexsha": "48fc2b74c666c1a2acb9303c825236da8b513f63", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-05-07T08:36:52.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-13T02:43:35.000Z", "avg_line_length": 41.956043956, "max_line_length": 133, "alphanum_fraction": 0.6471974856, "include": true, "reason": "import numpy", "num_tokens": 1026}
from PySide2 import QtCore from PySide2.QtWebEngineWidgets import QWebEngineView from PySide2.QtWidgets import (QMainWindow, QWidget, QApplication, QAction, QPushButton, QLineEdit, QTextEdit, QVBoxLayout, QGridLayout, QSplitter, QLabel, QFileDialog, QMessageBox, QComboBox, QScrollArea, QStyle, QGroupBox, QCheckBox, QTabWidget) from nwb_conversion_tools.gui.classes.console_widget import ConsoleWidget from nwb_conversion_tools.gui.classes.forms_general import GroupNwbfile, GroupSubject from nwb_conversion_tools.gui.classes.forms_ophys import GroupOphys from nwb_conversion_tools.gui.classes.forms_ecephys import GroupEcephys from nwb_conversion_tools.gui.classes.forms_behavior import GroupBehavior from nwb_conversion_tools.gui.classes.forms_ogen import GroupOgen from nwb_conversion_tools.gui.utils.name_references import name_to_gui_class import numpy as np import nbformat as nbf from pathlib import Path import tempfile import socket import psutil import shutil import datetime import importlib import yaml import sys import os class Application(QMainWindow): def __init__(self, metafile=None, conversion_module='', source_paths={}, kwargs_fields={}, extension_modules={}, extension_forms={}, show_add_del=False): super().__init__() # Dictionary storing source files paths self.source_paths = source_paths # Path to conversion module .py file self.conversion_module_path = conversion_module # Dictionary storing custom boolean options (to form checkboxes) self.kwargs_fields = kwargs_fields # Boolean control to either show/hide the option for add/del Groups self.show_add_del = show_add_del # Extension modules self.extension_modules = extension_modules # Updates name_to_gui_class with extension classes self.name_to_gui_class = name_to_gui_class self.name_to_gui_class.update(extension_forms) # Temporary folder path self.temp_dir = tempfile.mkdtemp() self.resize(1200, 900) self.setWindowTitle('NWB:N conversion tools') # Initialize GUI elements self.init_gui() self.init_meta_tab() self.load_meta_file(filename=metafile) self.init_nwb_explorer() self.show() def init_gui(self): """Initiates GUI elements.""" mainMenu = self.menuBar() fileMenu = mainMenu.addMenu('File') action_choose_conversion = QAction('Choose conversion module', self) fileMenu.addAction(action_choose_conversion) action_choose_conversion.triggered.connect(self.load_conversion_module) helpMenu = mainMenu.addMenu('Help') action_about = QAction('About', self) helpMenu.addAction(action_about) action_about.triggered.connect(self.about) self.tabs = QTabWidget() self.setCentralWidget(self.tabs) def init_meta_tab(self): # Center panels ------------------------------------------------------- self.groups_list = [] # Left-side panel: forms self.btn_load_meta = QPushButton('Load metafile') self.btn_load_meta.setIcon(self.style().standardIcon(QStyle.SP_ArrowDown)) self.btn_load_meta.clicked.connect(lambda: self.load_meta_file(filename=None)) self.btn_load_meta.setToolTip("The YAML file with metadata for this conversion.\n" "You can customize the metadata in the forms below.") self.btn_save_meta = QPushButton('Save metafile') self.btn_save_meta.setIcon(self.style().standardIcon(QStyle.SP_DriveFDIcon)) self.btn_save_meta.clicked.connect(self.save_meta_file) self.btn_run_conversion = QPushButton('Run conversion') self.btn_run_conversion.setIcon(self.style().standardIcon(QStyle.SP_MediaPlay)) self.btn_run_conversion.clicked.connect(self.run_conversion) self.btn_form_editor = QPushButton('Form -> Editor') self.btn_form_editor.clicked.connect(self.form_to_editor) 
self.lbl_nwb_file = QLabel('Output nwb file:') self.lbl_nwb_file.setToolTip("Path to the NWB file that will be created.") self.lin_nwb_file = QLineEdit('') self.btn_nwb_file = QPushButton() self.btn_nwb_file.setIcon(self.style().standardIcon(QStyle.SP_DialogOpenButton)) self.btn_nwb_file.clicked.connect(self.load_nwb_file) l_grid1 = QGridLayout() l_grid1.setColumnStretch(3, 1) l_grid1.addWidget(self.btn_load_meta, 0, 0, 1, 1) l_grid1.addWidget(self.btn_save_meta, 0, 1, 1, 1) l_grid1.addWidget(self.btn_run_conversion, 0, 2, 1, 1) l_grid1.addWidget(QLabel(), 0, 3, 1, 1) l_grid1.addWidget(self.btn_form_editor, 0, 4, 1, 2) l_grid1.addWidget(self.lbl_nwb_file, 1, 0, 1, 1) l_grid1.addWidget(self.lin_nwb_file, 1, 1, 1, 3) l_grid1.addWidget(self.btn_nwb_file, 1, 4, 1, 1) # Adds custom files/dir paths fields if len(self.source_paths.keys()) == 0: self.lbl_source_file = QLabel('source files:') self.lin_source_file = QLineEdit('') self.btn_source_file = QPushButton() self.btn_source_file.setIcon(self.style().standardIcon(QStyle.SP_DialogOpenButton)) self.btn_source_file.clicked.connect(self.load_source_files) l_grid1.addWidget(self.lbl_source_file, 3, 0, 1, 1) l_grid1.addWidget(self.lin_source_file, 3, 1, 1, 3) l_grid1.addWidget(self.btn_source_file, 3, 4, 1, 1) else: self.group_source_paths = QGroupBox('Source paths') self.grid_source = QGridLayout() self.grid_source.setColumnStretch(3, 1) ii = -1 for k, v in self.source_paths.items(): ii += 1 lbl_src = QLabel(k + ':') setattr(self, 'lbl_src_' + str(ii), lbl_src) lin_src = QLineEdit('') setattr(self, 'lin_src_' + str(ii), lin_src) btn_src = QPushButton() btn_src.setIcon(self.style().standardIcon(QStyle.SP_DialogOpenButton)) setattr(self, 'btn_src_' + str(ii), btn_src) if v['type'] == 'file': btn_src.clicked.connect((lambda x: lambda: self.load_source_files(x[0], x[1]))([ii, k])) else: btn_src.clicked.connect((lambda x: lambda: self.load_source_dir(x[0], x[1]))([ii, k])) self.grid_source.addWidget(lbl_src, ii, 0, 1, 1) self.grid_source.addWidget(lin_src, ii, 1, 1, 3) self.grid_source.addWidget(btn_src, ii, 4, 1, 1) self.group_source_paths.setLayout(self.grid_source) l_grid1.addWidget(self.group_source_paths, 3, 0, 1, 6) # Adds custom kwargs checkboxes if len(self.kwargs_fields.keys()) > 0: self.group_kwargs = QGroupBox('KWARGS') self.grid_kwargs = QGridLayout() self.grid_kwargs.setColumnStretch(4, 1) ii = -1 for k, v in self.kwargs_fields.items(): ii += 1 chk_kwargs = QCheckBox(k) chk_kwargs.setChecked(v) chk_kwargs.clicked.connect((lambda x: lambda: self.update_kwargs(x[0], x[1]))([ii, k])) setattr(self, 'chk_kwargs_' + str(ii), chk_kwargs) self.grid_kwargs.addWidget(chk_kwargs, ii // 4, ii % 4, 1, 1) self.group_kwargs.setLayout(self.grid_kwargs) l_grid1.addWidget(self.group_kwargs, 4, 0, 1, 6) self.l_vbox1 = QVBoxLayout() self.l_vbox1.addStretch() scroll_aux = QWidget() scroll_aux.setLayout(self.l_vbox1) l_scroll = QScrollArea() l_scroll.setWidget(scroll_aux) l_scroll.setWidgetResizable(True) self.l_vbox2 = QVBoxLayout() self.l_vbox2.addLayout(l_grid1) self.l_vbox2.addWidget(l_scroll) # Right-side panel # Metadata text editor_label = QLabel('Metafile preview:') r_grid1 = QGridLayout() r_grid1.setColumnStretch(1, 1) r_grid1.addWidget(editor_label, 0, 0, 1, 1) r_grid1.addWidget(QLabel(), 0, 1, 1, 1) self.editor = QTextEdit() r_vbox1 = QVBoxLayout() r_vbox1.addLayout(r_grid1) r_vbox1.addWidget(self.editor) # Logger log_label = QLabel('Log:') r_grid2 = QGridLayout() r_grid2.setColumnStretch(1, 1) r_grid2.addWidget(log_label, 0, 0, 1, 1) 
r_grid2.addWidget(QLabel(), 0, 1, 1, 1) self.logger = QTextEdit() self.logger.setReadOnly(True) r_vbox2 = QVBoxLayout() r_vbox2.addLayout(r_grid2) r_vbox2.addWidget(self.logger) r_vsplitter = QSplitter(QtCore.Qt.Vertical) ru_w = QWidget() ru_w.setLayout(r_vbox1) rb_w = QWidget() rb_w.setLayout(r_vbox2) r_vsplitter.addWidget(ru_w) r_vsplitter.addWidget(rb_w) # Metadata/conversion tab Layout self.left_w = QWidget() self.left_w.setLayout(self.l_vbox2) self.splitter = QSplitter(QtCore.Qt.Horizontal) self.splitter.addWidget(self.left_w) self.splitter.addWidget(r_vsplitter) self.metadata_layout = QVBoxLayout() self.metadata_layout.addWidget(self.splitter) self.tab_metadata = QWidget() self.tab_metadata.setLayout(self.metadata_layout) self.tabs.addTab(self.tab_metadata, 'Metadata/Conversion') # Background color p = self.palette() p.setColor(self.backgroundRole(), QtCore.Qt.white) self.setPalette(p) def init_nwb_explorer(self): """Initializes NWB file explorer tab""" # Layout Widgets self.btn_load_nwbexp = QPushButton('Load NWB') self.btn_load_nwbexp.setIcon(self.style().standardIcon(QStyle.SP_ArrowDown)) self.btn_load_nwbexp.clicked.connect(self.load_nwb_explorer) self.btn_load_nwbexp.setToolTip("Choose NWB file to explore!") self.btn_close_nwbexp = QPushButton('Close') self.btn_close_nwbexp.setIcon(self.style().standardIcon(QStyle.SP_DialogCloseButton)) self.btn_close_nwbexp.clicked.connect(self.close_nwb_explorer) self.btn_close_nwbexp.setToolTip("Close current file view.") self.html = QWebEngineView() self.grid_widgets = QGridLayout() self.grid_widgets.setColumnStretch(2, 1) self.grid_widgets.addWidget(self.btn_load_nwbexp, 0, 0, 1, 1) self.grid_widgets.addWidget(self.btn_close_nwbexp, 0, 1, 1, 1) self.grid_widgets.addWidget(QLabel(), 0, 2, 1, 1) self.vbox_widgets = QVBoxLayout() self.vbox_widgets.addLayout(self.grid_widgets) self.vbox_widgets.addWidget(self.html) # Layout Console console_label = QLabel('Ipython console:') self.explorer_console = ConsoleWidget(par=self) self.explorer_console.setToolTip("nwbfile --> NWB file data") self.grid_console = QGridLayout() self.grid_console.addWidget(console_label, 0, 0, 1, 1) self.grid_console.addWidget(self.explorer_console, 1, 0, 1, 1) hsplitter = QSplitter(QtCore.Qt.Horizontal) left_w = QWidget() left_w.setLayout(self.vbox_widgets) right_w = QWidget() right_w.setLayout(self.grid_console) hsplitter.addWidget(left_w) hsplitter.addWidget(right_w) # Add tab to GUI self.tabs.addTab(hsplitter, 'NWB explorer') def write_to_logger(self, txt): time = datetime.datetime.now().time().strftime("%H:%M:%S") full_txt = "[" + time + "] " + txt self.logger.append(full_txt) def run_conversion(self): """Runs conversion function.""" self.write_to_logger('Converting data to NWB... 
please wait.') self.toggle_enable_gui(enable=False) self.thread = ConversionFunctionThread(self) self.thread.finished.connect(lambda: self.finish_conversion(error=self.thread.error)) self.thread.start() def finish_conversion(self, error): if error: self.write_to_logger('ERROR:') self.write_to_logger(str(error)) else: self.write_to_logger('Data successfully converted to NWB.') self.toggle_enable_gui(enable=True) def toggle_enable_gui(self, enable): self.editor.setEnabled(enable) self.left_w.setEnabled(enable) def save_meta_file(self): """Saves metadata to .yml file.""" filename, _ = QFileDialog.getSaveFileName(self, 'Save file', '', "(*.yml)") if filename: data = {} for grp in self.groups_list: info, error = grp.read_fields() if error is None: data[grp.group_type] = info else: return with open(filename, 'w') as f: yaml.dump(data, f, default_flow_style=False) def read_metadata_from_form(self): """Loads metadata from form.""" metadata = {} for grp in self.groups_list: info, error = grp.read_fields() if error is None: metadata[grp.group_type] = info else: return return metadata def form_to_editor(self): """Loads data from form to editor.""" metadata = self.read_metadata_from_form() txt = yaml.dump(metadata, default_flow_style=False) self.editor.setText(txt) def update_kwargs(self, ind, key): """Updates the boolean values for keyword arguments.""" chk_kw = getattr(self, 'chk_kwargs_' + str(ind)) self.kwargs_fields[key] = chk_kw.isChecked() def load_source_files(self, ind, key): """Browser to source file location.""" filenames, ftype = QFileDialog.getOpenFileNames( parent=self, caption='Open file', directory='', filter="(*)" ) if len(filenames): all_names = '' for fname in filenames: all_names += fname + ', ' lin_src = getattr(self, 'lin_src_' + str(ind)) lin_src.setText(all_names[:-2]) self.source_paths[key]['path'] = all_names[:-2] def load_source_dir(self, ind, key): """Browser to source directory location.""" dirname = QFileDialog.getExistingDirectory( parent=self, caption='Source directory', directory='' ) if len(dirname): lin_src = getattr(self, 'lin_src_' + str(ind)) lin_src.setText(dirname) self.source_paths[key]['path'] = dirname def load_meta_file(self, filename=None): """ Opens (or browsers to) a .yml file containing metadata for NWB. Then: 1. loads the internal variable self.metadata with the content 2. writes content to editor 3. 
updates forms """ if filename is None: filename, ftype = QFileDialog.getOpenFileName( parent=self, caption='Open file', directory='', filter="(*.yml)" ) if ftype != '(*.yml)': return with open(filename) as f: self.metadata = yaml.safe_load(f) txt = yaml.dump(self.metadata, default_flow_style=False) self.editor.setText(txt) self.update_forms() def load_conversion_module(self): """Browser to conversion script file location.""" filename, ftype = QFileDialog.getOpenFileName( parent=self, caption='Open file', directory='', filter="(*py)" ) if filename != '': self.conversion_module_path = filename def load_nwb_file(self): """Browser to nwb file location.""" filename, ftype = QFileDialog.getSaveFileName( parent=self, caption='Save file', directory='', filter="(*nwb)" ) if filename is not None: self.lin_nwb_file.setText(filename) def load_nwb_explorer(self): """Browser to nwb file location.""" filename, ftype = QFileDialog.getOpenFileName( parent=self, caption='Load file', directory='', filter="(*nwb)" ) if filename != '': # Opens file on Ipython console self.run_console(fname=filename) # Opens file on NWBWidgets self.run_voila(fname=filename) def close_nwb_explorer(self): """Close current NWB file view on explorer""" if hasattr(self, 'voilathread'): # Stop Voila thread self.voilathread.stop() # Closes nwb file on console self.explorer_console._execute('io.close()', True) self.explorer_console.clear() def run_console(self, fname): """Loads NWB file on Ipython console""" # Imports extension modules imports_text = "" for k, v in self.extension_modules.items(): imports_text += "\nfrom " + k + " import " + ", ".join(v) code = """ import pynwb import os """ + imports_text + """ fpath = os.path.join(r'""" + str(fname) + """') io = pynwb.NWBHDF5IO(fpath, 'r', load_namespaces=True) nwbfile = io.read() """ self.explorer_console._execute(code, True) self.explorer_console.clear() self.explorer_console.print_text('nwbfile --> Loaded NWB file\n') def run_voila(self, fname): """Set up notebook and run it with a dedicated Voila thread.""" # Stop any current Voila thread self.close_nwb_explorer() # Write Figure + ipywidgets to a .ipynb file nb = nbf.v4.new_notebook() # Imports extension modules imports_text = "" for k, v in self.extension_modules.items(): imports_text += "\nfrom " + k + " import " + ", ".join(v) code = """ from nwbwidgets import nwb2widget import pynwb import os """ + imports_text + """ fpath = os.path.join(r'""" + str(fname) + """') io = pynwb.NWBHDF5IO(fpath, 'r', load_namespaces=True) nwb = io.read() nwb2widget(nwb) """ nb['cells'] = [nbf.v4.new_code_cell(code)] nbpath = os.path.join(self.temp_dir, Path(fname).stem + '.ipynb') nbf.write(nb, nbpath) # Run instance of Voila with the just saved .ipynb file port = get_free_port() self.voilathread = voilaThread(parent=self, port=port, nbpath=nbpath) self.voilathread.start() # Load Voila instance on GUI self.update_html(url='http://localhost:' + str(port)) # self.parent.write_to_logger(txt=self.name + " ready!") def update_html(self, url): """Loads temporary HTML file and render it.""" self.html.load(QtCore.QUrl(url)) self.html.show() def clean_groups(self): """Removes all groups widgets.""" for grp in self.groups_list: nWidgetsVbox = self.l_vbox1.count() for i in range(nWidgetsVbox): if self.l_vbox1.itemAt(i) is not None: if grp == self.l_vbox1.itemAt(i).widget(): self.l_vbox1.itemAt(i).widget().setParent(None) # deletes widget self.groups_list = [] # deletes all list items def update_forms(self): """Updates forms fields with values in metadata.""" 
self.clean_groups() for grp in self.metadata: if grp == 'NWBFile': item = GroupNwbfile(parent=self, metadata=self.metadata['NWBFile']) item.write_fields(data=self.metadata['NWBFile']) self.groups_list.append(item) self.l_vbox1.addWidget(item) if grp == 'Subject': item = GroupSubject(parent=self) item.write_fields(data=self.metadata['Subject']) self.groups_list.append(item) self.l_vbox1.addWidget(item) if grp == 'Ophys': item = GroupOphys(self) for subgroup in self.metadata[grp]: # if many items of same class, in list if isinstance(self.metadata[grp][subgroup], list): for subsub in self.metadata[grp][subgroup]: item.add_group( group=self.name_to_gui_class[subgroup](parent=item), metadata=subsub ) else: # if it's just one item of this class item.add_group( group=self.name_to_gui_class[subgroup](parent=item), metadata=self.metadata[grp][subgroup] ) self.groups_list.append(item) self.l_vbox1.addWidget(item) if grp == 'Ecephys': item = GroupEcephys(self) for subgroup in self.metadata[grp]: # if many items of same class, in list if isinstance(self.metadata[grp][subgroup], list): for subsub in self.metadata[grp][subgroup]: item.add_group( group=self.name_to_gui_class[subgroup](parent=item), metadata=subsub ) else: # if it's just one item of this class item.add_group( group=self.name_to_gui_class[subgroup](parent=item), metadata=self.metadata[grp][subgroup] ) self.groups_list.append(item) self.l_vbox1.addWidget(item) if grp == 'Behavior': item = GroupBehavior(self) for subgroup in self.metadata[grp]: # if many items of same class, in list if isinstance(self.metadata[grp][subgroup], list): for subsub in self.metadata[grp][subgroup]: item.add_group( group=self.name_to_gui_class[subgroup](parent=item), metadata=subsub ) else: # if it's just one item of this class item.add_group( group=self.name_to_gui_class[subgroup](parent=item), metadata=self.metadata[grp][subgroup] ) self.groups_list.append(item) self.l_vbox1.addWidget(item) if grp == 'Ogen': item = GroupOgen(self) for subgroup in self.metadata[grp]: # if many items of same class, in list if isinstance(self.metadata[grp][subgroup], list): for subsub in self.metadata[grp][subgroup]: item.add_group( group=self.name_to_gui_class[subgroup](parent=item), metadata=subsub ) else: # if it's just one item of this class item.add_group( group=self.name_to_gui_class[subgroup](parent=item), metadata=self.metadata[grp][subgroup] ) self.groups_list.append(item) self.l_vbox1.addWidget(item) nItems = self.l_vbox1.count() self.l_vbox1.addStretch(nItems) def about(self): """About dialog.""" msg = QMessageBox() msg.setWindowTitle("About NWB conversion") msg.setIcon(QMessageBox.Information) msg.setText("Version: 0.2.0 \n" "Shared tools for converting data from various formats to NWB:N 2.0.\n ") msg.setInformativeText("<a href='https://github.com/catalystneuro/nwb-conversion-tools'>NWB conversion tools Github page</a>") msg.setStandardButtons(QMessageBox.Ok) msg.exec_() def closeEvent(self, event): """Before exiting, executes these actions.""" # Stop any current Voila thread self.close_nwb_explorer() # Remove any remaining temporary directory/files shutil.rmtree(self.temp_dir, ignore_errors=False, onerror=None) event.accept() def get_free_port(): not_free = True while not_free: port = np.random.randint(7000, 7999) with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock: res = sock.connect_ex(('localhost', port)) if res != 0: not_free = False return port def is_listening_to_port(process, port): is_listening = False # iterate over processe's children for child in 
process.children(recursive=True): # iterate over child connections for con in child.connections(): if con.status == 'LISTEN': if isinstance(con.laddr.port, int): is_listening = con.laddr.port == port elif isinstance(con.laddr.port, list): is_listening = port in con.laddr.port return is_listening return is_listening class voilaThread(QtCore.QThread): def __init__(self, parent, port, nbpath): super().__init__() self.parent = parent self.port = port self.nbpath = nbpath def run(self): os.system("voila " + self.nbpath + " --no-browser --port " + str(self.port)) def stop(self): pid = os.getpid() process = psutil.Process(pid) proc_list = [] for child in process.children(recursive=True): is_listening = is_listening_to_port(child, self.port) if is_listening: proc_list.append(child) for proc in proc_list: for child in process.children(recursive=True): child.kill() # Runs conversion function, useful to wait for thread class ConversionFunctionThread(QtCore.QThread): def __init__(self, parent): super().__init__() self.parent = parent self.error = None def run(self): #try: mod_file = self.parent.conversion_module_path spec = importlib.util.spec_from_file_location(os.path.basename(mod_file).strip('.py'), mod_file) conv_module = importlib.util.module_from_spec(spec) spec.loader.exec_module(conv_module) metadata = self.parent.read_metadata_from_form() conv_module.conversion_function(source_paths=self.parent.source_paths, f_nwb=self.parent.lin_nwb_file.text(), metadata=metadata, **self.parent.kwargs_fields) # self.error = None #except Exception as error: # self.error = error class CustomComboBox(QComboBox): def __init__(self): """Class created to ignore mouse wheel events on combobox.""" super().__init__() def wheelEvent(self, event): event.ignore() if __name__ == '__main__': app = QApplication(sys.argv) # instantiate a QtGui (holder for the app) ex = Application() sys.exit(app.exec_()) # If it is imported as a module def nwb_conversion_gui(metafile=None, conversion_module='', source_paths={}, kwargs_fields={}, extension_modules={}, extension_forms={}, show_add_del=False): """Sets up QT application.""" app = QtCore.QCoreApplication.instance() if app is None: app = QApplication(sys.argv) # instantiate a QtGui (holder for the app) Application( metafile=metafile, conversion_module=conversion_module, source_paths=source_paths, kwargs_fields=kwargs_fields, extension_modules=extension_modules, extension_forms=extension_forms, show_add_del=show_add_del ) sys.exit(app.exec_())
{"hexsha": "5d6ad294d1cd73f19d78cd3166463a744d70e1c4", "size": 28658, "ext": "py", "lang": "Python", "max_stars_repo_path": "singer_lab_to_nwb/gui/nwb_conversion_gui.py", "max_stars_repo_name": "stephprince/singer-lab-to-nwb-hackathon", "max_stars_repo_head_hexsha": "d357c1ebf948d8b49d7bc132dc892610be155e46", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "singer_lab_to_nwb/gui/nwb_conversion_gui.py", "max_issues_repo_name": "stephprince/singer-lab-to-nwb-hackathon", "max_issues_repo_head_hexsha": "d357c1ebf948d8b49d7bc132dc892610be155e46", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "singer_lab_to_nwb/gui/nwb_conversion_gui.py", "max_forks_repo_name": "stephprince/singer-lab-to-nwb-hackathon", "max_forks_repo_head_hexsha": "d357c1ebf948d8b49d7bc132dc892610be155e46", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.9985693848, "max_line_length": 134, "alphanum_fraction": 0.5885267639, "include": true, "reason": "import numpy", "num_tokens": 6104}
#include "StaticSound.h" #include <cassert> #include <boost/scoped_ptr.hpp> #include "../../include/gameaudio/IFileReader.h" #include "../../include/gameaudio/Error.h" #include "WavDecoder.h" #include "OggVorbisDecoder.h" using namespace gameaudio; StaticSound::StaticSound(boost::shared_ptr<IFileReader> reader, encoding_type encoding) { assert(reader != 0); alGenBuffers(1, &_alBuffer); boost::scoped_ptr<IDecoder> decoder(createDecoder(reader, encoding)); ALenum format = decoder->getFormat(); if (format == -1) throw Error("Format which OpenAL does not support"); uint64 size = decoder->getSizeByBytes(); if (size > 0xFFFFFFFF) throw Error("Too large file for non-streaming sound"); char* data = new char[size]; unsigned read_size = decoder->read(data, 0, size); alBufferData(_alBuffer, format, data, read_size, decoder->getFrequency()); delete[] data; alSourcei(_alSource, AL_BUFFER, _alBuffer); _frequency = decoder->getFrequency(); _size = read_size / (decoder->getBitNum() / 8 * decoder->getChannelsNum()); } uint64 StaticSound::getLengthBySamples() const { return _size; } float StaticSound::getLengthBySecs() const { return (float)((double)_size / (double)_frequency); } void StaticSound::update() { if (getPlayPositionBySamples() >= _size) { setPlayPositionBySamples(0); } } float StaticSound::getPlayPositionBySecs() const { return getPlayPositionBySamples() / (float)_frequency; } void StaticSound::setPlayPositionBySecs(float v) { setPlayPositionBySamples(v * _frequency); } unsigned StaticSound::getFrequency() const { return _frequency; }
{"hexsha": "e64b26c7889096ac85b752810d0f906717beb217", "size": 1596, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/gameaudio/StaticSound.cpp", "max_stars_repo_name": "YosukeM/GameAudio", "max_stars_repo_head_hexsha": "49d4f5b56058b6f99d3f33438139adc07f56547a", "max_stars_repo_licenses": ["Unlicense", "MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2015-06-21T09:49:21.000Z", "max_stars_repo_stars_event_max_datetime": "2015-11-12T01:13:41.000Z", "max_issues_repo_path": "src/gameaudio/StaticSound.cpp", "max_issues_repo_name": "YosukeM/GameAudio", "max_issues_repo_head_hexsha": "49d4f5b56058b6f99d3f33438139adc07f56547a", "max_issues_repo_licenses": ["Unlicense", "MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/gameaudio/StaticSound.cpp", "max_forks_repo_name": "YosukeM/GameAudio", "max_forks_repo_head_hexsha": "49d4f5b56058b6f99d3f33438139adc07f56547a", "max_forks_repo_licenses": ["Unlicense", "MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.1639344262, "max_line_length": 89, "alphanum_fraction": 0.7399749373, "num_tokens": 399}
// // Created by Quentin Liardeaux on 12/19/19. // #ifndef R_TYPE_CLIENT_HPP #define R_TYPE_CLIENT_HPP #include <string> #include <optional> #include <queue> #include "protocol.hpp" #include "Message.hpp" #include "Protocol/Packet.hpp" #include "ClientHandler.hpp" #include "GameRoom.hpp" #include "Lobby.hpp" #include "IdProvider.hpp" #include "Position.hpp" #include <boost/asio.hpp> #include <boost/bind.hpp> #include <boost/thread.hpp> #include <boost/enable_shared_from_this.hpp> typedef boost::asio::ip::tcp BoostTcp; typedef boost::asio::ip::udp BoostUdp; class ClientHandler; class GameRoom; class Lobby; class Client : public boost::enable_shared_from_this<Client> { public: static boost::shared_ptr<Client> create(boost::asio::io_context &context); void start(); void stop(); void update(); BoostTcp::socket &getSocket() { return m_tcpSocket; } size_t getId() const { return m_id; } const std::string& getNickname() const { return m_nickname; } void setHandler(boost::shared_ptr<ClientHandler> handler) { m_handler = handler; } void setUdpSocket(uint16_t port) { m_udpPort = port; } const Position& getPosition() const { return m_position; } const Position& getVelocity() const { return m_velocity; } void sendPlayerJoinGame(size_t playerId, std::string nickname); void sendPlayerQuitGame(size_t playerId); void sendPlayerState(); void sendFriendState(size_t id, const Position& position, const Position& velocity); void sendEntityState(size_t id, const Position& position, const Position& velocity, EntityType type); void triggerCollision(size_t firstEntity, EntityType firstType, size_t secondEntity, EntityType secondType, const Position& pos); void startGame(); ~Client(); private: explicit Client(boost::asio::io_context &context); void waitHeader(const boost::system::error_code &ec); void receivePacket(const boost::system::error_code &ec); void receiveBody(const boost::system::error_code &ec); std::unique_ptr<Message> handleRequest(uint8_t *data, uint16_t packetId); void dispatchPackets(const Message* msg); void sendTcpMessage(const Message& msg); void sendUdpMessage(const Message& msg); void connectClient(const ClientConnect *msg); void createGame(const CreateGame *msg); void joinGame(const JoinGame *msg); void dispatchUdpPackets(); void receiveUdpPackets(); std::queue<std::unique_ptr<Message>> getServerResponses(); void handlePacket(const Message& msg); void movePlayer(const DirectionState& msg); void fireEntity(const FireEntity& msg); boost::asio::io_service m_ioService; BoostTcp::socket m_tcpSocket; packet_header_t m_packetHeader; uint8_t *m_packetData; uint8_t *m_writePacketData; boost::shared_ptr<ClientHandler> m_handler; size_t m_id; std::string m_nickname; uint16_t m_udpPort; std::string m_ipAddress; std::optional<BoostUdp::endpoint> m_remoteEndpoint; std::optional<BoostUdp::socket> m_udpSocket; bool m_isUdpRunning; Position m_velocity; Position m_position; boost::thread m_thread; std::queue<std::unique_ptr<Message>> m_udpResponses; boost::mutex m_responsesMutex; }; #endif //R_TYPE_CLIENT_HPP
{"hexsha": "dbbc19642ae8b3e1bb0338ea6805cf9e12898624", "size": 3290, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "Server/includes/Client.hpp", "max_stars_repo_name": "LiardeauxQ/r-type", "max_stars_repo_head_hexsha": "8a77164c276b2d5958cd3504a9ea34f1cf6823cf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2020-02-12T12:02:00.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-23T15:31:59.000Z", "max_issues_repo_path": "Server/includes/Client.hpp", "max_issues_repo_name": "LiardeauxQ/r-type", "max_issues_repo_head_hexsha": "8a77164c276b2d5958cd3504a9ea34f1cf6823cf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Server/includes/Client.hpp", "max_forks_repo_name": "LiardeauxQ/r-type", "max_forks_repo_head_hexsha": "8a77164c276b2d5958cd3504a9ea34f1cf6823cf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2020-02-12T12:02:03.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-23T15:32:55.000Z", "avg_line_length": 31.0377358491, "max_line_length": 105, "alphanum_fraction": 0.7303951368, "num_tokens": 785}
#include <boost/foreach.hpp> #include <framework/framework.h> #include <framework/graphics.h> #include <framework/bitmap.h> #include <framework/texture.h> #include <framework/exception.h> #include <game/world/terrain_helper.h> #include <game/editor/editor_terrain.h> namespace ed { static const int splatt_width = 128; static const int splatt_height = 128; editor_terrain::editor_terrain() { } editor_terrain::~editor_terrain() { } void editor_terrain::render(fw::sg::scenegraph &scenegraph) { int num_baked = 0; { std::unique_lock<std::mutex> lock(_patches_to_bake_mutex); BOOST_FOREACH(auto patch, _patches_to_bake) { bake_patch(std::get<0>(patch), std::get<1>(patch)); num_baked++; } _patches_to_bake.clear(); } terrain::render(scenegraph); } // set the height of the given vertex to the given value. void editor_terrain::set_vertex_height(int x, int z, float height) { while (x < 0) { x += _width; } while (x >= _width) { x -= _width; } while (z < 0) { z += _length; } while (z >= _length) { z -= _length; } _heights[z * _width + x] = height; auto this_patch = std::make_tuple(x / PATCH_SIZE, z / PATCH_SIZE); bool found = false; std::unique_lock<std::mutex> lock(_patches_to_bake_mutex); BOOST_FOREACH(auto patch, _patches_to_bake) { if (patch == this_patch) { found = true; break; } } if (!found) { _patches_to_bake.push_back(this_patch); } } void editor_terrain::initialize_splatt() { std::vector<uint32_t> buffer(splatt_width * splatt_height); for (int y = 0; y < splatt_height; y++) { for (int x = 0; x < splatt_width; x++) { buffer[(y * splatt_width) + x] = 0x000000ff; } } fw::bitmap bmp(splatt_width, splatt_height); bmp.set_pixels(buffer); ensure_patches(); for (int z = 0; z < get_patches_length(); z++) { for (int x = 0; x < get_patches_width(); x++) { set_splatt(x, z, bmp); } } } void editor_terrain::set_splatt(int patch_x, int patch_z, fw::bitmap const &bmp) { std::shared_ptr<fw::texture> splatt = get_patch_splatt(patch_x, patch_z); if (splatt == std::shared_ptr<fw::texture>()) { splatt = std::shared_ptr<fw::texture>(new fw::texture()); set_patch_splatt(patch_x, patch_z, splatt); } int index = get_patch_index(patch_x, patch_z); while (static_cast<int>(_splatt_bitmaps.size()) <= index) { // we'll add the new bitmap to all of them, but they'll eventually // be replaced with the correct one (well, hopefully) _splatt_bitmaps.push_back(bmp); } _splatt_bitmaps[index] = bmp; splatt->create(bmp); } fw::bitmap &editor_terrain::get_splatt(int patch_x, int patch_z) { int index = get_patch_index(patch_x, patch_z); return _splatt_bitmaps[index]; } int editor_terrain::get_num_layers() const { return _layers.size(); } std::shared_ptr<fw::bitmap> editor_terrain::get_layer(int number) { if (number < 0 || number >= static_cast<int>(_layer_bitmaps.size())) return std::shared_ptr<fw::bitmap>(); return _layer_bitmaps[number]; } void editor_terrain::set_layer(int number, std::shared_ptr<fw::bitmap> bitmap) { if (number < 0) return; std::shared_ptr<fw::texture> texture(new fw::texture()); texture->create(bitmap); if (number == static_cast<int>(_layers.size())) { // we need to add a new layer _layer_bitmaps.push_back(bitmap); _layers.push_back(texture); return; } else if (number > static_cast<int>(_layer_bitmaps.size())) { // TODO: not supported yet return; } _layer_bitmaps[number] = bitmap; _layers[number] = texture; } void editor_terrain::build_collision_data(std::vector<bool> &vertices) { if (static_cast<int>(vertices.size()) < (_width * _length)) { BOOST_THROW_EXCEPTION(fw::exception() << fw::message_error_info("vertices 
vector is too small!")); } game::build_collision_data(vertices, _heights, _width, _length); } }
{"hexsha": "a38db99e299bf72de2103d59763e33a3c9bc903a", "size": 4090, "ext": "cc", "lang": "C++", "max_stars_repo_path": "src/game/editor/editor_terrain.cc", "max_stars_repo_name": "codeka/ravaged-planets", "max_stars_repo_head_hexsha": "ab20247b3829414e71b58c9a6e926bddf41f1da5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/game/editor/editor_terrain.cc", "max_issues_repo_name": "codeka/ravaged-planets", "max_issues_repo_head_hexsha": "ab20247b3829414e71b58c9a6e926bddf41f1da5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/game/editor/editor_terrain.cc", "max_forks_repo_name": "codeka/ravaged-planets", "max_forks_repo_head_hexsha": "ab20247b3829414e71b58c9a6e926bddf41f1da5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2017-07-17T22:24:17.000Z", "max_forks_repo_forks_event_max_datetime": "2019-10-15T18:37:15.000Z", "avg_line_length": 26.5584415584, "max_line_length": 103, "alphanum_fraction": 0.6484107579, "num_tokens": 1123}
from tqdm import tqdm
import os
from glob import glob
from multiprocessing.dummy import Pool as ThreadPool
from PIL import Image as IM
import scipy.misc
import imageio as io
import numpy as np

IMAGE_PATH = "../../../dataset/celebA/*.jpg"
SAVE_PATH = "../../../dataset/celebA_crop"
NUM_THREAD = 16


def center_crop(x, crop_h, crop_w=None, resize_w=64):
    if crop_w is None:
        crop_w = crop_h
    h, w = x.shape[:2]
    j = int(round((h - crop_h) / 2.))
    i = int(round((w - crop_w) / 2.))
    return scipy.misc.imresize(x[j:j + crop_h, i:i + crop_w], [resize_w, resize_w])


def transform(image, npx=64, resize_w=64):
    cropped_image = center_crop(image, npx, resize_w=resize_w)
    return np.array(cropped_image)


def imread(path, is_grayscale=False):
    if is_grayscale:
        return io.imread(path).astype(np.float).flatten()
    else:
        return io.imread(path).astype(np.float)


def get_image(image_path, image_size, resize_w=64, is_grayscale=False):
    return transform(imread(image_path, is_grayscale), image_size, resize_w)


def data_process(data):
    filename = os.path.basename(data)
    filename = os.path.join(SAVE_PATH, filename)
    img = get_image(data, 108, resize_w=64, is_grayscale=0)
    im = IM.fromarray(img)
    im.save(filename)


def parallel_process(fn, item):
    pool = ThreadPool(NUM_THREAD)
    for _ in tqdm(pool.imap_unordered(fn, item), total=len(item)):
        pass


def main():
    dataset = glob(IMAGE_PATH)
    parallel_process(fn=data_process, item=dataset)


if __name__ == '__main__':
    main()
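# --- Illustrative addition (not part of the original script) --------------------------
# center_crop above cuts a crop_h x crop_w window out of the middle of the image and
# resizes it to resize_w x resize_w; with the defaults used in data_process this is a
# 108x108 centre crop of an aligned 218x178 celebA image scaled down to 64x64. The
# helper below (hypothetical, never called by the pipeline) reproduces only the offset
# arithmetic with plain NumPy, which is useful because scipy.misc.imresize itself was
# removed in SciPy 1.3+.
def _demo_center_crop_geometry(h=218, w=178, crop_h=108, crop_w=108):
    j = int(round((h - crop_h) / 2.))  # top offset, 55 for the defaults
    i = int(round((w - crop_w) / 2.))  # left offset, 35 for the defaults
    print('crop window rows %d:%d, cols %d:%d' % (j, j + crop_h, i, i + crop_w))
    return j, i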
{"hexsha": "ec8cf902fbad7d0b1a10c0f47b3df97a1b0416c1", "size": 1648, "ext": "py", "lang": "Python", "max_stars_repo_path": "dataset/data_process.py", "max_stars_repo_name": "dev6969/DCGAN_face", "max_stars_repo_head_hexsha": "ff373c8521b5023ba30fee2c99cf905977090e14", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dataset/data_process.py", "max_issues_repo_name": "dev6969/DCGAN_face", "max_issues_repo_head_hexsha": "ff373c8521b5023ba30fee2c99cf905977090e14", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dataset/data_process.py", "max_forks_repo_name": "dev6969/DCGAN_face", "max_forks_repo_head_hexsha": "ff373c8521b5023ba30fee2c99cf905977090e14", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.0163934426, "max_line_length": 77, "alphanum_fraction": 0.6523058252, "include": true, "reason": "import numpy,import scipy", "num_tokens": 414}
#ifndef NOTIFICATIONFUNCTIONTYPETRAITS_HPP_
#define NOTIFICATIONFUNCTIONTYPETRAITS_HPP_

#include "Config.hpp"
#include "Widgets.hpp"

#include <boost/function.hpp>
#include <boost/bind.hpp>

struct NotificationFunctionTypeTraitsTracing
{
};

template< typename NotificationFunction >
struct NotificationFunctionTypeTraits
{
    void call( NotificationFunction& aNotificationFunction, NotifyEvent&)
    {
        aNotificationFunction();
    }
};

template< >
struct NotificationFunctionTypeTraits< std::function< void( NotifyEvent&) > >
{
    void call( std::function< void( NotifyEvent&) >& aNotificationFunction, NotifyEvent& event)
    {
        aNotificationFunction( event);
    }
};

/**
 * Dummy function to allow for usage of not-yet implemented
 * std::function<void (CommandEvent&)> functions
 * @param
 */
void Ooops( NotifyEvent&);

#endif /* NOTIFICATIONFUNCTIONTYPETRAITS_HPP_ */
{"hexsha": "a30735aadfb6f7bf0ae2a10bada33d2807e516ca", "size": 883, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/NotificationFunctionTypeTraits.hpp", "max_stars_repo_name": "MustafaSabur/RobotWereld", "max_stars_repo_head_hexsha": "e696e6e7ad890abb719a78fc1a0c111a680d27e0", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/NotificationFunctionTypeTraits.hpp", "max_issues_repo_name": "MustafaSabur/RobotWereld", "max_issues_repo_head_hexsha": "e696e6e7ad890abb719a78fc1a0c111a680d27e0", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/NotificationFunctionTypeTraits.hpp", "max_forks_repo_name": "MustafaSabur/RobotWereld", "max_forks_repo_head_hexsha": "e696e6e7ad890abb719a78fc1a0c111a680d27e0", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.075, "max_line_length": 77, "alphanum_fraction": 0.7565118913, "num_tokens": 208}
import os
import unittest

import numpy as np
from monty.serialization import loadfn
from pymatgen.core import Lattice, Structure

from pymatgen.analysis.diffusion.aimd.rdf import RadialDistributionFunctionFast

tests_dir = os.path.dirname(os.path.abspath(__file__))


class RDFTest(unittest.TestCase):
    def test_rdf(self):
        # Parse the DiffusionAnalyzer object from json file directly
        obj = loadfn(os.path.join(tests_dir, "cNa3PS4_pda.json"))

        structure_list = []
        for i, s in enumerate(obj.get_drift_corrected_structures()):
            structure_list.append(s)
            if i == 9:
                break
        species = ["Na", "P", "S"]

        # Test from_species
        obj = RadialDistributionFunctionFast(structures=structure_list, ngrid=101, rmax=10.0, sigma=0.1)
        r, s_na_rdf = obj.get_rdf("S", "Na")
        self.assertTrue(s_na_rdf.shape == (101,))
        self.assertAlmostEqual(r[np.argmax(s_na_rdf)], 2.9000, 4)

    def test_rdf_coordination_number(self):
        # create a simple cubic lattice
        coords = np.array([[0.5, 0.5, 0.5]])
        atom_list = ["S"]
        lattice = Lattice.from_parameters(a=1.0, b=1.0, c=1.0, alpha=90, beta=90, gamma=90)
        structure = Structure(lattice, atom_list, coords)
        rdf = RadialDistributionFunctionFast(structures=[structure], rmax=5.0, sigma=0.01, ngrid=500)
        self.assertEqual(np.round(rdf.get_coordination_number("S", "S")[1][110], 2), 6.0)


if __name__ == "__main__":
    unittest.main()
{"hexsha": "86b561eda41e88b9ce64b006a1540e5da20a331b", "size": 1521, "ext": "py", "lang": "Python", "max_stars_repo_path": "pymatgen/analysis/diffusion/aimd/tests/test_rdf.py", "max_stars_repo_name": "JiQi535/pymatgen-analysis-diffusion", "max_stars_repo_head_hexsha": "3600e70549e9462f30e104e083410e7b4544a4b2", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pymatgen/analysis/diffusion/aimd/tests/test_rdf.py", "max_issues_repo_name": "JiQi535/pymatgen-analysis-diffusion", "max_issues_repo_head_hexsha": "3600e70549e9462f30e104e083410e7b4544a4b2", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pymatgen/analysis/diffusion/aimd/tests/test_rdf.py", "max_forks_repo_name": "JiQi535/pymatgen-analysis-diffusion", "max_forks_repo_head_hexsha": "3600e70549e9462f30e104e083410e7b4544a4b2", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.8, "max_line_length": 104, "alphanum_fraction": 0.6600920447, "include": true, "reason": "import numpy", "num_tokens": 413}
import numpy as np
import statsmodels.api as sm

nsample = 100
# Here we want the values of x1 to be evenly spaced from 0 to 10.
x = np.linspace(0, 10, nsample)
# Use sm.add_constant() to append a column of constant 1s to the array.
X = sm.add_constant(x)
# Then set the model coefficients beta0, beta1; here we set them to 1 and 10.
beta = np.array([1, 10])
# We also need an error term, so generate a normal sample of length nsample.
e = np.random.normal(size=nsample)
# From this, generate the response y.
y = np.dot(X, beta) + e
# Run OLS() on the response and the regressors.
model = sm.OLS(y, X)
# Then obtain the fitted results.
results = model.fit()
# Retrieve the estimated regression coefficients.
print(results.params)
# The full regression summary can also be printed.
print(results.summary(alpha=0.01))
# Predict y at x=15 (interval prediction at the 99% confidence level).
predication = results.get_prediction([1, 15])
print(predication.summary_frame(alpha=0.01))
{"hexsha": "fa13e1e71e871dc3d81a0198ae6e820ba7edfaba", "size": 679, "ext": "py", "lang": "Python", "max_stars_repo_path": "References/numpy/\u56de\u5f52\u5206\u6790.py", "max_stars_repo_name": "royqh1979/python_libs_usage", "max_stars_repo_head_hexsha": "57546d5648d8a6b7aca7d7ff9481aa7cd4d8f511", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "References/numpy/\u56de\u5f52\u5206\u6790.py", "max_issues_repo_name": "royqh1979/python_libs_usage", "max_issues_repo_head_hexsha": "57546d5648d8a6b7aca7d7ff9481aa7cd4d8f511", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "References/numpy/\u56de\u5f52\u5206\u6790.py", "max_forks_repo_name": "royqh1979/python_libs_usage", "max_forks_repo_head_hexsha": "57546d5648d8a6b7aca7d7ff9481aa7cd4d8f511", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.0888888889, "max_line_length": 44, "alphanum_fraction": 0.6980854197, "include": true, "reason": "import numpy,import statsmodels", "num_tokens": 342}
#! /usr/bin/env python # -*- coding:utf-8 -*- """Generate SN Ia toy models for Weizmann workshop code-comparison study (Radiation Transfer and Explosive Thermonuclear Burning in Supernovae, 17-28 June 2018) The model is defined by its total mass (--mtot) and asymptotic kinetic energy (--ekin; alternatively it can be determined given the composition based on Eq. 1 of W07). The density profile can either be exponential (--densprof expon) or consist of a broken power law with indices delta,n (--densprof power --densexp <delta>,<n>; see CS89, K10). The ejecta is divided into N zones with constant velocity width (--dvel). The mass of each zone is computed given the zone volume (radii determined from velocity assuming homologous expansion) and density profile. Starting from the central zone, we keep adding mass shells until the ejecta mass reaches 99.99% of the total mass. The ejecta is supposed to consist of four distinct chemical zones: the innermost zone consists of stable IGEs (mass set using --mige; 100% Fe unless --xfracni is set to the relative fraction of stable Ni); then comes the 56Ni zone (mass at t=0 set using --mni56); then the IME zone (mass set using --mime; the IMEs to include are specified using --ime and their relative fraction with --xfracime). Note that some trace amount of Ti can be included in the 56Ni and IME zones with --xfracti (we simply replace xfracti of the 56Ni and IME masses with Ti). Finally, any remaining outermost layer is set to unburnt C/O (the relative fraction of O is set using --xfraco). The ejecta must contain some 56Ni and IMEs, but does not necessarily have to include stable IGEs or unburnt C/O. | || || || | | stable IGEs || 56Ni || IMEs || unburnt C/O | | (optional) || (+Ti) || (+Ti) || (optional) | mass = 0.............................................mtot The abundance profiles are connected using an analytical function (--transprof) over a given mass range (--dmige for stable IGE -> 56Ni connection; --dmni56 for 56Ni -> IME connection; --dmime for IME -> unburnt C/O connection). Note that one can set dmige = dmni56 = dmime using the --dmtrans option. The transition profile can either be a linear function (--transprof linear), an inverse-exponential (aka 'logistic') function with an associated scale factor(--transprof invexpon --transscl <scale factor>; see M18), or a cosine bell (--transprof cosine). The ejecta is evolved to a time (--tend) by solving the first law of thermodynamics assuming a radiation-dominated gas, local energy deposition from 56Ni decay, and no diffusion (i.e. the temperature in each zone is solved independently from adjacent zones). Given these assumptions, the final temperature can be determined analytically by noting that the time-weighted internal energy (=t*E(t)) equals the time-integrated time-weighted decay energy deposition rate (=Int{t*Q(t) dt}), as noted by K13 (we ignore the time-weighted internal energy shortly after explosion E(t0)*t0 << Int{Q(t) t dt}). A minimum temperature can be set using --tempmin. Last, an output file is generated (--fout) and the density/abundance profiles are displayed (unless --noplot is set). 
Parameters ---------- Typing: python mk_snia_toy_model.py -h will print the usage and input parameters (with their default values)) Examples -------- 1) ejecta with default settings (see python mk_snia_toy_model.py -h): python mk_snia_toy_model.py 2) same as 1) but with broken power-law density profile python mk_snia_toy_model.py --densprof power --densexp 0,10 3) 1.4 Msun ejecta (default) with Ekin computed based on composition, consisting of 0.1 Msun stable IGEs (default), 0.6 Msun 56Ni (default), 0.6 Msun IMEs (Mg, Si, S, Ca, all with default relative mass fractions), and hence 0.1 Msun unburnt C/O in equal mass fractions (default), connected over a mass range 0.1 Msun (default) using a cosine bell: python mk_snia_toy_model.py --ekinw07 --transprof cosine 4) 1.0 Msun ejecta with Ekin=10^51 erg (default) consisting only of 56Ni (0.5 Msun) and Si (0.5 Msun), connected over a mass range 0.1 Msun (default): python mk_snia_toy_model.py --mtot 1.0 --mni56 0.5 --mime 0.5 --ime si References ---------- CS89: Chevalier & Soker (1989), ApJ, 341, 867 J99: Jeffery (1999) arXiv:astro-ph/9907015 K10: Kasen (2010), ApJ, 708, 1025 K13: Katz et al. (2013), arXiv:1301.6766 [astro-ph] M18: Magee et al. (2018), arXiv:1803.04436v1 W07: Woosley et al. (2007), ApJ, 662, 487 TODO ---- - define grid based on delta_mass as opposed to delta_vel - adjust delta_vel (increase resolution) in composition transition zones Revision history ---------------- 27 Mar 2018 - first version of code (Stéphane Blondin, SB) 29 Mar 2018 - revised version (Boaz Katz, BK) o replaced temperature iteration with analytical calculation (see Katz et al. 2013), and removed references to an initial time t0 (ejecta evolved to final time T_END directly) o use a finer grid (in mass coordinates) for abundance profile calculations (change_mass_res() function) o correction to average density in transition region + special treatment of cell containing the break for broken power-law density profile o added values of various constants to output file o added new columns (X_IGE0 (at t=0), X_56Ni0, X_IME, X_CO) to output file and rearranged columns to first display parameters that do not depend on the final time 03 Apr 2018 - revised version for testing by workshop participants (SB) o code clean-up and added references to radioactive data 05 Apr 2018 - revised version (SB, per Frank Timmes' suggestions) o added Python2/3 compatibility o removed unused variables for temperature iteration 15 May 2018 - revised version (SB) o added option to include some Ti in 56Ni & IME zones (--xfracti) o report actual abundances in output file header in addition to requested ones o version date stamp o rearrange IMEs order in output file by decreasing atomic mass 20 May 2018 - revised version (SB) o added nzones and Vmax to output file header 07 Jun 2018 - revised version (SB & BK) o corrected bug in minxfrac option o implemented calculation of gamma-ray escape time t0 from J99 (BK) Author contact -------------- Stéphane Blondin, stephane.blondin@lam.fr """ import sys import os import re import numpy as np ### version number VERSION = '2018-06-07' ### ensure Python2 (2.6 or 2.7) and Python3 compatibility if sys.version_info.major == 2: input = raw_input # input() to mean raw_input() when running Python2 ### constants # (astro)physical constants AMU = 1.660540e-24 # atomic mass unit (g) ARAD = 7.5659125e-15 # radiation constant [erg/cm^3/K^4] MSUN = 1.989e+33 # solar mass (g) # 56Ni decay EDECAY_56NI = 1.7206 # energy per 56Ni decay (MeV) - obtained by summing photon 
energies from http://www.nndc.bnl.gov/chart/decaysearchdirect.jsp?nuc=56NI&unc=nds EDECAY_56CO = 3.6072 # energy per 56Co decay (MeV) - obtained by summing photon energies from http://www.nndc.bnl.gov/chart/decaysearchdirect.jsp?nuc=56CO&unc=nds MASS_56NI = 55.94212855 # mass of 56Ni nucleus (AMU) - from https://physics.nist.gov/cgi-bin/Compositions/stand_alone.pl?ele=Ni&isotype=all MASS_56CO = 55.93983880 # mass of 56Co nucleus (AMU) - from https://physics.nist.gov/cgi-bin/Compositions/stand_alone.pl?ele=Co&isotype=all THALF_56NI = 6.075 # 56Ni half-life (days) - from http://www.nndc.bnl.gov/chart/decaysearchdirect.jsp?nuc=56NI&unc=nds THALF_56CO = 77.236 # 56Co half-life (days) - from http://www.nndc.bnl.gov/chart/decaysearchdirect.jsp?nuc=56CO&unc=nds KAPPA_GAMMA = 0.025 # effective gamma-ray opacity (cm^2/g) for calculating the gamma-ray escape time in optically thin limit only, assuming mue=0.5 from J99 # conversion factors DAY2SEC = 86400.0 # days -> sec conversion MEV2ERG = 1.60217733e-6 # MeV -> erg conversion factor # misc EPSILON = 1e-5 # smallish number MAXFRAC_TI = 1e-4 # maximum value for Ti fraction in 56Ni and IME zones MAXMINXFRAC = 1e-5 # ensure --minxfrac option doesn't exceed this value ### defaults MTOT_INIT = 1.40 # total mass (msun) EKIN_INIT = 1.00 # asymptotic kinetic energy (1e51 erg) DVEL_INIT = 100.0 # cell size (km/s) DENSPROF_INIT = 'expon' # "density profile: 'expon' (exponential) or 'power' (broken power-law) DENSEXP_INIT = '0,10' # exponents for broken power-law density profile: <delta>,<n> e.g. --densexp 0,10 MIGE_INIT = 0.1 # stable IGE mass (msun) MNI56_INIT = 0.6 # 56Ni mass at t=0 (msun) MIME_INIT = 0.6 # IME mass (msun) DMIGE_INIT = 0.1 # mass interval over which stable IGE mass fraction transitions from 1 to 0 (msun) DMNI56_INIT = 0.1 # mass interval over which 56Ni mass fraction transitions from 1 to 0 (msun) DMIME_INIT = 0.1 # mass interval over which IME mass fraction transitions from 1 to 0 (msun) DMFINE_INIT = 1e-4 # resolution of fine grid of masses used for transitions (msun) TRANSPROF_INIT = 'linear' # transition profile for mass fraction variation from 1 to 0: 'linear', 'invexpon' (inverse exponential) or 'cosine' (cosine bell) TRANSSCL_INIT = 1.4e2 # scale factor for 'invexpon' (inverse exponential) transition profile; this default value of 140 ensures X>0.999 at the lower boundary XIGEFRAC_NI = 0.1 # fraction of stable IGE mass as stable Ni; the rest gets set to stable Fe XCOFRAC_O = 0.5 # fraction of unburnt C/O mass as O; the rest gets set to C XFRACTI_INIT = 0.0 # fraction of mass in 56Ni and IME zones set to Ti T_END = 1.0 # final time for toy model (days) TEMP_MIN = 1e3 # minimum allowed temperature (K) FOUT_INIT = 'snia_toy.dat' # output file name ### which IMEs to consider # # NOTE: can be modified but ensure Sum(XFRACIME_INIT)=1.0 # (if only one IME is given then --xfracime is set to 1.0 automatically) # # in model DDC10 from Alexei Khokhlov: # # M(Ca+S+Si+Mg) = 0.466 Msun # M(Ca) / M(Ca+S+Si+Mg) ~ 0.087 # M(S) / M(Ca+S+Si+Mg) ~ 0.351 # M(Si) / M(Ca+S+Si+Mg) ~ 0.542 # M(Mg) / M(Ca+S+Si+Mg) ~ 0.020 # IME_INIT = 'ca,s,si,mg' # comma-separated list of IMEs to include XFRACIME_INIT = '0.087,0.351,0.542,0.020' # comma-separated list of relative IME fractions ############################################################################### def change_mass_res(dm_oldres, x_oldres, dm_newres): """for mass grid with cell masses dm_oldres, and abundances x_oldres, find abundances at new resolution grid with cell masses dm_newres """ x_newres = 
dm_newres * 0.0 l_new = 0 l_old = 0 Nnew = len(dm_newres) Nold = len(dm_oldres) mold = dm_oldres[l_old] mnew = dm_newres[l_new] mxaccum = 0.0 while (l_new < Nnew) and (l_old < Nold): if mnew <= mold: mxaccum += mnew * x_oldres[l_old] mold -= mnew x_newres[l_new] = mxaccum / dm_newres[l_new] mxaccum = 0.0 l_new += 1 if l_new < Nnew: mnew = dm_newres[l_new] else: mxaccum += mold * x_oldres[l_old] mnew -= mold l_old += 1 if l_old < Nold: mold = dm_oldres[l_old] if l_new < Nnew: x_newres[l_new] = mxaccum / dm_newres[l_new] return x_newres ############################################################################### def shell_column_density(r_rshell): """the correction factor f for the average column density through a spherical shell at rshell, as seen by a spherical shell at r the column density is f*mshell/(4*pi*rshell^2). For r->0 f->1. """ x = r_rshell * 1.0 y = x / np.sqrt(np.abs(1 - x**2)) ansx = x * 0.0 ansx[x>1] = np.log(2.0 * (np.sqrt(y[x>1]**2 - 1) + y[x>1])) - np.log(2) ansx[x<1] = (np.arcsinh(y[x<1]) - np.arcsinh(-y[x<1])) / 2.0 ans = ansx / x return ans ############################################################################### def total_column_density_cgs(v_edge, m_cell, XNi56): """ calculate the total, ni56(t=0) weighted, angle averaged, column density (multiplied by t^2 so constant) *****NOTE***** that v_edge, m_cell, XNi56 are in cgs """ mNi56_cell = m_cell * XNi56 N_cell = len(m_cell) def cell_to_edge(a_cell): a_edge = a_cell * 1.0 a_edge[-1] = a_cell[-1] / 2.0 a_edge[:-1] = (a_cell[:-1] + a_cell[1:]) / 2.0 return a_edge def edge_to_mid(a_edge): a_mid = a_edge * 1.0 a_mid[0] = a_edge[0] / 2.0 a_mid[1:] = (a_edge[:-1] + a_edge[1:]) / 2.0 return a_mid v_mid = edge_to_mid(v_edge) m_edge = cell_to_edge(m_cell) SigV_edge = m_edge / (4 * np.pi * v_edge**2) SigV_ave_cell = m_cell * 0.0 for lcell in range(N_cell): SigV_ave_cell[lcell] = np.sum(SigV_edge * shell_column_density(v_mid[lcell] / v_edge)) SigV_tot = np.sum(SigV_ave_cell * mNi56_cell) / np.sum(mNi56_cell) return SigV_tot ############################################################################### if __name__ == '__main__': import argparse import matplotlib.pyplot as plt parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) # # options # parser.add_argument('--mtot', default=MTOT_INIT, type=float, help='total mass (msun)') parser.add_argument('--ekin', default=EKIN_INIT, type=float, help='asymptotic Ekin (1e51 erg)') parser.add_argument('--ekinw07', action='store_true', help='compute Ekin based on W07, Eq. 1') parser.add_argument('--dvel', default=DVEL_INIT, type=float, help='cell size (km/s)') parser.add_argument('--densprof', default=DENSPROF_INIT, type=str, choices=['expon','power'], help="density profile: 'expon' (exponential) or 'power' (broken power-law)") parser.add_argument('--densexp', default=DENSEXP_INIT, type=str, help='exponents for broken power-law density profile: <delta>,<n> e.g. 
--densexp 0,10') parser.add_argument('--tend', default=T_END, type=float, help='final time for toy model (d)') parser.add_argument('--tempmin', default=TEMP_MIN, type=float, help='minimum allowed temperature (K)') parser.add_argument('--mige', default=MIGE_INIT, type=float, help='stable IGE mass (msun)') parser.add_argument('--mni56', default=MNI56_INIT, type=float, help='56Ni mass at t=0 (msun)') parser.add_argument('--mime', default=MIME_INIT, type=float, help='IME mass (msun)') parser.add_argument('--dmige', default=DMIGE_INIT, type=float, help='mass interval over which stable IGE mass fraction transitions from 1 to 0 (msun)') parser.add_argument('--dmni56', default=DMNI56_INIT, type=float, help='mass interval over which 56Ni mass fraction transitions from 1 to 0 (msun)') parser.add_argument('--dmime', default=DMIME_INIT, type=float, help='mass interval over which IME mass fraction transitions from 1 to 0 (msun)') parser.add_argument('--dmtrans', default=None, type=float, help='to set dmige=dmni56=dmime=dmtrans in one go') parser.add_argument('--dmfine', default=DMFINE_INIT, type=float, help='resolution of fine grid of masses for transitions (msun)') parser.add_argument('--transprof', default=TRANSPROF_INIT, type=str, choices=['linear', 'invexpon','cosine'], help="transition profile for mass fraction variation from 1 to 0: 'linear', 'invexpon' (inverse exponential) or 'cosine' (cosine bell)") parser.add_argument('--transscl', default=TRANSSCL_INIT, type=float, help="scale factor for 'invexpon' (inverse exponential) transition profile") parser.add_argument('--xfracni', default=XIGEFRAC_NI, type=float, help='fraction of stable IGE mass as stable Ni; the rest gets set to stable Fe') parser.add_argument('--xfraco', default=XCOFRAC_O, type=float, help='fraction of unburnt C/O mass as O; the rest gets set to C') parser.add_argument('--xfracti', default=XFRACTI_INIT, type=float, help='fraction of mass in 56Ni and IME zones set to Ti') parser.add_argument('--ime', default=IME_INIT, type=str, help='comma-separated list of IMEs to include') parser.add_argument('--xfracime', default=XFRACIME_INIT, type=str, help='comma-separated list of relative IME fractions') parser.add_argument('--minxfrac', default=None, type=float, help='minimum mass fraction for output to file/plot') parser.add_argument('--fout', default=FOUT_INIT, type=str, help='output file name') parser.add_argument('--noplot', action='store_true', help='disable plotting of density/abundance profiles') parser.add_argument('--nowarn', action='store_true', help='disable warning messages') parser.add_argument('--debug', action='store_true', help='print various stuff for debugging') parser.add_argument('--test', action='store_true', help='for testing purposes') args = parser.parse_args() print('') print('#############################') print(' SN Ia toy model' ) print('#############################') # # check masses make sense # mtot = args.mtot mige = args.mige mni56 = args.mni56 mime = args.mime if (1.0 - (mni56 + mime)/mtot) < EPSILON and mige > EPSILON: print('') print('WARNING - 56Ni mass + IME mass = total mass; setting IGE mass to 0') mige = 0.0 mburnt = mige + mni56 + mime if mburnt > mtot: sys.exit("ERROR - burnt mass exceeds total mass! mtot, mburnt = {:.3f}, {:.3f} Msun".format(mtot, mburnt)) elif mni56 < EPSILON: sys.exit("ERROR - 56Ni mass must be > 0! mni56 = {:.3f} Msun".format(mni56)) elif mime < EPSILON: sys.exit("ERROR - IME mass must be > 0! 
mime = {:.3f} Msun".format(mime)) else: munbco = mtot - mburnt # unburnt mass # # check IMEs # imes = args.ime.split(',') nime = len(imes) for ii, ime in enumerate(imes): if ime not in IME_INIT: sys.exit("ERROR - IME {:s} not in default IME_INIT: {:s}".format(ime, IME_INIT)) if nime == 1: xfracimestr = ['1.0'] xfracime = [1.0] else: xfracimestr = args.xfracime.split(',')[:nime] xfracime = [float(xx) for xx in xfracimestr] xfracimetot = sum(xfracime) if np.abs(1.0 - 1.0/xfracimetot) > EPSILON: sys.exit("ERROR - relative IME mass fractions don't sum up to 1! sum(xfracime) = {:.5f}".format(xfracimetot)) # # check Ti fraction # xfracti = args.xfracti if (xfracti > MAXFRAC_TI): sys.exit("ERROR - xfracti ({:.4e}) cannot exceed MAXFRAC_TI ({:.4e})".format(xfracti, MAXFRAC_TI)) else: mti_ni56 = xfracti * mni56 # Ti mass in 56Ni zone mti_ime = xfracti * mime # Ti mass in IME zone mti = mti_ni56 + mti_ime print('') print('INFO - user-defined ejecta mass and composition:') print('') print(' Mtot = {:.4e} Msun'.format(mtot)) print(' M(stable IGE) = {:.4e} Msun of which {:.1f}% Fe and {:.1f}% Ni'.format(mige, (1.0-args.xfracni)*1e2, args.xfracni*1e2)) print(' M(56Ni) = {:.4e} Msun'.format(mni56)) sys.stdout.write(' M(IME) = {:.4e} Msun of which'.format(mime)) for ii, ime in enumerate(imes): sys.stdout.write(' {:.1f}% {:s}'.format(xfracime[ii]*1e2, ime.capitalize())) if ii == nime-1: print('') else: if ii == nime-2: sys.stdout.write(' and') else: sys.stdout.write(',') print(' M(unburnt C/O) = {:.4e} Msun of which {:.1f}% C and {:.1f}% O'.format(munbco, (1.0-args.xfraco)*1e2, args.xfraco*1e2)) if (xfracti > 0.0): print('') print(' NOTE: will replace {:.4e} Msun of 56Ni mass and {:.4e} Msun of IME mass with Ti'.format(mti_ni56, mti_ime)) # # check mass intervals dmX # if args.dmtrans is not None: dmige = args.dmtrans dmni56 = args.dmtrans dmime = args.dmtrans else: dmige = args.dmige dmni56 = args.dmni56 dmime = args.dmime # if there are no IGEs or unburnt C/O, set IGE or IME mass intervals to 0 if mige < EPSILON: mige = 0.0 dmige = 0.0 if munbco < EPSILON: munbco = 0.0 dmime = 0.0 # requirements on IGE/56Ni/IME/CO mass given mass intervals if mige < 0.5*dmige: sys.exit("ERROR - Need to increase IGE mass or decrease dM(IGE) as M(IGE) < dM(IGE)/2! mime, dmige = {:.3f}, {:.3f} Msun".format(mige, dmige)) if mni56 < 0.5*(dmige+dmni56): sys.exit("ERROR - Need to increase 56Ni mass or decrease dM(IGE)+dM(56Ni) as M(56Ni) < [dM(IGE)+dM(56Ni)]/2! mni56, dmige, dmni56 = {:.3f}, {:.3f}, {:.3f} Msun".format(mni56, dmige, dmni56)) if mime < 0.5*(dmni56+dmime): sys.exit("ERROR - Need to increase 56Ni mass or decrease dM(56Ni)+dM(IME) as M(56Ni) < [dM(56Ni)+dM(IME)]/2! mime, dmni56, dmime = {:.3f}, {:.3f}, {:.3f} Msun".format(mime, dmni56, dmime)) if munbco < 0.5*dmime: sys.exit("ERROR - Need to increase unburnt C/O mass or decrease dM(IME) as M(C/O) < dM(IME)/2! munbco, dmime = {:.3f}, {:.3f} Msun".format(munbco, dmime)) # compute mass coordinate at which mass fraction starts decreasing from 1 mcoord_ige = mige - 0.5*dmige # IGE mass fraction starts decreasing from 1 at this mass coordinate (unless M(IGE)=0!) mcoord_ni56 = mcoord_ige + mni56 + 0.5*(dmige-dmni56) # 56Ni mass fraction starts decreasing from 1 at this mass coordinate mcoord_ime = mcoord_ni56 + mime + 0.5*(dmni56-dmime) # IME mass fraction starts decreasing from 1 at this mass coordinate if args.debug: print('mcoord_ige, mcoord_ni56, mcoord_ime = {:.3f} {:.3f} {:.3f}'.format(mcoord_ige, mcoord_ni56, mcoord_ime)) # # compute Ekin based on W07, Eq. 
1 if --ekinw07 is set # # Ekin = 1.56 M(Ni) + 1.74 M(Fe) + 1.24 M(IME) - Eg + Eint # # (units=1e51 erg for Ekin, Eg, Eint; Msun for masses) # # NOTE: Eg and Eint correspond to MCh ejecta, so a warning is # issued if the requested total mass differs significantly from MCh if args.ekinw07: if np.abs(mtot-1.4) > 0.1: print('') print("WARNING - total mass differs significantly from MCh") zzz = input(" ===> apply Eq. 1 of W07 to determine Ekin anyway? [y/n] (default=n): ") if zzz == 'y': pass else: sys.exit("ERROR - exiting mk_snia_toy_model.py; adjust mtot or remove --ekinw07 option") ebind = 3.35 # gravitational binding energy for MCh WD from W07 (1e51 erg) eint = 2.89 # internal energy of MCh WD from W07 (1e51 erg) ekin = 1.56 * mni56 + 1.74 * mige + 1.24 * mime - ebind + eint print('') print('INFO - computed Ekin based on W07 = {:.4e} erg'.format(ekin*1e51)) else: ekin = args.ekin print('') print('INFO - input Ekin = {:.4e} erg'.format(ekin*1e51)) # # generate density profile at T_END # # NOTE: dens and vel are zone-centered # vel = [] # velocity coordinate in km/s rad = [] # radial coordinate in cm dens = [] # density in g/cm^3 dmass = [] # shell mass in Msun # ejecta are evolved to final time T_END (days) tend = args.tend tend_sec = tend * DAY2SEC # set innermost shell properties dvel = args.dvel # cell size in km/s v0 = 0.0 ; r0 = v0 * tend_sec * 1e5 v1 = v0 + dvel ; r1 = v1 * tend_sec * 1e5 vcen = 0.5*(v0+v1) rcen = 0.5*(r0+r1) if args.densprof == 'expon': print('') print('INFO - using exponential density profile') # compute e-folding velocity for density profile (see J99, line after Eq. A6) # ve = sqrt(Ekin / 6Mtot) (units=cgs) ve_cgs = np.sqrt(ekin*1e51 / (6*mtot*MSUN)) ve = ve_cgs * 1e-5 # cm/s -> km/s print(' computed e-folding velocity based on J99 = {:.0f} km/s'.format(ve)) # compute central density at T_END (see J99, Eq. A7) # rho_c,0 = Mtot / (8 PI ve^3 t^3) (units=cgs) rhoc0 = mtot * MSUN / (8 * np.pi * ve_cgs**3 * tend_sec**3) print(' computed central density based on J99 = {:.2e} gcc at {:.0f} d'.format(rhoc0, tend)) # compute rho @ zone center (rhocen) and mean density over [v0,v1] (rhoave = M/V = Int(rho dV) / V) z0 = v0/ve z1 = v1/ve zcen = 0.5*(z0+z1) rhocen = rhoc0 * np.exp(-zcen) rhoave = rhoc0 * 3.0 * (np.exp(-z0)*(z0**2+2.0*z0+2.0) - np.exp(-z1)*(z1**2+2.0*z1+2.0)) / (z1**3 - z0**3) elif args.densprof == 'power': densexp = args.densexp.split(',') exp_delta, exp_n = int(densexp[0]), int(densexp[1]) print('') print('INFO - using broken power-law density profile with delta, n = {:d}, {:d}'.format(exp_delta, exp_n)) if exp_delta >= 3 or exp_n <= 3: sys.exit("ERROR - we must have delta < 3 and n > 3 for broken power-law density profile! 
delta, n = {:d}, {:d}".format(exp_delta, exp_n)) # compute transition velocity for broken power-law density profile fac3 = (1.0/(3.0-exp_delta) + 1.0/(exp_n-3.0)) fac5 = (1.0/(5.0-exp_delta) + 1.0/(exp_n-5.0)) fac = fac3 / fac5 vt_cgs = np.sqrt(fac*2.0*ekin*1e51 / (mtot*MSUN)) vt = vt_cgs * 1e-5 # cm/s -> km/s print(' computed transition velocity based on K10 = {:.0f} km/s'.format(vt)) # compute central density at T_END rhoc0 = mtot*MSUN / (4 * np.pi * vt_cgs**3 * tend_sec**3) / fac3 print(' computed central density based on K10 = {:.2e} gcc at {:.0f} d'.format(rhoc0, tend)) # compute rho @ zone center (rhocen) and mean density over [v0,v1] (rhoave = M/V = Int(rho dV) / V) rhocen = rhoc0 * (vcen/vt)**(-exp_delta) rhoave = rhoc0 * 3.0 * (v1**(3.0-exp_delta) - v0**(3.0-exp_delta)) / (vt**(-exp_delta) * (3.0-exp_delta)) / (v1**3 - v0**3) else: sys.exit("ERROR - unknown density profile: {:s}!".format(args.densprof)) if args.debug: rhodiff = 1.0 - rhocen/rhoave print('rhoave, rhocen, diff = {:.4e} {:.4e} {:.4e}'.format(rhoave, rhocen, rhodiff)) dvol = 4./3.*np.pi*(r1**3 - r0**3) dm = dvol * rhoave / MSUN # to be consistent with mean density vel.append(vcen) # velocity at zone center rad.append(rcen) # radius at zone center dens.append(rhoave) # mean density over [v0,v1] dmass.append(dm) # mass in zone = Int(rho dV) while (1.0-sum(dmass)/mtot) > 1e-4: v0 += dvel ; r0 = v0 * tend_sec * 1e5 v1 = v0 + dvel ; r1 = v1 * tend_sec * 1e5 vcen = 0.5*(v0+v1) rcen = 0.5*(r0+r1) # compute rho @ zone center (rhocen) and mean density over [v0,v1] (rhoave = M/V = Int(rho dV) / V) if args.densprof == 'expon': z0 = v0/ve z1 = v1/ve zcen = 0.5*(z0+z1) rhocen = rhoc0 * np.exp(-zcen) rhoave = rhoc0 * 3.0 * (np.exp(-z0)*(z0**2+2.0*z0+2.0) - np.exp(-z1)*(z1**2+2.0*z1+2.0)) / (z1**3 - z0**3) elif args.densprof == 'power': if v1 <= vt: rhocen = rhoc0 * (vcen/vt)**(-exp_delta) rhoave = rhoc0 * 3.0 * (v1**(3.0-exp_delta) - v0**(3.0-exp_delta)) / (vt**(-exp_delta) * (3.0-exp_delta)) / (v1**3 - v0**3) elif v0 >= vt: rhocen = rhoc0 * (vcen/vt)**(-exp_n) rhoave = rhoc0 * 3.0 * (v1**(3.0-exp_n) - v0**(3.0-exp_n)) / (vt**(-exp_n) * (3.0-exp_n)) / (v1**3 - v0**3) else: # special treatment for cell that contains the break if vcen <= vt: rhocen = rhoc0 * (vcen/vt)**(-exp_delta) else: rhocen = rhoc0 * (vcen/vt)**(-exp_n) numer0 = (vt**(3.0-exp_delta) - v0**(3.0-exp_delta)) / (vt**(-exp_delta) * (3.0-exp_delta)) numer1 = (v1**(3.0-exp_n) - vt**(3.0-exp_n)) / (vt**(-exp_n) * (3.0-exp_n)) rhoave = rhoc0 * 3.0 * (numer0 + numer1) / (v1**3 - v0**3) if args.debug: rhodiff = 1.0 - rhocen/rhoave print('rhoave, rhocen, diff = {:.4e} {:.4e} {:.4e}'.format(rhoave, rhocen, rhodiff)) dvol = 4./3.*np.pi*(r1**3 - r0**3) dm = dvol * rhoave / MSUN # to be consistent with mean density vel.append(vcen) # velocity at zone center rad.append(rcen) # radius at zone center dens.append(rhoave) # mean density over [v0,v1] dmass.append(dm) # mass in zone = Int(rho dV) # convert lists to arrays vel = np.array(vel) rad = np.array(rad) dens = np.array(dens) dmass = np.array(dmass) nd = vel.size # number of zones if args.debug: print('nd = ',nd) # Lagrangian mass coordinate (corresponds to outer zone boundary) mass = np.cumsum(dmass) # # set abundances for stable IGEs, 56Ni, IMEs, unburnt C/O # if dmige+dmni56+dmime > EPSILON: print('') print('INFO - connecting abundance profiles with {:s} function'.format(args.transprof)) print('') if mige > EPSILON and dmige > EPSILON: print(' stable IGE -> 56Ni zone over mass interval [{:.4e},{:.4e}] Msun'.format(mcoord_ige, 
mcoord_ige+dmige)) if dmni56 > EPSILON: print(' 56Ni -> IME zone over mass interval [{:.4e},{:.4e}] Msun'.format(mcoord_ni56, mcoord_ni56+dmni56)) if munbco > EPSILON and dmime > EPSILON: print(' IME -> unburnt C/O zone over mass interval [{:.4e},{:.4e}] Msun'.format(mcoord_ime, mcoord_ime+dmime)) # first calculate the abundance profiles on a high resolution grid of masses dmfine = args.dmfine mass_fine = np.arange(dmfine, mass[-1]+dmfine, dmfine) N_fine = len(mass_fine) dm_fine = np.ones(N_fine)*dmfine xige_fine = np.zeros(N_fine) xni56_fine = np.zeros(N_fine) xime_fine = np.zeros(N_fine) xunbco_fine = np.zeros(N_fine) for i in range(N_fine): if mass_fine[i] <= mcoord_ige: xige_fine[i] = 1.0 elif mass_fine[i] <= mcoord_ige + dmige: if args.transprof == 'linear': xige_fine[i] = (mcoord_ige - mass_fine[i]) / dmige + 1.0 elif args.transprof == 'invexpon': xige_fine[i] = 1.0 / (np.exp(args.transscl * (mass_fine[i] - (mcoord_ige + dmige/2.0))) + 1.0) elif args.transprof == 'cosine': xige_fine[i] = 1.0 - (1.0 - np.cos(np.pi*(mass_fine[i] - mcoord_ige) / dmige)) / 2.0 xni56_fine[i] = 1.0 - xige_fine[i] elif mass_fine[i] < mcoord_ni56: xni56_fine[i] = 1.0 elif mass_fine[i] <= mcoord_ni56 + dmni56: if args.transprof == 'linear': xni56_fine[i] = (mcoord_ni56 - mass_fine[i]) / dmni56 + 1.0 elif args.transprof == 'invexpon': xni56_fine[i] = 1.0 / (np.exp(args.transscl * (mass_fine[i] - (mcoord_ni56 + dmni56/2.0))) + 1.0) elif args.transprof == 'cosine': xni56_fine[i] = 1.0 - (1.0 - np.cos(np.pi*(mass_fine[i] - mcoord_ni56) / dmni56)) / 2.0 xime_fine[i] = 1.0 - xni56_fine[i] elif mass_fine[i] <= mcoord_ime: xime_fine[i] = 1.0 elif mass_fine[i] <= mcoord_ime + dmime: if args.transprof == 'linear': xime_fine[i] = (mcoord_ime - mass_fine[i]) / dmime + 1.0 elif args.transprof == 'invexpon': xime_fine[i] = 1.0 / (np.exp(args.transscl * (mass_fine[i] - (mcoord_ime + dmime/2.0))) + 1.0) elif args.transprof == 'cosine': xime_fine[i] = 1.0 - (1.0 - np.cos(np.pi*(mass_fine[i] - mcoord_ime) / dmime)) / 2.0 xunbco_fine[i] = 1.0 - xime_fine[i] else: xunbco_fine[i] = 1.0 if args.debug: print(mass_fine[i], xige_fine[i], xni56_fine[i], xime_fine[i], xunbco_fine[i]) # Now map the high resolution grid to the actual grid xige = change_mass_res(dm_fine, xige_fine, dmass) xni56 = change_mass_res(dm_fine, xni56_fine, dmass) xime = change_mass_res(dm_fine, xime_fine, dmass) xunbco = change_mass_res(dm_fine, xunbco_fine, dmass) # replace part of 56Ni and IME mass with Ti xti = (xni56 + xime) * xfracti xni56 = xni56 * (1.0 - xfracti) xime = xime * (1.0 - xfracti) # calculate gamma-ray escape time Sig_tot_t2 = total_column_density_cgs((vel + dvel/2.0)*1e5, dmass*MSUN, xni56) t0_gamma = np.sqrt(Sig_tot_t2 * KAPPA_GAMMA) print('') print('INFO - final ejecta has {:d} zones with Vmax = {:.4e} km/s and'.format(nd, vel.max())) print('') print(' Mtot = {:.4e} Msun'.format(np.sum(dmass))) print(' Ekin = {:.4e} erg'.format(5e9 * np.sum(dmass*MSUN * vel**2))) # 5e9 = 0.5 * 1e10 i.e. 
1/2 factor * (km/s->cm/s)^2 print(' M(stable IGE) = {:.4e} Msun of which {:.1f}% Fe and {:.1f}% Ni'.format(np.sum(dmass*xige), (1.0-args.xfracni)*1e2, args.xfracni*1e2)) print(' M(56Ni,t=0) = {:.4e} Msun'.format(np.sum(dmass*xni56))) sys.stdout.write(' M(IME) = {:.4e} Msun of which'.format(np.sum(dmass*xime))) for ii, ime in enumerate(imes): sys.stdout.write(' {:.1f}% {:s}'.format(xfracime[ii]*1e2, ime.capitalize())) if ii == nime-1: print('') else: if ii == nime-2: sys.stdout.write(' and') else: sys.stdout.write(',') print(' M(unburnt C/O) = {:.4e} Msun of which {:.1f}% C and {:.1f}% O'.format(np.sum(dmass*xunbco), (1.0-args.xfraco)*1e2, args.xfraco*1e2)) if (xfracti > 0.0): print('') print(' NOTE: M(Ti) = {:.4e} Msun in 56Ni and IME zones'.format(np.sum(dmass*xti))) print('') print('INFO - gamma-ray escape time is t0_gamma = {:.2f} days'.format(t0_gamma/DAY2SEC)) # # account for 56Ni decay between t~0 and T_END # decay_const_ni56 = np.log(2) / THALF_56NI / DAY2SEC decay_const_co56 = np.log(2) / THALF_56CO / DAY2SEC t1 = np.exp(-decay_const_ni56 * tend_sec) t2 = np.exp(-decay_const_co56 * tend_sec) t3 = decay_const_ni56 * (t2-t1) / (decay_const_ni56 - decay_const_co56) xni56_old = xni56.copy() xni56 = xni56_old * t1 xco56 = xni56_old * t3 # assumes X(56Co)=0 at t=0 xfe56 = xni56_old * (1.0-t1-t3) # assumes X(56Co)=X(56Fe from 56Ni decay)=0 at t=0 print('') print('INFO - accounted for 56Ni decay at t = {:.2f} d:'.format(tend)) print('') print(' M(56Ni) = {:.4e} Msun'.format(np.sum(dmass*xni56))) print(' M(56Co) = {:.4e} Msun'.format(np.sum(dmass*xco56))) print(' M(56Fe) = {:.4e} Msun'.format(np.sum(dmass*xfe56))) # # set individual IGE abundances # xni_stable = xige * args.xfracni xfe_stable = xige * (1.0 - args.xfracni) xni = xni_stable + xni56 xco = xco56.copy() xfe = xfe_stable + xfe56 # xfe56 stands for 56Fe from 56Co decay # # set individual IME abundances (Mg, Si, S, Ca) # # initialize individual IME mass fractions ximeindiv = {} # dictionary containing IME name and associated mass fraction array, e.g. ximeindiv['si'] for ime in IME_INIT: ximeindiv[ime] = np.zeros(nd) # set individual IME mass fractions for ii, ime in enumerate(imes): ximeindiv[ime] = xfracime[ii] * xime # # set unburnt C/O abundances # xo = xunbco * args.xfraco xc = xunbco * (1.0 - args.xfraco) # # check mass fraction normalization # (we don't include xti in xtot since Ti simply replaces some 56Ni + IMEs) # xtot = xni + xco + xfe + xo + xc for ime in imes: xtot += ximeindiv[ime] for i in range(nd): t1 = 1.0 - 1.0/xtot[i] if np.abs(t1) > 1e-3: if not args.nowarn: print('WARNING - Mass fraction not normalized at depth '+str(i)+' : (1 - 1/Xtot) is '+str(t1)) # set minimum mass fraction here (after nomalization check!) 
if args.minxfrac is not None: if args.minxfrac > MAXMINXFRAC: sys.exit("ERROR - cannot set minxfrac > {:.4e}: {:.4e}".format(MAXMINXFRAC, args.minxfrac)) print('') print('INFO - will set mass fractions of > {:.4e} (apart from 56Ni/Co/Fe!)'.format(args.minxfrac)) ### IGEs if np.sum(xni_stable) > 0.0: xni_stable[np.where(xni_stable < args.minxfrac)] = args.minxfrac xni = xni_stable + xni56 if np.sum(xfe_stable) > 0.0: xfe_stable[np.where(xfe_stable < args.minxfrac)] = args.minxfrac xfe = xfe_stable + xfe56 # xfe56 stands for 56Fe from 56Co decay xige = xni_stable + xfe_stable ### Titanium if np.sum(xti) > 0.0: xti[np.where(xti < args.minxfrac)] = args.minxfrac ### IMEs for ime in imes: ximetmp = ximeindiv[ime].copy() ximetmp[np.where(ximetmp < args.minxfrac)] = args.minxfrac ximeindiv[ime] = ximetmp.copy() for i in range(nd): xime[i] = 0.0 for ime in imes: xime[i] += ximeindiv[ime][i] ### unburnt C/O xo[np.where(xo < args.minxfrac)] = args.minxfrac xc[np.where(xc < args.minxfrac)] = args.minxfrac xunbco = xc + xo # # compute temperate at T_END (days), assuming radiation-dominated gas, no diffusion, local deposition # print('') print('INFO - computing final temperature at t = {:.2f} d'.format(tend)) tauni = 1.0 / decay_const_ni56 tauco = 1.0 / decay_const_co56 fco = decay_const_ni56 / (decay_const_ni56 - decay_const_co56) expni = np.exp(-decay_const_ni56 * tend_sec) expco = np.exp(-decay_const_co56 * tend_sec) # integration of exp(-t/tauni)*t from 0 to tend: inttexpni = tauni * (tauni-expni * (tauni+tend_sec)) # integration of exp(-t/tauco)*t from 0 to tend: inttexpco = tauco * (tauco-expco * (tauco+tend_sec)) # time-weighted integral of deposition from Ni (per Ni nucleous at t=0) qtdtni = inttexpni / tauni * EDECAY_56NI * MEV2ERG # time-weighted integral of deposition from Co (per Ni nucleous at t=0) qtdtco = fco / tauco * (inttexpco-inttexpni) * EDECAY_56CO * MEV2ERG # total qtdt = qtdtni + qtdtco # time-weighted integral of deposition (per Ni mass at t=0) qtdt_per_mass = qtdt / (MASS_56NI*AMU) print('') print(' computed time-weighted integral of decay energy:') print('') print(' qtdt_per_nucleous = {:.4e} erg s'.format(qtdt)) print(' qtdt_per_mass = {:.4e} erg s/g'.format(qtdt_per_mass)) # calculate temperature analytically from t*E(t) = Int{t*Q(t) dt} (see K13) # NOTE: we ignore initial internal energy, t0*E(t0) << Int{Q(t) t dt} temp = ( qtdt_per_mass * dens * xni56_old / tend_sec / ARAD )**(0.25) # set minimal temperature temp = (temp >= args.tempmin)*temp + (args.tempmin > temp)*args.tempmin print('') sys.stdout.write(' ===> maximum temperature is {:.4e} K'.format(temp.max())) if temp.argmax().size == 1: print(' at {:.4e} km/s'.format(vel[temp.argmax()])) else: print('') # display final abundances print('') print('INFO - final elemental abundances at t = {:.2f} d'.format(tend)) print('') print(' M(Ni) = {:.4e} Msun'.format(np.sum(dmass*xni))) print(' M(Co) = {:.4e} Msun'.format(np.sum(dmass*xco))) print(' M(Fe) = {:.4e} Msun'.format(np.sum(dmass*xfe))) if (xfracti > 0.0): print(' M(Ti) = {:.4e} Msun in 56Ni and IME zones'.format(np.sum(dmass*xti))) for ime in imes: print(' M({:2s}) = {:.4e} Msun'.format(ime.capitalize(), np.sum(dmass*ximeindiv[ime]))) print(' M(C ) = {:.4e} Msun'.format(np.sum(dmass*xc))) print(' M(O ) = {:.4e} Msun'.format(np.sum(dmass*xo))) # # output model to file # with open(args.fout, 'w') as f: ### header f.write('### SNIa toy model generated with mk_snia_toy_model.py version {:s}\n'.format(VERSION)) f.write('#\n') f.write('## Adopted constants:\n') 
f.write('#\n') f.write('# MSUN = {:.8e} g\n'.format(MSUN)) f.write('# 56Ni half-life THALF_56NI = {:.8e} day\n'.format(THALF_56NI)) f.write('# 56Co half-life THALF_56CO = {:.8e} day\n'.format(THALF_56CO)) f.write('# Energy per 56Ni decay EDECAY_56NI = {:.8e} MeV\n'.format(EDECAY_56NI)) f.write('# Energy per 56Co decay EDECAY_56CO = {:.8e} MeV\n'.format(EDECAY_56CO)) f.write('#\n') f.write('## Ejecta parameters (actual vs. requested):\n') f.write('#\n') f.write('# Mtot = {:.4e} Msun ({:.4e} requested)\n'.format(np.sum(dmass), mtot)) f.write('# Ekin = {:.4e} erg ({:.4e} requested)'.format(5e9 * np.sum(dmass * vel**2) * MSUN, ekin*1e51)) # 5e9 = 0.5 * 1e10; 1e10 = (km/s->cm/s)^2 if args.ekinw07: f.write(' (computed using Eq. 1 of Woosley et al. (2007), ApJ, 662, 487)\n') else: f.write('\n') f.write('# nzones = {:4d} ; cell size dvel = {:.4e} km/s ; Vmax = {:.4e} km/s\n'.format(nd, dvel, vel.max())) f.write('# density profile = {:s}'.format(args.densprof)) if args.densprof == 'power': f.write(' (with exponents (delta, n) = {:s})\n'.format(args.densexp)) else: f.write('\n') f.write('# time at which radius, temperature and abundances are calculated, tend = {:.2f} DAYS\n'.format(tend)) f.write('# M(stable IGE) = {:.4e} Msun ({:.4e} requested) with relative (Ni, Fe) fractions = {:.2f}, {:.2f}\n'.format(np.sum(dmass*xige), mige, args.xfracni, 1.0-args.xfracni)) f.write('# M(56Ni,t=0) = {:.4e} Msun ({:.4e} requested)\n'.format(np.sum(dmass*xni56_old), mni56)) imestr = ', '.join([ii.capitalize() for ii in imes]) xfracimestr = ', '.join(xfracimestr) f.write('# M(IME) = {:.4e} Msun ({:.4e} requested); IMEs = {:s} with relative fractions = {:s}\n'.format(np.sum(dmass*xime), mime, imestr, xfracimestr)) if (xfracti > 0.0): f.write('# M(Ti) = {:.4e} Msun ({:.4e} requested) in 56Ni and IME zones\n'.format(np.sum(dmass*xti), mti)) f.write('# M(unburnt C/O) = {:.4e} Msun with relative (O, C) fractions = {:.2f}, {:.2f}\n'.format(np.sum(dmass*xunbco), args.xfraco, 1.0-args.xfraco)) f.write('# dM(IGE,56Ni,IME) = {:.2f}, {:.2f}, {:.2f} Msun'.format(dmige, dmni56, dmime)) f.write('; transition profile = {:s}\n'.format(args.transprof)) f.write('# ni56 weighted, average column density={:,.2e} g cm^-2 s^2'.format(Sig_tot_t2)) f.write('; t0_gamma={:.2f} day\n'.format(t0_gamma/DAY2SEC) ) if args.transprof == 'invexpon': f.write(' (with scale factor = {:.2e})\n'.format(args.transscl)) else: f.write('\n') f.write('#\n') f.write('# COLUMNS:\n') f.write('#\n') f.write('# (1) zone index\n') f.write('# (2) velocity at zone center [km/s]\n') f.write('# (3) zone mass [Msun]\n') f.write('# (4) Lagrangian mass coordinate (corresponding to outer zone boundary) [Msun]\n') f.write('# (5) IGE mass fraction at t=0\n') f.write('# (6) 56Ni mass fraction at t=0\n') f.write('# (7) IME mass fraction\n') f.write('# (8) Ti mass fraction\n') f.write('# (9) unburnt CO mass fraction\n') f.write('# (10) radius at zone center [cm] = velocity * time_since_explosion (homologous expansion)\n') f.write('# (11) mean density over zone [g/cm^3] (*not* density at zone center)\n') f.write('# (12) temperature [K]\n') if (xfracti > 0.0): nxfraccols = nime + 19 else: nxfraccols = nime + 18 f.write('# (13)-({:d}) mass fractions\n'.format(nxfraccols)) f.write('#\n') f.write('# NOTES ON MASS FRACTIONS: (14) X_Ni includes X_56Ni; (15) X_Co = X_56Co; (16) X_Fe includes 56Fe from 56Co decay\n') f.write('#\n') # Time-independent variables: f.write('{:4s} {:10s} {:10s} {:10s} {:10s} {:10s} {:10s} {:10s} 
{:10s}'.format('#idx','vel[km/s]','dmass[Msun]','mass[Msun]','X_IGE0','X_56Ni0','X_IME','X_Ti','X_CO')) # Time-dependent variables: f.write(' {:10s} {:10s} {:10s}'.format('rad[cm]','dens[gcc]','temp[K]')) # Time-dependent abundances: f.write(' {:10s} {:10s} {:10s} {:10s}'.format('X_56Ni','X_Ni','X_Co','X_Fe')) # Time-independent abundances: if (xfracti > 0.0): f.write(' {:10s}'.format('X_Ti')) for ime in imes: f.write(' {:10s}'.format('X_'+ime.capitalize())) f.write(' {:10s} {:10s}'.format('X_O','X_C')) f.write('\n') f.write('{:4s} {:10s} {:10s} {:10s}'.format('#(1)','(2)','(3)','(4)')) f.write(' {:10s} {:10s} {:10s} {:10s} {:10s}'.format('(5)','(6)','(7)','(8)','(9)')) f.write(' {:10s} {:10s} {:10s}'.format('(10)','(11)','(12)')) f.write(' {:10s} {:10s} {:10s} {:10s}'.format('(13)','(14)','(15)','(16)')) idxcol = 16 if (xfracti > 0.0): idxcol += 1 f.write(' {:10s}'.format('('+str(idxcol)+')')) for ime in imes: idxcol += 1 f.write(' {:10s}'.format('('+str(idxcol)+')')) f.write(' {:10s} {:10s}'.format('('+str(idxcol+1)+')', '('+str(idxcol+2)+')')) f.write('\n') ### model for i in range(nd): f.write('{:4d} {:.4e} {:.4e} {:.4e}'.format(i+1, vel[i], dmass[i], mass[i])) f.write(' {:.4e} {:.4e} {:.4e} {:.4e} {:.4e}'.format(xige[i], xni56_old[i], xime[i], xti[i], xunbco[i])) f.write(' {:.4e} {:.4e} {:.4e}'.format(rad[i], dens[i], temp[i])) f.write(' {:.4e} {:.4e} {:.4e} {:.4e}'.format(xni56[i], xni[i], xco[i], xfe[i])) if (xfracti > 0.0): f.write(' {:.4e}'.format(xti[i])) for ime in imes: f.write(' {:.4e}'.format(ximeindiv[ime][i])) f.write(' {:.4e} {:.4e}'.format(xo[i], xc[i])) f.write('\n') if args.debug: os.system('cat '+args.fout) # # plot # if not args.noplot: print('') print('INFO - on to plot') plt.ion() fig = plt.figure(figsize=(10, 8)) fig.subplots_adjust(left=.1, bottom=.1, right=.95, top=.95, hspace=.3) # density profile ax = fig.add_subplot(321) ax.set_xlabel('Velocity [10$^3$ km s$^{-1}$]') ax.set_ylabel('Log$_{10}$ Density [g cm$^{-3}$]') ax.set_xlim(0., vel.max()/1e3) ax.plot(vel/1e3, np.log10(dens), marker='.', label='$t = {:.2f}$ day'.format(tend)) ax.legend() ax = fig.add_subplot(322) ax.set_xlabel('Mass [M$_\odot$]') ax.set_ylabel('Log$_{10}$ Density [g cm$^{-3}$]') ax.set_xlim(0., mtot) ax.plot(mass, np.log10(dens), marker='.', label='$t = {:.2f}$ day'.format(tend)) # temperature profile ax = fig.add_subplot(323) ax.set_xlabel('Velocity [10$^3$ km s$^{-1}$]') ax.set_ylabel('Log$_{10}$ Temperature [K]') ax.set_xlim(0., vel.max()/1e3) ax.plot(vel/1e3, np.log10(temp), marker='.', label='$t = {:.2f}$ day'.format(tend)) ax.legend() ax = fig.add_subplot(324) ax.set_xlabel('Mass [M$_\odot$]') ax.set_ylabel('Log$_{10}$ Temperature [K]') ax.set_xlim(0., mtot) ax.plot(mass, np.log10(temp), marker='.', label='$t = {:.2f}$ day'.format(tend)) # abundance profiles - show grid points to check resolution ax = fig.add_subplot(325) ax.set_xlabel('Velocity [10$^3$ km s$^{-1}$]') ax.set_ylabel('Mass Fraction $X_i$') ax.set_xlim(0., vel.max()/1e3) ax.set_ylim(-.05,1.05) ax.plot(vel/1e3, xige, marker='.', label='stable IGE') ax.plot(vel/1e3, xni56, marker='.', label='$^{56}$Ni') ax.plot(vel/1e3, xime, marker='.', label='IME') ax.plot(vel/1e3, xunbco, marker='.', label='unburnt C/O') ax.legend(fontsize='small', loc='center right') ax = fig.add_subplot(326) ax.set_xlabel('Mass [M$_\odot$]') ax.set_ylabel('Mass Fraction $X_i$') ax.set_xlim(0., mtot) ax.set_ylim(-.05,1.05) ax.plot(mass, xige, marker='.', label='stable IGE') ax.plot(mass, xni56, marker='.', label='$^{56}$Ni') ax.plot(mass, xime, 
marker='.', label='IME') ax.plot(mass, xunbco, marker='.', label='unburnt C/O') plt.show() print('') zzz = input("===> Hit <return> to quit <===") ax.clear() print('') print('############################' + '#' * len(args.fout)) print(' THE END - see output file ' + args.fout) print('############################' + '#' * len(args.fout)) print('')
{"hexsha": "852429c0ca57ad88bdb2e2a84f5956263a908c7d", "size": 51080, "ext": "py", "lang": "Python", "max_stars_repo_path": "mk_snia_toy_model.py", "max_stars_repo_name": "sblondin2605/snia_toy_model", "max_stars_repo_head_hexsha": "b9ea7e4f4af78147a7df494bbebc25ea2b487b6b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-06-19T08:35:39.000Z", "max_stars_repo_stars_event_max_datetime": "2018-06-19T08:35:39.000Z", "max_issues_repo_path": "mk_snia_toy_model.py", "max_issues_repo_name": "sblondin2605/snia_toy_model", "max_issues_repo_head_hexsha": "b9ea7e4f4af78147a7df494bbebc25ea2b487b6b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mk_snia_toy_model.py", "max_forks_repo_name": "sblondin2605/snia_toy_model", "max_forks_repo_head_hexsha": "b9ea7e4f4af78147a7df494bbebc25ea2b487b6b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-06-19T08:35:50.000Z", "max_forks_repo_forks_event_max_datetime": "2018-06-19T08:35:50.000Z", "avg_line_length": 46.819431714, "max_line_length": 251, "alphanum_fraction": 0.5739036805, "include": true, "reason": "import numpy", "num_tokens": 16004}
import pandas as pd
import numpy as np

import sys
sys.path.append('./')

from train_base import write_csv, read_info, convert_to_loader, _run_language
from util import argparser

full_results = [['lang', 'artificial', 'avg_len', 'test_shannon', 'test_loss', 'test_acc', 'val_loss', 'val_acc', 'best_epoch']]


def get_data_loaders(ffolder, lang, is_devoicing, token_map, args, artificial=True):
    _, _, data_split, _, _ = read_info()
    return _get_data_loaders(data_split, ffolder, lang, is_devoicing, token_map, args, artificial=artificial)


def _get_data_loaders(data_split, ffolder, lang, is_devoicing, token_map, args, artificial=True):
    df = read_artificial_data(ffolder, lang, is_devoicing)
    test_split = filter_test_split(df, data_split[2])

    train_loader = get_data_loader(df, data_split[0], token_map, 'train', args, artificial=artificial)
    val_loader = get_data_loader(df, data_split[1], token_map, 'val', args, artificial=artificial)
    test_loader = get_data_loader(df, test_split, token_map, 'test', args, artificial=artificial)

    get_phones_info(df, artificial, args)

    return train_loader, val_loader, test_loader


def filter_test_split(df, raw_test_split):
    df = df[df[0].str.match('.*::N$')]
    concepts = list(df[0].unique())
    test_split = [x for x in raw_test_split if x in concepts]
    print('Test %d' % (len(test_split)))
    return test_split


def get_data_loader(df, concepts, token_map, mode, args, artificial=True):
    data = split_data(df, concepts, token_map, mode, args, artificial=artificial)
    return convert_to_loader(data, mode)


def get_phones_info(df, artificial, args):
    col = args.col_artificial if artificial else args.col_normal
    phones = set([y for x in df[col].values for y in x])
    print('Phones %d' % len(phones))


def read_artificial_data(ffolder, lang, is_devoicing):
    artificial_folder = 'devoicing' if is_devoicing else 'harmony'
    return pd.read_csv('%s/artificial/%s/%s2' % (ffolder, artificial_folder, lang), delimiter='\t', header=None)


def split_data(df, concepts, token_map, mode, args, artificial=True):
    col = args.col_artificial if artificial else args.col_normal
    df_partial = df[df[0].isin(concepts)]
    data_partial = df_partial[col].values
    print('Differences %s: %d\tNo difference: %d' %
          (mode,
           (df_partial[args.col_normal] != df_partial[args.col_artificial]).sum(),
           (df_partial[args.col_normal] == df_partial[args.col_artificial]).sum()))

    max_len = max([len(x) for x in data_partial])
    data = np.zeros((len(data_partial), max_len + 2))
    data.fill(token_map['PAD'])
    for i, string in enumerate(data_partial):
        _data = [token_map['SOW']] + [token_map[x] for x in string.split(' ')] + [token_map['EOW']]
        data[i, :len(_data)] = _data

    return data


def run_artificial_language(lang, is_devoicing, token_map, concept_ids, ipa_to_concepts, args,
                            artificial=True, embedding_size=None, hidden_size=256, nlayers=1, dropout=0.2):
    train_loader, val_loader, test_loader = get_data_loaders(
        args.ffolder, lang, is_devoicing, token_map, args, artificial=artificial)
    return _run_language(
        '%s %s' % (lang, 'art' if artificial else 'norm'),
        train_loader, val_loader, test_loader, token_map, ipa_to_concepts, args,
        embedding_size=embedding_size, hidden_size=hidden_size, nlayers=nlayers, dropout=dropout)


def get_languages(is_devoicing=True):
    devoicing_langs = ['deu', 'nld']
    harmony_langs = ['bua', 'ckt', 'evn', 'fin', 'hun', 'khk', 'mhr', 'mnc', 'myv', 'tel', 'tur']
    return devoicing_langs if is_devoicing else harmony_langs


def add_new_symbols_to_vocab(token_map):
    new_symbols = ['g', 'ʉʲ', 'uʲ', 'ɹʲʲ', 'iʲʲ', 'ɵʲ', 'õ̃', 'ʋ̃', 'ũ̃', 'ĩ̃', 'ʌ̃̃', 'ẽ̃']
    for symb in new_symbols:
        token_map[symb] = max(token_map.values()) + 1
    return token_map


def fill_artificial_args(args):
    args.is_devoicing = (args.artificial_type == 'devoicing')
    args.col_artificial = 3 if args.is_devoicing else 2
    args.col_normal = 2 if args.is_devoicing else 3


def run_languages(args):
    print('------------------- Start -------------------')
    _, token_map, data_split, concept_ids, ipa_to_concepts = read_info()
    languages = get_languages(is_devoicing=args.is_devoicing)
    token_map = add_new_symbols_to_vocab(token_map)
    print('Train %d, Val %d, Test %d' % (len(data_split[0]), len(data_split[1]), len(data_split[2])))

    results = [['lang', 'avg_len', 'test_shannon', 'test_loss', 'test_acc', 'val_loss', 'val_acc']]
    for i, lang in enumerate(languages):
        for artificial in [True, False]:
            print()
            print('%d. %s %s' % (i, lang, 'artificial' if artificial else 'default'))
            avg_len, shannon, test_shannon, test_loss, \
                test_acc, best_epoch, val_loss, val_acc = run_artificial_language(
                    lang, args.is_devoicing, token_map, concept_ids, ipa_to_concepts, args, artificial=artificial)

            results += [['%s %s' % (lang, 'art' if artificial else 'norm'), avg_len, shannon, test_shannon,
                         test_loss, test_acc, best_epoch, val_loss, val_acc]]
            write_csv(results, '%s/artificial__%s__results.csv' % (args.rfolder, args.model))

    write_csv(results, '%s/artificial__%s__results-final.csv' % (args.rfolder, args.model))


if __name__ == '__main__':
    args = argparser.parse_args(csv_folder='artificial/%s/normal')
    assert args.data == 'northeuralex', 'this script should only be run with northeuralex data'
    fill_artificial_args(args)

    run_languages(args)
{"hexsha": "a454e2ab72d06fa2eb9341ae81746261c70849b3", "size": 5701, "ext": "py", "lang": "Python", "max_stars_repo_path": "learn_layer/train_artificial.py", "max_stars_repo_name": "tpimentelms/phonotactic-complexity", "max_stars_repo_head_hexsha": "70d0a9e45943096d7640eaf7277033e3920408c9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-04-17T20:46:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-02T10:32:00.000Z", "max_issues_repo_path": "learn_layer/train_artificial.py", "max_issues_repo_name": "tpimentelms/phonotactic-complexity", "max_issues_repo_head_hexsha": "70d0a9e45943096d7640eaf7277033e3920408c9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "learn_layer/train_artificial.py", "max_forks_repo_name": "tpimentelms/phonotactic-complexity", "max_forks_repo_head_hexsha": "70d0a9e45943096d7640eaf7277033e3920408c9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.8538461538, "max_line_length": 114, "alphanum_fraction": 0.6788282757, "include": true, "reason": "import numpy", "num_tokens": 1575}
from math import ceil import os import colorcet as cc import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1 import make_axes_locatable import numpy as np from dataset import add_chunk_to_arr, get_test_data, reconstruct from model import get_model from utils import ex, round_down # Ignores TensorFlow CPU messages. os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" @ex.capture def plots(bayesian, predict_dir, images_dir, lower_percentile, upper_percentile): prefix = "/bayesian/bayesian_" if bayesian else "/dropout/dropout_" sigmoids = np.load(predict_dir + prefix + "sigmoids.npy") pred = np.load(predict_dir + prefix + "pred.npy") test = np.load(predict_dir + "/test.npy") test_targets = np.load(predict_dir + "/test_targets.npy") lower, upper = np.percentile(sigmoids, [lower_percentile, upper_percentile], axis=0) unc = upper - lower # Plots four slices from each output numpy array. four_slices = range(test.shape[0] // 5, test.shape[0], test.shape[0] // 5) for i in four_slices: pred_slice = pred[i, :, :].squeeze() unc_slice = unc[i, :, :].squeeze() trg = test_targets[i, :, :].squeeze() img = test[i, :, :].squeeze() # Adds color bar to uncertainty map and saves. fig, ax = plt.subplots() divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) im = ax.imshow(unc_slice, cmap=cc.cm.CET_L19) fig.colorbar(im, cax=cax, orientation="vertical") plt.savefig(images_dir + prefix + "unc_{}.png".format(i)) plt.close() # Saves image plots. plt.imsave(images_dir + prefix + "pred_{}.png".format(i), pred_slice, cmap="Greys") plt.imsave(images_dir + "/img_{}.png".format(i), img, cmap="Greys") plt.imsave(images_dir + "/target_{}.png".format(i), trg, cmap="Greys") @ex.capture def save_predictions(sigmoids, pred, test, test_targets, bayesian, predict_dir, images_dir): """Saves results of predictions.""" os.makedirs(predict_dir + "/bayesian", exist_ok=True) os.makedirs(predict_dir + "/dropout", exist_ok=True) os.makedirs(images_dir + "/bayesian", exist_ok=True) os.makedirs(images_dir + "/dropout", exist_ok=True) prefix = "/bayesian/bayesian_" if bayesian else "/dropout/dropout_" # Saves output numpy arrays. np.save(predict_dir + prefix + "sigmoids.npy", sigmoids) np.save(predict_dir + prefix + "pred.npy", pred) np.save(predict_dir + "/test.npy", test) np.save(predict_dir + "/test_targets.npy", test_targets) # Plots four slices from each output numpy array. #four_slices = range(test.shape[0] // 5, test.shape[0], test.shape[0] // 5) #for i in four_slices: #pred_slice = pred[i, :, :].squeeze() #img = test[i, :, :].squeeze() #trg = test_targets[i, :, :].squeeze() # Adds color bar to uncertainty map and saves. #fig, ax = plt.subplots() #divider = make_axes_locatable(ax) #cax = divider.append_axes("right", size="5%", pad=0.05) #im = ax.imshow(unc_slice, cmap=cc.cm.CET_L19) #fig.colorbar(im, cax=cax, orientation="vertical") #plt.savefig(images_dir + prefix + "unc_{}.png".format(i)) #plt.close() # Saves image plots. #plt.imsave(images_dir + prefix + "pred_{}.png".format(i), # pred_slice, cmap="Greys") #plt.imsave(images_dir + "/img_{}.png".format(i), img, cmap="Greys") #plt.imsave(images_dir + "/target_{}.png".format(i), trg, cmap="Greys") @ex.capture def predict(model, test, test_targets, test_coords, test_shape, input_shape, vnet, bayesian, batch_size, border_trim, mc_samples): """Uses given model to predict on test data.""" # Initializes prediction variables. sigmoids = None if vnet: # Initializes V-Net specific prediction variables. 
sigmoids = np.zeros((mc_samples,) + test_shape) for i in range(mc_samples): sigmoid = np.zeros(test_shape) counts = np.zeros(test_shape) print("MC Sample {}/{}".format(i+1, mc_samples)) # Predicts on individual chunks. for j, (chunk, coords) in enumerate(zip(test, test_coords)): # Performs Monte Carlo sampling. chunk_pred = model.predict(np.expand_dims(chunk, axis=0))[0] # Discards poor edge predictions. trimmed_shape = input_shape border1 = ceil(input_shape[0] * border_trim) border2 = ceil(input_shape[1] * border_trim) border3 = ceil(input_shape[2] * border_trim) # Checks edge cases on edge discarding. # For example, we don't want to throw away an edge # if it is the very edge of the volume, because that # edge may only get predicted on once. if coords[0] != 0 and coords[0] != test_shape[0] - input_shape[0]: chunk_pred = chunk_pred[border1:-border1, :, :, :] coords = [coords[0] + border1, coords[1], coords[2]] trimmed_shape = [trimmed_shape[0] - (2 * border1), trimmed_shape[1], trimmed_shape[2], 1] elif coords[0] != 0: chunk_pred = chunk_pred[border1:, :, :, :] coords = [coords[0] + border1, coords[1], coords[2]] trimmed_shape = [trimmed_shape[0] - border1, trimmed_shape[1], trimmed_shape[2], 1] elif coords[0] != test_shape[0] - input_shape[0]: chunk_pred = chunk_pred[:-border1, :, :, :] trimmed_shape = [trimmed_shape[0] - border1, trimmed_shape[1], trimmed_shape[2], 1] if coords[1] != 0 and coords[1] != test_shape[1] - input_shape[1]: chunk_pred = chunk_pred[:, border2:-border2, :, :] coords = [coords[0], coords[1] + border2, coords[2]] trimmed_shape = [trimmed_shape[0], trimmed_shape[1] - (2 * border2), trimmed_shape[2], 1] elif coords[1] != 0: chunk_pred = chunk_pred[:, border2:, :, :] coords = [coords[0], coords[1] + border2, coords[2]] trimmed_shape = [trimmed_shape[0], trimmed_shape[1] - border2, trimmed_shape[2], 1] elif coords[1] != test_shape[1] - input_shape[1]: chunk_pred = chunk_pred[:, :-border2, :, :] trimmed_shape = [trimmed_shape[0], trimmed_shape[1] - border2, trimmed_shape[2], 1] if coords[2] != 0 and coords[2] != test_shape[2] - input_shape[2]: chunk_pred = chunk_pred[:, :, border3:-border3, :] coords = [coords[0], coords[1], coords[2] + border3] trimmed_shape = [trimmed_shape[0], trimmed_shape[1], trimmed_shape[2] - (2 * border3), 1] elif coords[2] != 0: chunk_pred = chunk_pred[:, :, border3:, :] coords = [coords[0], coords[1], coords[2] + border3] trimmed_shape = [trimmed_shape[0], trimmed_shape[1], trimmed_shape[2] - border3, 1] elif coords[2] != test_shape[2] - input_shape[2]: chunk_pred = chunk_pred[:, :, :-border3, :] trimmed_shape = [trimmed_shape[0], trimmed_shape[1], trimmed_shape[2] - border3, 1] # Increments each voxel in the counts array. counts = add_chunk_to_arr(counts, np.ones(trimmed_shape), coords, trimmed_shape) # Updates the sigmoid volume with the voxel means. sigmoid = add_chunk_to_arr(sigmoid, chunk_pred, coords, trimmed_shape) # Divides each voxel by the number of times it was predicted. sigmoid = sigmoid / counts sigmoids[i] = sigmoid else: # Predicts on entire slices. print() sigmoids = np.zeros((mc_samples,) + test_shape) # Performs Monte Carlo sampling. for i in range(mc_samples): sigmoids[i] = model.predict(test, batch_size=batch_size) # Calculates prediction. pred = np.mean(sigmoids, axis=0) pred[pred > 0.5] = 1. pred[pred <= 0.5] = 0. # If data was chunked, turn it back into the original size. 
if vnet and test_coords is not None and test_shape is not None: test = reconstruct(test, test_coords, test_shape) test_targets = reconstruct(test_targets, test_coords, test_shape) # Saves predictions. save_predictions(sigmoids, pred, test, test_targets) @ex.automain def test(weights_path, batch_size): """Tests a model.""" try: # Loads or creates test data. input_shape, test, test_targets, \ test_coords, orig_test_shape = get_test_data() except FileNotFoundError as e: print(e) print("Could not find test files in data_dir. " "Did you specify the correct orig_test_data_dir?") return # Loads or creates model. model, checkpoint_path, _ = get_model(input_shape, scale_factor=len(test)/batch_size, weights_path=weights_path) # Predicts on test data and saves results. predict(model, test, test_targets, test_coords, orig_test_shape, input_shape) plots()
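The test script above averages Monte Carlo forward passes into a binary prediction and measures uncertainty as a percentile spread. Below is a minimal, self-contained sketch of that summary step; the sample count, volume shape and the 2.5/97.5 percentiles are illustrative assumptions, not the script's configured values.

import numpy as np

# Stand-in for `mc_samples` stochastic forward passes over one volume:
# shape (samples, height, width), sigmoid outputs in [0, 1]. Hypothetical data.
rng = np.random.default_rng(0)
sigmoids = rng.uniform(size=(20, 64, 64))

# Mean over samples, thresholded at 0.5, gives the binary prediction.
pred = sigmoids.mean(axis=0)
pred = (pred > 0.5).astype(np.float32)

# Uncertainty as the spread between a lower and an upper percentile
# (2.5 / 97.5 assumed here; the script reads these from its config).
lower, upper = np.percentile(sigmoids, [2.5, 97.5], axis=0)
unc = upper - lower

print(pred.shape, unc.shape)  # (64, 64) (64, 64)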
{"hexsha": "580700dd07cc77f4d368be2af57716791d505eca", "size": 9545, "ext": "py", "lang": "Python", "max_stars_repo_path": "test.py", "max_stars_repo_name": "sandialabs/bcnn", "max_stars_repo_head_hexsha": "a64dd8e4dc439d77a700c8e35048ac7ebfc49ef3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 47, "max_stars_repo_stars_event_min_datetime": "2019-11-16T01:37:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T17:21:38.000Z", "max_issues_repo_path": "test.py", "max_issues_repo_name": "sandialabs/bcnn", "max_issues_repo_head_hexsha": "a64dd8e4dc439d77a700c8e35048ac7ebfc49ef3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2020-01-28T22:20:13.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-09T23:33:29.000Z", "max_forks_repo_path": "test.py", "max_forks_repo_name": "sandialabs/bcnn", "max_forks_repo_head_hexsha": "a64dd8e4dc439d77a700c8e35048ac7ebfc49ef3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2020-01-12T23:53:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-13T12:08:12.000Z", "avg_line_length": 43.3863636364, "max_line_length": 109, "alphanum_fraction": 0.5917234154, "include": true, "reason": "import numpy", "num_tokens": 2341}
"""Basic operations using matplotlib plots and synthetic trig data. """ import numpy as np #python's array proccesing / linear algebra library import pandas as pd #data processing / stats library import matplotlib.pyplot as plt #data visualization import matplotlib.dates as dates import csv import datetime from py_utils import printme #home-made formatting utilities #set this to True to show all the plots; False for dev/debugging live=True #a few marker codes cf. matplotlib.org/api/colors_api.html red='r'; blue='b'; green='g'; greenish='chartreuse'; magenta='m';black='b' circle='o'; x='x'; #get some stuff to plot - permutations of a sine wave here... # ... these use list comprehensions and scalar operations s1=pd.Series([np.sin(x/10) for x in range(0, 300)]) s2=s1.copy() s2.index=s2.index-50 s3=s1*2 s1=pd.Series([np.cos(x/10) for x in range(0, 300)]) #create a new figure (high-level container), add a plot to it plt.figure() plt.plot(s1) if live: plt.show() plt.close() #jazz it up a bit with colors and markers plt.figure() plt.plot(s1, red+circle, s1, black) if live: plt.show() plt.close() #add some more data series plt.figure() plt.plot(s1, red+circle, s1, black, s2, green+x, s2, blue, s3, greenish) if live: plt.show() plt.close() #add 'subplot' containers to arrange plots rows=2; cols=2 #subplots fill row-wise #we'll stack 'em in one at a time plt.subplot(rows, cols, 1) plt.plot(s1, red+circle, s1, black, s2, green+x, s2, blue) plt.subplot(rows, cols, 2) plt.plot(s2, green+x, s2, blue) plt.subplot(rows, cols, 3) plt.plot(s3, greenish, s1, black+circle) plt.subplot(rows, cols, 4) plt.scatter(s1, s2) if live: plt.show() plt.close() #Both Series and DataFrame objects provide some wrapping of # matplotlib. So you could go: mypic=s1.plot(kind='line') mypic.figure.show() #Series can be loaded into a Dataframe object df = pd.DataFrame(s1) df['s2']=s2 df['s3']=s3 #... and data can be plotted from there bar=df.plot.line() bar.xaxis.set_label_text("hey there!") bar.set_title("Who thought trig could be cool?") bar.figure.show() #Series can be referenced directly from the DataFrame and # the plotting operations work the same. Pandas Series # and DataFrame objects wrap matplotlib. df[0].plot(kind='line', color='g') plt.axhline(0, color='r') if live: plt.show() x=1
{"hexsha": "59ad51d453a146a6671b2ed3b2bb9efe37403ef8", "size": 2384, "ext": "py", "lang": "Python", "max_stars_repo_path": "dkr-py310/docker-student-portal-310/course_files/pandas/py_pandas_time_series_2.py", "max_stars_repo_name": "pbarton666/virtual_classroom", "max_stars_repo_head_hexsha": "a9d0dc2eb16ebc4d2fd451c3a3e6f96e37c87675", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dkr-py310/docker-student-portal-310/course_files/pandas/py_pandas_time_series_2.py", "max_issues_repo_name": "pbarton666/virtual_classroom", "max_issues_repo_head_hexsha": "a9d0dc2eb16ebc4d2fd451c3a3e6f96e37c87675", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dkr-py310/docker-student-portal-310/course_files/pandas/py_pandas_time_series_2.py", "max_forks_repo_name": "pbarton666/virtual_classroom", "max_forks_repo_head_hexsha": "a9d0dc2eb16ebc4d2fd451c3a3e6f96e37c87675", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.6344086022, "max_line_length": 83, "alphanum_fraction": 0.7026006711, "include": true, "reason": "import numpy", "num_tokens": 679}
# Copyright (c) 2018-2019, NVIDIA CORPORATION # Copyright (c) 2017- Facebook, Inc # Copyright 2020 Huawei Technologies Co., Ltd # # Licensed under the BSD 3-Clause License (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://opensource.org/licenses/BSD-3-Clause # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import torch import numpy as np import torchvision.datasets as datasets import torchvision.transforms as transforms from PIL import Image DATA_BACKEND_CHOICES = ['pytorch', 'syntetic'] def load_jpeg_from_file(path, cuda=True, fp16=False): img_transforms = transforms.Compose( [transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor()] ) img = img_transforms(Image.open(path)) with torch.no_grad(): # mean and std are not multiplied by 255 as they are in training script # torch dataloader reads data into bytes whereas loading directly # through PIL creates a tensor with floats in [0,1] range mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1) std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1) if cuda: mean = mean.cuda() std = std.cuda() img = img.cuda() if fp16: mean = mean.half() std = std.half() img = img.half() else: img = img.float() input = img.unsqueeze(0).sub_(mean).div_(std) return input class DALIWrapper(object): def gen_wrapper(dalipipeline, num_classes, one_hot): for data in dalipipeline: input = data[0]["data"] target = torch.reshape(data[0]["label"], [-1]).cuda().long() if one_hot: target = expand(num_classes, torch.float, target) yield input, target dalipipeline.reset() def __init__(self, dalipipeline, num_classes, one_hot): self.dalipipeline = dalipipeline self.num_classes = num_classes self.one_hot = one_hot def __iter__(self): return DALIWrapper.gen_wrapper(self.dalipipeline, self.num_classes, self.one_hot) def fast_collate(batch): imgs = [img[0] for img in batch] targets = torch.tensor([target[1] for target in batch], dtype=torch.int64) w = imgs[0].size[0] h = imgs[0].size[1] tensor = torch.zeros( (len(imgs), 3, h, w), dtype=torch.uint8 ) for i, img in enumerate(imgs): nump_array = np.asarray(img, dtype=np.uint8) if(nump_array.ndim < 3): nump_array = np.expand_dims(nump_array, axis=-1) nump_array = np.rollaxis(nump_array, 2) tensor[i] += torch.from_numpy(nump_array) return tensor, targets def expand(num_classes, dtype, tensor): e = torch.zeros(tensor.size(0), num_classes, dtype=dtype, device=torch.device('cuda')) e = e.scatter(1, tensor.unsqueeze(1), 1.0) return e class PrefetchedWrapper(object): def prefetched_loader(loader, num_classes, fp16, one_hot): mean = torch.tensor([0.485 * 255, 0.456 * 255, 0.406 * 255]).cuda().view(1,3,1,1) std = torch.tensor([0.229 * 255, 0.224 * 255, 0.225 * 255]).cuda().view(1,3,1,1) if fp16: mean = mean.half() std = std.half() stream = torch.cuda.Stream() first = True for next_input, next_target in loader: with torch.cuda.stream(stream): next_input = next_input.cuda(non_blocking=True) next_target = next_target.cuda(non_blocking=True) if fp16: next_input = next_input.half() if one_hot: next_target = expand(num_classes, torch.half, next_target) else: next_input = next_input.float() if one_hot: next_target = expand(num_classes, torch.float, next_target) 
next_input = next_input.sub_(mean).div_(std) if not first: yield input, target else: first = False torch.cuda.current_stream().wait_stream(stream) input = next_input target = next_target yield input, target def __init__(self, dataloader, num_classes, fp16, one_hot): self.dataloader = dataloader self.fp16 = fp16 self.epoch = 0 self.one_hot = one_hot self.num_classes = num_classes def __iter__(self): if (self.dataloader.sampler is not None and isinstance(self.dataloader.sampler, torch.utils.data.distributed.DistributedSampler)): self.dataloader.sampler.set_epoch(self.epoch) self.epoch += 1 return PrefetchedWrapper.prefetched_loader(self.dataloader, self.num_classes, self.fp16, self.one_hot) def get_pytorch_train_loader(data_path, batch_size, num_classes, one_hot, workers=5, _worker_init_fn=None, fp16=False): traindir = os.path.join(data_path, 'train') train_dataset = datasets.ImageFolder( traindir, transforms.Compose([ transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), ])) if torch.distributed.is_initialized(): train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) else: train_sampler = None train_loader = torch.utils.data.DataLoader( train_dataset, batch_size=batch_size, shuffle=(train_sampler is None), num_workers=workers, worker_init_fn=_worker_init_fn, pin_memory=True, sampler=train_sampler, collate_fn=fast_collate, drop_last=True) return PrefetchedWrapper(train_loader, num_classes, fp16, one_hot), len(train_loader) def get_pytorch_val_loader(data_path, batch_size, num_classes, one_hot, workers=5, _worker_init_fn=None, fp16=False): valdir = os.path.join(data_path, 'val') val_dataset = datasets.ImageFolder( valdir, transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), ])) if torch.distributed.is_initialized(): val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset) else: val_sampler = None val_loader = torch.utils.data.DataLoader( val_dataset, sampler=val_sampler, batch_size=batch_size, shuffle=False, num_workers=workers, worker_init_fn=_worker_init_fn, pin_memory=True, collate_fn=fast_collate) return PrefetchedWrapper(val_loader, num_classes, fp16, one_hot), len(val_loader) class SynteticDataLoader(object): def __init__(self, fp16, batch_size, num_classes, num_channels, height, width, one_hot): input_data = torch.empty(batch_size, num_channels, height, width).cuda().normal_(0, 1.0) if one_hot: input_target = torch.empty(batch_size, num_classes).cuda() input_target[:, 0] = 1.0 else: input_target = torch.randint(0, num_classes, (batch_size,)) input_target=input_target.cuda() if fp16: input_data = input_data.half() self.input_data = input_data self.input_target = input_target def __iter__(self): while True: yield self.input_data, self.input_target def get_syntetic_loader(data_path, batch_size, num_classes, one_hot, workers=None, _worker_init_fn=None, fp16=False): return SynteticDataLoader(fp16, batch_size, 1000, 3, 224, 224, one_hot), -1
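A standalone illustration of the scatter-based one-hot expansion that expand() above performs, run on CPU so it works without a GPU; the batch and class count below are arbitrary.

import torch

def expand_cpu(num_classes, dtype, tensor):
    # Same scatter trick as expand() above, minus the .cuda() placement.
    e = torch.zeros(tensor.size(0), num_classes, dtype=dtype)
    return e.scatter(1, tensor.unsqueeze(1), 1.0)

targets = torch.tensor([2, 0, 3])            # three samples, assuming 4 classes
print(expand_cpu(4, torch.float, targets))
# tensor([[0., 0., 1., 0.],
#         [1., 0., 0., 0.],
#         [0., 0., 0., 1.]])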
{"hexsha": "ee61fe073dcd00cbefc3d6fc2e1bb2f5bef1493e", "size": 7906, "ext": "py", "lang": "Python", "max_stars_repo_path": "built-in/PyTorch/Official/cv/image_classification/ResNet50_for_PyTorch/DistributedResnet50/image_classification/dataloaders.py", "max_stars_repo_name": "Ascend/modelzoo", "max_stars_repo_head_hexsha": "f018cfed33dbb1cc2110b9ea2e233333f71cc509", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2020-12-13T08:34:24.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T15:17:17.000Z", "max_issues_repo_path": "built-in/PyTorch/Official/cv/image_classification/ResNet50_for_PyTorch/DistributedResnet50/image_classification/dataloaders.py", "max_issues_repo_name": "Ascend/modelzoo", "max_issues_repo_head_hexsha": "f018cfed33dbb1cc2110b9ea2e233333f71cc509", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-01-20T03:11:05.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-20T06:53:39.000Z", "max_forks_repo_path": "built-in/PyTorch/Official/cv/image_classification/ResNet50_for_PyTorch/DistributedResnet50/image_classification/dataloaders.py", "max_forks_repo_name": "Ascend/modelzoo", "max_forks_repo_head_hexsha": "f018cfed33dbb1cc2110b9ea2e233333f71cc509", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-07-10T12:40:46.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-17T07:55:15.000Z", "avg_line_length": 37.8277511962, "max_line_length": 145, "alphanum_fraction": 0.6405261826, "include": true, "reason": "import numpy", "num_tokens": 1875}
""" Aggregate results for a single dataset. """ import os import sys import argparse from datetime import datetime from itertools import product import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from scipy.stats import sem from tqdm import tqdm here = os.path.abspath(os.path.dirname(__file__)) sys.path.insert(0, here + '/../') import util from experiments import util as exp_util from config import post_args def process(args, exp_hash, out_dir, logger): color, line, label = util.get_plot_dicts() results = [] for strategy in args.strategy: exp_dir = os.path.join(args.in_dir, args.dataset, args.tree_type, f'exp_{exp_hash}', strategy) res_list = util.get_results(args, exp_dir, logger, progress_bar=False) res_list = util.filter_results(res_list, args.skip) for method, res in res_list: results.append((f'{label[method]}_{strategy}', res)) # plot fig, axs = plt.subplots(1, 4, figsize=(20, 5)) for method, res in results: check_pct = np.array(res['check_frac']) * 100 ls = '--' if 'self' in method else '-' ax = axs[0] ax.errorbar(x=check_pct, y=res['frac_detected'] * 100, label=method, linestyle=ls) ax.set_xlabel('% train data checked') ax.set_ylabel('% noisy examples detected') ax.set_title(f'Detection') ax.legend(fontsize=6) ax = axs[1] ax.errorbar(x=check_pct, y=res['loss'], label=method, linestyle=ls) ax.set_xlabel('% train data checked') ax.set_ylabel('Test loss') ax.set_title(f'Loss') ax = axs[2] ax.errorbar(x=check_pct, y=res['acc'], label=method, linestyle=ls) ax.set_xlabel('% train data checked') ax.set_ylabel('Test acc.') ax.set_title(f'Accuracy') ax = axs[3] ax.errorbar(x=check_pct, y=res['auc'], label=method, linestyle=ls) ax.set_xlabel('% train data checked') ax.set_ylabel('Test AUC') ax.set_title(f'AUC') logger.info(f'\nSaving results to {out_dir}/...') plt.tight_layout() plt.savefig(os.path.join(out_dir, f'{args.dataset}.png'), bbox_inches='tight') def main(args): args.method += ['loss'] exp_dict = {'noise_frac': args.noise_frac, 'val_frac': args.val_frac, 'check_frac': args.check_frac} exp_hash = exp_util.dict_to_hash(exp_dict) out_dir = os.path.join(args.out_dir, args.tree_type, f'exp_{exp_hash}') log_dir = os.path.join(out_dir, 'logs') # create logger os.makedirs(out_dir, exist_ok=True) os.makedirs(log_dir, exist_ok=True) logger = exp_util.get_logger(os.path.join(log_dir, f'{args.dataset}.txt')) logger.info(args) logger.info(f'\ntimestamp: {datetime.now()}') process(args, exp_hash, out_dir, logger) if __name__ == '__main__': main(post_args.get_noise_set_args().parse_args())
{"hexsha": "6f21dd0df1b3f6b91bca49341251c510dbc2b4a3", "size": 3107, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/postprocess/noise_set.py", "max_stars_repo_name": "jjbrophy47/tree_influence", "max_stars_repo_head_hexsha": "245ff369ed3f4df3ddba243c7e3172423f385505", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/postprocess/noise_set.py", "max_issues_repo_name": "jjbrophy47/tree_influence", "max_issues_repo_head_hexsha": "245ff369ed3f4df3ddba243c7e3172423f385505", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/postprocess/noise_set.py", "max_forks_repo_name": "jjbrophy47/tree_influence", "max_forks_repo_head_hexsha": "245ff369ed3f4df3ddba243c7e3172423f385505", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.7685185185, "max_line_length": 90, "alphanum_fraction": 0.6144190537, "include": true, "reason": "import numpy,from scipy", "num_tokens": 736}
#!/usr/bin/env python import os import rospy import cv2 import numpy as np from nav_msgs.srv import GetMap, GetMapRequest class MapLoader: def __init__(self, start=None, target=None, crop_image=False): self.occupancy_grid = self.request_occupancy_grid() self.start = start # tuple with x and y coordinates in m self.target = target # in respect to origin of maze (upper left corner) # assert that both values are either provided or not provided assert (self.start and self.target) \ or (not self.start and not self.target) # if parameters are provided, matrix needs to be cropped to find origin if self.start: self.crop_image = True else: self.crop_image = crop_image def request_occupancy_grid(self): # Make request to map_loader service rospy.wait_for_service('static_map') try: get_map_service = rospy.ServiceProxy('static_map', GetMap) req = GetMapRequest() resp = get_map_service(req) rospy.loginfo("Successfully loaded occupancy grid from map_server") return resp.map except rospy.ServiceException, e: rospy.loginfo("Service call failed: %s"%e) def loadMap(self): # Load image (alternatively use occupancy_grid data and reshape) scans_dir = os.path.join(os.path.expanduser("~"),"catkin_ws/src/robotcraft-pathfinding/scans/") self.orig_img = cv2.imread(os.path.join(scans_dir, "map.pgm"), cv2.IMREAD_GRAYSCALE) if self.crop_image == True: img = self.autocrop(self.orig_img) else: img = self.orig_img # Convert colors into 0 and 1 dark_colors = np.where(img <= 220) img[dark_colors] = 0 light_colors = np.where(img > 220) img[light_colors] = 1 # Flip number so that 0 = free and 1 = occupied img = np.logical_not(img).astype(int) # Save filtered image to scans folder cv2.imwrite(os.path.join(scans_dir, "map_filtered.pgm"), img*255) img = self.place_robot(img) img = self.place_target(img) np.savetxt(os.path.join(os.path.expanduser("~"), 'catkin_ws/src/robotcraft-pathfinding/scans/map_matrix.txt'), img, delimiter='', fmt='%d') rospy.loginfo('Saved map matrix to text file...') return img def place_robot(self, img): origin_x = self.occupancy_grid.info.origin.position.x origin_y = self.occupancy_grid.info.origin.position.y resolution = self.occupancy_grid.info.resolution height = self.occupancy_grid.info.height width = self.occupancy_grid.info.width if not self.start: # Place robot at origin of map if self.crop_image == True: n_rows_removed_top = self.cropped_rows[0][1]-self.cropped_rows[0][0] n_cols_removed_left = self.cropped_cols[0][1]-self.cropped_cols[0][0] row = (self.orig_img.shape[1]-1) - int(round((abs(origin_y) / resolution))) - n_rows_removed_top # flipped coordinate system on y-axis column = int(round((abs(origin_x) / resolution))) - n_cols_removed_left else: # Calculate row and column of cell row = (height-1) - int(round((abs(origin_y) / resolution))) # flipped coordinate system on y-axis column = int(round((abs(origin_x) / resolution))) else: print("Placed robot from launch file") row = int(round(-self.start[1] / resolution)) column = int(round(self.start[0] / resolution)) # Mark robot start cell with -1 img[row, column] = -1 # changes value in place return img def place_target(self, img): resolution = self.occupancy_grid.info.resolution if not self.start: x_pos = 0 y_pos = 0 with open(os.path.join(os.path.expanduser("~"), 'catkin_ws/src/robotcraft-pathfinding/scans/robot_position.txt'), 'r') as f: x_pos = float(f.readline()) y_pos = float(f.readline()) # Get matrix coordinates of initial robot position result = np.where(img == -1) initial_pos = (result[0][0], result[1][0]) # extract indices # Calculate 
target cell using final pose and starting cell target_row = initial_pos[0] + int(round(-y_pos / resolution)) target_col = initial_pos[1] + int(round(x_pos / resolution)) else: target_row = int(round(-self.target[1] / resolution)) target_col = int(round(self.target[0] / resolution)) # Extend matrix in case target cell is outside of maze if target_row > (img.shape[0]-1): diff = target_row - (img.shape[0]-1) zeros = np.zeros(shape=(diff, img.shape[1])) img = np.r_[img, zeros] if target_row < 0: diff = -target_row zeros = np.zeros(shape=(diff, img.shape[1])) img = np.r_[zeros, img] target_row = 0 if target_col > (img.shape[1]-1): diff = target_col - (img.shape[1]-1) zeros = np.zeros(shape=(img.shape[0], diff)) img = np.c_[img, zeros] if target_col < 0: diff = -target_col zeros = np.zeros(shape=(img.shape[0], diff)) img = np.c_[zeros, img] target_col = 0 # Mark target cell with -2 img[target_row, target_col] = -2 # changes value in place, no need to return return img def autocrop(self, image, lower_threshold=100, upper_threshold=220): """Crops any edges within to threshold boundaries (used for crop gray/unkown area) Crops blank image to 1x1. Returns cropped image. """ if len(image.shape) == 3: flatImage = np.max(image, 2) else: flatImage = image assert len(flatImage.shape) == 2 rows = np.where((np.min(flatImage, 0) < lower_threshold) | (np.max(flatImage, 0) > upper_threshold))[0] if rows.size: cols = np.where((np.max(flatImage, 1) > upper_threshold) | (np.min(flatImage, 1) < lower_threshold))[0] self.cropped_rows = [(0, cols[0]), (cols[-1], image.shape[1])] self.cropped_cols = [(0, rows[0]), (rows[-1], image.shape[0])] image = image[cols[0]: cols[-1] + 1, rows[0]: rows[-1] + 1] else: image = image[:1, :1] return image
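A compact sketch of the grayscale-to-occupancy conversion done in loadMap() above: values above 220 count as free, darker values as occupied, and the result is flipped so 0 = free and 1 = occupied. The toy array below stands in for the map_server PGM.

import numpy as np

# Toy grayscale map: 254 ~ free space, 0 ~ wall (hypothetical values).
img = np.array([[254, 254,   0],
                [254,   0,   0],
                [254, 254, 254]])

occ = np.where(img <= 220, 0, 1)         # dark cells -> 0, light cells -> 1
occ = np.logical_not(occ).astype(int)    # flip so 0 = free, 1 = occupied
print(occ)
# [[0 0 1]
#  [0 1 1]
#  [0 0 0]]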
{"hexsha": "e5ed9070f7915de5a612bb565b5dc64d0c8b705b", "size": 6616, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/map_loader.py", "max_stars_repo_name": "Robotcraft19/amazebot-pathfinding", "max_stars_repo_head_hexsha": "f84f966959cb396e05cf121313362edf4c0bb41c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-04-15T12:08:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-29T06:01:24.000Z", "max_issues_repo_path": "src/map_loader.py", "max_issues_repo_name": "Robotcraft19/amazebot-pathfinding", "max_issues_repo_head_hexsha": "f84f966959cb396e05cf121313362edf4c0bb41c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/map_loader.py", "max_forks_repo_name": "Robotcraft19/amazebot-pathfinding", "max_forks_repo_head_hexsha": "f84f966959cb396e05cf121313362edf4c0bb41c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-05-15T15:37:27.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-22T16:51:25.000Z", "avg_line_length": 39.6167664671, "max_line_length": 151, "alphanum_fraction": 0.5917472793, "include": true, "reason": "import numpy", "num_tokens": 1593}
\documentclass[10pt]{beamer} \usepackage[utf8]{inputenc} \usepackage{url} \usepackage{listings} \usepackage{drawstack} \lstset{ basicstyle=\ttfamily\scriptsize, showtabs=false, showspaces=false, showstringspaces=false, columns=fixed, showstringspaces=false, extendedchars=true, } \usetheme{Copenhagen} \usecolortheme{default} \title{The advanced return-into-lib(c) exploits} \subtitle{\url{http://phrack.org/issues/58/4.html}} \author{Anders Kiel Hovgaard \and Daniel Gavin \and Rúni Klein Hansen} \institute{Department of Computer Science, University of Copenhagen} \date{May 22, 2015} \begin{document} \frame{\titlepage} \section{Classical return-into-libc} % Rúni \begin{frame}[fragile] \frametitle{Classical return-into-libc} A method commonly used to circumvent non-executable stack by returning to a dynamic library instead of returning to code located on the stack. \begin{verbatim} | ... | arg_2 |--------------------| | addr. of "/bin/sh" | arg_1 |--------------------| | dummy ret. addr. | dummy_int32 |--------------------| | addr. of system() | funcion_in_lib |--------------------| | 0x41414141 | buffer fill-up | 0x41414141 | | ... | |--------------------| \end{verbatim} \end{frame} \begin{frame} \frametitle{Return address} A graceful exit or crash and burn.\\ \vspace{5mm} Choosing a offset instead of a ``random'' return address, and the consequences. \end{frame} \section{Chaining return-into-libc calls} % Anders \subsection{Problems with the classical approach} \begin{frame}[fragile] \frametitle{Problems with the classical approach} \begin{itemize} \item Not possible to call another funtion, which takes arguments, after the first call, since the first argument will be the new return address etc. \end{itemize} \begin{lstlisting} ------------------------------------------------------- | buffer fill-up | f1 | f2 | arg_1/f2_ret | arg_2 | ... ------------------------------------------------------- \end{lstlisting} \begin{itemize} \item Multiple function calls often necessary, e.g. for: \begin{itemize} \item regaining privileges with \texttt{setuid} \item mapping a known memory location with \texttt{mmap} \item copying or reading code to mapped location \item returning to mapped location \item etc. \end{itemize} \end{itemize} \end{frame} \begin{frame} \frametitle{Problems with the classical approach} \begin{itemize} \item The overflow can typically not contain \texttt{NUL} bytes and that limits the arguments to the function.\\ \hfill\\ Example: \begin{itemize} \item \texttt{mmap(0x414140\textcolor{red}{00}, 0x20\textcolor{red}{00}, \dots)} \hfill (pagesize = 0x1000) \end{itemize} \end{itemize} \end{frame} \begin{frame} \frametitle{Chaining return-into-libc calls} Two methods for chaining multiple function calls: \begin{itemize} \item ``\texttt{esp} lifting'' method \item frame faking \end{itemize} \end{frame} \subsection{``\texttt{esp} lifting'' method} \begin{frame}[fragile,shrink] \frametitle{``\texttt{esp} lifting'' method} \begin{columns}[c] \column{0.5\textwidth} \begin{itemize} \item Designed for attacking binaries compiled with the \texttt{-fomit-frame-pointer} flag. \item Using gadgets that ``lift'' \texttt{ESP} to clean up arguments on the stack in between function calls. 
\end{itemize} \begin{lstlisting} eplg: add esp, SIZE ret \end{lstlisting} \begin{lstlisting} eplg: pop ebx pop esi pop edi pop ebp ret \end{lstlisting} \column{0.5\textwidth} \begin{drawstack}[scale=0.50] \cell{f2\_args} \cell{dummy} \cell{f2} \startframe \cell{(padding)} \cell{f1\_argn} \cell{\dots} \cell{f1\_arg2} \cell{f1\_arg1} \finishframe{{\scriptsize SIZE}\phantom{XXX}} \cell{epilogue} \cell{f1} \cell{0x41414141} \end{drawstack} \end{columns} \end{frame} \subsection{frame faking} \begin{frame}[fragile] \frametitle{frame faking} Designed to attack binaries compiled \emph{without} the \texttt{-fomit-frame-pointer} flag. \begin{lstlisting} leaveret: leave ret \end{lstlisting} \texttt{ESP} lifting epilogues might still be available with GCC. \end{frame} \begin{frame}[fragile] \begin{lstlisting} saved FP vuln. function's return address -------------------------------------------- | buffer fill-up(*) | fake_ebp0 | leaveret | -------------------------|------------------ | +---------------------+ | v ----------------------------------------------- | fake_ebp1 | f1 | leaveret | f1_arg1 | f1_arg2 ... -----|----------------------------------------- | the first frame +-+ | v ------------------------------------------------ | fake_ebp2 | f2 | leaveret | f2_arg1 | f2_argv2 ... -----|------------------------------------------ | the second frame +-- ... \end{lstlisting} \end{frame} \begin{frame} \frametitle{\dots but ASLR} Frame faking requires some special conditions because of ASLR. We must know the exact location of the fake stack frame. \begin{itemize} \item A predictable location to place the stack frames and chained function calls (ROP chain), e.g.: \begin{itemize} \item the address of a static variable. \end{itemize} \item An information leak of \texttt{ESP}. \end{itemize} \end{frame} \subsection{Inserting \texttt{NUL} bytes} \begin{frame}[fragile] \frametitle{Inserting \texttt{NUL} bytes} If a function in the chain needs an argument that contains \texttt{0x00}, that might be problematic to include in the overflow (e.g. \texttt{strcpy} stopping on \texttt{NUL} byte). \hfill\\ \hfill\\ Solution: Insert \texttt{NUL} bytes with returns to \texttt{strcpy} with the second argument pointing to some \texttt{NUL} byte in the program: \begin{verbatim} strcpy(addr in ROP chain, addr of NUL byte in program) ^^ ?? \end{verbatim} \emph{\dots well}, then we need the exact address on the stack or fake frame. \end{frame} \section{PaX features} % Rúni \subsection{What is PaX?} \begin{frame} \frametitle{What is PaX?} \begin{itemize} \item Implementing read, write and execution privileges to IA-32 where no such differentiation exists. \item A programmatic way to prevent buffer overflows by making part of the stack non executable (data part). \end{itemize} \vspace{4mm} In short:\\ \vspace{2mm} \hspace{6mm}``To prevent executing code that was smuggled into data areas'' \end{frame} \begin{frame}[fragile] \frametitle{Privileges on the stack} Originally PaX only tried to implemented execution privileges, but due to \texttt{MMAP} and the privilege \texttt{PROT\_EXEC} more is necessary and using \texttt{strcpy(address, shellcode)} one can get shellcode to run even with PaX and thus gain access to the stack. 
\end{frame} \subsection{ASLR} \begin{frame} \frametitle{Address Space Layout Randomization} The first loaded library is loaded at:\\ \vspace{1mm} \hspace{10mm}\texttt{0x40000000+random*4k} \vspace{1mm} and the next library will be mmaped after the first and so on\\ \vspace{3mm} And stack is at the following place:\\ \vspace{1mm} \hspace{10mm}\texttt{0xc0000000-random*16}\\ \vspace{3mm} Random is a unsigned 16-bit integer fetched via \texttt{get\_random\_bytes()} which yields cryptographically strong data. \end{frame} \begin{frame}[fragile] \frametitle{Example of address randomization} Different address for each new program: \begin{lstlisting} 7fb8d95fd000-7fb8d961f000 r-xp 00000000 08:02 134376 /usr/lib/ld-2.21.so 7fb8d981e000-7fb8d981f000 r--p 00021000 08:02 134376 /usr/lib/ld-2.21.so 7fb8d981f000-7fb8d9820000 rw-p 00022000 08:02 134376 /usr/lib/ld-2.21.so 7ff6b2f69000-7ff6b2f8b000 r-xp 00000000 08:02 134376 /usr/lib/ld-2.21.so 7ff6b318a000-7ff6b318b000 r--p 00021000 08:02 134376 /usr/lib/ld-2.21.so 7ff6b318b000-7ff6b318c000 rw-p 00022000 08:02 134376 /usr/lib/ld-2.21.so \end{lstlisting} \end{frame} \begin{frame} If \texttt{CONFIG\_PAX\_RANDMMAP} is activated the follo \begin{itemize} \item The first loaded library has a new address at each boot \item Functions are randomized each time a binary is run \end{itemize} \end{frame} \subsection{Drawbacks/failures of PaX} \begin{frame} \frametitle{Drawbacks and Failures of PaX} The drawbacks/failures of PaX \begin{itemize} \item Local access to /proc/\$\$/maps - Restrict access \item Bruteforce the way to libc - Reactive guards after logged attacks \item Information leak due to formatting \item Using position-dependant functions i.e. cannot be mmaped \end{itemize} \end{frame} \section{The dynamic linker's dl-resolve() function} % Daniel \begin{frame}[fragile] \frametitle{Procedure linkage table(PLT)} \begin{itemize} \item PLT's purpose is to provide a level of indirection when calling shared library functions. \item PLT is lazy binding. \item PLT ensures that code remains read-only, and that is because all shared functions are not directly called from code. \item PLT is in the code segment, and the addresses that PLT modifies are in the global offset table(GOT). \end{itemize} \end{frame} \begin{frame}[fragile] \frametitle{Procedure linkage table entry} A typical PLT entry for elf32-i386. \begin{verbatim} 080484c0 <mmap@plt>: 80484c0: ff 25 30 a0 04 08 jmp DWORD PTR ds:0x804a030 80484c6: 68 48 00 00 00 push 0x48 80484cb: e9 50 ff ff ff jmp 8048420 <_init+0x24> \end{verbatim} \end{frame} \begin{frame}[fragile] \frametitle{elf32 types} \footnotesize \begin{verbatim} typedef uint32_t Elf32_Addr; typedef uint32_t Elf32_Word; typedef struct { Elf32_Addr r_offset; /* Address */ Elf32_Word r_info; /* Relocation type and symbol index */ } Elf32_Rel; /* How to extract and insert information held in the r_info field.*/ #define ELF32_R_SYM(val) ((val) >> 8) #define ELF32_R_TYPE(val) ((val) & 0xff) \end{verbatim} \normalsize \end{frame} \begin{frame}[fragile] \frametitle{elf32 types} \footnotesize \begin{verbatim} typedef struct { Elf32_Word st_name; /* Symbol name (string tbl index) */ Elf32_Addr st_value; /* Symbol value */ Elf32_Word st_size; /* Symbol size */ unsigned char st_info; /* Symbol type and binding */ unsigned char st_other; /* Symbol visibility under glibc>=2.2 */ Elf32_Section st_shndx; /* Section index */ } Elf32_Sym; The fields st_size, st_info and st_shndx are not used during symbol resolution. 
\end{verbatim} \normalsize \end{frame} \begin{frame}[fragile] \frametitle{elf32 structure} \scriptsize \begin{verbatim} pcs2015@pcs2015:~/share/ret2libc/demo$ readelf -d vuln Dynamic section at offset 0x80c contains 24 entries: Tag Type Name/Value ... more stuff ... 0x00000005 (STRTAB) 0x804824c string table (type char *) ... 0x00000006 (SYMTAB) 0x80481ac symbol table (type Elf32_Sym*) ... 0x00000017 (JMPREL) 0x80482f0 table of relocation entries related to PLT (type Elf32_Rel*) ... 0x6ffffff0 (VERSYM) 0x80482b2 array of version table indices (type uint16_t*) ... \end{verbatim} \normalsize \end{frame} \begin{frame}[fragile] \frametitle{dl-resolve() algorithm} This is the simplified explanation of how the algorithm works. \begin{itemize} \item Calculate some\_func's relocation entry \\ Elf32\_Rel * reloc = JMPREL + reloc\_offset. \item Calculate some\_func's symtab entry \\ Elf32\_Sym * sym = \&SYMTAB[ ELF32\_R\_SYM (reloc $->$ r\_info) ]; \item Sanity check \\ assert(ELF32\_R\_TYPE(reloc$->$r\_info)==R\_386\_JMP\_SLOT); \end{itemize} \end{frame} \begin{frame}[fragile] \frametitle{dl-resolve() algorithm} \begin{itemize} \item Check if sym$->$st\_other \& 3 == 0, then algorithm presumes that the symbol has not been resolved before. \item If symbol versioning is enabled, determine the version table index, and use it to find version information. \\ uint16\_t ndx = VERSYM[ ELF32\_R\_SYM (reloc$->$r\_info) ]; \\ const struct r\_found\_version *version =\&l$->$l\_versions[ndx]; \item Determine function name (an asciiz string) \\ name = STRTAB + sym$->$st\_name; \end{itemize} \end{frame} \begin{frame}[fragile] \frametitle{dl-resolve() algorithm} \begin{itemize} \item Algorithm has enough information to retrieve the address for the function, and caches it in reloc$->$r\_offset and sym$->$st\_value. \item The GOT value for the function address is modified. \item The retrieved function address is called. \end{itemize} \end{frame} \begin{frame}[fragile] \frametitle{Exploiting dl-resolve()} \begin{itemize} \item How can we exploit dl-resolve()? \item We could prepare an appropriate Elf32\_Sym and Elf32\_Rel, and calculate the reloc\_offset to fit with where Elf32\_Rel is placed. \item We would then call .plt start with the correct reloc\_offset. \item The exploit would also require some data copying function to be in PLT(strcpy, sprintf, etc). \end{itemize} \scriptsize \begin{verbatim} |----------------------------------------------------------------------| | buffer_overflow | .plt start | reloc_offset | ret_addr | arg1 | .... | |----------------------------------------------------------------------| \end{verbatim} \normalsize \end{frame} \begin{frame}[fragile] \frametitle{Exploiting dl-resolve()} We need to ensure that the structures are placed correctly in memory. \begin{verbatim} Elf32_Rel reloc. Elf32_Sym sym. unsigned short verind (which should be 0). reloc is at address JMPREL+reloc_offset. real_index = ELF32_R_SYM (reloc->r_info) sym is at address SYMTAB+real_index*sizeof(Elf32_Sym) verind is at address VERSYM+real_index*sizeof(short) function_name is at address STRTAB + sym->st_name \end{verbatim} \end{frame} \section{Demo} \frametitle \begin{frame} Demo time! \end{frame} \end{document}
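Not part of the slides: a hedged sketch of how the classical return-into-libc payload drawn in the first diagram could be assembled for a 32-bit target. Every address and the buffer length below are made-up placeholders; on a real system they have to be recovered separately.

import struct

SYSTEM_ADDR = 0xb7e53d10   # assumed address of system() in libc (placeholder)
BINSH_ADDR  = 0xb7f74a24   # assumed address of a "/bin/sh" string (placeholder)
BUF_LEN     = 64           # assumed distance from buffer start to saved EIP

payload  = b"A" * BUF_LEN                   # buffer fill-up
payload += struct.pack("<I", SYSTEM_ADDR)   # overwrites the saved return address
payload += struct.pack("<I", 0x41414141)    # dummy return address for system()
payload += struct.pack("<I", BINSH_ADDR)    # arg_1: pointer to "/bin/sh"

print(payload.hex())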
{"hexsha": "1f7c2d1094636980fb451fdd2479db2f70e0832b", "size": 14526, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "presentation/slides.tex", "max_stars_repo_name": "ahovgaard/ret2libc", "max_stars_repo_head_hexsha": "5542c7c84e83cde99d0b785a4917852a7ed9ac24", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2015-11-06T08:26:34.000Z", "max_stars_repo_stars_event_max_datetime": "2015-11-06T08:26:34.000Z", "max_issues_repo_path": "presentation/slides.tex", "max_issues_repo_name": "ahovgaard/ret2libc", "max_issues_repo_head_hexsha": "5542c7c84e83cde99d0b785a4917852a7ed9ac24", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "presentation/slides.tex", "max_forks_repo_name": "ahovgaard/ret2libc", "max_forks_repo_head_hexsha": "5542c7c84e83cde99d0b785a4917852a7ed9ac24", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.9499072356, "max_line_length": 142, "alphanum_fraction": 0.6570976181, "num_tokens": 4277}
from brightics.common.report import ReportBuilder, strip_margin, plt2MD, dict2MD from brightics.function.utils import _model_dict import numpy as np import pandas as pd from sklearn.neighbors import LocalOutlierFactor from brightics.common.groupby import _function_by_group from brightics.common.utils import check_required_parameters def outlier_detection_tukey_carling(table, group_by=None, **params): check_required_parameters(_outlier_detection_tukey_carling, params, ['table']) if group_by is not None: return _function_by_group(_outlier_detection_tukey_carling, table, group_by=group_by, **params) else: return _outlier_detection_tukey_carling(table, **params) def _outlier_detection_tukey_carling(table, input_cols, outlier_method="tukey", multiplier=None, number_of_removal=1, choice='add_prediction', new_column_prefix='is_outlier_'): out_table = table.copy() if multiplier is None and outlier_method == "tukey": multiplier = 1.5 elif multiplier is None and outlier_method == "carling": multiplier = 2.3 mean = table.mean() q1s = table.quantile(0.25) q3s = table.quantile(0.75) iqrs = q3s - q1s new_column_names = ['{prefix}{col}'.format(prefix=new_column_prefix, col=col) for col in input_cols] def _tukey(x, q1, q3, iqr, multiplier): return 'out' if x < q1 - multiplier * iqr or x > q3 + multiplier * iqr else 'in' def _carling(x, mean, iqr, multiplier): return 'out' if x < mean - multiplier * iqr or x > mean + multiplier * iqr else 'in' if outlier_method == "tukey": for col in input_cols: output_col_name = '{prefix}{col}'.format(prefix=new_column_prefix, col=col) out_table[output_col_name] = table[col].apply(lambda _: _tukey(_, q1s[col], q3s[col], iqrs[col], multiplier)) elif outlier_method == "carling": if multiplier is None: multiplier = 2.3 for col in input_cols: output_col_name = '{prefix}{col}'.format(prefix=new_column_prefix, col=col) out_table[output_col_name] = table[col].apply(lambda _: _carling(_, mean[col], iqrs[col], multiplier)) prediction = out_table[new_column_names].apply(lambda row: np.sum(row == 'out') < number_of_removal, axis=1) rb = ReportBuilder() params = { 'Input Columns': input_cols, 'Outlier Method': outlier_method, 'Multiplier': multiplier, 'Number of Outliers in a Row': number_of_removal, 'Result Type': choice, 'New Column Prefix': new_column_prefix } rb.addMD(strip_margin(""" | ## Outlier Detection (Tukey/Carling) Result | ### Parameters | | {display_params} """.format(display_params=dict2MD(params)))) if choice == 'add_prediction': pass elif choice == 'remove_outliers': out_table = out_table.drop(new_column_names, axis=1) out_table = out_table[prediction.values] elif choice == 'both': out_table = out_table[prediction.values] model = _model_dict('outlier_detection_tukey_carling') model['params'] = params model['mean'] = mean model['q1'] = q1s model['q3'] = q3s model['iqr'] = iqrs model['multiplier'] = multiplier model['report'] = rb.get() return {'out_table': out_table, 'model' : model} def outlier_detection_lof(table, group_by=None, **params): check_required_parameters(_outlier_detection_lof, params, ['table']) if group_by is not None: return _function_by_group(_outlier_detection_lof, table, group_by=group_by, **params) else: return _outlier_detection_lof(table, **params) def _outlier_detection_lof(table, input_cols, choice='add_prediction', n_neighbors=20, new_column_name='is_outlier'): # algorithm='auto', leaf_size=30, # metric='minkowski', p=2, contamination=0.1, out_table = table.copy() lof_model = LocalOutlierFactor(n_neighbors, algorithm='auto', leaf_size=30, 
metric='minkowski', p=2, contamination=0.1) lof_model.fit_predict(out_table[input_cols]) isinlier = lambda _: 'in' if _ == 1 else 'out' out_table[new_column_name] = [isinlier(lof_predict) for lof_predict in lof_model.fit_predict(out_table[input_cols])] if choice == 'add_prediction': pass elif choice == 'remove_outliers': out_table = out_table[out_table[new_column_name] == 'in'] out_table = out_table.drop(new_column_name, axis=1) elif choice == 'both': out_table = out_table[out_table[new_column_name] == 'in'] params = { 'Input Columns': input_cols, 'Result Type': choice, 'Number of Neighbors': n_neighbors, # 'Algorithm': algorithm, # 'Metric': metric, # 'Contamination': contamination } rb = ReportBuilder() rb.addMD(strip_margin(""" | ## Outlier Detection (Local Outlier Factor) Result | ### Parameters | | {display_params} """.format(display_params=dict2MD(params)))) model = _model_dict('outlier_detection_lof') model['params'] = params model['lof_model'] = lof_model model['report'] = rb.get() return {'out_table':out_table, 'model':model}
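The Tukey branch above flags a value as an outlier when it falls outside [Q1 - k*IQR, Q3 + k*IQR]. A compact single-column illustration with placeholder data and the default multiplier k = 1.5:

import pandas as pd

s = pd.Series([1.0, 2.0, 2.5, 3.0, 2.2, 2.8, 15.0])  # 15.0 is the planted outlier
k = 1.5                                              # Tukey multiplier

q1, q3 = s.quantile(0.25), s.quantile(0.75)
iqr = q3 - q1
is_out = (s < q1 - k * iqr) | (s > q3 + k * iqr)

print(s[is_out])   # only the 15.0 entry is flagged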
{"hexsha": "b6173cc52887691d88bed7c106e2a60a8900b14c", "size": 5329, "ext": "py", "lang": "Python", "max_stars_repo_path": "function/python/brightics/function/manipulation/outlier_detection.py", "max_stars_repo_name": "power4454/studio", "max_stars_repo_head_hexsha": "d8115a8f483edab8d674f567e277863ea1bb3f79", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "function/python/brightics/function/manipulation/outlier_detection.py", "max_issues_repo_name": "power4454/studio", "max_issues_repo_head_hexsha": "d8115a8f483edab8d674f567e277863ea1bb3f79", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "function/python/brightics/function/manipulation/outlier_detection.py", "max_forks_repo_name": "power4454/studio", "max_forks_repo_head_hexsha": "d8115a8f483edab8d674f567e277863ea1bb3f79", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.897810219, "max_line_length": 152, "alphanum_fraction": 0.6597860762, "include": true, "reason": "import numpy", "num_tokens": 1362}
# Ok, here we read parses from the CATH8 corpus and we try to reconstruct them.
# Let's give it a try.

import cky_constituent_copy as ckypy
import pandas as pd
import numpy as np

# First, let's read the grammar
dat = pd.DataFrame.from_csv('input/00001_fitted_grammars.txt',sep=' ').reset_index()

rule_probabilities = dat[ dat["grammar"]=="NoCopy" ]

grammar = {}
ruleprobs = {}
for i,row in rule_probabilities.iterrows():
    lhs=row["lhs"]
    rhs=row["rhs"].split(".")
    p  =row["log.prob"]
    #if len(rhs)==1: # if this is a lexical rule
    #    # So as to enter into the format
    #    rhs = [rhs]
    if lhs not in grammar.keys():
        grammar[lhs]=[]
    grammar[lhs].append((rhs,False))
    ruleprobs["%s->%s"%(lhs,".".join(rhs))]=p

# Convert back to list so as to ensure that we keep the order of the rules from now on
grammar = grammar.items()

#print grammar

# All right, let's take a sentence and parse it
sentences = pd.DataFrame.from_csv('input/00001_test.txt',sep=' ').reset_index()

sentences["check.prob.nocopy"]=0
for i,row in sentences.iterrows():

    sentence = row["sentence"].split(".")
    print sentence

    chart,backpoints = ckypy.parse(sentence,grammar)
    # ckypy.print_chart(chart)

    parses = ckypy.collect_trees(0,len(sentence),"S",chart,backpoints,grammar,sentence)

    # Suppose we have collected a number of trees, let's draw them!
    # ckypy.tree_to_pdf(tree,"tree.pdf")

    #rules_used = ckypy.rules_used(parses[0],grammar)

    # Ok, let's extract rules used in a particular parse
    sentence_prob = []
    print "# of parses = %i"%len(parses)
    for parse in parses:
        rules_used = ckypy.rules_used(parse)
        log_ps = map(lambda x: ruleprobs[x],rules_used)
        parseprob = sum(log_ps) # the probability of the parse is simply the sum of the log probabilities of the rules used
        sentence_prob.append( parseprob )

    sentences.loc[i,"check.prob.nocopy"]=sentence_prob[0]

corpus_likelihood = sum(sentences["check.prob.nocopy"])
corpus_likelihood_meaghan = sum(sentences["logprob.NoCopy"])

# Ok, now for something more challenging: the copy grammar
"""
dat = pd.DataFrame.from_csv('input/00001_fitted_grammars.txt',sep=' ').reset_index()

rule_probabilities = dat[ dat["grammar"]=="Copy+SS" ]

grammar = {}
ruleprobs = {}
for i,row in rule_probabilities.iterrows():
    lhs=row["lhs"]
    rhs=row["rhs"].split(".")
    p  =row["log.prob"]
    #if len(rhs)==1: # if this is a lexical rule
    #    # So as to enter into the format
    #    rhs = [rhs]
    copy = False
    if lhs not in grammar.keys():
        grammar[lhs]=[]
    ruleprobs["%s->%s"%(lhs,".".join(rhs))]=p
    if rhs[-1]=="copy": # if this is "the" copy rule
        copy = True
        rhs = rhs[:-1]
    grammar[lhs].append((rhs,copy))

# Convert back to list so as to ensure that we keep the order of the rules from now on
grammar = grammar.items()

#print grammar

sentences = pd.DataFrame.from_csv('input/00001_test.txt',sep=' ').reset_index()
sentences["length"]=np.array([ len(x.split(".")) for x in sentences["sentence"] ])
"""

"""
sentences["check.prob.copy+ss"]=0

if True:
    #sentence = sentences.ix[0]

    sentence = sentences.ix[0]["sentence"].split(".")
    print sentence

    chart,backpoints = ckypy.parse(sentence,grammar)
    # ckypy.print_chart(chart)

    parses = ckypy.collect_trees(0,len(sentence),"S",chart,backpoints,grammar,sentence)

    # Suppose we have collected a number of trees, let's draw them!
# ckypy.tree_to_pdf(tree,"tree.pdf") #rules_used = ckypy.rules_used(parses[0],grammar) # Ok, let's extract rules used in a particular parse sentence_prob = [] print "# of parses = %i"%len(parses) for i,parse in enumerate(parses): rules_used = ckypy.rules_used(parse) ckypy.tree_to_pdf(parse,'output/parse%i.pdf'%i) log_ps = map(lambda x: ruleprobs[x],rules_used) parseprob = sum(log_ps) # the probability of the parse is simply the sum of the log probabilities of the rules used sentence_prob.append( parseprob ) sentences.loc[i,"check.prob.nocopy"]=sentence_prob[0] """ """ sentence = "aiw.aix.aiw.aix".split(".") """ #sentence = ["aiw","aiw"] # sentences[ sentences["length"]<4 ] # getprob("bbb") """ sentences["check.prob.copy+ss"]=0 sentences["n.parses.copy+ss"]=0 for i,row in sentences.iterrows(): sentence = row["sentence"] print sentence if row["length"]<12: #if True: p,prob_per_parse = getprob(sentence) sentences.loc[i,"check.prob.copy+ss"]=p sentences.loc[i,"n.parses.copy+ss"]=len(prob_per_parse) sentences.to_csv('checkprobs.csv') """ # getprob("agb.aaw",output_trees=True) def getprob(sent,output_trees=False): sentence = sent.split(".") chart,backpoints = ckypy.parse(sentence,grammar) # ckypy.print_chart(chart) parses = ckypy.collect_trees(0,len(sentence),"S",chart,backpoints,grammar,sentence) parse_probs = [] for i,parse in enumerate(parses): if output_trees: ckypy.tree_to_pdf(parse,'output/parse%05i.pdf'%i) rules_used = ckypy.rules_used(parse) log_ps = map(lambda x: ruleprobs[x],rules_used) parseprob = sum(log_ps) # the probability of the parse is simply the sum of the log probabilities of the rules used parse_probs.append( parseprob ) # Add the probabilities of the parses, using a smart # bit of algebra to prevent underflow # (essentially what we are trying to compute is log(exp(X)+exp(Y))). total_prob = parse_probs[0] for logp in parse_probs[1:]: total_prob = ckypy.log_add(total_prob,logp) return (total_prob,parse_probs) sentences["check.prob.copy+ss"]=0 sentences["n.parses.copy+ss"]=0 for i,row in sentences.iterrows(): sentence = row["sentence"].split(".") if True: print sentence, chart,backpoints = ckypy.parse(sentence,grammar) nparses_cache = ckypy.n_parses("S",chart,backpoints,grammar,sentence) print "Parses (caching): ",nparses_cache, probchart,logprob = ckypy.probability("S",chart,backpoints,grammar,sentence,ruleprobs) print "Log Probability (with caching): ",logprob sentences.loc[i,"check.prob.copy+ss"]=logprob sentences.loc[i,"n.parses.copy+ss"]=nparses_cache if len(sentence)<7: #if True: parses = ckypy.collect_trees(0,len(sentence),"S",chart,backpoints,grammar,sentence) logprob_fromparses = ckypy.find_prob_from_parses(parses,ruleprobs,output_trees=False) print "Log Probability (from parses): ",logprob_fromparses nparses = ckypy.n_parses_nocache("S",chart,backpoints,grammar,sentence) print "Parses (classic): ",nparses, if len(sentence)<10: print "N of trees: ",len(parses), print # ckypy.print_chart(chart) sentences.to_csv('interim/cath8_sentences.csv')
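The script above combines parse probabilities through ckypy.log_add. A standalone version of that log-space addition, i.e. log(exp(x) + exp(y)) computed without underflow, might look like this (the exact ckypy implementation may differ).

from math import exp, log1p

def log_add(x, y):
    # log(exp(x) + exp(y)), arranged so exp() is only taken of a non-positive number.
    if y > x:
        x, y = y, x
    return x + log1p(exp(y - x))

# Two parses with log probabilities -40 and -42: summing their raw probabilities
# directly would lose precision, while the log-space sum stays well behaved.
print(log_add(-40.0, -42.0))   # ~ -39.873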
{"hexsha": "539e61b29d38bc5dacaf1d47a1a70a1e82374d96", "size": 7097, "ext": "py", "lang": "Python", "max_stars_repo_path": "ckypy/read_cath8_parses.py", "max_stars_repo_name": "megodoonch/birdsong", "max_stars_repo_head_hexsha": "582e7ddecf6c9c1b75f17418097f7bcbf6784d31", "max_stars_repo_licenses": ["BSD-3-Clause-Clear"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ckypy/read_cath8_parses.py", "max_issues_repo_name": "megodoonch/birdsong", "max_issues_repo_head_hexsha": "582e7ddecf6c9c1b75f17418097f7bcbf6784d31", "max_issues_repo_licenses": ["BSD-3-Clause-Clear"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ckypy/read_cath8_parses.py", "max_forks_repo_name": "megodoonch/birdsong", "max_forks_repo_head_hexsha": "582e7ddecf6c9c1b75f17418097f7bcbf6784d31", "max_forks_repo_licenses": ["BSD-3-Clause-Clear"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.104815864, "max_line_length": 123, "alphanum_fraction": 0.6491475271, "include": true, "reason": "import numpy", "num_tokens": 1853}
# -*- coding: utf-8 -*- # dcf # --- # A Python library for generating discounted cashflows. # # Author: sonntagsgesicht, based on a fork of Deutsche Postbank [pbrisk] # Version: 0.7, copyright Sunday, 22 May 2022 # Website: https://github.com/sonntagsgesicht/dcf # License: Apache License 2.0 (see LICENSE file) from collections import OrderedDict from inspect import signature from math import exp from warnings import warn from .. import interpolation as _interpolations from ..compounding import continuous_compounding, continuous_rate from ..interpolation import linear_scheme, log_linear_scheme from ..daycount import day_count as _default_day_count def rate_table(curve, x_grid=None, y_grid=None): r""" table of calculated rates :param curve: function $f$ :param x_grid: vertical date axis $x_0, \dots, x_m$ :param y_grid: horizontal period axis $y_1, \dots, y_n$ (implicitly added a non-period $y_0=0$) :return: list(list(float)) matrix $T=(t_{i,j})$ with $t_{i,j}=f(x_i+y_j) \text{ if } x_i+y_j < x_{i+1}$. >>> from tabulate import tabulate >>> from dcf import Curve, rate_table >>> curve = Curve([1, 4], [0, 1]) >>> table = rate_table(curve, x_grid=(0, 1, 2, 3, 4, 5), y_grid=(.0, .25, .5, .75)) >>> print(tabulate(table, headers='firstrow', floatfmt='.4f')) 0.0 0.25 0.5 0.75 -- ------ ------ ------ ------ 0 0.0000 0.0000 0.0000 0.0000 1 0.0000 0.0833 0.1667 0.2500 2 0.3333 0.4167 0.5000 0.5833 3 0.6667 0.7500 0.8333 0.9167 4 1.0000 1.0000 1.0000 1.0000 5 1.0000 1.0000 1.0000 1.0000 >>> from businessdate import BusinessDate, BusinessPeriod >>> from dcf import ZeroRateCurve >>> term = '1m', '3m', '6m', '1y', '2y', '5y', >>> rates = -0.008, -0.0057, -0.0053, -0.0036, -0.0010, 0.0014, >>> today = BusinessDate(20211201) >>> tenor = BusinessPeriod('1m') >>> dates = [today + t for t in term] >>> f = ZeroRateCurve(dates, rates, origin=today, forward_tenor=tenor) >>> print(tabulate(f.table, headers='firstrow', floatfmt=".4f", tablefmt='latex')) \begin{tabular}{lrrrrrrr} \hline & 0D & 1M & 2M & 3M & 6M & 1Y & 2Y \\ \hline 20211201 & -0.0080 & & & & & & \\ 20220101 & -0.0080 & -0.0068 & & & & & \\ 20220301 & -0.0057 & -0.0056 & -0.0054 & & & & \\ 20220601 & -0.0053 & -0.0050 & -0.0047 & -0.0044 & & & \\ 20221201 & -0.0036 & -0.0034 & -0.0032 & -0.0030 & -0.0023 & & \\ 20231201 & -0.0010 & -0.0009 & -0.0009 & -0.0008 & -0.0006 & -0.0002 & 0.0006 \\ 20261201 & 0.0014 & 0.0014 & 0.0014 & 0.0014 & 0.0014 & 0.0014 & 0.0014 \\ \hline \end{tabular} """ # noqa: E501 if x_grid is None: x_grid = list(curve.domain) if curve.origin not in x_grid: x_grid = [curve.origin] + x_grid if y_grid is None: diff = list(e-s for s, e in zip(x_grid[:-1], x_grid[1:])) step = diff[0] y_grid = [step * 0] for span in diff: line = [step] while line[-1] + step < span: line.append(line[-1] + step) y_grid.extend(line) step = span y_grid = tuple(sorted(set(y_grid))) # fill table grid = list() grid.append(('',) + tuple(y_grid)) for i, x in enumerate(x_grid): lst = x_grid[i+1] if i < len(x_grid)-1 \ else x_grid[-1] + y_grid[-1] + y_grid[-1] grid.append(((x,) + tuple(curve(x+y) for y in y_grid if x + y < lst))) return grid class Price(object): """Price object for assets""" @property def value(self): """ asset price value """ return float(self._value) @property def origin(self): """ asset price date """ return self._origin def __init__(self, value=0., origin=None): r""" :param value: price value :param origin: price date >>> from businessdate import BusinessDate >>> from dcf import Price >>> p=Price(100, BusinessDate(20201212)) >>> p.value 100.0 
>>> float(p) 100.0 >>> p Price(100.000000; origin=BusinessDate(20201212)) """ self._value = value self._origin = origin def __float__(self): return float(self.value) def __str__(self): return '%s(%f; origin=%s)' % \ (self.__class__.__name__, self.value, repr(self.origin)) def __repr__(self): return str(self) class Curve(object): """Curve function object""" INTERPOLATIONS = dict() """mapping (dict) of availiable interpolations additional to |dcf.interpolation|""" _INTERPOLATION = linear_scheme # default interpolation @property def kwargs(self): """ returns constructor arguments as ordered dictionary """ kw = type(self.__class__.__name__ + 'Kwargs', (OrderedDict,), {})() for name in signature(self.__class__).parameters: attr = self(self.domain) if name == 'data' else None attr = getattr(self, '_' + name, attr) attr = getattr(attr, '__name__', attr) if attr is not None: kw[name] = attr setattr(kw, name, attr) return kw @property def domain(self): """coordinates and date of given (not interpolated) x-values""" return self._domain @property def table(self): r""" table of interpolated rates (pretty printable) given by |rate_table()|. """ # print(tabulate(curve.table, headers='firstrow')) # for pretty print return rate_table(self) def __init__(self, domain=(), data=(), interpolation=None): r""" :param list(float) domain: source values $x_1 \dots x_n$ :param list(float) data: target values $y_1 \dots y_n$ :param function interpolation: (optional, default is defined on class level) Interpolation function $\gamma$ such that $\gamma(x_i)=y_i$ for $i=1 \dots n$. If **interpolation** is a string, the interpolation function is taken from class member dictionary |Curve.INTERPOLATIONS|. Interpolation functions $\gamma$ can be constructed piecewise using via |interpolation_scheme|. Curve function object $$f:\mathbb{R} \rightarrow \mathbb{R}, x \mapsto f(x)=y$$ build from finite point vectors $x$ and $y$ using piecewise various interpolation functions. >>> from dcf import Curve >>> c = Curve([0, 1, 2], [1, 2, 3]) get the grid of x values >>> c.domain [0, 1, 2] get the grid of y values >>> c(c.domain) (1.0, 2.0, 3.0) get a interpolated curve value >>> c(1.5) 2.5 update existing values >>> c[2] = 4 >>> c(c.domain) (1.0, 2.0, 4.0) add new points >>> c[3] = 5 >>> c(c.domain) (1.0, 2.0, 4.0, 5.0) """ # cast/extract inputs from Curve if given as argument if isinstance(domain, Curve): data = domain domain = data.domain if isinstance(data, Curve): interpolation = \ interpolation or data.kwargs.get('interpolation', None) _data = data(domain) # assuming domain is a list of dates ! 
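            # if the source curve is a DateCurve, the borrowed domain consists of dates;
            # convert them to year fractions with that curve's own day_count below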
            if isinstance(data, DateCurve):
                domain = [data.day_count(d) for d in domain]
            data = _data

        # sort data by domain values
        if not len(domain) == len(data):
            raise ValueError('%s requires equal length input '
                             'for domain (%d) and data (%d) '
                             % (self.__class__.__name__,
                                len(domain), len(data)))
        self._interpolation = interpolation
        self._update(domain, data)

    def _update(self, domain, data):
        interpolation = self._interpolation
        if interpolation in self.INTERPOLATIONS:
            func = self.INTERPOLATIONS[interpolation]
        elif interpolation is None:
            func = self._INTERPOLATION
        else:
            func = vars(_interpolations).get(interpolation, interpolation)
        if domain:
            domain, data = map(list, zip(*sorted(zip(*(domain, data)))))
            self._func = func(domain, data)
        self._domain = domain

    def __contains__(self, item):
        return item in self.domain

    def __iter__(self):
        # return an iterator over the domain rather than the list itself
        return iter(self.domain)

    def __getitem__(self, item):
        if item in self:
            return self(item)
        raise KeyError(item)

    def __setitem__(self, key, value):
        domain = list(self.domain)
        data = list(self(domain))
        if key in domain:
            data[domain.index(key)] = value
        else:
            domain.append(key)
            data.append(value)
        self._update(domain, data)

    def __call__(self, x):
        if isinstance(x, (tuple, list)):
            return tuple(self(xx) for xx in x)
        return self._func(x)

    def __add__(self, other):
        x_list = sorted(set(self.domain + other.domain))
        y_list = [self(x) + other(x) for x in x_list]
        return self.__class__(x_list, y_list, self._interpolation)

    def __sub__(self, other):
        x_list = sorted(set(self.domain + other.domain))
        y_list = [self(x) - other(x) for x in x_list]
        return self.__class__(x_list, y_list, self._interpolation)

    def __mul__(self, other):
        x_list = sorted(set(self.domain + other.domain))
        y_list = [self(x) * other(x) for x in x_list]
        return self.__class__(x_list, y_list, self._interpolation)

    def __truediv__(self, other):
        return self.__div__(other)

    def __div__(self, other):
        x_list = sorted(set(self.domain + other.domain))
        if any(not other(x) for x in x_list):
            raise ZeroDivisionError("Division with %s requires "
                                    "non-zero values."
                                    % other.__class__.__name__)
        y_list = [self(x) / other(x) for x in x_list]
        return self.__class__(x_list, y_list, self._interpolation)

    def __str__(self):
        inner = tuple()
        if self.domain:
            s, e = self.domain[0], self.domain[-1]
            inner = f'[{s!r} ... {e!r}]', f'[{self(s)!r} ... 
{self(e)!r}]' kw = self.kwargs kw.pop('data') kw.pop('domain') inner += tuple(f"{k!s}={v!r}" for k, v in kw.items()) s = self.__class__.__name__ + '(' + ', '.join(inner) + ')' return s def __repr__(self): s = self.__class__.__name__ + '()' if self.domain: fill = ',\n' + ' ' * (len(s) - 1) kw = self.kwargs inner = str(kw.pop('domain')), str(kw.pop('data')) inner += tuple(f"{k!s}={v!r}" for k, v in kw.items()) s = self.__class__.__name__ + '(' + fill.join(inner) + ')' return s def shifted(self, delta=0.0): """ build curve object with shifted **domain** by **delta** :param delta: shift size :return: curve object with shifted **domain** by **delta** """ if delta: x_list = [x + delta for x in self.domain] else: x_list = self.domain # y_list = self(self.domain) # return self.__class__(x_list, y_list, self.interpolation) return self.__class__(x_list, self) class DateCurve(Curve): """Curve function object with dates as domain (points)""" DAY_COUNT = dict() """mapping (dict) of availiable day count functions additional to |dcf.daycount|""" _TIME_SHIFT = '1D' """default time shift""" def __init__(self, domain=(), data=(), interpolation=None, origin=None, day_count=None): """curve function object with dates as domain (points) :param domain: squences of date points :param data: squence of curve values :param interpolation: interpolation function (see |Curve|) :param origin: inital origin of date points (used to calculate year fractions of poins in domain) :param day_count: day count function to derive year fractions from time periods >>> from dcf import DateCurve **domain** given as date/time measured in year fraction (float) >>> domain = 0.5, 1.0, 1.5, 2.0 >>> data = 1, 2, 3, 4 >>> c = DateCurve(domain, data) >>> c.domain (0.5, 1.0, 1.5, 2.0) >>> c(0.75) 1.5 **domain** given as date/time measured in dates (date) >>> from datetime import date >>> domain = date(2022, 8, 12), date(2023, 2, 12), date(2023, 8, 12), date(2024, 2, 12) >>> data = 1, 2, 3, 4 >>> c = DateCurve(domain, data) >>> c.domain (datetime.date(2022, 8, 12), datetime.date(2023, 2, 12), datetime.date(2023, 8, 12), datetime.date(2024, 2, 12)) >>> c(date(2022, 11, 12)) 1.5 **domain** given as date/time measured in dates (BusinessDate) >>> from businessdate import BusinessDate >>> t = BusinessDate(20220212) >>> domain = tuple(t + p for p in ('6m', '12m', '18m', '24m')) >>> data = 1, 2, 3, 4 >>> c = DateCurve(domain, data) >>> c.domain (BusinessDate(20220812), BusinessDate(20230212), BusinessDate(20230812), BusinessDate(20240212)) >>> c(t + '9m') 1.5 """ # noqa 501 if isinstance(domain, DateCurve): data = domain domain = data.domain elif isinstance(data, DateCurve): interpolation = interpolation or data.kwargs.interpolation origin = origin or data.kwargs.origin day_count = day_count or data.kwargs.day_count data = data(domain) # assuming data is a list of dates ! 
self._domain = domain self._origin = origin self._day_count = day_count super().__init__(domain, data, interpolation) self.fixings = dict() @property def domain(self): r""" domain of curve $t_1 \dots t_n$ as list of dates where curve values are given explicit """ return self._domain @property def origin(self): """ date of origin (date zero) as curve reference date for time calucations """ if self._origin is not None: return self._origin return self._domain[0] if self._domain else None def _update(self, domain, data): flt_domain = tuple(self.day_count(d) for d in domain) super()._update(flt_domain, data) self._domain = domain def __call__(self, x): if isinstance(x, (list, tuple)): return tuple(self(xx) for xx in x) if x in self.fixings: return self.fixings[x] return super(DateCurve, self).__call__(self.day_count(x)) def __add__(self, other): new = super(DateCurve, self).__add__( other.shifted(self.origin - other.origin)) self.__class__(new.domain, new(new.domain), new._interpolation, self.origin, self._day_count) return new def __sub__(self, other): new = super(DateCurve, self).__sub__( other.shifted(self.origin - other.origin)) self.__class__(new.domain, new(new.domain), new._interpolation, self.origin, self._day_count) return new def __mul__(self, other): new = super(DateCurve, self).__mul__( other.shifted(self.origin - other.origin)) self.__class__(new.domain, new(new.domain), new._interpolation, self.origin, self._day_count) return new def __div__(self, other): new = super(DateCurve, self).__div__( other.shifted(self.origin - other.origin)) new.origin = self.origin return new def day_count(self, start, end=None): """ day count function to calculate a year fraction of time period :param start: first date of period :param end: last date of period :return: (float) year fraction """ if end is None: return self.day_count(self.origin, start) if self._day_count is None: return _default_day_count(start, end) if self._day_count in self.DAY_COUNT: day_count = self.DAY_COUNT.get(self._day_count) return day_count(start, end) return self._day_count(start, end) def to_curve(self): """deprecated method to cast to |Curve()| object""" cls = self.__class__.__name__ msg = "\n%s().cast(cast_type, **kwargs) is deprecated.\n" \ "Please use for casting an object `curve` of type %s\n" \ " cast_type(curve, **kwargs)\n" \ "instead." % (cls, cls) warn(msg) return Curve(self) def integrate(self, start, stop): r""" integrates curve and returns results as annualized rates :param start: lower integration boundary :param stop: upper integration boundary :return: (float) integral value$ If $\gamma$ is this the curve. **integrate** returns $$\int_a^b \gamma(t)\ dt$$ where $a$ is **start** and $b$ is **stop**. 
if available **integrate** uses `scipy.integrate.quad <https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.quad.html>`_ """ # noqa E501 # try use result, error = scipy.integrate(self, start, stop) try: from scipy.integrate import quad # raise ImportError() s = self.day_count(start) e = self.day_count(stop) f = super(DateCurve, self).__call__ value, *_ = quad(f, s, e) except ImportError: value = 0.0 step = self._TIME_SHIFT current = start while current + step < stop: value += self(current) * \ self.day_count(current, current + step) current += step value += self(current) * self.day_count(current, stop) result = value / self.day_count(start, stop) return result def derivative(self, start): r""" calculates numericaly the first derivative :param start: curve point to calcuate derivative at this point :return: (float) first derivative If $\gamma$ is this the curve **derivative** returns $$\frac{d}{dt}\gamma(t)$$ where $t$ is **start** but derived numericaly. if available **derivative** uses `scipy.misc.derivative <https://docs.scipy.org/doc/scipy/reference/generated/scipy.misc.derivative.html>`_ """ # noqa E501 try: from scipy.misc import derivative s = self.day_count(start) dx = self.day_count(start, start + self._TIME_SHIFT) f = super(DateCurve, self).__call__ result = derivative(f, s, dx) except ImportError: stop = start + self._TIME_SHIFT value = self(stop) - self(start) result = value / self.day_count(start, stop) return result class ForwardCurve(DateCurve): """Forward price curve with yield extrapolation """ _INTERPOLATION = log_linear_scheme def __init__(self, domain=(), data=(), interpolation=None, origin=None, day_count=None, yield_curve=0.0): r""" curve of future asset prices i.e. asset forward prices :param domain: dates of given asset prices $t_1 \dots t_n$ :param data: actual asset prices $p_{t_1} \dots p_{t_n}$ :param interpolation: interpolation method for interpolating given asset prices :param origin: origin of curve :param day_count: day count method resp. function $\tau$ to calculate year fractions :param yield_curve: yield $y$ to extrapolate by continous compounding $$p_T = p_{t_n} \cdot \exp(y \cdot \tau(t_n, T))$$ or yield curve function $\gamma_c$ to extrapolate by $$p_T = p_{t_n} \cdot \gamma_c(T)/\gamma_c(t_n)$$ or interest rate curve $c$ extrapolate by $$p_T = p_{t_n} \cdot df_{c}^{-1}(t_n, T)$$ """ if not data: if isinstance(domain, float): # build lists from single spot price value data = [domain] domain = [origin] elif isinstance(domain, Price): # build lists from single spot price origin = domain.origin data = [domain.value] domain = [domain.origin] super().__init__(domain, data, interpolation, origin, day_count) if isinstance(yield_curve, float) and self.origin is not None: yc = (lambda x: exp(-self.day_count(x) * yield_curve)) else: yc = yield_curve self.yield_curve = yc """ yield curve for extrapolation using discount factors """ def __call__(self, x): if isinstance(x, (list, tuple)): return [self(xx) for xx in x] else: return self.get_forward_price(x) def get_forward_price(self, value_date): """ asset forward price at **value_date** derived by interpolation on given forward prices and extrapolation by given discount_factor resp. 
yield curve :param value_date: future date of asset price :return: asset forward price at **value_date** """ last_date = self.domain[-1] if value_date <= last_date: return super().__call__(value_date) last_price = super().__call__(last_date) if self.yield_curve is None: df = 1.0 elif hasattr(self.yield_curve, 'get_discount_factor'): df = self.yield_curve.get_discount_factor(last_date, value_date) else: df = self.yield_curve(value_date) / self.yield_curve(last_date) return last_price / df class RateCurve(DateCurve): """Interest rate curve and credit curve""" _FORWARD_TENOR = '3M' @staticmethod def _get_storage_value(curve, x): raise NotImplementedError() def cast(self, cast_type, **kwargs): """deprecated method to cast a curve""" cls = self.__class__.__name__ msg = "\n%s().cast(cast_type, **kwargs) is deprecated.\n" \ "Please use for casting an object `curve` of type %s\n" \ " cast_type(curve, **kwargs)\n" \ "instead." % (cls, cls) warn(msg) if 'domain' in kwargs: kwargs['data'] = self else: kwargs['domain'] = self return cast_type(**kwargs) @property def forward_tenor(self): """tenor (time period) associated to the rates of the curve""" return self._FORWARD_TENOR \ if self._forward_tenor is None else self._forward_tenor @property def spread(self): """spread curve to add spreads to curve""" return self._spread @spread.setter def spread(self, curve): """spread curve to add spreads to curve""" if curve is not None and self._spread is not None: raise TypeError("direct re-setting of spread curve not allowed." "first re-set spread curve to None.") self._spread = curve def __init__(self, domain=(), data=(), interpolation=None, origin=None, day_count=None, forward_tenor=None): r""" :param domain: either curve points $t_1 \dots t_n$ or a curve object $C$ :param data: either curve values $y_1 \dots y_n$ or a curve object $C$ :param interpolation: (optional) interpolation scheme :param origin: (optional) curve points origin $t_0$ :param day_count: (optional) day count convention function $\tau(s, t)$ :param forward_tenor: (optional) forward rate tenor period $\tau^*$ If **data** is a |RateCurve| instance $C$, it is casted to this new class type with domain grid given by **domain**. If **domain** is a |RateCurve| instance $C$, it is casted to this new class type with domain grid given **domain** property of $C$. Further arguments **interpolation**, **origin**, **day_count**, **forward_tenor** will replace the ones given by $C$ if not given explictly. """ other = None # either domain or data can be RateCurve too. # if given extract arguments for casting if isinstance(domain, RateCurve): if data: raise TypeError("If first argument is %s, " "data argument must not be given." 
% domain.__class__.__name__) other = domain domain = other.domain if isinstance(data, RateCurve): other = data domain = other.domain if domain is None else domain if other: # get data as self._get_storage_value data = [self._get_storage_value(other, x) for x in domain] # use other properties if not give explicitly # interpolation should default to class defaults # interpolation = other.interpolation # interpolation = \ # interpolation or other.kwargs.get('interpolation', None) origin = origin or other.kwargs.origin day_count = day_count or other.kwargs.day_count super(RateCurve, self).__init__( domain, data, interpolation, origin, day_count) self._forward_tenor = forward_tenor self._spread = None def __call__(self, x): if isinstance(x, (list, tuple)): return tuple(self(xx) for xx in x) s = self._spread(x) if self._spread else 0.0 return super().__call__(x) + s def __add__(self, other): new = super(RateCurve, self).__add__(self.__class__(other)) return self.__class__(new, forward_tenor=self._forward_tenor) def __sub__(self, other): new = super(RateCurve, self).__sub__(self.__class__(other)) return self.__class__(new, forward_tenor=self._forward_tenor) def __mul__(self, other): new = super(RateCurve, self).__mul__(self.__class__(other)) return self.__class__(new, forward_tenor=self._forward_tenor) def __div__(self, other): new = super(RateCurve, self).__div__(self.__class__(other)) return self.__class__(new, forward_tenor=self._forward_tenor) def _get_compounding_factor(self, start, stop): # aka discount factor if start == stop: return 1. ir = self._get_compounding_rate(start, stop) t = self.day_count(start, stop) return continuous_compounding(ir, t) def _get_compounding_rate(self, start, stop): # aka zero rate if start == stop: return self._get_compounding_rate( start, start + self._TIME_SHIFT) df = self._get_compounding_factor(start, stop) t = self.day_count(start, stop) return continuous_rate(df, t)
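The discount factor and zero rate conversions at the end of RateCurve use continuous compounding. Below is a minimal, self-contained sketch of that relation, assuming continuous_compounding and continuous_rate behave as df = exp(-r * t) and r = -log(df) / t; the helper names and numbers are illustrative only, not part of dcf.

from math import exp, log

def continuous_compounding_sketch(rate, year_fraction):
    # discount factor implied by a zero rate over the given year fraction
    return exp(-rate * year_fraction)

def continuous_rate_sketch(discount_factor, year_fraction):
    # zero rate recovered from a discount factor over the given year fraction
    return -log(discount_factor) / year_fraction

t = 1.5                                      # year fraction, e.g. from day_count(start, stop)
df = continuous_compounding_sketch(0.02, t)  # ~0.970446
assert abs(continuous_rate_sketch(df, t) - 0.02) < 1e-12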
{"hexsha": "95ada81b4536acdca691d887a9703c86a87923c9", "size": 27683, "ext": "py", "lang": "Python", "max_stars_repo_path": "dcf/curves/curve.py", "max_stars_repo_name": "pbrisk/dcf", "max_stars_repo_head_hexsha": "c585e173e5ea3b529be7463787ddcd5cb93fffd3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2018-07-22T10:12:25.000Z", "max_stars_repo_stars_event_max_datetime": "2019-08-11T08:54:15.000Z", "max_issues_repo_path": "dcf/curves/curve.py", "max_issues_repo_name": "pbrisk/dcf", "max_issues_repo_head_hexsha": "c585e173e5ea3b529be7463787ddcd5cb93fffd3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dcf/curves/curve.py", "max_forks_repo_name": "pbrisk/dcf", "max_forks_repo_head_hexsha": "c585e173e5ea3b529be7463787ddcd5cb93fffd3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2017-04-10T13:41:05.000Z", "max_forks_repo_forks_event_max_datetime": "2017-04-10T13:41:05.000Z", "avg_line_length": 35.2201017812, "max_line_length": 120, "alphanum_fraction": 0.5721200737, "include": true, "reason": "from scipy", "num_tokens": 7066}
/****************************************************************************** * Copyright 2017 Baidu Robotic Vision Authors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *****************************************************************************/ #include "feature_utils.h" #include "timer.h" #include <opencv2/imgproc/imgproc.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/calib3d/calib3d.hpp> #include <boost/lexical_cast.hpp> #include <brisk/scale-space-feature-detector.h> #include <brisk/internal/uniformity-enforcement.h> #include <math.h> #include <iostream> #include <algorithm> #include <unordered_set> #include <mutex> #include <atomic> #include <Eigen/Dense> using std::vector; #ifndef __DEVELOPMENT_DEBUG_MODE__ #define __FEATURE_UTILS_NO_DEBUG__ #endif // #define VERIFY_NEON namespace XP { void build_pyramids(const cv::Mat& img, int max_level, std::vector<cv::Mat>* _pyramids, uchar* const pyra_buf_ptr) { CHECK_NOTNULL(_pyramids); std::vector<cv::Mat>& pyramids = *_pyramids; pyramids.resize(max_level + 1); const int size = img.rows * img.cols; // no fixed buffer specified, will allocate memory dynamically if (pyra_buf_ptr == nullptr) { pyramids[0] = img.clone(); for (int i = 1; i <= max_level; ++i) { pyramids[i] = fast_pyra_down(pyramids[i - 1]); } } else { // fixed buffer provided // the pyramids kept in the fixed buffer in this way: // level n | level n-1 | level 1 | level 0 // so we can use them in a very cache-friendly way // and no need to call malloc for (int lvl = 0; lvl <= max_level; ++lvl) { int offset = 0; // compute pyramid start address for (int i = lvl + 1; i <= max_level; ++i) { offset += size >> (2 * i); } if (lvl != 0) { pyramids[lvl] = fast_pyra_down(pyramids[lvl - 1], pyra_buf_ptr + offset); } else { cv::Mat tmp(img.rows, img.cols, img.type(), pyra_buf_ptr + offset); img.copyTo(tmp); pyramids[lvl] = tmp; } } } } // make sure 0x00 keeps 0x00 in the next level inline cv::Mat fast_mask_pyra_down(const cv::Mat& mask) { constexpr int compress_ratio = 2; cv::Mat mask_small(mask.rows / compress_ratio, mask.cols / compress_ratio, CV_8U); #ifndef __FEATURE_UTILS_NO_DEBUG__ CHECK_EQ(mask.type(), CV_8U); #endif // use our own pyra down for faster performance const int width_step_in = mask.step1(); const int width_step_small = mask_small.step1(); for (int y = 0; y < mask_small.rows; y++) { for (int x = 0; x < mask_small.cols; x++) { // do not use .at<char> which is slow const int shift0 = (y * compress_ratio) * width_step_in + x * compress_ratio; const int shift1 = shift0 + width_step_in; if (*(mask.data + shift0) == 0x00 || *(mask.data + shift1) == 0x00 || *(mask.data + shift0 + 1) == 0x00 || *(mask.data + shift1 + 1) == 0x00) { *(mask_small.data + y * width_step_small + x) = 0x00; } else { *(mask_small.data + y * width_step_small + x) = 0xff; } } } return mask_small; } // 1. refine_kp_in_larger_img takes the keypoints in the small image (higher pyramid level), // and search in a local region in the large image (lower pyramid level). 
The refined // keypoint location is computed as the weighted average of local points by harris response. // 2. The response of the refined keypoint is passed from the response from the original detection. // May NOT be harris response though. inline bool refine_kp_in_larger_img(const cv::Mat& img_in_smooth, const std::vector<cv::KeyPoint>& kp_in_small_img, std::vector<cv::KeyPoint>* refined_kp_in_large_img_ptr) { CHECK_NOTNULL(refined_kp_in_large_img_ptr); std::vector<cv::KeyPoint>& refined_kp_in_large_img = *refined_kp_in_large_img_ptr; std::vector<cv::KeyPoint> local_kps; constexpr int harris_block_size = 7; // 5 does not give correct results refined_kp_in_large_img.clear(); refined_kp_in_large_img.reserve(kp_in_small_img.size()); constexpr int compress_ratio = 2; for (int kp_small_idx = 0 ; kp_small_idx < kp_in_small_img.size(); ++kp_small_idx) { const cv::KeyPoint& key_pnt_small = kp_in_small_img[kp_small_idx]; local_kps.clear(); local_kps.reserve(compress_ratio * 2 * compress_ratio * 2); for (int y = key_pnt_small.pt.y * compress_ratio - compress_ratio + 1; y < key_pnt_small.pt.y * compress_ratio + compress_ratio; y++) { for (int x = key_pnt_small.pt.x * compress_ratio - compress_ratio + 1; x < key_pnt_small.pt.x * compress_ratio + compress_ratio; x++) { if (y > harris_block_size / 2 && x > harris_block_size / 2 && y < img_in_smooth.rows - harris_block_size / 2 && x < img_in_smooth.cols - harris_block_size / 2) { cv::KeyPoint local_kp = key_pnt_small; local_kp.pt.x = x; local_kp.pt.y = y; local_kps.push_back(local_kp); } } } // If the local keypoints (in large image) are NOT empty, we compute the weighted average // of the point location and response. if (local_kps.size() > 0) { ORBextractor::HarrisResponses(img_in_smooth, harris_block_size, 0.04f, &local_kps); float score_total = 0; float x_weighted_sum = 0; float y_weighted_sum = 0; float highest_score = - std::numeric_limits<float>::max(); int best_local_kp_id = -1; for (size_t i = 0; i < local_kps.size(); i++) { if (local_kps[i].response > 0) { // ignore points whose harris response is less than 0 score_total += local_kps[i].response; x_weighted_sum += local_kps[i].pt.x * local_kps[i].response; y_weighted_sum += local_kps[i].pt.y * local_kps[i].response; if (local_kps[i].response > highest_score) { highest_score = local_kps[i].response; best_local_kp_id = i; } } #ifndef __FEATURE_UTILS_NO_DEBUG__ VLOG(3) << "local_kp.response " << local_kps[i].response << " local_kp.pt " << local_kps[i].pt << " score_total " << score_total; #endif } if (best_local_kp_id < 0) { #ifndef __FEATURE_UTILS_NO_DEBUG__ VLOG(3) << "refine best_local_kp for kp_small[" << kp_small_idx << "] fails: no positive harris response"; #endif continue; } cv::KeyPoint best_local_kp = local_kps[best_local_kp_id]; // copy scale score etc. 
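      // The refined location below is the harris-response-weighted centroid of the
      // local candidates, rounded to integer pixel coordinates; the remaining keypoint
      // fields stay as copied from the strongest local candidate above.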
// use weighted average if (highest_score > 1e-9) { best_local_kp.pt.x = round(x_weighted_sum / score_total); best_local_kp.pt.y = round(y_weighted_sum / score_total); #ifndef __FEATURE_UTILS_NO_DEBUG__ VLOG(3) << "refine best_local_kp for kp_small[" << kp_small_idx << "]: pt = " << best_local_kp.pt; #endif refined_kp_in_large_img.push_back(best_local_kp); } else { #ifndef __FEATURE_UTILS_NO_DEBUG__ VLOG(3) << "refine best_local_kp for kp_small[" << kp_small_idx << "] fails: weak harris response = " << highest_score; #endif } } else { auto kp = key_pnt_small; kp.pt.x *= compress_ratio; kp.pt.y *= compress_ratio; #ifndef __FEATURE_UTILS_NO_DEBUG__ VLOG(3) << "refine best_local_kp for kp_small[" << kp_small_idx << "] direct pass through (local_kps is empty)"; #endif refined_kp_in_large_img.push_back(kp); } } return true; } // The design concept is to trace rays along the diagonal of FOV, say // bottom left to top right passing through the pinhole center. // Then take the average of the farthest ray that is still projected inside the image // as the cropping FOV. // However, we limit the FOV upper bound to 160 degrees (for fisheye case). bool generate_cam_mask(const cv::Matx33f& K, const cv::Mat_<float>& dist_coeffs, const cv::Size& mask_size, cv::Mat_<uchar>* cam_mask, float* fov_deg) { CHECK_GT(mask_size.width, 0); CHECK_GT(mask_size.height, 0); CHECK_EQ(cam_mask->rows, 0); cam_mask->create(mask_size); cam_mask->setTo(0xff); const float theta = std::atan2(mask_size.height, mask_size.width); std::vector<int> viewable_degs = {0, 0}; for (int i = 0; i < 2; ++i) { int direction = i * 2 - 1; for (int deg = 30; deg < 90; deg += 5) { float d = std::tan(deg * M_PI / 180.f); float a = direction * d * cos(theta); float b = direction * d * sin(theta); std::vector<cv::Vec3f> ray(1); std::vector<cv::Point2f> dist_pt; ray[0] = cv::Vec3f(a, -b, 1); cv::projectPoints(ray, cv::Vec3f(), cv::Vec3f(), K, dist_coeffs, dist_pt); if (dist_pt[0].x > 0 && dist_pt[0].y > 0 && dist_pt[0].x < mask_size.width && dist_pt[0].y < mask_size.height) { viewable_degs[i] = deg; } else { break; } } } if (viewable_degs[0] == 0 || viewable_degs[1] == 0) { // FOV cannot be estimated. Return an empty mask. 
LOG(ERROR) << "Cannot find proper FOV to generate camera mask"; return false; } *fov_deg = viewable_degs[0] + viewable_degs[1]; if (*fov_deg > 160) { *fov_deg = 160; // cap FOV to 160 degree } float half_fov = *fov_deg / 2; float d = std::tan(half_fov * M_PI / 180.f); std::vector<cv::Vec3f> ray(1); std::vector<cv::Point2f> dist_pt; ray[0] = cv::Vec3f(d, 0, 1); cv::projectPoints(ray, cv::Vec3f(), cv::Vec3f(), K, dist_coeffs, dist_pt); cam_mask->create(mask_size); cam_mask->setTo(0xff); const int r = static_cast<int>(dist_pt[0].x - K(0, 2)); const int cx = static_cast<int>(K(0, 2)); const int cy = static_cast<int>(K(1, 2)); for (int i = 0; i < mask_size.height; ++i) { for (int j = 0; j < mask_size.width; ++j) { int rx = j - cx; int ry = i - cy; if (rx * rx + ry * ry > r * r) { (*cam_mask)(i, j) = 0x00; } } } return true; } namespace internal { struct { bool operator()(const cv::KeyPoint& a, const cv::KeyPoint& b) { return a.response > b.response; } } kp_compare; } // namespace internal bool detect_orb_features(const cv::Mat& img_in_smooth, const cv::Mat_<uchar>& mask, int request_feat_num, int pyra_level, // Total pyramid levels, including the base image int fast_thresh, bool use_fast, // or TomasShi int enforce_uniformity_radius, // less than 0 means no enforcement std::vector<cv::KeyPoint>* key_pnts_ptr, cv::Mat* orb_feat_ptr, FeatureTrackDetector* feat_track_detector, float refine_harris_threshold) { CHECK_GT(pyra_level, 0); // Only detect feature at det_pyra_level, and then look for refined corner position at level 0. // For now, all the features are *refined* to pyramid0 as octave is 0 for all detected features // ORBextractor runs detection at det_pyra_level with levels orb_pyra_levels = 1, which means // no multi-pyramids within ORBextractor. // TODO(mingyu): Pre-compute the pyramids if needed to avoid duplicate computation when calling // multiple times in vio_mapper const int det_pyra_level = pyra_level - 1; const int compress_ratio = 1 << det_pyra_level; vector<cv::Mat> img_pyramids(pyra_level); vector<cv::Mat_<uchar>> mask_pyramids(pyra_level); img_pyramids[0] = img_in_smooth; mask_pyramids[0] = mask; for (int i = 1; i < pyra_level; i++) { img_pyramids[i] = fast_pyra_down(img_pyramids[i - 1]); mask_pyramids[i] = fast_mask_pyra_down(mask_pyramids[i - 1]); } // TODO(mingyu): Reduce more_points_ratio as it seems too conservative. float more_points_ratio = 1.5; // because refinement may reduce points number enforce_uniformity_radius = enforce_uniformity_radius >> det_pyra_level; if (enforce_uniformity_radius > 5) { more_points_ratio = 3; // hueristic fast_thresh /= 2; // usually its set to be 10 - 20 } std::vector<std::vector<cv::KeyPoint>> kp_in_pyramids(pyra_level); constexpr int orb_pyra_levels = 1; // The pyramid levels used in ORBextractor // [NOTE] We need the score (keypoint.response) computed as harris score instead // of FAST. It's slower but more discriminative to sort / refine keypoints. 
ORBextractor orb(request_feat_num * more_points_ratio, 2, orb_pyra_levels, ORBextractor::HARRIS_SCORE, fast_thresh, use_fast); orb.detect(img_pyramids[det_pyra_level], mask_pyramids[det_pyra_level], &kp_in_pyramids[det_pyra_level]); #ifndef __FEATURE_UTILS_NO_DEBUG__ VLOG(1) << "Before uniformaty check ORBextractor gets " << kp_in_pyramids[det_pyra_level].size() << " pnts from " << " level = " << det_pyra_level << " mask_pyramids[" << det_pyra_level << "].size " << mask_pyramids[det_pyra_level].size(); if (mask_pyramids[det_pyra_level].rows > 0) { for (auto& kp : kp_in_pyramids[det_pyra_level]) { if (mask_pyramids[det_pyra_level](kp.pt.y, kp.pt.x) == 0x00) { LOG(FATAL) << "mask_pyramids[" << det_pyra_level << "]" << kp.pt << " = 0x00"; } } } #endif if (enforce_uniformity_radius > 5 && kp_in_pyramids[det_pyra_level].size() > 1) { // Copied from scale-space-layer-inl.h // Basically, this weight_LUT can suppress at most a radius of 15 pixels. In pracice, // it is possible to see features that are 2 to 4 pixels apart without being suppressed. cv::Mat weight_LUT = cv::Mat::zeros(2 * 16 - 1, 2 * 16 - 1, CV_32F); for (int x = 0; x < 2 * 16 - 1; ++x) { for (int y = 0; y < 2 * 16 - 1; ++y) { weight_LUT.at<float>(y, x) = std::max(1 - static_cast<float>((15 - x) * (15 - x) + (15 - y) * (15 - y)) / static_cast<float>(15 * 15), 0.f); } } vector<brisk::ScoreCalculator<float>::PointWithScore> points; points.resize(kp_in_pyramids[det_pyra_level].size()); for (size_t i = 0; i < kp_in_pyramids[det_pyra_level].size(); i++) { points[i].x = kp_in_pyramids[det_pyra_level][i].pt.x; points[i].y = kp_in_pyramids[det_pyra_level][i].pt.y; points[i].score = kp_in_pyramids[det_pyra_level][i].response; } // TODO(mingyu): Implement a simple minded binary mask instead of using weighted mask // TODO(mingyu): Rewrite XpEnforceKeyPointUniformity to take vector of cv::KeyPoint directly XpEnforceKeyPointUniformity(weight_LUT, enforce_uniformity_radius, img_pyramids[det_pyra_level].rows, img_pyramids[det_pyra_level].cols, request_feat_num, points); kp_in_pyramids[det_pyra_level].clear(); kp_in_pyramids[det_pyra_level].reserve(points.size()); for (const auto& pnt_and_score : points) { cv::KeyPoint kp; kp.pt.x = pnt_and_score.x; kp.pt.y = pnt_and_score.y; kp.response = pnt_and_score.score; // brisk score here // hueristics: 12, 18, 24, 36, etc. // We choose 12 here (for octave 0) to match the brisk detector. kp.size = 12; kp_in_pyramids[det_pyra_level].push_back(kp); } // Compute orientation only when orb descriptors are requested. 
// Copied from ORBextractor.cc if (orb_feat_ptr != nullptr) { vector<int> umax; constexpr int HALF_PATCH_SIZE = 15; umax.resize(HALF_PATCH_SIZE + 1); int v, v0, vmax = cvFloor(HALF_PATCH_SIZE * std::sqrt(2.f) / 2 + 1); int vmin = cvCeil(HALF_PATCH_SIZE * std::sqrt(2.f) / 2); const double hp2 = HALF_PATCH_SIZE * HALF_PATCH_SIZE; for (v = 0; v <= vmax; ++v) { umax[v] = cvRound(std::sqrt(hp2 - v * v)); } // Make sure we are symmetric for (v = HALF_PATCH_SIZE, v0 = 0; v >= vmin; --v) { while (umax[v0] == umax[v0 + 1]) ++v0; umax[v] = v0; ++v0; } ORBextractor::computeOrientation(img_pyramids[det_pyra_level], umax, &kp_in_pyramids[det_pyra_level]); } } #ifndef __FEATURE_UTILS_NO_DEBUG__ VLOG(1) << "After uniformity ORBextractor gets " << kp_in_pyramids[det_pyra_level].size() << " pnts from " << " level = " << det_pyra_level; if (VLOG_IS_ON(4)) { cv::Mat small_debug = img_pyramids[det_pyra_level].clone(); for (const auto& kp : kp_in_pyramids[det_pyra_level]) { small_debug.at<uchar>(kp.pt.y, kp.pt.x) = 0xff; VLOG(2) << "kp " << kp.pt << " at level " << det_pyra_level << " score " << kp.response; } cv::imwrite("/tmp/img_level_" + boost::lexical_cast<std::string>(det_pyra_level) + ".png", small_debug); } #endif // Refine the corner response. Push keypoints from higher pyramids to pyramid 0. // Look for the corner with the highest harris response. // If a kp in small img has weak response in large img, it will be dumped. // [NOTE] This operation IGNORES and OVERWRITES existing keypoints in lower pyramids if any! for (int it_pyra = det_pyra_level; it_pyra > 0; it_pyra--) { refine_kp_in_larger_img(img_pyramids[it_pyra - 1], kp_in_pyramids[it_pyra], &kp_in_pyramids[it_pyra - 1]); CHECK_GE(kp_in_pyramids[it_pyra].size(), kp_in_pyramids[it_pyra - 1].size()); #ifndef __FEATURE_UTILS_NO_DEBUG__ if (VLOG_IS_ON(4)) { cv::Mat small_debug = img_pyramids[it_pyra - 1].clone(); for (const auto& kp : kp_in_pyramids[it_pyra - 1]) { small_debug.at<uchar>(kp.pt.y, kp.pt.x) = 0xff; VLOG(2) << "kp " << kp.pt << " at level " << it_pyra - 1; } cv::imwrite("/tmp/img_level_" + boost::lexical_cast<std::string>(it_pyra - 1) + ".png", small_debug); } #endif } // The original pattern bit after rotation can exceed half-window size(16), 17, or 18. // We set feat_half_size to 20 here to keep the KeyPoint away from // the possible dangerous place (see the code below) before extracting ORB descriptors. // The feat_half_size is large enough to satisfy the harris margin even after moving up // one pyramid level to within_bound_kps_small. const int feat_half_size = 20; // 20 pixels at pyramid 0 std::vector<cv::KeyPoint> within_bound_kps; std::vector<cv::KeyPoint> within_bound_kps_small; within_bound_kps.reserve(kp_in_pyramids[0].size()); within_bound_kps.reserve(kp_in_pyramids[0].size()); for (const auto& kp : kp_in_pyramids[0]) { if (kp.pt.x > feat_half_size && kp.pt.y > feat_half_size && kp.pt.x < img_in_smooth.cols - feat_half_size && kp.pt.y < img_in_smooth.rows - feat_half_size) { within_bound_kps.push_back(kp); within_bound_kps_small.push_back(cv::KeyPoint(kp.pt / 2, kp.size)); // default response is 0 } } CHECK_EQ(within_bound_kps.size(), within_bound_kps_small.size()); // Check the harris response at pyramid1 (match the behavior in propagate_with_optical_flow, // and remove the corners with weak responses. // Note that the default response value in within_bound_kps_small is 0. // TODO(mingyu): Refactor the code to re-use the harris response computed earlier. 
if (refine_harris_threshold > 0) { cv::Mat img_in_smooth_small = (pyra_level == 1) ? fast_pyra_down(img_pyramids[0]) : img_pyramids[1]; ORBextractor::HarrisResponses(img_in_smooth_small, 7, 0.04f, &within_bound_kps_small); } else { // Keep the default 0 response in within_bound_kps_small. } // Fill in key_pnts_ptr CHECK_NOTNULL(key_pnts_ptr); key_pnts_ptr->clear(); if (request_feat_num > within_bound_kps.size()) { key_pnts_ptr->reserve(within_bound_kps.size()); } else { key_pnts_ptr->reserve(request_feat_num); // TODO(mingyu): This sort is dummy as XpEnforceUniformity has already sorted. std::sort(within_bound_kps.begin(), within_bound_kps.end(), internal::kp_compare); } #ifndef __FEATURE_UTILS_NO_DEBUG__ VLOG(2) << "request_feat_num = " << request_feat_num << " within_bound_kps.size() = " << within_bound_kps.size(); #endif for (int i = 0, count = 0; count < request_feat_num && i < within_bound_kps.size(); ++i) { if (within_bound_kps_small[i].response < refine_harris_threshold) { #ifndef __FEATURE_UTILS_NO_DEBUG__ VLOG(2) << "within_bound[" << i << "].response = " << within_bound_kps_small[i].response << " < thres = " << refine_harris_threshold; #endif continue; } cv::KeyPoint& kp = within_bound_kps[i]; if (feat_track_detector) { kp.class_id = feat_track_detector->add_new_feature_track(kp.pt); } else { kp.class_id = -1; // Feature track is NOT used. } #ifndef __FEATURE_UTILS_NO_DEBUG__ VLOG(2) << "kps[" << count << "] = within_bound[" << i << "] id = " << kp.class_id << " response: " << within_bound_kps_small[i].response; #endif // TODO(mingyu): use 0 as we have refined to pyra0 or use the real octave at detection kp.octave = 0; key_pnts_ptr->push_back(kp); ++count; } // get orb if (orb_feat_ptr != nullptr) { // detect orb at pyra 0 // since orb radius is 15, which is alraedy pretty big #ifdef __ARM_NEON__ ORBextractor::computeDescriptorsN512(img_in_smooth, *key_pnts_ptr, orb_feat_ptr); #else ORBextractor::computeDescriptors(img_in_smooth, *key_pnts_ptr, orb_feat_ptr); #endif } return true; } bool detect_harris_features( const cv::Mat& img_in_smooth, const cv::Mat_<uchar> mask, int request_feat_num, int pyra_level, int fast_thresh, std::vector<cv::KeyPoint>* key_pnts_ptr, cv::Mat* orb_feat_ptr ) { CHECK_NOTNULL(key_pnts_ptr); CHECK_NOTNULL(orb_feat_ptr); ORBextractor orb(request_feat_num, 2, pyra_level, 1, fast_thresh); orb.detect(img_in_smooth, mask, key_pnts_ptr, orb_feat_ptr); return true; } SlaveImgFeatureDetector::SlaveImgFeatureDetector(int block_size, DetectSlaveFeatureType method, float min_feature_distance_over_baseline_ratio, float max_feature_distance_over_baseline_ratio) : block_size_(block_size), half_block_size_(block_size / 2), method_(method), min_feature_distance_over_baseline_ratio_(min_feature_distance_over_baseline_ratio), max_feature_distance_over_baseline_ratio_(max_feature_distance_over_baseline_ratio) { if (block_size_ % 4 != 0) { LOG(FATAL) << "block_size = " << block_size; } gaussion_weights_ = new float[block_size * block_size]; gaussion_weight_sum_ = 0; for (int v = - half_block_size_; v < half_block_size_; ++v) { for (int u = - half_block_size_; u < half_block_size_; ++u) { const float w = std::exp(- static_cast<float>(u * u + v * v) / 9.f); gaussion_weights_[(v + half_block_size_) * block_size + u + half_block_size_] = w; gaussion_weight_sum_ += w; } } } SlaveImgFeatureDetector::~SlaveImgFeatureDetector() { delete [] gaussion_weights_; } bool SlaveImgFeatureDetector::detect_features_on_slave_img_helper( const cv::Mat& master_image, const cv::Mat& slave_image, 
const DuoCalibParam& duo_calib_param, int max_pixel_val_diff, int master_x, int master_y, const cv::Mat_<float>& s_R_m, const cv::Mat_<float>& s_t_m, const cv::Mat_<float>& pnt_master, const cv::Mat_<float>& search_range, #ifdef DEBUG_DETECT_FEATURES_ON_SLAVE_IMG cv::Mat* slave_image_debug_ptr, #endif int* min_patch_diff2_ptr, int* second_min_patch_diff2_ptr, int* best_slave_x_ptr, int* best_slave_y_ptr, int* best_search_dist_id_ptr, int* second_best_search_dist_id_ptr) { const int scale = 2; // since we compute diff in pyr level 1, we use block_size_ rather than half_block_size_ as radius // orb desc needs 16 pixels margin const int img_margin = (half_block_size_ * scale) > 16 ? (half_block_size_ * scale) : 16; // since we don't know where this point is, try to move it to the furthest dis cv::Mat pnts_slave = search_range * pnt_master.t() * s_R_m.t() + search_range.ones(search_range.size()) * s_t_m.t(); cv::Mat pixels_slave; #ifndef __FEATURE_UTILS_NO_DEBUG__ CHECK_EQ(pnts_slave.type(), CV_32F); CHECK_GT(pnts_slave.checkVector(3), 0); CHECK_EQ(pnts_slave.depth(), CV_32F); CHECK_GT(pnts_slave.rows, search_range.cols); int avg_diff_count = 0; int early_break_count = 0; int candidate_count = 0; int find_count = 0; int skip_count = 0; #endif cv::projectPoints(pnts_slave, cv::Matx31d(), cv::Matx31d(), duo_calib_param.Camera.cv_camK_lr[1], duo_calib_param.Camera.cv_dist_coeff_lr[1], pixels_slave); #ifndef __FEATURE_UTILS_NO_DEBUG__ CHECK_EQ(pixels_slave.type(), CV_32FC2); CHECK_EQ(pixels_slave.cols, 1); CHECK_EQ(pixels_slave.rows, search_range.rows); #endif const int max_patch_val_diff = gaussion_weight_sum_ * max_pixel_val_diff; // so second_min_patch_val_diff2 will be assigned to this val for the first time int min_patch_val_diff = max_patch_val_diff * 10; int second_min_patch_val_diff = min_patch_val_diff * 10; const int slave_col = slave_image.step1(); const int master_col = master_image.step1(); #ifndef __FEATURE_UTILS_NO_DEBUG__ CHECK_EQ(slave_col, master_col); #endif // cache float master_sum = 0; for (int v = - half_block_size_; v < half_block_size_; ++v) { for (int u = - half_block_size_; u < half_block_size_; ++u) { const float w = gaussion_weights_[(v + half_block_size_)* block_size_ + u + half_block_size_]; master_sum += w * static_cast<float>(master_image.data[(master_y + v * scale) * master_col + master_x + u * scale]); } } float master_avg = master_sum / gaussion_weight_sum_; #ifndef __FEATURE_UTILS_NO_DEBUG__ VLOG(2) << "master_avg " << master_avg << " max_pixel_val_diff " << max_pixel_val_diff; #endif int best_slave_x = 0; int best_slave_y = 0; int best_search_dist_id = -1; int second_best_search_dist_id = -1; // during fine search stage, there may be sample points having the same xy. 
Quickly jump over int pre_slave_x = -1, pre_slave_y = -1; for (int it_slave = 0; it_slave < pixels_slave.rows; ++it_slave) { int x = std::roundf(pixels_slave.at<cv::Vec2f>(it_slave)[0] + 0.5); int y = std::roundf(pixels_slave.at<cv::Vec2f>(it_slave)[1] + 0.5); if (x == pre_slave_x && y == pre_slave_y) { #ifndef __FEATURE_UTILS_NO_DEBUG__ ++skip_count; #endif continue; } pre_slave_x = x; pre_slave_y = y; if (x >= img_margin && y >= img_margin && x < slave_image.cols - img_margin && y < slave_image.rows - img_margin) { #ifdef DEBUG_DETECT_FEATURES_ON_SLAVE_IMG slave_image_debug_ptr->at<uchar>(y, x) = 0xff; ++candidate_count; #endif // compute patch avg float slave_sum = 0; for (int v = - half_block_size_; v < half_block_size_; ++v) { for (int u = - half_block_size_; u < half_block_size_; ++u) { const float w = gaussion_weights_[(v + half_block_size_) * block_size_ + u + half_block_size_]; slave_sum += w * static_cast<int>(slave_image.data[(y + v * scale) * slave_col + x + u * scale]); } } float slave_avg = slave_sum / gaussion_weight_sum_; #ifndef __FEATURE_UTILS_NO_DEBUG__ VLOG(2) << "it_slave " << it_slave << " slave_avg " << slave_avg << " slave x y [" << x << ", " << y << "]"; #endif // allow avg value diff 2x max pixel diff since the exposure of left and right cam // may be very different if (master_avg - slave_avg > max_pixel_val_diff * 2 || master_avg - slave_avg < - max_pixel_val_diff * 2) { #ifndef __FEATURE_UTILS_NO_DEBUG__ avg_diff_count++; #endif continue; } // compute patch value diff float patch_diff = 0; for (int v = - half_block_size_; v < half_block_size_; ++v) { for (int u = - half_block_size_; u < half_block_size_; ++u) { int slave_val = static_cast<int>(slave_image.data[(y + v * scale) * slave_col + x + u * scale]); int master_val = static_cast<int>(master_image.data[(master_y + v * scale) * master_col + master_x + u * scale]); int diff = (master_val - master_avg) - (slave_val - slave_avg); const float w = gaussion_weights_[(v + half_block_size_)* block_size_ + u + half_block_size_]; patch_diff += std::abs(diff) * w; } if (patch_diff > min_patch_val_diff) { #ifndef __FEATURE_UTILS_NO_DEBUG__ early_break_count++; #endif break; } } if (patch_diff < min_patch_val_diff) { #ifndef __FEATURE_UTILS_NO_DEBUG__ find_count++; #endif second_min_patch_val_diff = min_patch_val_diff; min_patch_val_diff = patch_diff; best_slave_x = x; best_slave_y = y; second_best_search_dist_id = best_search_dist_id; best_search_dist_id = it_slave; } } } if (min_patch_diff2_ptr != nullptr) { *min_patch_diff2_ptr = min_patch_val_diff; } if (second_min_patch_diff2_ptr != nullptr) { *second_min_patch_diff2_ptr = second_min_patch_val_diff; } #ifndef __FEATURE_UTILS_NO_DEBUG__ VLOG(2) << " avg_diff_count " << avg_diff_count << " skip_count " << skip_count << " early_break_count " << early_break_count << " candidate_count " << candidate_count << " find_count " << find_count; #endif if (min_patch_val_diff > max_patch_val_diff) { #ifndef __FEATURE_UTILS_NO_DEBUG__ VLOG(2) << " min_patch_val_diff = " << min_patch_val_diff << " max_patch_val_diff " << max_patch_val_diff; #endif return false; } if (best_slave_x_ptr != nullptr) { *best_slave_x_ptr = best_slave_x; } if (best_slave_y_ptr != nullptr) { *best_slave_y_ptr = best_slave_y; } if (best_search_dist_id_ptr != nullptr) { *best_search_dist_id_ptr = best_search_dist_id; } if (second_best_search_dist_id_ptr != nullptr) { *second_best_search_dist_id_ptr = second_best_search_dist_id; } return true; } bool SlaveImgFeatureDetector::detect_features_on_slave_img( 
const cv::Mat& master_image, const cv::Mat& slave_image, const std::vector<cv::KeyPoint>& master_kps, const DuoCalibParam& duo_calib_param, std::vector<cv::KeyPoint>* slave_kps_ptr, cv::Mat* slave_orb_feat_ptr, int max_pixel_val_diff) { if (master_kps.empty()) { // nothing bad happen return true; } if (master_image.type() != CV_8U) { LOG(ERROR) << "master_image.type() " << master_image.type(); return false; } if (slave_image.type() != CV_8U) { LOG(ERROR) << "slave_image.type() " << slave_image.type(); return false; } #ifdef __FEATURE_UTILS_NO_DEBUG__ CHECK_EQ(master_image.cols, master_image.step1()); CHECK_EQ(slave_image.cols, slave_image.step1()); #endif std::vector<cv::Point2f> master_pnts(master_kps.size()); for (size_t i = 0; i < master_kps.size(); ++i) { master_pnts[i] = master_kps[i].pt; } std::vector<cv::Point2f> master_pnts_undistorted; cv::undistortPoints(master_pnts, master_pnts_undistorted, duo_calib_param.Camera.cv_camK_lr[0], duo_calib_param.Camera.cv_dist_coeff_lr[0]); Eigen::Matrix4f s_T_m = duo_calib_param.Camera.D_T_C_lr[1].inverse() * duo_calib_param.Camera.D_T_C_lr[0]; cv::Mat_<float> s_R_m(3, 3); cv::Mat_<float> s_t_m(3, 1); for (int i = 0; i < 3; ++i) { for (int j = 0; j < 3; ++j) { s_R_m(i, j) = s_T_m(i, j); } s_t_m(i) = s_T_m(i, 3); } const float cam_baseline_dis = s_T_m.topRightCorner<3, 1>().norm(); // coarse search min_search_range x cam_baseline_dis -> max_search_range x cam_baseline_dis // Note: if min_search_range < 3, it greatly increases the search range in image // which reduces speed and increases the likelyhood of false alarm constexpr int range_bin_num = 25; cv::Mat_<float> coarse_search_range(range_bin_num, 1); // in the inverse depth space, uniformally generate [min_range, max_range] search range // i = 0 -> min_range // i = (range_bin_num - 1) -> max_range // ask sid for the math const float tmp_x = (max_feature_distance_over_baseline_ratio_ / min_feature_distance_over_baseline_ratio_ - 1.f) / (range_bin_num - 1); const float tmp_y = 1 - tmp_x; for (int i = 0; i < coarse_search_range.rows; ++i) { coarse_search_range(i) = max_feature_distance_over_baseline_ratio_ * cam_baseline_dis / ((range_bin_num - i) * tmp_x + tmp_y); } #ifndef __FEATURE_UTILS_NO_DEBUG__ VLOG(2) << "coarse_search_range " << coarse_search_range.t(); #endif #ifdef DEBUG_DETECT_FEATURES_ON_SLAVE_IMG cv::Mat slave_image_debug = slave_image.clone(); #endif CHECK_NOTNULL(slave_kps_ptr); slave_kps_ptr->clear(); slave_kps_ptr->reserve(master_pnts_undistorted.size()); for (size_t it_master = 0; it_master < master_pnts_undistorted.size(); ++it_master) { const int master_x = std::roundf(master_pnts[it_master].x + 0.5); const int master_y = std::roundf(master_pnts[it_master].y + 0.5); if (master_x < half_block_size_ || master_y < half_block_size_ || master_x >= master_image.cols - half_block_size_ || master_y >= master_image.rows - half_block_size_) { continue; } const auto& kp_master_undist = master_pnts_undistorted[it_master]; cv::Mat_<float> pnt_master(3, 1); // coarse search pnt_master(0) = kp_master_undist.x; pnt_master(1) = kp_master_undist.y; pnt_master(2) = 1; int min_patch_diff2 = 99999; int second_min_patch_diff2 = 99999; int best_search_dist_id = -1; int second_best_search_dist_id = -1; #ifndef __FEATURE_UTILS_NO_DEBUG__ VLOG(2) << "before coarse det it_master " << it_master << " kp id " << master_kps[it_master].class_id << " master pixel [" << master_x << ", " << master_y << "]"; #endif if (!detect_features_on_slave_img_helper(master_image, slave_image, duo_calib_param, 
max_pixel_val_diff * 2, // coarse master_x, master_y, s_R_m, s_t_m, pnt_master, coarse_search_range, #ifdef DEBUG_DETECT_FEATURES_ON_SLAVE_IMG &slave_image_debug, #endif &min_patch_diff2, &second_min_patch_diff2, nullptr, // &best_slave_x, nullptr, // &best_slave_y, &best_search_dist_id, &second_best_search_dist_id)) { #ifndef __FEATURE_UTILS_NO_DEBUG__ VLOG(2) << "coarse det failed min_patch_diff2 " << min_patch_diff2; #endif continue; } #ifdef DEBUG_DETECT_FEATURES_ON_SLAVE_IMG CHECK_GE(best_search_dist_id, 0); // cv::circle(slave_image_debug, cv::Point2i(best_slave_x, best_slave_y), 8, 255); #endif // threshold test if (second_min_patch_diff2 * 2 < min_patch_diff2 * 3) { #ifndef __FEATURE_UTILS_NO_DEBUG__ CHECK_GE(second_best_search_dist_id, 0); #endif // if the sampled pos are dense, a couple of samples may get close to the true position // So they all have low pixel diff values, which should not be discouraged. if (second_best_search_dist_id > best_search_dist_id + 1 || second_best_search_dist_id < best_search_dist_id - 1) { #ifndef __FEATURE_UTILS_NO_DEBUG__ VLOG(2) << "thresh test failed min_patch_diff2 / second_min_patch_diff2 " << min_patch_diff2 << " / " << second_min_patch_diff2 << " = " << static_cast<float>(min_patch_diff2) / second_min_patch_diff2 << " best id / second id " << best_search_dist_id << " / " << second_best_search_dist_id; #endif continue; } } // fine search float min_search_dis = coarse_search_range(0); if (best_search_dist_id > 0) { min_search_dis = coarse_search_range(best_search_dist_id - 1); } float max_search_dis = coarse_search_range(coarse_search_range.rows - 1); if (best_search_dist_id < coarse_search_range.rows - 1) { max_search_dis = coarse_search_range(best_search_dist_id + 1); } cv::Mat_<float> search_range_fine(10, 1); // Do not use uniform distance // Solve the following equation // min_search_dis = X / (Y + search_range_fine.rows) // max_search_dis = X / (Y + 1) // -> // X = max_search_dis * Y + max_search_dis // -> // min_search_dis * Y + min_search_dis * search_range_fine.rows // = max_search_dis * Y + max_search_dis // -> const float Y = (max_search_dis - min_search_dis * search_range_fine.rows) / (min_search_dis - max_search_dis); const float X = max_search_dis * Y + max_search_dis; for (int i = 0; i < search_range_fine.rows; ++i) { search_range_fine(i) = X / (Y + search_range_fine.rows - i); } #ifndef __FEATURE_UTILS_NO_DEBUG__ VLOG(2) << "fine det it_master " << it_master << " coarse det min_patch_diff2 " << min_patch_diff2 << " best_search_dist_id " << best_search_dist_id << " search range " << search_range_fine(0) << " " << search_range_fine(1) << " .. " << search_range_fine(8) << " " << search_range_fine(9); #endif int best_slave_x, best_slave_y; if (!detect_features_on_slave_img_helper(master_image, slave_image, duo_calib_param, max_pixel_val_diff, master_x, master_y, s_R_m, s_t_m, pnt_master, search_range_fine, #ifdef DEBUG_DETECT_FEATURES_ON_SLAVE_IMG &slave_image_debug, #endif &min_patch_diff2, nullptr, // second_min_patch_diff2 &best_slave_x, &best_slave_y, nullptr /* best_search_dist_id */, nullptr /* second_best_search_dist_id */)) { #ifndef __FEATURE_UTILS_NO_DEBUG__ VLOG(2) << "fine det failed. 
min_patch_diff2 " << min_patch_diff2 << " > " << gaussion_weight_sum_ * max_pixel_val_diff; #endif continue; } CHECK_GE(best_search_dist_id, 0); #ifdef DEBUG_DETECT_FEATURES_ON_SLAVE_IMG cv::circle(slave_image_debug, cv::Point2i(best_slave_x, best_slave_y), 8, 255); cv::putText(slave_image_debug, boost::lexical_cast<std::string>(master_kps[it_master].class_id), cv::Point2i(best_slave_x, best_slave_y), cv::FONT_HERSHEY_PLAIN, 1, 255, 1); #endif // push this good point cv::KeyPoint kp_slave = master_kps[it_master]; kp_slave.pt.x = best_slave_x; kp_slave.pt.y = best_slave_y; slave_kps_ptr->push_back(kp_slave); } #ifndef __FEATURE_UTILS_NO_DEBUG__ VLOG(1) << "master kp # " << master_kps.size() << " slave kp # " << slave_kps_ptr->size(); #ifdef DEBUG_DETECT_FEATURES_ON_SLAVE_IMG cout << "write to /tmp/slave_image_debug.png" << endl; cv::imwrite("/tmp/slave_image_debug.png", slave_image_debug); #endif #endif if (slave_kps_ptr->empty()) { return false; } // compute desc if (slave_orb_feat_ptr != nullptr) { slave_orb_feat_ptr->create(slave_kps_ptr->size(), 32, CV_8U); #ifdef __ARM_NEON__ ORBextractor::computeDescriptorsN512( slave_image, *slave_kps_ptr, slave_orb_feat_ptr); #else ORBextractor::computeDescriptors(slave_image, *slave_kps_ptr, slave_orb_feat_ptr); #endif } return true; } vector<vector<cv::DMatch> > neon_orb_match( const cv::Mat& desc_query, const cv::Mat& matching_mask, const cv::Mat& orb_desc_training) { vector<vector<cv::DMatch>> matches; matches.resize(desc_query.rows); CHECK_EQ(orb_desc_training.step1(), 32); for (int it_query = 0; it_query < desc_query.rows; ++it_query) { int d1 = 256; int d2 = 256; int trainIdx1 = -1; int trainIdx2 = -1; #ifdef __ARM_NEON__ // const unsigned char *a = desc_query.ptr<unsigned char>(); const unsigned char *a = desc_query.data + it_query * 32; for (int it_orb_this_rig = 0; it_orb_this_rig < orb_desc_training.rows; ++it_orb_this_rig) { if (matching_mask.at<uchar>(it_query, it_orb_this_rig) == 0x00) { continue; } // copied from OpenCV // .row is very slow // const unsigned char *b = orb_desc_training.row(it_orb_this_rig).ptr<unsigned char>(); const unsigned char *b = orb_desc_training.data + it_orb_this_rig * 32; uint32x4_t bits = vmovq_n_u32(0); for (size_t i = 0; i < 32; i += 16) { uint8x16_t A_vec = vld1q_u8(a + i); uint8x16_t B_vec = vld1q_u8(b + i); uint8x16_t AxorB = veorq_u8(A_vec, B_vec); uint8x16_t bitsSet = vcntq_u8(AxorB); uint16x8_t bitSet8 = vpaddlq_u8(bitsSet); uint32x4_t bitSet4 = vpaddlq_u16(bitSet8); bits = vaddq_u32(bits, bitSet4); } uint64x2_t bitSet2 = vpaddlq_u32(bits); int dist = vgetq_lane_s32(vreinterpretq_s32_u64(bitSet2), 0); dist += vgetq_lane_s32(vreinterpretq_s32_u64(bitSet2), 2); // float dist_float = float(dist) / 255.f; if (d1 > dist) { trainIdx2 = trainIdx1; d2 = d1; trainIdx1 = it_orb_this_rig; d1 = dist; } else if (d2 > dist) { trainIdx2 = it_orb_this_rig; d2 = dist; } } #else LOG(FATAL) << "neon_orb_match is called without __ARM_NEON__"; #endif matches[it_query].resize(2); matches[it_query][0].distance = d1; matches[it_query][0].trainIdx = trainIdx1; matches[it_query][1].distance = d2; matches[it_query][1].trainIdx = trainIdx2; } return matches; } // only match once (knn=1) vector<vector<cv::DMatch> > neon_orb_match_nn(const cv::Mat& desc_query, const cv::Mat& matching_mask, const cv::Mat& orb_desc_training) { vector<vector<cv::DMatch>> matches; matches.resize(desc_query.rows); CHECK_EQ(desc_query.step1(), 32); CHECK_EQ(orb_desc_training.step1(), 32); for (int it_query = 0; it_query < desc_query.rows; ++it_query) 
{ int d1 = 256; int trainIdx1 = -1; #ifdef __ARM_NEON__ // const unsigned char *a = desc_query.ptr<unsigned char>(); const unsigned char *a = desc_query.data + it_query * 32; for (int it_orb_this_rig = 0; it_orb_this_rig < orb_desc_training.rows; ++it_orb_this_rig) { if (matching_mask.at<uchar>(it_query, it_orb_this_rig) == 0x00) { continue; } // copied from OpenCV // .row is very slow // const unsigned char *b = orb_desc_training.row(it_orb_this_rig).ptr<unsigned char>(); const unsigned char *b = orb_desc_training.data + it_orb_this_rig * 32; uint32x4_t bits = vmovq_n_u32(0); for (size_t i = 0; i < 32; i += 16) { uint8x16_t A_vec = vld1q_u8(a + i); uint8x16_t B_vec = vld1q_u8(b + i); uint8x16_t AxorB = veorq_u8(A_vec, B_vec); uint8x16_t bitsSet = vcntq_u8(AxorB); uint16x8_t bitSet8 = vpaddlq_u8(bitsSet); uint32x4_t bitSet4 = vpaddlq_u16(bitSet8); bits = vaddq_u32(bits, bitSet4); } uint64x2_t bitSet2 = vpaddlq_u32(bits); int dist = vgetq_lane_s32(vreinterpretq_s32_u64(bitSet2), 0); dist += vgetq_lane_s32(vreinterpretq_s32_u64(bitSet2), 2); // float dist_float = float(dist) / 255.f; if (d1 > dist) { trainIdx1 = it_orb_this_rig; d1 = dist; } } #else LOG(FATAL) << "neon_orb_match_nn is called without __ARM_NEON__"; #endif if (trainIdx1 >= 0) { matches[it_query].resize(1); matches[it_query][0].distance = d1; matches[it_query][0].trainIdx = trainIdx1; } } return matches; } int neon_find_close_points( float query_u, float query_v, int training_num, float * training_u, float * training_v, float range_sq, cv::Mat * within_range_mask_ptr) { int in_range_count = 0; #ifdef __ARM_NEON__ float32x4_t query_u_32x4 = {query_u, query_u, query_u, query_u}; float32x4_t query_v_32x4 = {query_v, query_v, query_v, query_v}; float32x4_t range2_32x4 = {range_sq, range_sq, range_sq, range_sq}; int processed_num = 0; for (size_t i = 0; i <= training_num - 4; i += 4) { float32x4_t vecU = vld1q_f32(training_u + i); float32x4_t vecV = vld1q_f32(training_v + i); float32x4_t vecUres = vsubq_f32(vecU, query_u_32x4); float32x4_t vecVres = vsubq_f32(vecV, query_v_32x4); float32x4_t vecUres2Vres2 = vmlaq_f32(vmulq_f32(vecUres, vecUres), vecVres, vecVres); uint32x4_t within_range = vcleq_f32(vecUres2Vres2, range2_32x4); within_range_mask_ptr->at<uchar>(0, i + 0) = within_range[0] != 0; within_range_mask_ptr->at<uchar>(0, i + 1) = within_range[1] != 0; within_range_mask_ptr->at<uchar>(0, i + 2) = within_range[2] != 0; within_range_mask_ptr->at<uchar>(0, i + 3) = within_range[3] != 0; /* for (int j = 0; j < 4; j++) { LOG(ERROR) << "vecU " << float(vecU[j]) << " vecV " << float(vecV[j]) << " vecUres " << float(vecUres[j]) << " vecVres " << float(vecVres[j]) << " vecUres2Vres2 " << float(vecUres2Vres2[j]) << " query_u_32x4 " << float(query_u_32x4[j]) << " query_v_32x4 " << float(vecUres[j]) << " within_range " << (unsigned int)(within_range[j]) << " reprojDis2 " << float(reprojDis2[j]) << " matching_mask.at<uchar>(0, i + j) " << (int)(matching_mask.at<uchar>(0, i + j)); }*/ in_range_count += within_range[0] != 0; in_range_count += within_range[1] != 0; in_range_count += within_range[2] != 0; in_range_count += within_range[3] != 0; processed_num += 4; } for (size_t i = processed_num; i < training_num; i++) { const float distance2 = (query_u - training_u[i]) * (query_u - training_u[i]) + (query_v - training_v[i]) * (query_v - training_v[i]); if (distance2 < range_sq) { within_range_mask_ptr->at<uchar>(0, i) = 0x01; in_range_count++; } else { within_range_mask_ptr->at<uchar>(0, i) = 0x00; } } #else LOG(FATAL) << 
"neon_find_close_points is called without __ARM_NEON__"; #endif return in_range_count; } // copied from HarrisScoreCalculatorFloat void OpencvHarrisScoreCalculator::Get2dMaxima(std::vector<PointWithScore>& points, // NOLINT Score_t absoluteThreshold) const { // Do the 8-neighbor nonmax suppression. const int stride = _scores.step1(); const int rows_end = _scores.rows - 2; points.reserve(4000); for (int j = 2; j < rows_end; ++j) { const float* p = &_scores.at<float>(j, 2); const float* const p_begin = p; const float* const p_end = &_scores.at<float>(j, stride - 2); bool last = false; while (p < p_end) { const float* const center = p; ++p; if (last) { last = false; continue; } if (*center < absoluteThreshold) continue; if (*(center + 1) > *center) continue; if (*(center - 1) >= *center) continue; const float* const p1 = (center + stride); const float* const p2 = (center - stride); if (*p1 > *center) continue; if (*p2 >= *center) continue; if (*(p1 + 1) > *center) continue; if (*(p1 - 1) >= *center) continue; if (*(p2 + 1) > *center) continue; if (*(p2 - 1) >= *center) continue; const int i = p - p_begin; #ifdef USE_SIMPLE_POINT_WITH_SCORE points.push_back(PointWithScore(*center, i, j - 2)); #else #error points.push_back(PointWithScore(cv::Point2i(i, j - 2), *center)); #endif } } #ifndef __FEATURE_UTILS_NO_DEBUG__ VLOG(1) << "OpencvHarrisScoreCalculator::Get2dMaxima points.size() " << points.size(); #endif } void OpencvHarrisScoreCalculator::InitializeScores() { // kappa = 1 / 16 is used by brisk harris _scores.create(_img.size(), CV_32F); cv::cornerHarris(_img, _scores, 5, 3, 0.04f); // double k, int borderType ) #ifndef __FEATURE_UTILS_NO_DEBUG__ if (VLOG_IS_ON(2)) { cv::imwrite("/tmp/harris_score.png", _scores * 10000); std::cout << "_img" << std::endl; for (int i = 95; i < 105; i++) { for (int j = 95; j < 105; j++) { std::cout << int(_img.at<uchar>(i, j)) << " "; } std::cout << std::endl; } std::cout << "harris" << std::endl; for (int i = 95; i < 105; i++) { for (int j = 95; j < 105; j++) { std::cout << _scores.at<float>(i, j) << " "; } std::cout << std::endl; } } #endif } } // namespace XP
{"hexsha": "e70149d9b4f1c95dc26082b12eb14ea2e93c985a", "size": 50283, "ext": "cc", "lang": "C++", "max_stars_repo_path": "Frontend/feature_utils.cc", "max_stars_repo_name": "zjcs/ICE-BA", "max_stars_repo_head_hexsha": "b004bb5afc0d554d49742aae8503d231213f7e6d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 606.0, "max_stars_repo_stars_event_min_datetime": "2018-04-09T07:41:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T12:48:46.000Z", "max_issues_repo_path": "Frontend/feature_utils.cc", "max_issues_repo_name": "zjcs/ICE-BA", "max_issues_repo_head_hexsha": "b004bb5afc0d554d49742aae8503d231213f7e6d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 34.0, "max_issues_repo_issues_event_min_datetime": "2018-06-19T13:21:46.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-06T09:56:36.000Z", "max_forks_repo_path": "Frontend/feature_utils.cc", "max_forks_repo_name": "zjcs/ICE-BA", "max_forks_repo_head_hexsha": "b004bb5afc0d554d49742aae8503d231213f7e6d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 226.0, "max_forks_repo_forks_event_min_datetime": "2018-03-19T04:15:21.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T03:02:09.000Z", "avg_line_length": 40.682038835, "max_line_length": 100, "alphanum_fraction": 0.631923314, "num_tokens": 14121}
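The NEON loops in feature_utils.cc above score candidate ORB pairs by Hamming distance: XOR the two 32-byte descriptors, population-count the result, and keep the nearest (and second-nearest) training index per query. Below is a minimal NumPy sketch of that same computation, purely illustrative; the function name and toy data are invented and are not part of the C++ file.

import numpy as np

def hamming_matches(desc_query, desc_train):
    """Illustrative brute-force matcher: nearest and second-nearest by Hamming distance."""
    results = []
    for q in desc_query:                                  # q: one 32-byte (256-bit) descriptor
        xor = np.bitwise_xor(desc_train, q)               # (n_train, 32) uint8
        dists = np.unpackbits(xor, axis=1).sum(axis=1)    # popcount -> Hamming distance per row
        order = np.argsort(dists)
        best = order[0]
        second = order[1] if len(order) > 1 else order[0]
        results.append((int(best), int(dists[best]), int(dists[second])))
    return results

rng = np.random.default_rng(0)
query = rng.integers(0, 256, size=(3, 32), dtype=np.uint8)
train = rng.integers(0, 256, size=(10, 32), dtype=np.uint8)
print(hamming_matches(query, train))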
using Printf
using Random
using LinearAlgebra
using Distributed
using MAT
@everywhere using DistributedArrays
@everywhere using RCAM
@everywhere using Random
@everywhere Random.seed!(123)

# Load the data
fid = matopen("../data/X.mat")
d = read(fid)
X = d["X"]

function pMNtest(X)
    # Choltest is just a wrapper for this script
    # Params
    r = 4      # Rank
    alt = 5    # Iterations
    perc = 0.8 # Percent of missing values
    s1, s2 = size(X)

    # Subsample
    ind = randperm(s1*s2)
    inds = ind[1:Int(round(perc*s1*s2))]
    b = copy(X)
    b[inds] .= 0.

    # Gen Noise
    N = rand(Float64, size(b))
    noise = 0.5*norm(vec(X))*N/norm(vec(N))
    noise[inds] .= 0.
    eta = norm(vec(noise))/sqrt(size(X)[1])

    # Interpolate
    ownerL, ownerR = dclrMN(b+noise, eta, r, alt)

    # Gather L and R results
    L = fetch(@spawnat ownerL Main.L)
    R = fetch(@spawnat ownerR Main.R)
    return L, R
end

snr(raw, interp) = -20*log10(norm(interp-raw)/norm(raw))

L, R = pMNtest(X)
println(snr(X, L*R'))
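For reference, the `snr` metric above is just -20*log10 of the relative reconstruction error. A small NumPy sketch with made-up values shows that a uniform 10% relative error corresponds to 20 dB:

import numpy as np

def snr(raw, interp):
    return -20 * np.log10(np.linalg.norm(interp - raw) / np.linalg.norm(raw))

raw = np.ones(100)
interp = raw + 0.1          # 10% relative error everywhere
print(snr(raw, interp))     # -> 20.0 dB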
{"hexsha": "d53d1be418541b7ce2702093e8da6878998da439", "size": 925, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/pMNsimpletest.jl", "max_stars_repo_name": "slimgroup/RCAM.jl", "max_stars_repo_head_hexsha": "54c4ff3891087300b3e46b5643b38e8eafa2234e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/pMNsimpletest.jl", "max_issues_repo_name": "slimgroup/RCAM.jl", "max_issues_repo_head_hexsha": "54c4ff3891087300b3e46b5643b38e8eafa2234e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/pMNsimpletest.jl", "max_forks_repo_name": "slimgroup/RCAM.jl", "max_forks_repo_head_hexsha": "54c4ff3891087300b3e46b5643b38e8eafa2234e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.7884615385, "max_line_length": 55, "alphanum_fraction": 0.6897297297, "num_tokens": 307}
import numpy as np
import matplotlib.pyplot as plt
from seaborn import kdeplot
import matplotlib.patheffects as mpe
import utils

from sklearn.metrics import precision_score, recall_score, roc_auc_score, label_ranking_average_precision_score
from sklearn.metrics import label_ranking_loss, confusion_matrix, average_precision_score, auc, precision_recall_curve
from scipy.stats import norm
from tqdm import tqdm

## Set plotting parameters:
utils.set_mpl_params()

import pandas as pd
df = pd.read_csv('./processed_data/replicate_AVE/results.csv')
aves = df['ave']
ap = df['ap']
mcc = df['mcc']
ef = df['ef']
auroc = df['auroc']

fig, ax = plt.subplots(2, 2)
fig.set_figheight(8)
fig.set_figwidth(12)

metrics = [auroc, ap, mcc, ef]
labels = ['A.', 'B.', 'C.', 'D.']
names = ['AUROC', 'Average precision', 'Matthews correlation\ncoefficient', 'Enrichment factor']
lims = [[-0.05, 1.05], [-0.05, 1.05], [-0.05, 1.05], [-0.5, 21]]

for a, metric, label, name, lim in zip(ax.flatten(), metrics, labels, names, lims):
    a.scatter(aves, metric)
    a.set_ylabel(name)
    a.set_xlabel('AVE')
    a.grid()
    a.set_ylim(lim)
    utils.plot_fig_label(a, label)
    a.axvline(0, linestyle='--', c='k')

fig.savefig('./processed_data/replicate_AVE/rep_AVE.png')
fig.savefig('./processed_data/replicate_AVE/rep_AVE.tif')
plt.close(fig)
{"hexsha": "bb171d2fd37871054d4614432eddd33473fec20a", "size": 1335, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/replicate_AVE_figures.py", "max_stars_repo_name": "ljmartin/fp_generalizability", "max_stars_repo_head_hexsha": "091a34a6f19f68cc6245345083dc4c15fcbbcbfc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-10-08T13:27:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-14T02:16:02.000Z", "max_issues_repo_path": "code/replicate_AVE_figures.py", "max_issues_repo_name": "ljmartin/fp_generalizability", "max_issues_repo_head_hexsha": "091a34a6f19f68cc6245345083dc4c15fcbbcbfc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/replicate_AVE_figures.py", "max_forks_repo_name": "ljmartin/fp_generalizability", "max_forks_repo_head_hexsha": "091a34a6f19f68cc6245345083dc4c15fcbbcbfc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-10-27T01:14:58.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-27T01:14:58.000Z", "avg_line_length": 26.1764705882, "max_line_length": 118, "alphanum_fraction": 0.7161048689, "include": true, "reason": "import numpy,from scipy", "num_tokens": 385}
[STATEMENT]
lemma sumset_empty [simp]: "sumset A {} = {}" "sumset {} A = {}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. sumset A {} = {} &&& sumset {} A = {}
[PROOF STEP]
by (auto simp: sumset_eq)
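Assuming `sumset` unfolds to the usual Minkowski sum (as the `sumset_eq` simp rule used above suggests), the lemma is immediate, because there is no element of the empty set to pair with:

\[
  \mathrm{sumset}\,A\,\emptyset = \{\, a + b \mid a \in A,\ b \in \emptyset \,\} = \emptyset,
  \qquad
  \mathrm{sumset}\,\emptyset\,A = \{\, a + b \mid a \in \emptyset,\ b \in A \,\} = \emptyset .
\]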
{"llama_tokens": 86, "file": "Pluennecke_Ruzsa_Inequality_Pluennecke_Ruzsa_Inequality", "length": 1}
# coding=utf-8
"""mathematical algorithms for the particle pusher, Leapfrog and Boris"""

import numpy as np
from numba import jit, njit


@jit()
def boris_velocity_kick(v, eff_q, E, B, dt, eff_m):
    """
    The velocity update portion of the Boris pusher. Updates the velocity in place
    so as to conserve memory.

    Parameters
    ----------
    v : `numpy.ndarray`
        Array of velocities, of shape `(N, 3)`, `N` being the number of macroparticles
    eff_q : `float`
        The effective charge of the particles (total charge in the macroparticle)
    E : `numpy.ndarray`
        Interpolated or calculated values of the electric field. Shape `(N, 3)`.
    B : `numpy.ndarray`
        Interpolated or calculated values of the magnetic field. Shape `(N, 3)`.
    dt : `float`
        Timestep duration.
    eff_m : `float`
        The effective mass of the particles (total mass in the macroparticle)

    Returns
    -------
    float
        The kinetic energy of the particles being pushed.
    """
    # calculate u
    # gamma = 1 / ()
    # u = v /
    vminus = v + eff_q * E / eff_m * dt * 0.5

    # rotate to add magnetic field
    t = B * eff_q / eff_m * dt * 0.5
    s = 2 * t / (1 + (t * t).sum(axis=1, keepdims=True))
    vprime = vminus + np.cross(vminus, t)
    vplus = vminus + np.cross(vprime, s)
    v_new = vplus + eff_q * E / eff_m * dt * 0.5

    energy = (v_new * v * (0.5 * eff_m)).sum()
    v[:] = v_new
    return energy


@jit("f8(f8[:,:],f8,f8,f8[:,:],f8[:,:],f8,f8)")
def rela_boris_velocity_kick(v, c, eff_q, E, B, dt, eff_m):
    """
    The velocity update portion of the relativistic Boris pusher. Updates the velocity
    in place so as to conserve memory.

    Parameters
    ----------
    v : `numpy.ndarray`
        Array of velocities, of shape `(N, 3)`, `N` being the number of macroparticles
    c : `float`
        The speed of light
    eff_q : `float`
        The effective charge of the particles (total charge in the macroparticle)
    E : `numpy.ndarray`
        Interpolated or calculated values of the electric field. Shape `(N, 3)`.
    B : `numpy.ndarray`
        Interpolated or calculated values of the magnetic field. Shape `(N, 3)`.
    dt : `float`
        Timestep duration.
    eff_m : `float`
        The effective mass of the particles (total mass in the macroparticle)

    Returns
    -------
    float
        The kinetic energy of the particles being pushed.
    """
    # calculate u
    v /= np.sqrt(1 - (v ** 2).sum(axis=1, keepdims=True) / c ** 2)  # below eq 22 LPIC

    half_force = (eff_q * 0.5 / eff_m * dt) * E  # eq. 21 LPIC
    # array of shape (N_particles, 3)

    # add first half of electric force
    # calculate uminus: initial velocity with added half impulse
    v += half_force

    # rotate to add magnetic field
    # this effectively takes relativistic mass into account
    t = B * eff_q * dt / (2 * eff_m * np.sqrt(1 + (v ** 2).sum(axis=1, keepdims=True) / c ** 2))

    # u' = u- + u- x t
    uprime = v + np.cross(v, t)

    # rotate second time, by s = 2t/(1+t*t)
    t *= 2
    t /= 1 + (t * t).sum(axis=1, keepdims=True)

    # u+ = u- + u' x s
    v += np.cross(uprime, t)

    # add second half of electric force
    v += half_force

    final_gamma = np.sqrt(1 + ((v ** 2).sum(axis=1, keepdims=True) / c ** 2))
    v /= final_gamma

    total_velocity = final_gamma - 1
    return total_velocity.sum() * eff_m * c ** 2


def boris_push(species, E: np.ndarray, dt: float, B: np.ndarray):
    """
    Implements the classical (non-relativistic) Boris pusher. Mostly a wrapper function
    for the compiled version in `boris_velocity_kick`.

    Note that velocity is updated in-place to conserve memory!

    Parameters
    ----------
    species : `pythonpic.classes.Species`
    E : `numpy.ndarray`
        Interpolated or calculated values of the electric field. Shape `(N, 3)`.
    dt : float
        Timestep duration
    B : `numpy.ndarray`
        Interpolated or calculated values of the magnetic field. Shape `(N, 3)`.

    Returns
    -------
    `float`
        Total kinetic energy of the particles (velocity is updated in place).
    """
    energy = boris_velocity_kick(species.v, species.eff_q, E, B, dt, species.eff_m)
    return energy


def rela_boris_push(species, E: np.ndarray, dt: float, B: np.ndarray):
    """
    Implements the relativistic Boris pusher. Mostly a wrapper function for the
    compiled version in `rela_boris_velocity_kick`.

    Note that velocity is updated in-place to conserve memory!

    Parameters
    ----------
    species : `pythonpic.classes.Species`
    E : `numpy.ndarray`
        Interpolated or calculated values of the electric field. Shape `(N, 3)`.
    dt : float
        Timestep duration
    B : `numpy.ndarray`
        Interpolated or calculated values of the magnetic field. Shape `(N, 3)`.

    Returns
    -------
    `float`
        Total kinetic energy of the particles (velocity is updated in place).
    """
    energy = rela_boris_velocity_kick(species.v, species.c, species.eff_q, E, B, dt,
                                      species.eff_m)
    return energy
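As a standalone illustration of the update inside `boris_velocity_kick` (half electric kick, rotation about B via the `t` and `s` vectors, second half kick), here is a single-particle sketch with invented values; with E = 0 the rotation leaves the speed unchanged.

import numpy as np

def boris_step(v, q, m, E, B, dt):
    # Illustrative classical Boris update for one particle (not the library function above).
    v = v + (q * dt / (2 * m)) * E                 # first half electric kick
    t = (q * dt / (2 * m)) * B                     # rotation vector
    s = 2 * t / (1 + np.dot(t, t))
    v_prime = v + np.cross(v, t)                   # rotate around B
    v = v + np.cross(v_prime, s)
    v = v + (q * dt / (2 * m)) * E                 # second half electric kick
    return v

v = np.array([1.0, 0.0, 0.0])
E = np.zeros(3)
B = np.array([0.0, 0.0, 1.0])
v_new = boris_step(v, q=1.0, m=1.0, E=E, B=B, dt=0.1)
print(v_new, np.linalg.norm(v_new))    # speed stays ~1.0: the magnetic rotation does no work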
{"hexsha": "f1c1ed571f2b09a0e623e9ced2f59271d1ef3d1c", "size": 5316, "ext": "py", "lang": "Python", "max_stars_repo_path": "pythonpic/algorithms/particle_push.py", "max_stars_repo_name": "StanczakDominik/PIC3", "max_stars_repo_head_hexsha": "583262cff0edfaee48b9540505bcd68983ec53ec", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2016-03-29T09:07:07.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-27T07:59:17.000Z", "max_issues_repo_path": "pythonpic/algorithms/particle_push.py", "max_issues_repo_name": "StanczakDominik/PIC3", "max_issues_repo_head_hexsha": "583262cff0edfaee48b9540505bcd68983ec53ec", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 16, "max_issues_repo_issues_event_min_datetime": "2017-02-14T13:27:24.000Z", "max_issues_repo_issues_event_max_datetime": "2017-03-10T19:53:03.000Z", "max_forks_repo_path": "pythonpic/algorithms/particle_push.py", "max_forks_repo_name": "StanczakDominik/PythonPIC", "max_forks_repo_head_hexsha": "583262cff0edfaee48b9540505bcd68983ec53ec", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2016-09-11T19:31:20.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-11T03:26:02.000Z", "avg_line_length": 33.0186335404, "max_line_length": 108, "alphanum_fraction": 0.6062829195, "include": true, "reason": "import numpy,from numba", "num_tokens": 1475}
function scatterbar3(X,Y,Z,width)
%SCATTERBAR3 3-D scatter bar graph.
%   SCATTERBAR3(X,Y,Z,WIDTH) draws 3-D bars of height Z at locations X and Y with width WIDTH.
%
%   X, Y and Z must be of equal size. If they are vectors, then bars are placed
%   in the same fashion as the SCATTER3 or PLOT3 functions.
%
%   If they are matrices, then bars are placed in the same fashion as the SURF
%   and MESH functions.
%
%   The colors of each bar are read from the figure's colormap according to the bar's height.
%
%   NOTE: For best results, you should use the 'zbuffer' renderer. To set the current
%   figure renderer to 'zbuffer' use the following command:
%
%      set(gcf,'renderer','zbuffer')
%
%
%   EXAMPLE 1:
%      y=[1 2 3 1 2 3 1 2 3];
%      x=[1 1 1 2 2 2 3 3 3];
%      z=[1 2 3 6 5 4 7 8 9];
%      scatterbar3(x,y,z,1)
%      colorbar
%
%   EXAMPLE 2:
%      [X,Y]=meshgrid(-1:0.25:1);
%      Z=2-(X.^2+Y.^2);
%      scatterbar3(X,Y,Z,0.2)
%      colormap(hsv)
%
%   EXAMPLE 3:
%      t=0:0.1:(2*pi);
%      x=cos(t);
%      y=sin(t);
%      z=sin(t);
%      scatterbar3(x,y,z,0.07)

%   By Mickey Stahl - 2/25/02
%   Engineering Development Group
%   Aspiring Developer

[r,c]=size(Z);
for j=1:r,
    for k=1:c,
        if ~isnan(Z(j,k))
            drawbar(X(j,k),Y(j,k),Z(j,k),width/2)
        end
    end
end

zlim=[min(Z(:)) max(Z(:))];
if zlim(1)>0,zlim(1)=0;end
if zlim(2)<0,zlim(2)=0;end
axis([min(X(:))-width max(X(:))+width min(Y(:))-width max(Y(:))+width zlim])
caxis([min(Z(:)) max(Z(:))])

function drawbar(x,y,z,width)

h(1)=patch([-width -width width width]+x,[-width width width -width]+y,[0 0 0 0],'b');
h(2)=patch(width.*[-1 -1 1 1]+x,width.*[-1 -1 -1 -1]+y,z.*[0 1 1 0],'b');
h(3)=patch(width.*[-1 -1 -1 -1]+x,width.*[-1 -1 1 1]+y,z.*[0 1 1 0],'b');
h(4)=patch([-width -width width width]+x,[-width width width -width]+y,[z z z z],'b');
h(5)=patch(width.*[-1 -1 1 1]+x,width.*[1 1 1 1]+y,z.*[0 1 1 0],'b');
h(6)=patch(width.*[1 1 1 1]+x,width.*[-1 -1 1 1]+y,z.*[0 1 1 0],'b');
set(h,'facecolor','flat','FaceVertexCData',z)
{"author": "Sable", "repo": "mcbench-benchmarks", "sha": "ba13b2f0296ef49491b95e3f984c7c41fccdb6d8", "save_path": "github-repos/MATLAB/Sable-mcbench-benchmarks", "path": "github-repos/MATLAB/Sable-mcbench-benchmarks/mcbench-benchmarks-ba13b2f0296ef49491b95e3f984c7c41fccdb6d8/1420-scatterbar3/scatterbar3.m"}
module Simple

using MLIR

test0 = () -> begin
    println("---- TEST 0 ----\n")

    # Constructors.
    ctx = MLIR.IR.Context()
    println(ctx)
    loc = MLIR.IR.Location(ctx)
    println(loc)
    mod = MLIR.IR.Module(loc)
    println(mod)
    op_state = MLIR.IR.OperationState("foo", loc)
    println(op_state)
    op = MLIR.IR.Operation(op_state)
    println(op)
    reg = MLIR.IR.Region()
    println(reg)
    t = MLIR.IR.Type(ctx, "index")
    println(t)
    blk = MLIR.IR.Block(t)
    arg = MLIR.IR.get_arg(blk, 0)
    println(arg)
    attr = MLIR.IR.Attribute(ctx, "\"add\"")
    println(attr)
    ident = MLIR.IR.Identifier(ctx, "type")
    println(ident)
    named_attr = MLIR.IR.NamedAttribute(ident, attr)
    println(named_attr)
end

test1 = () -> begin
    println("\n---- TEST 1 ----\n")

    # Create and destroy.
    ctx = MLIR.IR.create_context()
    MLIR.IR.num_loaded_dialects(ctx) |> y -> println("Num loaded dialects: $y")
    MLIR.IR.num_registered_dialects(ctx) |> y -> println("Num registered dialects: $y")
    MLIR.IR.destroy!(ctx)
end

test2 = () -> begin
    println("\n---- TEST 2 ----\n")

    # Create and register standard.
    ctx = MLIR.IR.create_context()
    MLIR.IR.register_standard_dialect!(ctx)
    MLIR.IR.load_standard_dialect!(ctx)
    MLIR.IR.num_loaded_dialects(ctx) |> y -> println("Num loaded dialects: $y")
    MLIR.IR.num_registered_dialects(ctx) |> y -> println("Num registered dialects: $y")
    MLIR.IR.destroy!(ctx)
end

test3 = () -> begin
    println("\n---- TEST 3 ----\n")

    # Create and dump an operation.
    ctx = MLIR.IR.create_context()
    loc = MLIR.IR.create_unknown_location(ctx)
    st = MLIR.IR.OperationState("std.add", loc)
    index_type = MLIR.IR.parse_type(ctx, "index")
    MLIR.IR.push!(st, index_type)
    op = MLIR.IR.Operation(st)
    MLIR.IR.dump(op)
end

test4 = () -> begin
    println("\n---- TEST 4 ----\n")

    # Create an operation and verify.
    ctx = MLIR.IR.create_context()
    loc = MLIR.IR.create_unknown_location(ctx)
    func_state = MLIR.IR.OperationState("func", loc)
    func_region = MLIR.IR.create_region()
    sym_name_ref = MLIR.IR.Identifier(ctx, "sym_name")
    func_name_attr = MLIR.IR.NamedAttribute(sym_name_ref, MLIR.IR.Attribute(ctx, "\"add\""))
    MLIR.IR.push!(func_state, func_region)
    MLIR.IR.push!(func_state, func_name_attr)
    type_ref = MLIR.IR.Identifier(ctx, "type")
    func_type_attr = MLIR.IR.Attribute(ctx, "(f32, f32) -> f32")
    named_func_type_attr = MLIR.IR.NamedAttribute(type_ref, func_type_attr)
    MLIR.IR.push!(func_state, named_func_type_attr)
    func = MLIR.IR.Operation(func_state)
    MLIR.IR.verify(func)
    MLIR.IR.dump(func)
end

test5 = () -> begin
    println("\n---- TEST 5 ----\n")

    # Create a more complex operation and verify.
    ctx = MLIR.IR.create_context()
    loc = MLIR.IR.create_unknown_location(ctx)
    module_op = MLIR.IR.Module(loc)
    module_body = MLIR.IR.get_body(module_op)
    memref_type = MLIR.IR.parse_type(ctx, "memref<?xf32>")
    func_body_arg_types = [memref_type, memref_type]
    func_region = MLIR.IR.create_region()
    func_body = MLIR.IR.create_block(func_body_arg_types)
    MLIR.IR.push!(func_region, func_body)
    func_type_attr = MLIR.IR.Attribute(ctx, "(memref<?xf32>, memref<?xf32>) -> ()")
    func_name_attr = MLIR.IR.Attribute(ctx, "\"add\"")
    type_ref = MLIR.IR.Identifier(ctx, "type")
    sym_name_ref = MLIR.IR.Identifier(ctx, "sym_name")
    func_attrs = [MLIR.IR.NamedAttribute(type_ref, func_type_attr),
                  MLIR.IR.NamedAttribute(sym_name_ref, func_name_attr)]
    func_state = MLIR.IR.OperationState("func", loc)
    MLIR.IR.push!(func_state, func_attrs)
    MLIR.IR.push!(func_state, func_region)
    func = MLIR.IR.Operation(func_state)
    MLIR.IR.verify(func)
    MLIR.IR.dump(func)
end

test6 = () -> begin
    println("\n---- TEST 6 ----\n")

    # Do syntax.
    loc = MLIR.IR.Context() do ctx
        loc = MLIR.IR.Location(ctx)
        loc
    end
    println(loc)
end

test0()
test1()
test2()
test3()
test4()
test5()
test6()

end # module
{"hexsha": "b0b4e10cd3ad58ea5999f57ecc16d64cc5765952", "size": 4060, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/simple.jl", "max_stars_repo_name": "femtomc/MLIR.jl", "max_stars_repo_head_hexsha": "f3b7eefdbd8cdc1ad0a3df50a8138ecadfd9c062", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2020-12-04T11:32:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-26T19:28:41.000Z", "max_issues_repo_path": "examples/simple.jl", "max_issues_repo_name": "JuliaLabs/MLIR.jl", "max_issues_repo_head_hexsha": "a2fd22a5f5d68ec3df0561696294ac98d65e3743", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-12-09T02:37:24.000Z", "max_issues_repo_issues_event_max_datetime": "2020-12-09T02:37:24.000Z", "max_forks_repo_path": "examples/simple.jl", "max_forks_repo_name": "JuliaLabs/MLIR.jl", "max_forks_repo_head_hexsha": "a2fd22a5f5d68ec3df0561696294ac98d65e3743", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-12-04T12:10:28.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-23T12:02:02.000Z", "avg_line_length": 29.8529411765, "max_line_length": 121, "alphanum_fraction": 0.6519704433, "num_tokens": 1200}
import random from datetime import datetime from math import ceil import numpy as np import tensorflow as tf import tensorflow_hub as hub from sklearn.base import BaseEstimator, ClassifierMixin from sklearn.exceptions import NotFittedError from common.util.log_helper import LogHelper he_init = tf.contrib.layers.variance_scaling_initializer() embedding_size = 512 class USE_BiLSTM(BaseEstimator, ClassifierMixin): def __init__(self, num_neurons=[256, 32], optimizer='adam', learning_rate=0.0001, batch_size=128, activation='relu', initializer='he', num_epoch=100, batch_norm_momentum=None, dropout_rate=None, n_outputs=3, max_check_without_progress=10, show_progress=1, tensorboard_logdir=None, random_state=None, l2_lambda=0, max_sentences=5, pos_weight=None, lstm_layers=1, trainable=False, max_gpu_memory=0.5, ckpt_path=None): self.num_neurons = num_neurons self.optimizer = optimizer self.learning_rate = learning_rate self.batch_size = batch_size self.activation = activation self.num_epoch = num_epoch self.initializer = initializer self.batch_norm_momentum = batch_norm_momentum self.dropout_rate = dropout_rate self.max_checks_without_progress = max_check_without_progress self.show_progress = show_progress self.random_state = random_state self.tensorboard_logdir = tensorboard_logdir self.l2_lambda = l2_lambda self.max_sentences = max_sentences self.n_outputs = n_outputs self.pos_weight = pos_weight self.lstm_layers = lstm_layers self.trainable = trainable self.max_gpu_memory = max_gpu_memory self.ckpt_path = ckpt_path self._session = None self._activation = None self._initializer = None self._optimizer = None self._graph = None self.logger = LogHelper.get_logger(self.__class__.__name__) def __reduce__(self): return (USE_BiLSTM, ( self.num_neurons, self.optimizer, self.learning_rate, self.batch_size, self.activation, self.initializer, self.num_epoch, self.batch_norm_momentum, self.dropout_rate, self.n_outputs, self.max_checks_without_progress, self.show_progress, self.tensorboard_logdir, self.random_state, self.l2_lambda, self.max_sentences, self.pos_weight, self.lstm_layers, self.trainable, self.max_gpu_memory, self.ckpt_path)) def lstm_cell(self, hidden_size): lstm = tf.nn.rnn_cell.BasicLSTMCell(hidden_size) if self.dropout_rate: lstm = tf.nn.rnn_cell.DropoutWrapper(lstm, input_keep_prob=self.keep_prob, output_keep_prob=self.keep_prob) return lstm def gru_cell(self, num_neuron): gru = tf.contrib.rnn.GRUCell(num_neuron) if self.dropout_rate: gru = tf.contrib.rnn.DropoutWrapper(gru, input_keep_prob=self.keep_prob, output_keep_prob=self.keep_prob) return gru def extract_axis_1(self, data, ind): """ Get specified elements along the first axis of tensor. :param data: Tensorflow tensor that will be subsetted. :param ind: Indices to take (one for each element along axis 0 of data). :return: Subsetted tensor. 
""" batch_range = tf.range(tf.shape(data)[0]) indices = tf.stack([batch_range, ind], axis=1) res = tf.gather_nd(data, indices) return res def _bidirectional_rnn(self, inputs, inputs_length, num_units, scope=None): with tf.variable_scope(scope or 'birnn'): if self.lstm_layers == 1: rnn_cells_fw = self.lstm_cell(num_units) rnn_cells_bw = self.lstm_cell(num_units) else: rnn_cells_fw = tf.nn.rnn_cell.MultiRNNCell([self.lstm_cell(n) for n in num_units]) rnn_cells_bw = tf.nn.rnn_cell.MultiRNNCell([self.lstm_cell(n) for n in num_units]) ((fw_outputs, bw_outputs), (fw_states, bw_states)) = tf.nn.bidirectional_dynamic_rnn(rnn_cells_fw, rnn_cells_bw, inputs, inputs_length, dtype=tf.float32) outputs = tf.concat([fw_outputs, bw_outputs], axis=2) if self.lstm_layers > 1: fw_states = fw_states[self.lstm_layers - 1] bw_states = bw_states[self.lstm_layers - 1] return outputs, fw_states, bw_states def _sent_network(self, h_inputs, b_inputs, b_sizes): embed = hub.Module("https://tfhub.dev/google/universal-sentence-encoder/2", trainable=False) # batch * embed h_embeddings = embed(h_inputs) # batch * sents * embed b_embeddings = tf.map_fn(embed, b_inputs, tf.float32) b_outputs, _, _ = self._bidirectional_rnn(b_embeddings, b_sizes, embedding_size / 2) b_outputs_last = self.extract_axis_1(b_outputs, b_sizes - 1) outputs = tf.concat([h_embeddings, b_outputs_last, tf.abs(tf.subtract(h_embeddings, b_outputs_last)), tf.multiply(h_embeddings, b_outputs_last)], 1) for layer in range(1, len(self.num_neurons)): if self.dropout_rate: outputs = tf.layers.dropout(outputs, rate=self.dropout_rate, training=self._training) outputs = tf.layers.dense(outputs, self.num_neurons[layer], activation=self._activation, kernel_initializer=self._initializer, name="hidden{}".format(layer + 1)) return outputs def _construct_graph(self): if self.random_state: tf.set_random_seed(self.random_state) np.random.seed(self.random_state) if self._initializer is None: if self.initializer == 'he': self._initializer = tf.contrib.layers.variance_scaling_initializer() if self._activation is None: if self.activation == 'relu': self._activation = tf.nn.relu if self._optimizer is None: if self.optimizer == 'adam': self._optimizer = tf.train.AdamOptimizer X_heads = tf.placeholder(tf.string, shape=[None], name="X_heads") X_bodies = tf.placeholder(tf.string, shape=[None, self.max_sentences], name="X_bodies") X_body_sizes = tf.placeholder(tf.int32, shape=[None], name="X_body_sizes") y_ = tf.placeholder(tf.int32, shape=[None], name="y") y_one_hot = tf.one_hot(y_, self.n_outputs, on_value=1.0, off_value=0.0, axis=-1, dtype=tf.float32) if self.batch_norm_momentum or self.dropout_rate: self._training = tf.placeholder_with_default(False, shape=[], name="training") self.keep_prob = tf.cond(self._training, lambda: tf.constant(1 - self.dropout_rate), lambda: tf.constant(1.0)) else: self._training = None pre_output = self._sent_network(X_heads, X_bodies, X_body_sizes) logits = tf.layers.dense(pre_output, self.n_outputs, kernel_initializer=he_init, name="logits") probabilities = tf.nn.softmax(logits, name="probabilities") if self.pos_weight is None: xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_one_hot, logits=logits) else: xentropy = tf.nn.weighted_cross_entropy_with_logits(y_one_hot, logits, self.pos_weight) loss = tf.reduce_mean(xentropy, name="loss") variables = tf.trainable_variables() for v in variables: self.logger.debug(v.name) optimizer = self._optimizer(learning_rate=self.learning_rate) update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) 
with tf.control_dependencies(update_ops): training_op = optimizer.minimize(loss) correct = tf.nn.in_top_k(logits, y_, 1) accuracy = tf.reduce_mean(tf.cast(correct, tf.float32), name="accuracy") _, predicts = tf.nn.top_k(logits, k=1, sorted=False) confusion_matrix = tf.confusion_matrix(y_, predicts, num_classes=self.n_outputs, name="confusion_matrix") init = tf.global_variables_initializer() saver = tf.train.Saver(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)) if self.tensorboard_logdir: now = datetime.utcnow().strftime('%Y%m%d-%H%M%S') tb_logdir = self.tensorboard_logdir + "/run{}".format(now) cost_summary = tf.summary.scalar("validation_loss", loss) acc_summary = tf.summary.scalar("validation_accuracy", accuracy) merged_summary = tf.summary.merge_all() file_writer = tf.summary.FileWriter(tb_logdir, tf.get_default_graph()) self._merged_summary = merged_summary self._file_writer = file_writer self._X_head, self._X_body, self.y = X_heads, X_bodies, y_ self._X_body_sizes = X_body_sizes self._logits = logits self._probabilities = probabilities self._loss = loss self._training_op = training_op self._accuracy = accuracy self._confusion_matrix = confusion_matrix self._init, self._saver = init, saver def close_session(self): if self._session: self._session.close() def _get_model_parameters(self): with tf.Session(): gvars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) return {gvar.op.name: value for gvar, value in zip(gvars, self._session.run(gvars))} def _restore_model_parameters(self, model_parameters): gvar_names = list(model_parameters.keys()) assign_ops = {gvar_name: self._graph.get_operation_by_name(gvar_name + "/Assign") for gvar_name in gvar_names} init_values = {gvar_name: assign_op.inputs[1] for gvar_name, assign_op in assign_ops.items()} feed_dict = {init_values[gvar_name]: model_parameters[gvar_name] for gvar_name in gvar_names} self._session.run(assign_ops, feed_dict=feed_dict) def sort_base_bodies(self, h, b, y): """ sort the data according to the length of b :param h: :param b: :param y: :return: """ assert len(h) == len(b) == len(y) b_lengths = [len(b_vector) for b_vector in b] sorted_h = [] sorted_b = [] sorted_y = [] for length in range(min(b_lengths), max(b_lengths)): for idx, vectors in enumerate(b): if len(vectors) == length: sorted_h.append(h[idx]) sorted_b.append(b[idx]) sorted_y.append(y[idx]) return sorted_h, sorted_b, sorted_y def sampling(self, h, b, y): """ Balance the dataset with similar label distribution Get all examples for refutes, and copy it twice into the original dataset, shuffle the dataset. 
:param h: heads :param b: bodies :param y: labels :return: same order shuffled h,b,y with """ refutes_h = [] refutes_b = [] refutes_y = [] for idx, label in enumerate(y): if label == 1: refutes_h.append(h[idx]) refutes_b.append(b[idx]) refutes_y.append(label) self.logger.debug(len(refutes_y)) for i in range(0, 4): h.extend(refutes_h) b.extend(refutes_b) y.extend(refutes_y) dataset = list(zip(h, b, y)) random.shuffle(dataset) h, b, y = zip(*dataset) h = np.asarray(h, dtype=np.str) b = np.asarray(b, dtype=np.str) y = np.asarray(y) return h, b, y def get_batch(self, h, b, b_sizes, y=None): """ generate batch for training :param b_sizes: :param h_v: :param b_v: :param y: :return: """ assert len(h) == len(b) for batch_i in range(0, ceil(len(b) / self.batch_size)): start_i = batch_i * self.batch_size if start_i >= len(h): break end_i = start_i + self.batch_size if end_i > len(h): end_i = len(h) h_batch = h[start_i:end_i] b_batch = b[start_i:end_i] b_sizes_batch = b_sizes[start_i:end_i] if y is not None: y_batch = y[start_i:end_i] if y is not None: yield h_batch, b_batch, b_sizes_batch, y_batch else: yield h_batch, b_batch, b_sizes_batch def cal_f1_macro(self, confusion_matrix): """ calculate f1 macro :param confusion_matrix: :return: f1 macro score """ self.logger.info("\n" + str(confusion_matrix)) diag = np.diag(confusion_matrix).astype(np.float32) num_golds = np.sum(confusion_matrix, axis=0).astype(np.float32) num_predicts = np.sum(confusion_matrix, axis=1).astype(np.float32) precisions = np.divide(diag, num_golds, out=np.zeros_like(diag), where=num_golds != 0) recalls = np.divide(diag, num_predicts, out=np.zeros_like(diag), where=num_predicts != 0) average_precision = np.mean(precisions) average_recall = np.mean(recalls) f1_macro = 2 * average_precision * average_recall / (average_recall + average_precision) return f1_macro def fit(self, X, y, valid_X=None, y_valid=None): """ fit the dataset to model for training, if valid is not None, it will report performance of the model in each epoch, If the max_check_without_progress is set, the early stopping will be used :param X: :param y: :param valid_X: :param y_valid: :return: """ self.close_session() y = np.array(y) if len(y.shape) == 2: y = np.argmax(y, axis=1) h, b = X['h'], X['b'] b_sizes = X['b_sizes'] y = list(y) self._construct_graph() checks_without_progress = 0 best_f1_macro = 0 best_parameters = None gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=self.max_gpu_memory) sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) self._session = sess sess.run([tf.global_variables_initializer(), tf.tables_initializer()]) self._graph = tf.get_default_graph() for epoch in range(self.num_epoch): losses = [] accs = [] for _, (h_np_batch, b_np_batch, b_sizes_batch, y_batch) in enumerate(self.get_batch(h, b, b_sizes, y)): y_batch = np.asarray(y_batch) feed_dict = {self._X_head: h_np_batch, self._X_body: b_np_batch, self.y: y_batch, self._X_body_sizes: b_sizes_batch} if self._training is not None: feed_dict[self._training] = True train_acc, _, loss = sess.run([self._accuracy, self._training_op, self._loss], feed_dict=feed_dict) losses.append(loss) accs.append(train_acc) average_loss = sum(losses) / len(losses) average_acc = sum(accs) / len(accs) if valid_X is not None and y_valid is not None: valid_h_v, valid_b_v = valid_X['h'], valid_X['b'] valid_b_sizes = valid_X['b_sizes'] batch_losses = [] batch_accuracies = [] valid_cm = np.zeros(shape=(self.n_outputs, self.n_outputs), dtype=np.int32) for _, (h_np_batch, b_np_batch, 
b_sizes_batch, y_batch) in enumerate( self.get_batch(valid_h_v, valid_b_v, valid_b_sizes, y_valid)): feed_dict_valid = {self._X_head: h_np_batch, self._X_body: b_np_batch, self.y: y_batch, self._X_body_sizes: b_sizes_batch} if self.tensorboard_logdir: val_acc_batch, val_loss_batch, cm, summary = sess.run( [self._accuracy, self._loss, self._confusion_matrix, self._merged_summary], feed_dict=feed_dict_valid) self._file_writer.add_summary(summary, epoch) else: val_acc_batch, val_loss_batch, cm = sess.run( [self._accuracy, self._loss, self._confusion_matrix], feed_dict=feed_dict_valid) valid_cm = np.add(valid_cm, cm) batch_losses.append(val_loss_batch) batch_accuracies.append(val_acc_batch) val_f1_macro = self.cal_f1_macro(valid_cm) val_loss = sum(batch_losses) / len(batch_losses) val_acc = sum(batch_accuracies) / len(batch_accuracies) if self.show_progress: if epoch % self.show_progress == 0: self.logger.info( "Epoch: {} Current training accuracy: {:.4f} ,Current training loss: {:.6f} Validation Accuracy: {:.4f} Validation f1 Macro: {:.4f} Validation Loss{:.6f}".format( epoch + 1, average_acc, average_loss, val_acc, val_f1_macro, val_loss)) if val_f1_macro > best_f1_macro: best_f1_macro = val_f1_macro checks_without_progress = 0 self.logger.info("f1_macro has been improved!") best_parameters = self._get_model_parameters() else: checks_without_progress += 1 if checks_without_progress > self.max_checks_without_progress: self.logger.info("Stopping Early! F1 Macro has not improved in {} epoches".format( self.max_checks_without_progress)) break else: if self.show_progress: if epoch % self.show_progress == 0: self.logger.info("Epoch: {} Current training accuracy: {:.4f}".format(epoch + 1, average_acc)) if best_parameters: self._restore_model_parameters(best_parameters) self.save(self.ckpt_path) return self def predict_probabilities(self, X, restore_param_required=True): h_v, b_v = X['h'], X['b'] b_sizes = X['b_sizes'] if restore_param_required: self.restore_model(self.ckpt_path) probabilities = [] for _, (h_np_batch, b_np_batch, b_sizes_batch) in enumerate(self.get_batch(h_v, b_v, b_sizes)): with self._session.as_default() as sess: predicts = self._probabilities.eval( feed_dict={self._X_head: h_np_batch, self._X_body: b_np_batch, self._X_body_sizes: b_sizes_batch}) for predict in predicts: probabilities.append(predict) return np.asarray(probabilities) def predict(self, X, restore_param_required=True): predictions = np.argmax(self.predict_probabilities(X, restore_param_required=restore_param_required), axis=1) return np.reshape(predictions, (-1,)) def save(self, path): self._saver.save(self._session, path) def restore_model(self, path): self._graph = tf.Graph() with self._graph.as_default(): self._construct_graph() gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=self.max_gpu_memory) config = tf.ConfigProto() config.gpu_options.allow_growth = True self._session = tf.Session(graph=self._graph, config=tf.ConfigProto(gpu_options=gpu_options) # config=config ) with self._session.as_default() as sess: self._init.run() sess.run(tf.tables_initializer()) self._saver.restore(sess, path) return self
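The `cal_f1_macro` method above averages per-class precision and recall taken from the accumulated confusion matrix before combining them into a single F1 score. A small worked NumPy example (the matrix values are invented) mirrors that computation:

import numpy as np

cm = np.array([[50,  5,  5],
               [10, 30, 10],
               [ 0,  5, 35]], dtype=np.float32)

diag = np.diag(cm)
precisions = diag / cm.sum(axis=0)        # column sums, as in cal_f1_macro
recalls = diag / cm.sum(axis=1)           # row sums
p, r = precisions.mean(), recalls.mean()
f1_macro = 2 * p * r / (p + r)
print(precisions, recalls, f1_macro)      # f1_macro is roughly 0.78 here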
{"hexsha": "c042f14c2253d3387e8b57eedab35d55092c8c9a", "size": 20378, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/rte_pac/deep_models/USE_BiLSTM_2.py", "max_stars_repo_name": "UKPLab/conll2019-snopes-experiments", "max_stars_repo_head_hexsha": "102f4a05cfba781036bd3a7b06022246e53765ad", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2019-11-08T09:17:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-25T19:37:06.000Z", "max_issues_repo_path": "src/rte_pac/deep_models/USE_BiLSTM_2.py", "max_issues_repo_name": "UKPLab/conll2019-snopes-experiments", "max_issues_repo_head_hexsha": "102f4a05cfba781036bd3a7b06022246e53765ad", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 18, "max_issues_repo_issues_event_min_datetime": "2020-01-28T22:17:34.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T23:57:22.000Z", "max_forks_repo_path": "src/rte_pac/deep_models/USE_BiLSTM_2.py", "max_forks_repo_name": "UKPLab/conll2019-snopes-experiments", "max_forks_repo_head_hexsha": "102f4a05cfba781036bd3a7b06022246e53765ad", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-03-08T12:02:24.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-08T12:02:24.000Z", "avg_line_length": 42.4541666667, "max_line_length": 190, "alphanum_fraction": 0.6023652959, "include": true, "reason": "import numpy", "num_tokens": 4448}
# installing tm
install.packages('devtools', lib="C:/R/Packages")
library(devtools)
slam_url <- "https://cran.r-project.org/src/contrib/Archive/slam/slam_0.1-37.tar.gz"
install_url(slam_url)

dest <- "C:/Data/Test Folder"
mytxtfiles <- list.files(path = dest, pattern = "txt", full.names = TRUE)

library(tm)
mycorpus <- Corpus(DirSource(dest, pattern = "txt"))
# warnings may appear after you run the previous line, they
# can be ignored
mycorpus <- tm_map(mycorpus, removeNumbers)
mycorpus <- tm_map(mycorpus, removePunctuation)
mycorpus <- tm_map(mycorpus, stripWhitespace)
mydtm <- DocumentTermMatrix(mycorpus)

# remove some OCR weirdness:
# words with more than 2 consecutive identical characters
mydtm <- mydtm[, !grepl("(.)\\1{2,}", mydtm$dimnames$Terms)]

# get each doc as a csv with words and counts
for(i in 1:nrow(mydtm)){
  # get word counts
  counts <- as.vector(as.matrix(mydtm[i, ]))
  # get words
  words <- mydtm$dimnames$Terms
  # combine into data frame
  df <- data.frame(word = words, count = counts, stringsAsFactors = FALSE)
  # exclude words with count of zero
  df <- df[df$count != 0, ]
  # write to CSV with original txt filename
  write.csv(df, paste0(mydtm$dimnames$Docs[i], ".csv"), row.names = FALSE)
}
# and now you're ready to work with the csv files

############### PDF to TXT (all text between two words) ####
## Below is about splitting the text files at certain characters
## can be skipped...

# if you just want the abstracts, we can use regex to extract that part of
# each txt file. Assumes that the abstract is always between the words 'Abstract'
# and 'Introduction'; an illustrative check of the same pattern follows this script.
abstracts <- lapply(mytxtfiles, function(i) {
  j <- paste0(scan(i, what = character()), collapse = " ")
  regmatches(j, gregexpr("(?<=Abstract).*?(?=Introduction)", j, perl = TRUE))
})

# write abstracts as separate txt files
# (or use them in the list for whatever you want to do next)
lapply(1:length(abstracts), function(i) write.table(abstracts[i],
  file = paste(mytxtfiles[i], "abstract", "txt", sep = "."),
  quote = FALSE, row.names = FALSE, col.names = FALSE, eol = " "))

# and now you're ready to do some text mining on the txt
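The abstract-extraction step above relies on a lookbehind/lookahead pair so that only the text strictly between 'Abstract' and 'Introduction' is captured. The same pattern can be sanity-checked outside R; here is an illustrative Python snippet with a made-up document string.

import re

doc = "Title Abstract We study X and find Y. Introduction The field of ..."
m = re.search(r"(?<=Abstract).*?(?=Introduction)", doc, flags=re.DOTALL)
print(m.group(0).strip() if m else "")   # -> "We study X and find Y."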
{"hexsha": "1266b274e61a3aa436f1512f84e40b64de899b90", "size": 2201, "ext": "r", "lang": "R", "max_stars_repo_path": "ElectricArchaeology_Examples/tm.r", "max_stars_repo_name": "HHS/capstone.arthur.pignotti", "max_stars_repo_head_hexsha": "85b7c5809f4c96777c27a69c8b04a371f94bef09", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-04-05T16:07:54.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-05T16:07:54.000Z", "max_issues_repo_path": "ElectricArchaeology_Examples/tm.r", "max_issues_repo_name": "HHS/capstone.arthur.pignotti", "max_issues_repo_head_hexsha": "85b7c5809f4c96777c27a69c8b04a371f94bef09", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ElectricArchaeology_Examples/tm.r", "max_forks_repo_name": "HHS/capstone.arthur.pignotti", "max_forks_repo_head_hexsha": "85b7c5809f4c96777c27a69c8b04a371f94bef09", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-06-22T17:54:37.000Z", "max_forks_repo_forks_event_max_datetime": "2018-06-22T17:54:37.000Z", "avg_line_length": 37.3050847458, "max_line_length": 187, "alphanum_fraction": 0.6933212176, "num_tokens": 613}
# Clean the original data
# coding=utf-8

# import necessary packages
import numpy as np
import pandas as pd
import csv
import pymongo as pm

image2KData = pd.read_csv('single2k_metadata.csv', encoding="utf-8")
image410Data = pd.read_csv('targets410_metadata.csv', encoding="latin1")['filename']

del image2KData['url']
# our outside image link is http://otnk64q13.bkt.clouddn.com/
image2KData['url'] = 'http://otnk64q13.bkt.clouddn.com/' + image2KData['filename']

# flag target images among the filler images
image2KData['isTarget'] = np.where(image2KData['filename'].isin(image410Data), 1, 0)

# sort by filler and target
image2KsortData = image2KData.sort_values(by=['isTarget']).reset_index()
image2KsortData['imageID'] = image2KsortData.index
image2KsortData['imageID'] = image2KsortData['imageID'].astype(str) + 'I'
del image2KsortData['index']

# store in csv
image2KsortData.to_csv('1.csv', index=False)
{"hexsha": "653c0aca5603bb80e04924f9094ab9b8804ba2e5", "size": 893, "ext": "py", "lang": "Python", "max_stars_repo_path": "document/data mining/FileClean.py", "max_stars_repo_name": "LiruiErnest/VisMemo", "max_stars_repo_head_hexsha": "4ff84dbb3e9d892025fa9a54e41d2c96a5ae3482", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "document/data mining/FileClean.py", "max_issues_repo_name": "LiruiErnest/VisMemo", "max_issues_repo_head_hexsha": "4ff84dbb3e9d892025fa9a54e41d2c96a5ae3482", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "document/data mining/FileClean.py", "max_forks_repo_name": "LiruiErnest/VisMemo", "max_forks_repo_head_hexsha": "4ff84dbb3e9d892025fa9a54e41d2c96a5ae3482", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-12-10T03:05:56.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-10T03:05:56.000Z", "avg_line_length": 26.2647058824, "max_line_length": 83, "alphanum_fraction": 0.7592385218, "include": true, "reason": "import numpy", "num_tokens": 271}
using Redux
using CImGui

include("Counter.jl")
using .Counter

include("../Renderer.jl")
using .Renderer

const store = create_store(Counter.counter, Counter.State(0))

function counter_ui(store)
    flag = CImGui.ImGuiWindowFlags_NoTitleBar |
           CImGui.ImGuiWindowFlags_NoResize |
           CImGui.ImGuiWindowFlags_AlwaysAutoResize |
           CImGui.ImGuiWindowFlags_NoSavedSettings |
           CImGui.ImGuiWindowFlags_NoFocusOnAppearing |
           CImGui.ImGuiWindowFlags_NoNav
    CImGui.Begin("Counter", Ref(true), flag)

    spacing = CImGui.GetStyle().ItemInnerSpacing.x
    CImGui.PushButtonRepeat(true)
    CImGui.ArrowButton("##left", CImGui.ImGuiDir_Left) && dispatch!(store, Counter.DECREMENT)
    CImGui.SameLine(0.0, spacing)
    CImGui.ArrowButton("##right", CImGui.ImGuiDir_Right) && dispatch!(store, Counter.INCREMENT)
    CImGui.PopButtonRepeat()

    CImGui.SameLine()
    value = get_state(store).counter
    CImGui.Text("$value")

    CImGui.End()
end

Renderer.render(()->counter_ui(store), width=180, height=50, title="App: Counter")
{"hexsha": "742b4492b8e2c7790a2ab3436ec4a4e1ef11b35b", "size": 1104, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/counter/app.jl", "max_stars_repo_name": "ianshmean/Redux.jl", "max_stars_repo_head_hexsha": "4c078aeb1137b9820e86198f9bafae652cb31953", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2020-01-07T13:32:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-06T13:54:49.000Z", "max_issues_repo_path": "examples/counter/app.jl", "max_issues_repo_name": "ianshmean/Redux.jl", "max_issues_repo_head_hexsha": "4c078aeb1137b9820e86198f9bafae652cb31953", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2019-12-18T03:20:28.000Z", "max_issues_repo_issues_event_max_datetime": "2020-02-29T01:38:55.000Z", "max_forks_repo_path": "examples/counter/app.jl", "max_forks_repo_name": "ianshmean/Redux.jl", "max_forks_repo_head_hexsha": "4c078aeb1137b9820e86198f9bafae652cb31953", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-01-06T22:00:59.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-29T18:26:07.000Z", "avg_line_length": 33.4545454545, "max_line_length": 99, "alphanum_fraction": 0.696557971, "num_tokens": 274}
""" Decline Curve Models Copyright © 2020 David S. Fulford Author ------ David S. Fulford Derrick W. Turk Notes ----- Created on August 5, 2019 """ from math import exp, log, log1p, ceil as ceiling, floor import warnings import dataclasses as dc from dataclasses import dataclass from numpy import ndarray import numpy as np from scipy.special import expi as ei, gammainc # type: ignore from scipy.integrate import fixed_quad # type: ignore from abc import ABC, abstractmethod from typing import (TypeVar, Type, List, Dict, Tuple, Any, Sequence, Optional, Callable, ClassVar, Union) from typing import cast LOG10 = log(10) def _get_end_L(x: ndarray, L: float, i: int = 0) -> int: """ Left-end points that lay outside of distance L. """ dx = x - x[i] k = len(dx) - 1 idx = np.where((dx <= L) & (dx >= 0.))[0] if idx.size > 0: k = min(k, idx[-1] + 1) return k def _get_end_R(x: ndarray, L: float, i: int = -1) -> int: """ Right-end points that lay outside of distance L. """ dx = x[i] - x k = 0 idx = np.where((dx <= L) & (dx >= 0.))[0] if idx.size > 0: k = max(k, idx[0] - 1) return k def _get_L(y: ndarray, x: ndarray, L: float, i: int) -> Tuple[ndarray, ndarray]: """ Left-end points that lay inside of distance L. """ dx = x[i] - x[:i] dy = y[i] - y[:i] idx = np.where((dx <= L) & (dx >= 0.))[0] if idx.size > 0: idx = max(0, idx[0] - 1) return dx[idx], dy[idx] else: return dx[-1], dy[-1] def _get_R(y: ndarray, x: ndarray, L: float, i: int) -> Tuple[ndarray, ndarray]: """ Right-end points that lay inside of distance L. """ dx = x[i + 1:] - x[i] dy = y[i + 1:] - y[i] idx = np.where((dx <= L) & (dx >= 0.))[0] if idx.size > 0: idx = min(len(x) - 1, idx[-1] + 1) return dx[idx], dy[idx] else: return dx[0], dy[0] def bourdet(y: ndarray, x: ndarray, L: float = 0.0, xlog: bool = True, ylog: bool = False) -> Tuple[ndarray, ndarray]: """ Bourdet Derivative Smoothing Bourdet, D., Ayoub, J. A., and Pirard, Y. M. 1989. Use of Pressure Derivative in Well-Test Interpretation. SPE Form Eval 4 (2): 293–302. SPE-12777-PA. https://doi.org/10.2118/12777-PA. Parameters ---------- y: numpy.ndarray[float] An array of y values to compute the derivative for. x: numpy.ndarray[float] An array of x values. L: float = 0.0 Smoothing factor in units of log-cycle fractions. A value of zero returns the point-by-point first-order difference derivative. xlog: bool = True Calculate the derivative with respect to the log of x, i.e. ``dy / d[ln x]``. ylog: bool = False Calculate the derivative with respect to the log of y, i.e. ``d[ln y] / dx``. Returns ------- der: numpy.ndarray[float] The calculated derivative. 
""" x = np.atleast_1d(x).astype(float) y = np.atleast_1d(y).astype(float) log_x = cast(ndarray, np.log10(x)) if ylog: y = cast(ndarray, np.log(y)) x_L = np.zeros_like(log_x, dtype=float) x_R = np.zeros_like(log_x, dtype=float) y_L = np.zeros_like(log_x, dtype=float) y_R = np.zeros_like(log_x, dtype=float) # get points for forward and backward derivatives k1 = _get_end_L(log_x, L) k2 = _get_end_R(log_x, L) # compute first & last points x_L[0] = log_x[k1] - log_x[0] y_L[0] = y[k1] - y[0] x_R[-1] = log_x[-1] - log_x[k2] y_R[-1] = y[-1] - y[k2] # compute bourdet derivative for i in range(k1, k2): x_L[i], y_L[i] = _get_L(y, log_x, L, i) x_R[i], y_R[i] = _get_R(y, log_x, L, i) x_L *= LOG10 x_R *= LOG10 der = (y_L / x_L * x_R + y_R / x_R * x_L) / (x_L + x_R) # compute forward difference at left edge for i in range(0, k1): idx = _get_end_L(log_x, L, i) dy = y[idx] - y[0] dx = log_x[idx] - log_x[0] dx *= LOG10 der[i] = dy / dx # compute backward difference at right edge for i in range(k2, len(log_x)): idx = _get_end_R(log_x, L, i) dy = y[-1] - y[idx] dx = log_x[-1] - log_x[idx] dx *= LOG10 der[i] = dy / dx if not xlog: der /= x return der
{"hexsha": "200e1fc435859369c039481eca65b5090dc2d3ba", "size": 4369, "ext": "py", "lang": "Python", "max_stars_repo_path": "petbox/dca/bourdet.py", "max_stars_repo_name": "mwentzWW/dca", "max_stars_repo_head_hexsha": "338ba696e9e6081f9549d284a3dae64318a5b6cc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-10-19T07:52:03.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-04T10:07:50.000Z", "max_issues_repo_path": "petbox/dca/bourdet.py", "max_issues_repo_name": "mwentzWW/dca", "max_issues_repo_head_hexsha": "338ba696e9e6081f9549d284a3dae64318a5b6cc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "petbox/dca/bourdet.py", "max_forks_repo_name": "mwentzWW/dca", "max_forks_repo_head_hexsha": "338ba696e9e6081f9549d284a3dae64318a5b6cc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-11-10T16:22:41.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-02T20:21:11.000Z", "avg_line_length": 24.9657142857, "max_line_length": 85, "alphanum_fraction": 0.5619134813, "include": true, "reason": "import numpy,from numpy,from scipy", "num_tokens": 1413}
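A usage sketch, assuming the `bourdet` function defined in the module above is in scope: for a noisy power-law decline q(t) ~ t^(-1/2), the smoothed log-log derivative should hover around -0.5.

import numpy as np

t = np.logspace(0, 3, 200)                                   # 3 decades of time
rng = np.random.default_rng(1)
q = t ** -0.5 * (1 + 0.02 * rng.standard_normal(t.size))     # noisy power-law rate
der = bourdet(q, t, L=0.2, xlog=True, ylog=True)             # d[ln q] / d[ln t]
print(der[90:95])                                            # values near -0.5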
from mrjob.job import MRJob
from mrjob.protocol import PickleProtocol, PickleValueProtocol
import numpy as np
import lxmls.readers.pos_corpus as pcc
from lxmls.sequences.hmm import HMM
import pickle

from emstep import load_sequence, predict_sequence, load_parameters


# A single iteration of the distributed EM algorithm.
class EMStep(MRJob):
    INTERNAL_PROTOCOL = PickleProtocol

    def __init__(self, *args, **kwargs):
        MRJob.__init__(self, *args, **kwargs)

        # Create HMM object.
        self.hmm = HMM(word_dict, tag_dict)

        from os import path
        filename = 'hmm.txt'
        if path.exists(filename):
            # Load the HMM parameters from a text file.
            load_parameters(filename, self.hmm, smoothing=0.1)
        else:
            # Initialize the HMM parameters randomly.
            self.hmm.initialize_random()

        self.log_likelihood = 0
        self.initial_counts = 0
        self.emission_counts = 0
        self.transition_counts = 0
        self.final_counts = 0

    def mapper(self, key, s):
        seq = load_sequence(s, self.hmm.observation_labels, self.hmm.state_labels)
        log_likelihood, initial_counts, transition_counts, final_counts, emission_counts = predict_sequence(
            seq, self.hmm)
        self.log_likelihood += log_likelihood
        self.initial_counts += initial_counts
        self.emission_counts += emission_counts
        self.transition_counts += transition_counts
        self.final_counts += final_counts

    def mapper_final(self):
        num_states = self.hmm.get_num_states()  # Number of states.
        num_observations = self.hmm.get_num_observations()  # Number of observation symbols.

        yield 'log-likelihood', self.log_likelihood
        for y in range(num_states):
            name_y = self.hmm.state_labels.get_label_name(y)
            for s in range(num_states):
                name_s = self.hmm.state_labels.get_label_name(s)
                yield 'transition %s %s' % (name_y, name_s), self.transition_counts[y, s]
            yield 'final ' + name_y, self.final_counts[y]
            yield 'initial ' + name_y, self.initial_counts[y]
        for w in range(num_observations):
            name_w = self.hmm.observation_labels.get_label_name(w)
            if self.emission_counts[w].any():
                for s in range(num_states):
                    name_s = self.hmm.state_labels.get_label_name(s)
                    if self.emission_counts[w, s]:
                        yield 'emission %s %s' % (name_w, name_s), self.emission_counts[w, s]

    def reducer(self, key, counts):
        yield key, sum(counts)


# Load the word and tag dictionaries.
word_dict, tag_dict = pickle.load(open('word_tag_dict.pkl'))

em_step = EMStep()
em_step.run()
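Each mapper accumulates expected counts for its shard and `mapper_final` emits them keyed by parameter name ('initial ...', 'transition ...', 'emission ...'), so the reducer only has to sum values per key. A toy illustration of that reduce step, with made-up keys and counts and independent of mrjob:

from collections import defaultdict

mapper_outputs = [
    ("log-likelihood", -1023.4), ("transition noun verb", 12.3), ("initial noun", 4.2),
    ("log-likelihood", -987.1),  ("transition noun verb", 8.7),  ("initial noun", 3.1),
]
totals = defaultdict(float)
for key, count in mapper_outputs:
    totals[key] += count
print(dict(totals))   # summed counts, ready for the M-step re-estimation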
{"hexsha": "6fce815a27d46f572929c7faa92476045ac4af4d", "size": 2783, "ext": "py", "lang": "Python", "max_stars_repo_path": "lxmls/big_data_em/emstep_split.py", "max_stars_repo_name": "mtreviso/lxmls-toolkit", "max_stars_repo_head_hexsha": "7b135d98c8bde592649fface8e6f24f112939937", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 183, "max_stars_repo_stars_event_min_datetime": "2015-01-04T22:43:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-19T03:30:44.000Z", "max_issues_repo_path": "lxmls/big_data_em/emstep_split.py", "max_issues_repo_name": "mtreviso/lxmls-toolkit", "max_issues_repo_head_hexsha": "7b135d98c8bde592649fface8e6f24f112939937", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 85, "max_issues_repo_issues_event_min_datetime": "2015-05-18T23:24:53.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-14T18:56:49.000Z", "max_forks_repo_path": "lxmls/big_data_em/emstep_split.py", "max_forks_repo_name": "mtreviso/lxmls-toolkit", "max_forks_repo_head_hexsha": "7b135d98c8bde592649fface8e6f24f112939937", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 190, "max_forks_repo_forks_event_min_datetime": "2015-01-04T22:43:30.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-09T11:06:28.000Z", "avg_line_length": 35.6794871795, "max_line_length": 108, "alphanum_fraction": 0.6532518865, "include": true, "reason": "import numpy", "num_tokens": 630}
# coding: utf-8 import chainer import chainer.functions as F class Stack(chainer.Chain): def forward(self, x, y): y1 = F.stack((x, y)) return y1 class StackAxis0(chainer.Chain): def forward(self, x, y): y1 = F.stack((x, y), axis=0) return y1 class StackAxis1(chainer.Chain): def forward(self, x, y): y1 = F.stack((x, y), axis=1) return y1 class StackAxis2(chainer.Chain): def forward(self, x, y): y1 = F.stack((x, y), axis=2) return y1 # ====================================== from chainer_compiler import ch2o import numpy as np if __name__ == '__main__': v = np.random.rand(5, 4, 2).astype(np.float32) w = np.random.rand(5, 4, 2).astype(np.float32) ch2o.generate_testcase(Stack, [v, w]) ch2o.generate_testcase(StackAxis0, [v, w], subname='axis0') ch2o.generate_testcase(StackAxis1, [v, w], subname='axis1') ch2o.generate_testcase(StackAxis2, [v, w], subname='axis2')
{"hexsha": "0efcd3bce35464c39292aff01ecd395c66a1c389", "size": 987, "ext": "py", "lang": "Python", "max_stars_repo_path": "testcases/ch2o_tests/node/Stack.py", "max_stars_repo_name": "vermashresth/chainer-compiler", "max_stars_repo_head_hexsha": "5f5ad365d14398d6ae0214fa012eb10360db8e7e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 116, "max_stars_repo_stars_event_min_datetime": "2019-01-25T03:54:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T00:11:14.000Z", "max_issues_repo_path": "testcases/ch2o_tests/node/Stack.py", "max_issues_repo_name": "vermashresth/chainer-compiler", "max_issues_repo_head_hexsha": "5f5ad365d14398d6ae0214fa012eb10360db8e7e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 431, "max_issues_repo_issues_event_min_datetime": "2019-01-25T10:18:44.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-17T05:28:55.000Z", "max_forks_repo_path": "testcases/ch2o_tests/node/Stack.py", "max_forks_repo_name": "vermashresth/chainer-compiler", "max_forks_repo_head_hexsha": "5f5ad365d14398d6ae0214fa012eb10360db8e7e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 26, "max_forks_repo_forks_event_min_datetime": "2019-01-25T07:21:09.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-26T04:24:35.000Z", "avg_line_length": 22.4318181818, "max_line_length": 63, "alphanum_fraction": 0.5977710233, "include": true, "reason": "import numpy", "num_tokens": 306}
from __future__ import absolute_import, division, print_function

from ..accumulators import Mean, WeightedMean, WeightedSum

import numpy as np


class View(np.ndarray):
    __slots__ = ()

    def __getitem__(self, ind):
        sliced = super(View, self).__getitem__(ind)

        # If the shape is empty, return the parent type
        if not sliced.shape:
            return self._PARENT._make(*sliced)

        # If the dtype has changed, return a normal array (no longer a record)
        elif sliced.dtype != self.dtype:
            return np.asarray(sliced)

        # Otherwise, no change, return the same View type
        else:
            return sliced

    def __repr__(self):
        # Numpy starts the ndarray class name with "array", so we replace it
        # with our class name
        return (
            "{self.__class__.__name__}(\n      ".format(self=self)
            + repr(self.view(np.ndarray))[6:]
        )

    def __str__(self):
        fields = ", ".join(self._FIELDS)
        return "{self.__class__.__name__}: ({fields})\n{arr}".format(
            self=self, fields=fields, arr=self.view(np.ndarray)
        )


def make_getitem_property(name):
    def fget(self):
        return self[name]

    def fset(self, value):
        self[name] = value

    return property(fget, fset)


def fields(*names):
    """
    This decorator adds the name to the _FIELDS
    class property (for printing in reprs), and
    adds a property that looks like this:

    @property
    def name(self):
        return self["name"]

    @name.setter
    def name(self, value):
        self["name"] = value
    """

    def injector(cls):
        if hasattr(cls, "_FIELDS"):
            # Use the decorated class's name here; `self` does not exist in
            # this scope and would raise a NameError.
            raise RuntimeError(
                "{0} has already had a fields decorator applied".format(cls.__name__)
            )
        fields = []
        for name in names:
            fields.append(name)
            setattr(cls, name, make_getitem_property(name))
        cls._FIELDS = tuple(fields)

        return cls

    return injector


@fields("value", "variance")
class WeightedSumView(View):
    __slots__ = ()
    _PARENT = WeightedSum


@fields(
    "sum_of_weights",
    "sum_of_weights_squared",
    "value",
    "sum_of_weighted_deltas_squared",
)
class WeightedMeanView(View):
    __slots__ = ()
    _PARENT = WeightedMean

    @property
    def variance(self):
        return self["sum_of_weighted_deltas_squared"] / (
            self["sum_of_weights"]
            - self["sum_of_weights_squared"] / self["sum_of_weights"]
        )


@fields("count", "value", "sum_of_deltas_squared")
class MeanView(View):
    __slots__ = ()
    _PARENT = Mean

    # Variance is a computation
    @property
    def variance(self):
        return self["sum_of_deltas_squared"] / (self["count"] - 1)


def _to_view(item, value=False):
    for cls in View.__subclasses__():
        if cls._FIELDS == item.dtype.names:
            ret = item.view(cls)
            if value and ret.shape:
                return ret.value
            else:
                return ret
    return item
{"hexsha": "de6383e1b00aafc1e9109ccba81916667b414b88", "size": 3127, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/boost_histogram/_internal/view.py", "max_stars_repo_name": "HDembinski/boost-histogram", "max_stars_repo_head_hexsha": "6071588d8b58504938f72818d22ff3ce2a5b45dc", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/boost_histogram/_internal/view.py", "max_issues_repo_name": "HDembinski/boost-histogram", "max_issues_repo_head_hexsha": "6071588d8b58504938f72818d22ff3ce2a5b45dc", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/boost_histogram/_internal/view.py", "max_forks_repo_name": "HDembinski/boost-histogram", "max_forks_repo_head_hexsha": "6071588d8b58504938f72818d22ff3ce2a5b45dc", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.016, "max_line_length": 78, "alphanum_fraction": 0.5925807483, "include": true, "reason": "import numpy", "num_tokens": 730}
#!/usr/bin/env python

import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt

# This code plots the horizontal illuminance inside a room using a SISO array.

# Semi-angle at half illuminance (degree)
tethaHalf = 70
# Lambertian emission order (adimensional): m = -ln(2)/ln(cos(semi-angle)),
# using the same logarithm base in numerator and denominator
m = -np.log(2)/np.log(np.cos(np.deg2rad(tethaHalf)))
# Center luminous intensity (cd)
I0 = 0.73
# Room's dimensions
dimZ = 3
dimX = 5
dimY = dimX
# LED's position (m)
xt = dimX*0.5
yt = dimY*0.5
# Grid number in the receiver plane
ngx = dimX*10
ngy = dimY*10
# Generate the grid vectors
x = np.linspace(0,dimX,ngx)
y = np.linspace(0,dimY,ngy)
# Distance between the transmitter and receiver plane (m)
ht = 3
hr = 0.85
htr = ht - hr
# Number of LEDs per array
nLed = 60
# Generate the receiver plane based on grid vectors
xr, yr = np.meshgrid(x,y)
# Create a zero matrix to store values of horizontal illuminance
E = np.zeros((ngx,ngy))
# Distance from source to each point of the receiver plane
d = np.sqrt(np.square(xr-xt) + np.square(yr-yt) + np.square(htr))
# cos(theta)
cosTetha = htr/d
# Get individual horizontal illuminance per LED
E = (I0*(cosTetha)**(m+1))/np.square(d)
# Get the horizontal illuminance per LED array
E = E*nLed*nLed

fig = plt.figure()
figE = fig.add_subplot(111, projection='3d')
figE.plot_surface(xr,yr,E)
figE.set_xlabel('X (m)')
figE.set_ylabel('Y (m)')
figE.set_zlabel('Horizontal Illuminance (lx)')
plt.show()
{"hexsha": "dad773c10429da03e6f3bdf84bcc33e30e0f1c08", "size": 1443, "ext": "py", "lang": "Python", "max_stars_repo_path": "horizontalIlluminance.py", "max_stars_repo_name": "sophiekovalevsky/Visible-Light-Communication", "max_stars_repo_head_hexsha": "91f0634ce9a66a9fb4995dfa2c0730bbf3bc675b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2016-09-06T09:15:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T19:17:50.000Z", "max_issues_repo_path": "horizontalIlluminance.py", "max_issues_repo_name": "sophiekovalevsky/Visible-Light-Communication", "max_issues_repo_head_hexsha": "91f0634ce9a66a9fb4995dfa2c0730bbf3bc675b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "horizontalIlluminance.py", "max_forks_repo_name": "sophiekovalevsky/Visible-Light-Communication", "max_forks_repo_head_hexsha": "91f0634ce9a66a9fb4995dfa2c0730bbf3bc675b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2017-11-21T03:49:34.000Z", "max_forks_repo_forks_event_max_datetime": "2017-11-21T03:49:34.000Z", "avg_line_length": 21.2205882353, "max_line_length": 77, "alphanum_fraction": 0.7144837145, "include": true, "reason": "import numpy", "num_tokens": 461}
import enum import typing as tp import jax import jax.numpy as jnp import numpy as np from treex import types from treex.metrics.metric import Metric class Reduction(enum.Enum): sum = enum.auto() sum_over_batch_size = enum.auto() weighted_mean = enum.auto() class Reduce(Metric): """Encapsulates metrics that perform a reduce operation on the values.""" total: jnp.ndarray = types.MetricState.node() count: tp.Optional[jnp.ndarray] = types.MetricState.node() def __init__( self, reduction: tp.Union[Reduction, str], on: tp.Optional[types.IndexLike] = None, name: tp.Optional[str] = None, dtype: tp.Optional[jnp.dtype] = None, ): super().__init__(on=on, name=name, dtype=dtype) self.reduction = ( reduction if isinstance(reduction, Reduction) else Reduction[reduction] ) # initialize states self.total = jnp.array(0.0, dtype=self.dtype) if self.reduction in ( Reduction.sum_over_batch_size, Reduction.weighted_mean, ): self.count = jnp.array(0, dtype=jnp.uint32) else: self.count = None def update( self, values: jnp.ndarray, sample_weight: tp.Optional[jnp.ndarray] = None, ): """ Accumulates statistics for computing the reduction metric. For example, if `values` is [1, 3, 5, 7] and reduction=SUM_OVER_BATCH_SIZE, then the value of `result()` is 4. If the `sample_weight` is specified as [1, 1, 0, 0] then value of `result()` would be 2. Arguments: values: Per-example value. sample_weight: Optional weighting of each example. Defaults to 1. Returns: Array with the cumulative reduce. """ # perform update if sample_weight is not None: sample_weight = sample_weight # Update dimensions of weights to match with values if possible. # values, _, sample_weight = tf_losses_utils.squeeze_or_expand_dimensions( # values, sample_weight=sample_weight # ) try: # Broadcast weights if possible. sample_weight = jnp.broadcast_to(sample_weight, values.shape) except ValueError: # Reduce values to same ndim as weight array ndim = values.ndim weight_ndim = sample_weight.ndim if self.reduction == Reduction.sum: values = jnp.sum(values, axis=list(range(weight_ndim, ndim))) else: values = jnp.mean(values, axis=list(range(weight_ndim, ndim))) values = values * sample_weight value_sum = jnp.sum(values) self.total = (self.total + value_sum).astype(self.total.dtype) # Exit early if the reduction doesn't have a denominator. if self.reduction == Reduction.sum: num_values = None # Update `count` for reductions that require a denominator. elif self.reduction == Reduction.sum_over_batch_size: num_values = np.prod(values.shape) else: if sample_weight is None: num_values = np.prod(values.shape) else: num_values = jnp.sum(sample_weight) if self.count is not None: assert num_values is not None self.count = (self.count + num_values).astype(self.count.dtype) def compute(self) -> tp.Any: if self.reduction == Reduction.sum: return self.total else: return self.total / self.count
{"hexsha": "fc0a71b43456ca38e6043e7e7f817d4ec8ddb918", "size": 3691, "ext": "py", "lang": "Python", "max_stars_repo_path": "treex/metrics/reduce.py", "max_stars_repo_name": "BioGeek/treex", "max_stars_repo_head_hexsha": "fcbee17fcbc069ff5d33554013ce00e49405f872", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "treex/metrics/reduce.py", "max_issues_repo_name": "BioGeek/treex", "max_issues_repo_head_hexsha": "fcbee17fcbc069ff5d33554013ce00e49405f872", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "treex/metrics/reduce.py", "max_forks_repo_name": "BioGeek/treex", "max_forks_repo_head_hexsha": "fcbee17fcbc069ff5d33554013ce00e49405f872", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.8189655172, "max_line_length": 107, "alphanum_fraction": 0.5960444324, "include": true, "reason": "import numpy,import jax", "num_tokens": 790}
from __future__ import unicode_literals, division import os import threading import numpy as np from gensim.models import Word2Vec from base.document import Document from config import BATCH_SIZE, SAMPLE_LENGTH, EMBEDDING_SIZE from utils import get_answers_for_doc def get_data_for_model(train_dir, labels, test_dir=None, nn_model=None, as_generator=False, batch_size=BATCH_SIZE, word2vec_model=None, tokenizer_model=None): """ Get data in the form of matrices or generators for both train and test sets. :param train_dir: directory with train files :param labels: an iterable of predefined labels (controlled vocabulary) :param test_dir: directory with test files :param nn_model: Keras model of the NN :param batch_size: integer, size of the batch :param word2vec_model: trained w2v gensim model :return: tuple with 2 elements for train and test data. Each element can be either a pair of matrices (X, y) or their generator """ kwargs = dict( label_indices={lab: i for i, lab in enumerate(labels)}, word2vec_model=word2vec_model, tokenizer_model=tokenizer_model, nn_model=nn_model, ) if as_generator: filename_it = FilenameIterator(train_dir, batch_size) train_data = iterate_over_batches(filename_it, **kwargs) else: train_files = {filename[:-4] for filename in os.listdir(train_dir)} train_data = build_x_and_y(train_files, train_dir, **kwargs) test_data = None if test_dir: test_files = {filename[:-4] for filename in os.listdir(test_dir)} test_data = build_x_and_y(test_files, test_dir, **kwargs) return train_data, test_data def build_x_and_y(filenames, file_directory, **kwargs): """ Given file names and their directory, build (X, y) data matrices :param filenames: iterable of strings showing file ids (no extension) :param file_directory: path to a directory where those files lie :param kwargs: additional necessary data for matrix building e.g. scaler :return: a tuple (X, y) """ label_indices = kwargs['label_indices'] word2vec_model = kwargs['word2vec_model'] tokenizer_model = kwargs['tokenizer_model'] nn_model = kwargs['nn_model'] x_matrix = np.zeros((len(filenames), SAMPLE_LENGTH, EMBEDDING_SIZE)) y_matrix = np.zeros((len(filenames), len(label_indices)), dtype=np.bool_) for doc_id, fname in enumerate(filenames): doc = Document(doc_id, os.path.join( file_directory, fname + '.txt'), tokenizer_model=tokenizer_model) words = doc.get_all_words()[ :SAMPLE_LENGTH] for i, w in enumerate(words): if w in word2vec_model: # predict w2v model here! # re-shape to be 1 row, -1 (unknown) columns x_matrix[doc_id][i] = word2vec_model[w].reshape(1, -1) labels = get_answers_for_doc( fname + '.txt', file_directory, filtered_by=set(label_indices.keys()), ) for lab in labels: index = label_indices[lab] y_matrix[doc_id][index] = True if nn_model and type(nn_model.input) == list: return [x_matrix] * len(nn_model.input), y_matrix else: return [x_matrix], y_matrix def iterate_over_batches(filename_it, **kwargs): """ Iterate infinitely over a given filename iterator :param filename_it: FilenameIterator object :param kwargs: additional necessary data for matrix building e.g. scaler :return: yields tuples (X, y) when called """ while True: files = filename_it.next() yield build_x_and_y(files, filename_it.dirname, **kwargs) class FilenameIterator(object): """ A threadsafe iterator yielding a fixed number of filenames from a given folder and looping forever. Can be used for external memory training. 
""" def __init__(self, dirname, batch_size): self.dirname = dirname self.batch_size = batch_size self.lock = threading.Lock() self.files = list({filename[:-4] for filename in os.listdir(dirname)}) self.i = 0 def __iter__(self): return self def next(self): with self.lock: if self.i == len(self.files): self.i = 0 batch = self.files[self.i:self.i + self.batch_size] if len(batch) < self.batch_size: self.i = 0 else: self.i += self.batch_size return batch
{"hexsha": "fbc9d324bedf90022090ac088d43a23044fa00a4", "size": 4596, "ext": "py", "lang": "Python", "max_stars_repo_path": "nn/input_data.py", "max_stars_repo_name": "ammarinjtk/Multi-Label-Text-Classification", "max_stars_repo_head_hexsha": "9098351c8ad47b30b3c41f5b9d0eed753a9ae960", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-06-29T09:07:09.000Z", "max_stars_repo_stars_event_max_datetime": "2019-06-14T15:21:39.000Z", "max_issues_repo_path": "nn/input_data.py", "max_issues_repo_name": "ammarinjtk/Multi-Label-Text-Classification", "max_issues_repo_head_hexsha": "9098351c8ad47b30b3c41f5b9d0eed753a9ae960", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-10-28T02:28:07.000Z", "max_issues_repo_issues_event_max_datetime": "2018-10-28T02:28:07.000Z", "max_forks_repo_path": "nn/input_data.py", "max_forks_repo_name": "ammarinjtk/Multi-Label-Text-Classification", "max_forks_repo_head_hexsha": "9098351c8ad47b30b3c41f5b9d0eed753a9ae960", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.7941176471, "max_line_length": 80, "alphanum_fraction": 0.6557876414, "include": true, "reason": "import numpy", "num_tokens": 1050}
#include "Client.h" #include "SystemTool.h" #include <iostream> #include <boost/bind.hpp> #include <iostream> #include <utility> #include <thread> #include <chrono> #include <functional> #include <atomic> IOServiceType iosev; void ServiceRun() { iosev.run(); } using namespace std; int main(int argc, const char* argv[]) { Client client(iosev); int rc = client.Connect("127.0.0.1", 12345); if (rc != 0) { cout << rc << endl; cout << "client connect fail" << endl; return rc; } char buf[512] = {0}; std::thread thd(ServiceRun); thd.detach(); int i = 0; while (true) { cin.getline(buf, 512); //cin >> buf; std::string str(buf); cout << buf << endl; NetMessagePtr pMsg = new NetMessage(i, str); string str2; pMsg->ToStr(str2); uint32_t size = str2.size(); uint8_t *pData = new uint8_t[size + sizeof(uint32_t)]; memcpy(pData, &size, sizeof(uint32_t)); memcpy(pData + sizeof(uint32_t), str2.c_str(), size); client.Send(pData, size + sizeof(uint32_t)); ++i; } client.Close(); return 0; }
{"hexsha": "23dd4d05c420b812362bfdde038f28fd65bd7a0e", "size": 1182, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/test/asionet/TestBoostClient.cpp", "max_stars_repo_name": "SeeForward/SysMonitor", "max_stars_repo_head_hexsha": "fdaac5eacf28b62739c4e050e27abd1fdbfd18c4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2017-06-28T02:46:10.000Z", "max_stars_repo_stars_event_max_datetime": "2018-03-16T09:07:34.000Z", "max_issues_repo_path": "src/test/asionet/TestBoostClient.cpp", "max_issues_repo_name": "SeeForward/SysMonitor", "max_issues_repo_head_hexsha": "fdaac5eacf28b62739c4e050e27abd1fdbfd18c4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/test/asionet/TestBoostClient.cpp", "max_forks_repo_name": "SeeForward/SysMonitor", "max_forks_repo_head_hexsha": "fdaac5eacf28b62739c4e050e27abd1fdbfd18c4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2021-12-09T08:03:39.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-09T08:03:39.000Z", "avg_line_length": 19.3770491803, "max_line_length": 62, "alphanum_fraction": 0.5659898477, "num_tokens": 321}
@doc """ simple_estimator(model::Ising, T::Real, Js::AbstractArray) Returns the following observables as `Dict{String, Any}` # Observables - `"Energy"` - energy density - `"Energy^2"` - square of energy density - `"Magnetization"` - magnetization density - `"|Magnetization|"` - absolute value of magnetization density - `"Magnetization^2"` - square of magnetization density - `"Magnetization^4"` - quadruple of magnetization density """ function simple_estimator(model::Ising, T::Real, Js::AbstractArray, _=nothing) nsites = numsites(model) nbonds = numbonds(model) M = mean(model.spins) E = 0.0 @inbounds for b in bonds(model) s1, s2 = source(b), target(b) E += ifelse(model.spins[s1] == model.spins[s2], -1.0, 1.0) * Js[bondtype(b)] end E /= nsites res = Measurement() res["Magnetization"] = M res["|Magnetization|"] = abs(M) res["Magnetization^2"] = M^2 res["Magnetization^4"] = M^4 res["Energy"] = E res["Energy^2"] = E^2 return res end @doc raw""" improved_estimator(model::Ising, T::Real, Js::AbstractArray, sw::SWInfo) Returns the following observables as `Dict{String, Any}` using cluster information `sw` # Observables - `"Energy"` - energy density - `"Energy^2"` - square of energy density - `"Magnetization"` - magnetization density - `"|Magnetization|"` - absolute value of magnetization density - `"Magnetization^2"` - square of magnetization density - `"Magnetization^4"` - quadruple of magnetization density - `"Clustersize^2"` - ``\sum_c r_c^2 ``, where ``r_c`` is the size density of ``c``-th cluster - `"Clustersize^4"` - ``\sum_c r_c^4 `` - `"Clustersize^2 Clustersize^2"` - ``\sum_{c\ne c'} r_c^2 r_{c'}^2`` """ function improved_estimator(model::Ising, T::Real, Js::AbstractArray, sw::SWInfo) nsites = numsites(model) nbonds = numbonds(model) nc = numclusters(sw) invV = 1.0/nsites ## magnetization M = 0.0 N2 = 0.0 N2N2 = 0.0 N4 = 0.0 for (m,s) in zip(sw.clustersize, sw.clusterspin) M += m*invV*s m2 = (m*invV)^2 N4 += m2*m2 N2N2 += N2*m2 N2 += m2 end M4 = N4 + 6*N2N2 M2 = N2 # energy aJ = 2.0*abs.(Js) mbeta = -1.0/T ns = sw.activated_bonds As = -aJ ./ expm1.(mbeta.*aJ) Ans = ns.*As E0 = 0.0 for b in 1:numbondtypes(model) E0 += Js[b] * numbonds(model,b) end E = 0.0 E2 = 0.0 for b in 1:numbondtypes(model) E2 += (aJ[b]-2.0*E0)*Ans[b] E2 += Ans[b] * As[b]*(ns[b]-1) E2 += 2.0*Ans[b]*E E += Ans[b] end E -= E0 E2 += E0^2 E *= -invV E2 *= invV*invV res = Measurement() res["Magnetization"] = M res["|Magnetization|"] = abs(M) res["Magnetization^2"] = M2 res["Magnetization^4"] = M4 res["Energy"] = E res["Energy^2"] = E2 res["Clustersize^2"] = N2 res["Clustersize^4"] = N4 res["Clustersize^2 Clustersize^2"] = 2*N2N2 return res end default_estimator(model::Ising, update) = ifelse(update==SW_update!, improved_estimator, simple_estimator)
{"hexsha": "f945e7cbd11e2400167a69d86c00f3a74e876821", "size": 3165, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/model/Ising/estimator.jl", "max_stars_repo_name": "UnofficialJuliaMirror/SpinMonteCarlo.jl-71c4a2d3-ecf8-5cd9-ab6a-09a504837b4f", "max_stars_repo_head_hexsha": "fdd23854c33846b4e61396add4787c023ac97209", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 31, "max_stars_repo_stars_event_min_datetime": "2018-04-04T11:58:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T09:16:46.000Z", "max_issues_repo_path": "src/model/Ising/estimator.jl", "max_issues_repo_name": "UnofficialJuliaMirror/SpinMonteCarlo.jl-71c4a2d3-ecf8-5cd9-ab6a-09a504837b4f", "max_issues_repo_head_hexsha": "fdd23854c33846b4e61396add4787c023ac97209", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2017-01-06T11:02:05.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-29T04:02:40.000Z", "max_forks_repo_path": "src/model/Ising/estimator.jl", "max_forks_repo_name": "UnofficialJuliaMirror/SpinMonteCarlo.jl-71c4a2d3-ecf8-5cd9-ab6a-09a504837b4f", "max_forks_repo_head_hexsha": "fdd23854c33846b4e61396add4787c023ac97209", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2018-10-17T02:59:46.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-29T00:51:46.000Z", "avg_line_length": 24.9212598425, "max_line_length": 106, "alphanum_fraction": 0.5902053712, "num_tokens": 1097}
import numpy as np
from envs.babyai.oracle.teacher import Teacher


class DemoCorrections(Teacher):

    def reset(self):
        self.env.compute_obj_infos()
        empty_path = np.zeros((self.env.grid.height + self.env.grid.width, 2))
        path = self.oracle.shortest_path_obj()
        empty_path[:len(path)] = path
        self.init_obj_infos = self.env.obj_infos.copy()
        self.demo_path = empty_path.reshape(-1).copy()

    def empty_feedback(self):
        """
        Return a tensor corresponding to no feedback.
        """
        # Size - obj infos, demos
        return np.zeros_like(np.concatenate([self.init_obj_infos, self.demo_path]))

    def random_feedback(self):
        """
        Return a tensor corresponding to random feedback.
        """
        raise NotImplementedError('random feedback not implemented')

    def compute_feedback(self):
        """
        Return the object info concatenated with the padded demo path.
        """
        return np.concatenate([self.init_obj_infos, self.demo_path])

    def feedback_condition(self):
        """
        Returns true when we should give feedback.
        Currently returns true when the agent's past action did not match the oracle's action.
        """
        # For now, we're being lazy and correcting the agent any time it strays from the
        # oracle's optimal set of actions. This is kind of sketchy since multiple paths can be optimal.
        return len(self.agent_actions) > 0 and (not self.agent_actions[-1] == self.oracle_actions[-1])
{"hexsha": "3cad8ac584fd422e1cb1bccf82ef93317ac1672c", "size": 1525, "ext": "py", "lang": "Python", "max_stars_repo_path": "envs/babyai/oracle/demo_corrections.py", "max_stars_repo_name": "AliengirlLiv/babyai", "max_stars_repo_head_hexsha": "51421ee11538bf110c5b2d0c84a15f783d854e7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-02-24T08:47:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T09:44:22.000Z", "max_issues_repo_path": "envs/babyai/oracle/demo_corrections.py", "max_issues_repo_name": "AliengirlLiv/babyai", "max_issues_repo_head_hexsha": "51421ee11538bf110c5b2d0c84a15f783d854e7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "envs/babyai/oracle/demo_corrections.py", "max_forks_repo_name": "AliengirlLiv/babyai", "max_forks_repo_head_hexsha": "51421ee11538bf110c5b2d0c84a15f783d854e7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-27T19:03:38.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-27T19:03:38.000Z", "avg_line_length": 38.125, "max_line_length": 120, "alphanum_fraction": 0.6537704918, "include": true, "reason": "import numpy", "num_tokens": 312}
// Boost Includes ============================================================== #include <boost/python.hpp> #include <boost/cstdint.hpp> // Includes ==================================================================== #include <Magick++/Drawable.h> // Declarations ================================================================ #include <Magick++.h> // Using ======================================================================= using namespace boost::python; namespace { struct Magick_DrawableStrokeLineCap_Wrapper: Magick::DrawableStrokeLineCap { Magick_DrawableStrokeLineCap_Wrapper(PyObject* py_self_, MagickCore::LineCap p0): Magick::DrawableStrokeLineCap(p0), py_self(py_self_) {} PyObject* py_self; }; }// namespace // Module ====================================================================== void Export_pyste_src_DrawableStrokeLineCap() { class_< Magick::DrawableStrokeLineCap, boost::noncopyable, Magick_DrawableStrokeLineCap_Wrapper >("DrawableStrokeLineCap", init< MagickCore::LineCap >()) .def("linecap", (void (Magick::DrawableStrokeLineCap::*)(MagickCore::LineCap) )&Magick::DrawableStrokeLineCap::linecap) .def("linecap", (MagickCore::LineCap (Magick::DrawableStrokeLineCap::*)() const)&Magick::DrawableStrokeLineCap::linecap) ; implicitly_convertible<Magick::DrawableStrokeLineCap,Magick::Drawable>(); }
{"hexsha": "04e932f71bcdb59fdb490a78ed0b79eb55e4f11c", "size": 1385, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "dev/Tools/Python/2.7.12/windows/Lib/site-packages/PythonMagick-0.9.19/pythonmagick_src/_DrawableStrokeLineCap.cpp", "max_stars_repo_name": "jeikabu/lumberyard", "max_stars_repo_head_hexsha": "07228c605ce16cbf5aaa209a94a3cb9d6c1a4115", "max_stars_repo_licenses": ["AML"], "max_stars_count": 61.0, "max_stars_repo_stars_event_min_datetime": "2016-02-03T18:01:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-21T03:04:18.000Z", "max_issues_repo_path": "dev/Tools/Python/2.7.12/windows/Lib/site-packages/PythonMagick-0.9.19/pythonmagick_src/_DrawableStrokeLineCap.cpp", "max_issues_repo_name": "jeikabu/lumberyard", "max_issues_repo_head_hexsha": "07228c605ce16cbf5aaa209a94a3cb9d6c1a4115", "max_issues_repo_licenses": ["AML"], "max_issues_count": 20.0, "max_issues_repo_issues_event_min_datetime": "2016-01-13T17:41:13.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-19T20:51:51.000Z", "max_forks_repo_path": "dev/Tools/Python/2.7.12/windows/Lib/site-packages/PythonMagick-0.9.19/pythonmagick_src/_DrawableStrokeLineCap.cpp", "max_forks_repo_name": "jeikabu/lumberyard", "max_forks_repo_head_hexsha": "07228c605ce16cbf5aaa209a94a3cb9d6c1a4115", "max_forks_repo_licenses": ["AML"], "max_forks_count": 18.0, "max_forks_repo_forks_event_min_datetime": "2016-01-06T05:43:36.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-21T03:04:17.000Z", "avg_line_length": 34.625, "max_line_length": 157, "alphanum_fraction": 0.5646209386, "num_tokens": 276}
/- Copyright (c) 2020 Jannis Limperg. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. Authors: Jannis Limperg -/ import tactic.core /-! # The `unify_equations` tactic This module defines `unify_equations`, a first-order unification tactic that unifies one or more equations in the context. It implements the Qnify algorithm from [McBride, Inverting Inductively Defined Relations in LEGO][mcbride1996]. The tactic takes as input some equations which it simplifies one after the other. Each equation is simplified by applying one of several possible unification steps. Each such step may output other (simpler) equations which are unified recursively until no unification step applies any more. See `tactic.interactive.unify_equations` for an example and an explanation of the different steps. -/ open expr namespace tactic namespace unify_equations /-- The result of a unification step: - `simplified hs` means that the step succeeded and produced some new (simpler) equations `hs`. `hs` can be empty. - `goal_solved` means that the step succeeded and solved the goal (by deriving a contradiction from the given equation). - `not_simplified` means that the step failed to simplify the equation. -/ meta inductive unification_step_result : Type | simplified (next_equations : list name) | not_simplified | goal_solved export unification_step_result /-- A unification step is a tactic that attempts to simplify a given equation and returns a `unification_step_result`. The inputs are: - `equ`, the equation being processed. Must be a local constant. - `lhs_type` and `rhs_type`, the types of equ's LHS and RHS. For homogeneous equations, these are defeq. - `lhs` and `rhs`, `equ`'s LHS and RHS. - `lhs_whnf` and `rhs_whnf`, `equ`'s LHS and RHS in WHNF. - `u`, `equ`'s level. So `equ : @eq.{u} lhs_type lhs rhs` or `equ : @heq.{u} lhs_type lhs rhs_type rhs`. -/ @[reducible] meta def unification_step : Type := ∀ (equ lhs_type rhs_type lhs rhs lhs_whnf rhs_whnf : expr) (u : level), tactic unification_step_result /-- For `equ : t == u` with `t : T` and `u : U`, if `T` and `U` are defeq, we replace `equ` with `equ : t = u`. -/ meta def unify_heterogeneous : unification_step := λ equ lhs_type rhs_type lhs rhs _ _ _, do { is_def_eq lhs_type rhs_type, p ← to_expr ``(@eq_of_heq %%lhs_type %%lhs %%rhs %%equ), t ← to_expr ``(@eq %%lhs_type %%lhs %%rhs), equ' ← note equ.local_pp_name t p, clear equ, pure $ simplified [equ'.local_pp_name] } <|> pure not_simplified /-- For `equ : t = u`, if `t` and `u` are defeq, we delete `equ`. -/ meta def unify_defeq : unification_step := λ equ lhs_type _ _ _ lhs_whnf rhs_whnf _, do { is_def_eq lhs_whnf rhs_whnf, clear equ, pure $ simplified [] } <|> pure not_simplified /-- For `equ : x = t` or `equ : t = x`, where `x` is a local constant, we substitute `x` with `t` in the goal. -/ meta def unify_var : unification_step := λ equ type _ lhs rhs lhs_whnf rhs_whnf u, do { let lhs_is_local := lhs_whnf.is_local_constant, let rhs_is_local := rhs_whnf.is_local_constant, guard $ lhs_is_local ∨ rhs_is_local, let t := if lhs_is_local then (const `eq [u]) type lhs_whnf rhs else (const `eq [u]) type lhs rhs_whnf, change_core t (some equ), equ ← get_local equ.local_pp_name, subst_core equ, pure $ simplified [] } <|> pure not_simplified -- TODO This is an improved version of `injection_with` from core -- (init/meta/injection_tactic). Remove when the improvements have landed in -- core. 
private meta def injection_with' (h : expr) (ns : list name) (base := `h) (offset := some 1) : tactic (option (list expr) × list name) := do H ← infer_type h, (lhs, rhs, constructor_left, constructor_right, inj_name) ← do { (lhs, rhs) ← match_eq H, constructor_left ← get_app_fn_const_whnf lhs semireducible ff, constructor_right ← get_app_fn_const_whnf rhs semireducible ff, inj_name ← resolve_constant $ constructor_left ++ "inj_arrow", pure (lhs, rhs, constructor_left, constructor_right, inj_name) } <|> fail ("injection tactic failed, argument must be an equality proof where lhs and rhs " ++ "are of the form (c ...), where c is a constructor"), if constructor_left = constructor_right then do -- C.inj_arrow, for a given constructor C of datatype D, has type -- -- ∀ (A₁ ... Aₙ) (x₁ ... xₘ) (y₁ ... yₘ), C x₁ ... xₘ = C y₁ ... yₘ -- → ∀ ⦃P : Sort u⦄, (x₁ = y₁ → ... → yₖ = yₖ → P) → P -- -- where the Aᵢ are parameters of D and the xᵢ/yᵢ are arguments of C. -- Note that if xᵢ/yᵢ are propositions, no equation is generated, so the -- number of equations is not necessarily the constructor arity. -- First, we find out how many equations we need to intro later. inj ← mk_const inj_name, inj_type ← infer_type inj, inj_arity ← get_pi_arity inj_type, let num_equations := (inj_type.nth_binding_body (inj_arity - 1)).binding_domain.pi_arity, -- Now we generate the actual proof of the target. tgt ← target, proof ← mk_mapp inj_name (list.replicate (inj_arity - 3) none ++ [some h, some tgt]), eapply proof, (next, ns) ← intron_with num_equations ns base offset, -- The following filters out 'next' hypotheses of type `true`. The -- `inj_arrow` lemmas introduce these for nullary constructors. next ← next.mfilter $ λ h, do { `(true) ← infer_type h | pure tt, (clear h >> pure ff) <|> pure tt }, pure (some next, ns) else do tgt ← target, -- The following construction deals with a corner case involing -- mutual/nested inductive types. For these, Lean does not generate -- no-confusion principles. However, the regular inductive data type which a -- mutual/nested inductive type is compiled to does have a no-confusion -- principle which we can (usually? always?) use. To find it, we normalise -- the constructor with `unfold_ginductive = tt`. constructor_left ← get_app_fn_const_whnf lhs semireducible tt, let no_confusion := constructor_left.get_prefix ++ "no_confusion", pr ← mk_app no_confusion [tgt, lhs, rhs, h], exact pr, return (none, ns) /-- Given `equ : C x₁ ... xₙ = D y₁ ... yₘ` with `C` and `D` constructors of the same datatype `I`: - If `C ≠ D`, we solve the goal by contradiction using the no-confusion rule. - If `C = D`, we clear `equ` and add equations `x₁ = y₁`, ..., `xₙ = yₙ`. -/ meta def unify_constructor_headed : unification_step := λ equ _ _ _ _ _ _ _, do { (next, _) ← injection_with' equ [] `_ none, try $ clear equ, pure $ match next with | none := goal_solved | some next := simplified $ next.map expr.local_pp_name end } <|> pure not_simplified /-- For `type = I x₁ ... xₙ`, where `I` is an inductive type, `get_sizeof type` returns the constant `I.sizeof`. Fails if `type` is not of this form or if no such constant exists. -/ meta def get_sizeof (type : expr) : tactic pexpr := do n ← get_app_fn_const_whnf type semireducible ff, resolve_name $ n ++ `sizeof lemma add_add_one_ne (n m : ℕ) : n + (m + 1) ≠ n := begin apply ne_of_gt, apply nat.lt_add_of_pos_right, apply nat.pos_of_ne_zero, contradiction end -- Linarith could prove this, but I want to avoid that dependency. 
/-- `match_n_plus_m n e` matches `e` of the form `nat.succ (... (nat.succ e')...)`. It returns `n` plus the number of `succ` constructors and `e'`. The matching is performed up to normalisation with transparency `md`. -/ meta def match_n_plus_m (md) : ℕ → expr → tactic (ℕ × expr) := λ n e, do e ← whnf e md, match e with | `(nat.succ %%e) := match_n_plus_m (n + 1) e | _ := pure (n, e) end /-- Given `equ : n + m = n` or `equ : n = n + m` with `n` and `m` natural numbers and `m` a nonzero literal, this tactic produces a proof of `false`. More precisely, the two sides of the equation must be of the form `nat.succ (... (nat.succ e)...)` with different numbers of `nat.succ` constructors. Matching is performed with transparency `md`. -/ meta def contradict_n_eq_n_plus_m (md : transparency) (equ lhs rhs : expr) : tactic expr := do ⟨lhs_n, lhs_e⟩ ← match_n_plus_m md 0 lhs, ⟨rhs_n, rhs_e⟩ ← match_n_plus_m md 0 rhs, is_def_eq lhs_e rhs_e md <|> fail ("contradict_n_eq_n_plus_m:\nexpected {lhs_e} and {rhs_e} to be definitionally " ++ "equal at transparency {md}."), let common := lhs_e, guard (lhs_n ≠ rhs_n) <|> fail "contradict_n_eq_n_plus_m:\nexpected {lhs_n} and {rhs_n} to be different.", -- Ensure that lhs_n is bigger than rhs_n. Swap lhs and rhs if that's not -- already the case. ⟨equ, lhs_n, rhs_n⟩ ← if lhs_n > rhs_n then pure (equ, lhs_n, rhs_n) else do { equ ← to_expr ``(eq.symm %%equ), pure (equ, rhs_n, lhs_n) }, let diff := lhs_n - rhs_n, let rhs_n_expr := reflect rhs_n, n ← to_expr ``(%%common + %%rhs_n_expr), let m := reflect (diff - 1), pure `(add_add_one_ne %%n %%m %%equ) /-- Given `equ : t = u` with `t, u : I` and `I.sizeof t ≠ I.sizeof u`, we solve the goal by contradiction. -/ meta def unify_cyclic : unification_step := λ equ type _ _ _ lhs_whnf rhs_whnf _, do { -- Establish `sizeof lhs = sizeof rhs`. sizeof ← get_sizeof type, hyp_lhs ← to_expr ``(%%sizeof %%lhs_whnf), hyp_rhs ← to_expr ``(%%sizeof %%rhs_whnf), hyp_type ← to_expr ``(@eq ℕ %%hyp_lhs %%hyp_rhs), hyp_proof ← to_expr ``(@congr_arg %%type ℕ %%lhs_whnf %%rhs_whnf %%sizeof %%equ), hyp_name ← mk_fresh_name, hyp ← note hyp_name hyp_type hyp_proof, -- Derive a contradiction (if indeed `sizeof lhs ≠ sizeof rhs`). falso ← contradict_n_eq_n_plus_m semireducible hyp hyp_lhs hyp_rhs, exfalso, exact falso, pure goal_solved } <|> pure not_simplified /-- `orelse_step s t` first runs the unification step `s`. If this was successful (i.e. `s` simplified or solved the goal), it returns the result of `s`. Otherwise, it runs `t` and returns its result. -/ meta def orelse_step (s t : unification_step) : unification_step := λ equ lhs_type rhs_type lhs rhs lhs_whnf rhs_whnf u, do r ← s equ lhs_type rhs_type lhs rhs lhs_whnf rhs_whnf u, match r with | simplified _ := pure r | goal_solved := pure r | not_simplified := t equ lhs_type rhs_type lhs rhs lhs_whnf rhs_whnf u end /-- For `equ : t = u`, try the following methods in order: `unify_defeq`, `unify_var`, `unify_constructor_headed`, `unify_cyclic`. If any of them is successful, stop and return its result. If none is successful, fail. -/ meta def unify_homogeneous : unification_step := list.foldl orelse_step (λ _ _ _ _ _ _ _ _, pure not_simplified) [unify_defeq, unify_var, unify_constructor_headed, unify_cyclic] end unify_equations open unify_equations /-- If `equ` is the display name of a local constant with type `t = u` or `t == u`, then `unify_equation_once equ` simplifies it once using `unify_equations.unify_homogeneous` or `unify_equations.unify_heterogeneous`. Otherwise it fails. 
-/ meta def unify_equation_once (equ : name) : tactic unification_step_result := do eque ← get_local equ, t ← infer_type eque, match t with | (app (app (app (const `eq [u]) type) lhs) rhs) := do lhs_whnf ← whnf_ginductive lhs, rhs_whnf ← whnf_ginductive rhs, unify_homogeneous eque type type lhs rhs lhs_whnf rhs_whnf u | (app (app (app (app (const `heq [u]) lhs_type) lhs) rhs_type) rhs) := do lhs_whnf ← whnf_ginductive lhs, rhs_whnf ← whnf_ginductive rhs, unify_heterogeneous eque lhs_type rhs_type lhs rhs lhs_whnf rhs_whnf u | _ := fail! "Expected {equ} to be an equation, but its type is\n{t}." end /-- Given a list of display names of local hypotheses that are (homogeneous or heterogeneous) equations, `unify_equations` performs first-order unification on each hypothesis in order. See `tactic.interactive.unify_equations` for an example and an explanation of what unification does. Returns true iff the goal has been solved during the unification process. Note: you must make sure that the input names are unique in the context. -/ meta def unify_equations : list name → tactic bool | [] := pure ff | (h :: hs) := do res ← unify_equation_once h, match res with | simplified hs' := unify_equations $ hs' ++ hs | not_simplified := unify_equations hs | goal_solved := pure tt end namespace interactive open lean.parser /-- `unify_equations eq₁ ... eqₙ` performs a form of first-order unification on the hypotheses `eqᵢ`. The `eqᵢ` must be homogeneous or heterogeneous equations. Unification means that the equations are simplified using various facts about constructors. For instance, consider this goal: ``` P : ∀ n, fin n → Prop n m : ℕ f : fin n g : fin m h₁ : n + 1 = m + 1 h₂ : f == g h₃ : P n f ⊢ P m g ``` After `unify_equations h₁ h₂`, we get ``` P : ∀ n, fin n → Prop n : ℕ f : fin n h₃ : P n f ⊢ P n f ``` In the example, `unify_equations` uses the fact that every constructor is injective to conclude `n = m` from `h₁`. Then it replaces every `m` with `n` and moves on to `h₂`. The types of `f` and `g` are now equal, so the heterogeneous equation turns into a homogeneous one and `g` is replaced by `f`. Note that the equations are processed from left to right, so `unify_equations h₂ h₁` would not simplify as much. In general, `unify_equations` uses the following steps on each equation until none of them applies any more: - Constructor injectivity: if `nat.succ n = nat.succ m` then `n = m`. - Substitution: if `x = e` for some hypothesis `x`, then `x` is replaced by `e` everywhere. - No-confusion: `nat.succ n = nat.zero` is a contradiction. If we have such an equation, the goal is solved immediately. - Cycle elimination: `n = nat.succ n` is a contradiction. - Redundancy: if `t = u` but `t` and `u` are already definitionally equal, then this equation is removed. - Downgrading of heterogeneous equations: if `t == u` but `t` and `u` have the same type (up to definitional equality), then the equation is replaced by `t = u`. -/ meta def unify_equations (eqs : interactive.parse (many ident)) : tactic unit := tactic.unify_equations eqs *> skip add_tactic_doc { name := "unify_equations", category := doc_category.tactic, decl_names := [`tactic.interactive.unify_equations], tags := ["simplification"] } end interactive end tactic
{"author": "leanprover-community", "repo": "mathlib", "sha": "5e526d18cea33550268dcbbddcb822d5cde40654", "save_path": "github-repos/lean/leanprover-community-mathlib", "path": "github-repos/lean/leanprover-community-mathlib/mathlib-5e526d18cea33550268dcbbddcb822d5cde40654/src/tactic/unify_equations.lean"}
import numpy as np
from tqdm import tqdm_notebook as tqdm

import spectral


def grid_search(X, param_grid):
    """
    Compute an error score for every combination of parameters and keep the best one.

    The score of a combination is the sum of the first eigenvalues returned by
    spectral.fast_spectral_decomposition; the combination minimising it is returned.

    Parameters
    ----------
    X : ndarray
        data matrix passed to spectral.fast_spectral_decomposition
    param_grid : sklearn.model_selection.ParameterGrid
        grid of parameters (all combinations to try)

    Returns
    -------
    out : parameters, list
        the best parameter combination and the scores of all combinations
    """
    errors = [np.sum(spectral.fast_spectral_decomposition(X, return_eigenvalues=True, n_eigen=4, **p)[0])
              for p in tqdm(param_grid)]
    return param_grid[np.argmin(errors)], errors
{"hexsha": "fab617aba0e903efa22c390071b042489175ca5b", "size": 635, "ext": "py", "lang": "Python", "max_stars_repo_path": "learning.py", "max_stars_repo_name": "ali-h/GraphLang", "max_stars_repo_head_hexsha": "d3fdab912be967a3642708df4d222a5bf1df992c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-02-01T10:12:49.000Z", "max_stars_repo_stars_event_max_datetime": "2018-02-01T11:18:44.000Z", "max_issues_repo_path": "learning.py", "max_issues_repo_name": "ali-h/GraphLang", "max_issues_repo_head_hexsha": "d3fdab912be967a3642708df4d222a5bf1df992c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "learning.py", "max_forks_repo_name": "ali-h/GraphLang", "max_forks_repo_head_hexsha": "d3fdab912be967a3642708df4d222a5bf1df992c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-05-30T10:08:03.000Z", "max_forks_repo_forks_event_max_datetime": "2018-05-30T10:08:03.000Z", "avg_line_length": 26.4583333333, "max_line_length": 105, "alphanum_fraction": 0.6881889764, "include": true, "reason": "import numpy", "num_tokens": 140}
!> Pressure Load for nonlinear elasticity in a total lagrangian formulation !! !! !! @param iShiftU = nint(CommonPar(1)) !! @param iShiftDeltaU = nint(CommonPar(2)) !! @param iFemType = nint(CommonPar(3)) !!! of the associated volume element (one dimension higher) !! @param iLoadProg = nint(CommonPar(4)) !! @param LoadPar = CommonPar(5:10) ! ------------------------------------------------------------------ Subroutine NeumannRefTraction(AE, BE, MaxLRows, XLL, NDim, iDofT, NodELT, Sol0, & Sol1, CommonPar, Param, JParam, DelT, DTm, Time) ! ------------------------------------------------------------------ use funcAux use loadingLib use finiteStrainLib use ptsGaussLib IMPLICIT NONE ! ===== SUBROUTINE ARGUMENTS ======= integer :: MaxLRows,Ndim,iDofT,NodELT ! all integers Real*8 :: DelT, DTm,Time ! all reals ! Reals Vectors and matrices Real*8 :: AE(MaxLRows,MaxLRows), BE(MaxLRows), XLL(*), Sol0(*), Sol1(*), & Param(*), JParam(*), CommonPar(*) ! ===== END ARGUMENTS ======= Integer :: iLoadProg, iShiftDeltaU, iShiftU integer, parameter :: NdimE = 2 real*8 , allocatable :: Xel(:) !! all have dimension NodG*NdimE Real*8 :: normal0(NdimE) , vecU(NdimE), lengthLine, LoadPar(6), pm Real*8 :: Traction(2) type(ptGaussClass) :: PtG iShiftU = nint(CommonPar(1)) iShiftDeltaU = nint(CommonPar(2)) iLoadProg = nint(CommonPar(3)) LoadPar = CommonPar(4:9) call chooseLoad(iLoadProg) call pressureLoad(pm,Time,DelT,LoadPar) call getSliceAllocate(Xel,XLL,1,NodELT,1 ,NdimE, Ndim) VecU = Xel(NdimE + 1 : 2*NdimE) - Xel(1:NdimE) lengthLine = dsqrt(dot_product(vecU,vecU)) normal0(1) = vecU(2)/lengthLine normal0(2) = -vecU(1)/lengthLine write(0,*) "============================= > pm = " , pm Traction = -pm*normal0 AE = 0.0d0 BE = 0.0d0 BE(iShiftDeltaU + 1 ) = 0.5d0*lengthLine*Traction(1) BE(iShiftDeltaU + 2 ) = 0.5d0*lengthLine*Traction(2) BE(iDofT + iShiftDeltaU + 1 ) = 0.5d0*lengthLine*Traction(1) BE(iDofT + iShiftDeltaU + 2 ) = 0.5d0*lengthLine*Traction(2) end subroutine !%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Subroutine NeumannRefTractionS(Coupling,CommonPar,iDofT,Ndim,MaxLRows,iAdd) !%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% use funcAux use ptsGaussLib IMPLICIT NONE Integer :: MaxLRows,Ndim,iDofT, iAdd Integer Coupling(MaxLRows,MaxLRows) Real*8 CommonPar(*) Integer :: iShiftDeltaU iShiftDeltaU=nint(CommonPar(2)) Coupling(iShiftDeltaU + 1, iShiftDeltaU + 1 ) = 1 Coupling(iShiftDeltaU + 2, iShiftDeltaU + 2 ) = 1 Coupling(iDofT + iShiftDeltaU + 1, iDofT + iShiftDeltaU + 1 ) = 1 Coupling(iDofT + iShiftDeltaU + 2, iDofT + iShiftDeltaU + 2 ) = 1 end Subroutine
{"hexsha": "679417526e44f53315fec47f620654599d02c701", "size": 2816, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/FiniteStrain/NeumannRefTraction.f90", "max_stars_repo_name": "felipefr/gpmaterials", "max_stars_repo_head_hexsha": "db9c4b2c348a85c1af01d8d3256a243fb7b59a21", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-08-24T22:15:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-23T03:30:22.000Z", "max_issues_repo_path": "src/FiniteStrain/NeumannRefTraction.f90", "max_issues_repo_name": "felipefr/gpmaterials", "max_issues_repo_head_hexsha": "db9c4b2c348a85c1af01d8d3256a243fb7b59a21", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/FiniteStrain/NeumannRefTraction.f90", "max_forks_repo_name": "felipefr/gpmaterials", "max_forks_repo_head_hexsha": "db9c4b2c348a85c1af01d8d3256a243fb7b59a21", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-18T02:23:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T02:23:44.000Z", "avg_line_length": 30.9450549451, "max_line_length": 101, "alphanum_fraction": 0.5948153409, "num_tokens": 969}
subroutine fermiBreakUpInterface (gsmObj, residINC, gsmRxn) ! ====================================================================== ! ! Fermi break-up calculation of nuclei with A<13 in Preco and Evap ! ! Called from PRECOF ! ! Written by K.K. Gudima, 06/23/06 ! Modified by SGM, 07/09/06 ! Edited by AJS, LANL T-2, December, 2011. ! Edited by LMK, XCP-3, July 2013 (included error protection) ! ! ====================================================================== use, intrinsic :: iso_fortran_env, only: int32, real64, int64 use gsm_params, only: zro, one, thousand, twpi use fermiBreakupClass, only : & & fermiBreakUpResults, newFermiBreakUpResults, & & fermiBreakUpProgeny, fermiBreakUpNucleus implicit none class(GSM), intent(inout) :: gsmObj class(GSMResidual), intent(in ) :: residINC class(GSMReaction), intent(inout) :: gsmRxn integer(int32) :: iafr, ifr, izfr, jfr real(real64) :: cosPhi, cosTheta, phi, totLinMome, & & sinPhi, sinTheta, theta, temp ! Results object: type(fermiBreakUpProgeny), dimension( nint(residINC%numBaryons) ) :: progenyBnk type(fermiBreakUpResults) :: fbuResults ! Nucleus type: type(fermiBreakUpNucleus) :: residual ! For photon emission: type(GSMResidual) :: photoResidual type(GSMResults), pointer :: results => NULL() ! ====================================================================== ! Set up variables and nucleus: residual%numBaryons = residINC%numBaryons residual%numProtons = residINC%numProtons residual%kinEnergy = residINC%kinEnergy/thousand residual%linearXMom = residINC%linearMom(1) residual%linearYMom = residINC%linearMom(2) residual%linearZMom = residINC%linearMom(3) gsmRxn%outData%ifermi = gsmRxn%outData%ifermi + 1 ! Create a object for photon emission, if used: if ( gsmObj%usePhotonEmission ) then photoResidual%numBaryons = residINC%numBaryons photoResidual%numProtons = residINC%numProtons photoResidual%kinEnergy = residINC%kinEnergy photoResidual%linearMom(1) = residINC%linearMom(1) photoResidual%linearMom(2) = residINC%linearMom(2) photoResidual%linearMom(3) = residINC%linearMom(3) end if ! Construct the results object: fbuResults = newFermiBreakUpResults( progenyBnk ) ! Perform Fermi Break-Up simulation call gsmObj%genModels%fbu%execute (residual, fbuResults) ! Now interface results: results => gsmRxn%results ! Set evaporation/fission flags results%info%fusion = zro results%info%wf = zro ! If fragments created, store data into secondary arrays of program progenyLoop: do ifr = 1,fbuResults%numProgeny ! Check for available particle storage: if ( results%numProgeny > results%maxProgenyM1 ) then write(gsmObj%io%message, 1000) fbuResults%numProgeny - ifr + 1 call gsmObj%io%print(3, 3, gsmObj%io%message) exit progenyLoop end if ! Obtain interim results: totLinMome = sqrt( progenyBnk(ifr)%linearXMom**2 + progenyBnk(ifr)%linearYMom**2 + & & progenyBnk(ifr)%linearZMom**2 ) cosTheta = progenyBnk(ifr)%linearZMom/totLinMome if ( abs(cosTheta) > one ) cosTheta = sign(one, cosTheta) ! Ensure cos(theta) in limits of [-1, 1] sinTheta = sqrt(one - cosTheta**2) if (sinTheta > zro) then ! Particle has forward movement (get phi component of direction) temp = totLinMome*sinTheta cosPhi = progenyBnk(ifr)%linearXMom/(temp) ! cos(phi) = px/[sin(theta) ptot] sinPhi = progenyBnk(ifr)%linearYMom/(temp) ! sin(phi) = py/[sin(theta) ptot] else ! Scatter is at 90 degrees (in theta) to incident beam cosPhi = one sinPhi = zro endif ! Obtain (phi), ensure within range [0, 2pi] phi = atan2 (sinPhi, cosPhi) if (phi < zro) phi = twpi + phi theta = atan2 (sinTheta, cosTheta) ! 
      ! Obtain GSM fragment label:
      iafr = nint(progenyBnk(ifr)%numBaryons)
      izfr = nint(progenyBnk(ifr)%numProtons)
      jfr = zro
      if (progenyBnk(ifr)%numBaryons <= 4.1d0 .and. progenyBnk(ifr)%numProtons <= 2.1d0) then
         if (iafr == 1 .and. izfr == 0) jfr = 1   ! n
         if (iafr == 1 .and. izfr == 1) jfr = 2   ! p
         if (iafr == 2 .and. izfr == 1) jfr = 3   ! d
         if (iafr == 3 .and. izfr == 1) jfr = 4   ! t
         if (iafr == 3 .and. izfr == 2) jfr = 5   ! He-3
         if (iafr == 4 .and. izfr == 2) jfr = 6   ! He-4
      else
         jfr = thousand*izfr + (iafr - izfr)   ! Type for all others
      endif
      if (jfr == 0) jfr = thousand*izfr + (iafr - izfr)

      ! Store progeny in particle bank:
      results%numProgeny = results%numProgeny + 1
      results%progenyBnk(results%numProgeny)%numBaryons = progenyBnk(ifr)%numBaryons
      results%progenyBnk(results%numProgeny)%numProtons = progenyBnk(ifr)%numProtons
      results%progenyBnk(results%numProgeny)%kinEnergy = progenyBnk(ifr)%kinEnergy / thousand
      results%progenyBnk(results%numProgeny)%restMass = progenyBnk(ifr)%restMass/thousand
      results%progenyBnk(results%numProgeny)%phi = phi
      results%progenyBnk(results%numProgeny)%theta = theta
      results%progenyBnk(results%numProgeny)%sinTheta = sinTheta
      results%progenyBnk(results%numProgeny)%cosTheta = cosTheta
      results%progenyBnk(results%numProgeny)%typeID = jfr
      results%progenyBnk(results%numProgeny)%prodMech = 1500

      ! Simulate photon emission:
      if ( gsmObj%usePhotonEmission ) then
         call gammaCascade(results%progenyBnk(results%numProgeny), photoResidual)
      end if

   end do progenyLoop

   return
! ======================================================================
1000 format("The GSM progeny array was exceeded. Cannot tally last ", i3, &
          & " fragments.")
! ======================================================================
 end subroutine fermiBreakUpInterface
{"hexsha": "1f4e6367fd8f0fa531fc0c6e34c895b8ec0c212a", "size": 6113, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/GeneralizedSpallation/fermiBreakUpInterface.f90", "max_stars_repo_name": "lanl/generalized-spallation-model", "max_stars_repo_head_hexsha": "4a2f01a873d2e8f2304b8fd1474d43d1ce8d744d", "max_stars_repo_licenses": ["Intel", "Unlicense"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-24T18:05:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-24T18:05:48.000Z", "max_issues_repo_path": "src/GeneralizedSpallation/fermiBreakUpInterface.f90", "max_issues_repo_name": "lanl/generalized-spallation-model", "max_issues_repo_head_hexsha": "4a2f01a873d2e8f2304b8fd1474d43d1ce8d744d", "max_issues_repo_licenses": ["Intel", "Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/GeneralizedSpallation/fermiBreakUpInterface.f90", "max_forks_repo_name": "lanl/generalized-spallation-model", "max_forks_repo_head_hexsha": "4a2f01a873d2e8f2304b8fd1474d43d1ce8d744d", "max_forks_repo_licenses": ["Intel", "Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.0268456376, "max_line_length": 105, "alphanum_fraction": 0.6134467528, "num_tokens": 1907}
# -*- coding: utf-8 -*- """ @author: ZhiyuanLi """ import numpy as np import tensorflow as tf import pandas as pd import collections from tensorflow.keras import Sequential, layers, optimizers from xgboost import XGBClassifier from sklearn.model_selection import LeaveOneOut from sklearn.metrics import roc_auc_score from sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix from statistics import mean, stdev from sklearn.cluster import SpectralClustering # load data df = pd.read_csv("...file...") # create an adjacent matrix def adjacent_matrix(d): # define a zero matrix mat = np.zeros((338,338)) m,n = mat.shape # unique attributes a1 = list(d['x1'].unique()) a2 = list(d['x2'].unique()) a2 = [x for x in a2 if x == x] # attributes for i in a1: # get index pos = d.index[df['x1'] == i].tolist() #print(pos) for x in pos: for y in pos: mat[x][y] = 1 for i in a2: # get index pos = d.index[df['x2'] == i].tolist() #print(pos) for x in pos: for y in pos: mat[x][y] = 1 return mat # using synthetic data, suppose data size: 338 x 10 just for simple run # experiment setup only using small size data for convenience np.random.seed(123) input_len = 338 sample_size = 10 X = np.random.rand(sample_size,input_len) # simulate label using binomial distribution y = np.random.binomial(1,0.5,sample_size) # get adjacency mat mat = adjacent_matrix(df) # define base-classifiers def base_classifier(u,v,k): M = {} for i in range(k): M[i] = XGBClassifier( objective='binary:logistic', use_label_encoder=False, verbosity = 0, scale_pos_weight=1, max_depth = u, reg_lambda = v) return M # define a meta-classifier def meta_classifier(k): # build a neural net for probs input neuralNet = Sequential([layers.Dense(k, activation = tf.nn.relu), layers.Dense(1, activation = tf.nn.sigmoid)]) neuralNet.build(input_shape=(None,k)) return neuralNet # define OAP_EL def OAP_EL(X,y,mat,u,v,k,s): # repeat loocv tf.random.set_seed(s) # graph clustering sc = SpectralClustering(k, affinity='precomputed', n_init=100, random_state = 12345) sc.fit(mat) # sort the features based on labels labels_clust = sc.labels_ temp = np.append(X, [labels_clust], axis = 0) X = X[:,temp[temp.shape[0]-1, :].argsort()] # define k xgb models M = base_classifier(u,v,k) # fit k xgb models freq = collections.Counter(labels_clust) f_n = 0 probs_df = np.empty((len(y),1)) for key, i in zip(freq, range(len(M))): v = freq[key] M[i].fit(X[:,f_n:f_n+v],y) # predicted probs probs = M[i].predict_proba(X[:,f_n:f_n+v])[:,1].reshape(-1,1) probs_df = np.append(probs_df, probs, axis = 1) f_n = f_n+v probs_df = np.delete(probs_df, 0, 1) #print(probs_df.shape) # define meta-classifier as neural network net_probs = meta_classifier(k) optimizer_fusion = optimizers.Adam(learning_rate=1e-3) # epoch can be adjusted for epoch in range(1000): with tf.GradientTape() as tape_probs: x_train = tf.cast(probs_df, dtype = tf.float32) y_train = tf.cast(y, dtype = tf.float32) y_train = tf.reshape(y_train, (len(y_train),1)) logits = net_probs(x_train) loss = tf.reduce_mean(tf.losses.binary_crossentropy(y_train, logits, from_logits=False)) # add regularization = [] loss_reg = [] for i,p in enumerate(net_probs.trainable_variables): if i % 2 == 0: loss_reg.append(tf.nn.l2_loss(p)) loss_regularization = tf.reduce_sum(tf.stack(loss_reg)) # l2 norm loss loss = loss + 0.001*loss_regularization grads = tape_probs.gradient(loss, net_probs.trainable_variables) optimizer_fusion.apply_gradients(zip(grads, net_probs.trainable_variables)) #if epoch % 200 == 0: # print('Epoch: {}, Loss: 
        #     print('Epoch: {}, Loss: {}'.format(epoch, loss))

    return M, net_probs, labels_clust

# define main model
def train(s, X, y, mat):
    # define predicted label
    y_test_pred_all = []
    # define loocv
    loo = LeaveOneOut()
    # repeat loocv
    tf.random.set_seed(s)
    # define tuning parameters for xgb
    # kept small for testing; can be adjusted
    U = [2,4]
    W = [0.001]
    # (n-1) validation via loocv
    for train_index, test_index in loo.split(X,y):
        X_train, X_test = X[train_index,:], X[test_index,:]
        y_train, y_test = y[train_index], y[test_index]
        # skip normalization/preprocessing
        # define a dictionary for saving parameters
        params = {}
        # tune
        for k in range(2,4):
            for u in U:
                for w in W:
                    y_vali_pred_all = []
                    for tune_index, vali_index in loo.split(X_train, y_train):
                        X_tune, X_vali = X_train[tune_index,:], X_train[vali_index,:]
                        y_tune, y_vali = y_train[tune_index], y_train[vali_index]
                        # train OAP-EL
                        base, meta, labels_clust = OAP_EL(X_tune,y_tune,mat,u,w,k,s)
                        # sort the features based on labels
                        temp = np.append(X_vali, [labels_clust], axis = 0)
                        X_vali = X_vali[:,temp[temp.shape[0]-1, :].argsort()]
                        freq = collections.Counter(labels_clust)
                        f_n = 0
                        prob_vali_pred = np.empty((len(y_vali),1))
                        for key, i in zip(freq, range(len(base))):
                            v = freq[key]
                            # predicted probs
                            probs = base[i].predict_proba(X_vali[:,f_n:f_n+v])[:,1].reshape(-1,1)
                            #print(probs)
                            prob_vali_pred = np.append(prob_vali_pred, probs, axis = 1)
                            f_n = f_n+v
                        prob_vali_pred = np.delete(prob_vali_pred, 0, 1)
                        #print(k, u, w, prob_vali_pred)
                        # get final predicted probs
                        y_vali_pred = meta(prob_vali_pred).numpy()
                        y_vali_pred[y_vali_pred>=0.5] = 1
                        y_vali_pred[y_vali_pred<0.5] = 0
                        y_vali_pred_all.append(y_vali_pred[0][0])
                    #print(y_vali_pred_all)
                    # since we use synthetic data, roc_auc may not exist
                    try:
                        # roc_auc_score expects (y_true, y_score)
                        auc_vali = roc_auc_score(y_train, y_vali_pred_all)
                    except ValueError:
                        auc_vali = 0  # use 0 instead
                    params[auc_vali] = [k,u,w]
        #print(params)
        # pick the optimal params with highest auc (keys are validation AUCs)
        optimal_params = max(params)
        k_hat = params[optimal_params][0]
        u_hat = params[optimal_params][1]
        w_hat = params[optimal_params][2]
        # fit OAP-EL based on best parameters
        base, meta, labels_clust = OAP_EL(X_train,y_train,mat,u_hat,w_hat,k_hat,s)
        # sort the features based on labels
        temp = np.append(X_test, [labels_clust], axis = 0)
        X_test = X_test[:,temp[temp.shape[0]-1, :].argsort()]
        freq = collections.Counter(labels_clust)
        f_n = 0
        prob_test_pred = np.empty((len(y_test),1))
        for key, i in zip(freq, range(len(base))):
            v = freq[key]
            # predicted probs
            probs = base[i].predict_proba(X_test[:,f_n:f_n+v])[:,1].reshape(-1,1)
            #print(probs)
            prob_test_pred = np.append(prob_test_pred, probs, axis = 1)
            f_n = f_n+v
        prob_test_pred = np.delete(prob_test_pred, 0, 1)
        # get final predicted probs
        y_test_pred = meta(prob_test_pred).numpy()
        y_test_pred[y_test_pred>=0.5] = 1
        y_test_pred[y_test_pred<0.5] = 0
        y_test_pred_all.append(y_test_pred[0][0])
    return y_test_pred_all

# compute prediction metrics with m replicates
def pred_metrics(m, y):
    acc = []
    recall = []
    speci = []
    auc = []
    # random seeds, one per replicate
    seed = np.random.randint(10000, size=m)
    for s in seed:
        # get prediction
        y_pred = train(s, X, y, mat)
        # get confusion matrix
        # note: arguments are (predicted, true), so rows index predictions below
        cm = confusion_matrix(y_pred, y)
        # compute metrics
        acc.append(round(accuracy_score(y, y_pred), 3))
        recall.append(round(cm[1,1]/(cm[0,1]+cm[1,1]), 3))
        speci.append(round(cm[0,0]/(cm[0,0]+cm[1,0]), 3))
        auc.append(round(roc_auc_score(y, y_pred), 3))
    print('Mean(SD) accuracy: {}({})'.format(mean(acc), stdev(acc)))
    print('Mean(SD) recall: {}({})'.format(mean(recall), stdev(recall)))
    print('Mean(SD) specificity: {}({})'.format(mean(speci), stdev(speci)))
    print('Mean(SD) AUC: {}({})'.format(mean(auc), stdev(auc)))

# evaluation
pred_metrics(2, y)  # suppose only 2 runs
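The append/argsort idiom used in both OAP_EL and train above reorders the feature columns so that columns sharing a spectral-cluster label become contiguous before each per-cluster XGBoost model sees its slice. Below is a minimal, self-contained sketch of that step (function and variable names are mine, not from the script); it also makes explicit that the per-cluster block widths have to be consumed in the same ascending-label order that the column sort produces.

import collections
import numpy as np

def group_columns_by_cluster(X, labels):
    """Reorder the columns of X by cluster label and split them into one block per cluster."""
    labels = np.asarray(labels)
    order = np.argsort(labels, kind="stable")   # column order grouped by ascending cluster id
    X_sorted = X[:, order]
    blocks, start = [], 0
    for lab in sorted(collections.Counter(labels).keys()):
        count = int(np.sum(labels == lab))
        blocks.append(X_sorted[:, start:start + count])
        start += count
    return X_sorted, blocks

# Example: 6 features assigned to 3 clusters
labels = [2, 0, 1, 0, 2, 1]
X = np.arange(24).reshape(4, 6)
X_sorted, blocks = group_columns_by_cluster(X, labels)
# blocks[0] holds columns 1 and 3 (label 0), blocks[1] columns 2 and 5, blocks[2] columns 0 and 4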
{"hexsha": "813337f93b8491e3415ab80b23268e004987e168", "size": 10232, "ext": "py", "lang": "Python", "max_stars_repo_path": "OAP_EL_Early_Prediction/OAP_EL.py", "max_stars_repo_name": "jiaolang771/aicad", "max_stars_repo_head_hexsha": "f683fb7c3adeecebff3d36b660fe180a8f8b8c1d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "OAP_EL_Early_Prediction/OAP_EL.py", "max_issues_repo_name": "jiaolang771/aicad", "max_issues_repo_head_hexsha": "f683fb7c3adeecebff3d36b660fe180a8f8b8c1d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "OAP_EL_Early_Prediction/OAP_EL.py", "max_forks_repo_name": "jiaolang771/aicad", "max_forks_repo_head_hexsha": "f683fb7c3adeecebff3d36b660fe180a8f8b8c1d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.4511784512, "max_line_length": 103, "alphanum_fraction": 0.5175918686, "include": true, "reason": "import numpy", "num_tokens": 2469}
# bhuvan's submission for e-Yantra Hacktoberfest (image processing)
import numpy as np
import cv2

cap = cv2.VideoCapture(0)  # open the camera


def greenCircleDetect():  # detect and draw a green contour around green circles
    a = 0; b = 255; c = 0  # contour colour (BGR)
    lower_green = np.array([35, 30, 50])   # HSV range for green
    upper_green = np.array([75, 255, 255])
    mask = cv2.inRange(hsv, lower_green, upper_green)  # create the mask
    res = cv2.bitwise_and(frame, frame, mask=mask)
    img = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)  # BGR to gray conversion
    img = cv2.medianBlur(img, 5)
    cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)  # gray back to BGR for drawing
    try:  # handle the case when there are no circles, so a NoneType object is returned
        circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 20,
                                   param1=50, param2=30, minRadius=0, maxRadius=0)  # Hough transform
        circles = np.uint16(np.around(circles))
        print("green circle detected")
        for i in circles[0, :]:
            # draw the outer circle
            cv2.circle(cimg, (i[0], i[1]), i[2], (a, b, c), 2)
            cv2.circle(ori, (i[0], i[1]), i[2], (a, b, c), 2)
            # draw the center of the circle
            cv2.circle(cimg, (i[0], i[1]), 2, (a, b, c), 3)
            cv2.circle(ori, (i[0], i[1]), 2, (a, b, c), 3)
    except:
        print("no green circle")


def redCircleDetect():  # same as above, for red circles
    a = 0; b = 0; c = 255
    lower_red = np.array([0, 70, 50])
    upper_red = np.array([10, 255, 255])
    mask = cv2.inRange(hsv, lower_red, upper_red)
    res = cv2.bitwise_and(frame, frame, mask=mask)
    img = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
    img = cv2.medianBlur(img, 5)
    cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)  # needed: cimg is drawn on below
    try:
        circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 20,
                                   param1=50, param2=30, minRadius=0, maxRadius=0)
        circles = np.uint16(np.around(circles))
        print("red circle detected")
        for i in circles[0, :]:
            # draw the outer circle
            cv2.circle(cimg, (i[0], i[1]), i[2], (a, b, c), 2)
            cv2.circle(ori, (i[0], i[1]), i[2], (a, b, c), 2)
            # draw the center of the circle
            cv2.circle(cimg, (i[0], i[1]), 2, (a, b, c), 3)
            cv2.circle(ori, (i[0], i[1]), 2, (a, b, c), 3)
    except:
        print("no red circle")


def blueCircleDetect():  # same as above, for blue circles
    a = 255; b = 0; c = 0
    lower_blue = np.array([90, 50, 50])
    upper_blue = np.array([130, 255, 255])
    mask = cv2.inRange(hsv, lower_blue, upper_blue)
    res = cv2.bitwise_and(frame, frame, mask=mask)
    img = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
    img = cv2.medianBlur(img, 5)
    cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    try:
        circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 20,
                                   param1=50, param2=30, minRadius=0, maxRadius=0)
        circles = np.uint16(np.around(circles))
        print("blue circle detected")
        for i in circles[0, :]:
            # draw the outer circle
            cv2.circle(cimg, (i[0], i[1]), i[2], (a, b, c), 2)
            cv2.circle(ori, (i[0], i[1]), i[2], (a, b, c), 2)
            # draw the center of the circle
            cv2.circle(cimg, (i[0], i[1]), 2, (a, b, c), 3)
            cv2.circle(ori, (i[0], i[1]), 2, (a, b, c), 3)
    except:
        print("no blue circle")


# capture frame-by-frame
while True:
    ret, frame = cap.read()
    if not ret:
        break
    ori = frame.copy()  # a copy of the original image on which contours are drawn
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    greenCircleDetect()
    redCircleDetect()
    blueCircleDetect()
    cv2.imshow('frame', ori)
    if cv2.waitKey(3) & 0xFF == ord('q'):  # press 'q' to quit
        break

cap.release()
cv2.destroyAllWindows()
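One caveat about the red detector above: in OpenCV's HSV representation the hue axis runs from 0 to 180 and red wraps around both ends, so a single [0, 10] hue band misses reds near hue 180. A possible extension (a sketch only, not part of the original submission) combines the two bands into one mask:

import cv2
import numpy as np

def red_mask(hsv):
    # red occupies both ends of OpenCV's 0-180 hue range
    low  = cv2.inRange(hsv, np.array([0, 70, 50]),   np.array([10, 255, 255]))
    high = cv2.inRange(hsv, np.array([170, 70, 50]), np.array([180, 255, 255]))
    return cv2.bitwise_or(low, high)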
{"hexsha": "2764469778aaced84bcaf0f3ef2ee607bbe34910", "size": 3760, "ext": "py", "lang": "Python", "max_stars_repo_path": "color_detect.py", "max_stars_repo_name": "bhuvanjhamb/image-processing", "max_stars_repo_head_hexsha": "b89ee42db793ba90e2eb40cc03f6262f56c0574b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "color_detect.py", "max_issues_repo_name": "bhuvanjhamb/image-processing", "max_issues_repo_head_hexsha": "b89ee42db793ba90e2eb40cc03f6262f56c0574b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "color_detect.py", "max_forks_repo_name": "bhuvanjhamb/image-processing", "max_forks_repo_head_hexsha": "b89ee42db793ba90e2eb40cc03f6262f56c0574b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.0, "max_line_length": 110, "alphanum_fraction": 0.6058510638, "include": true, "reason": "import numpy", "num_tokens": 1136}
module pcre_constants use, intrinsic :: iso_c_binding, only : c_int implicit none ! Extension integer(c_int), parameter :: PCRE_SUCCESS = 0 !! The code ran successfully integer(c_int), parameter :: PCRE_ERROR_NOMATCH = -1 !! The subject string did not match the pattern. integer(c_int), parameter :: PCRE_ERROR_NULL = -2 !! Either code or subject was passed as NULL, or ovector was NULL and ovecsize was not zero. integer(c_int), parameter :: PCRE_ERROR_BADOPTION = -3 !! An unrecognized bit was set in the options argument. integer(c_int), parameter :: PCRE_ERROR_BADMAGIC = -4 !! PCRE stores a 4-byte "magic number" at the start of the compiled code, to catch the case when it is passed a junk pointer and to detect when a pattern that was compiled in an environment of one endianness is run in an environment with the other endianness. This is the error that PCRE gives when the magic number is not present. integer(c_int), parameter :: PCRE_ERROR_UNKNOWN_OPCODE = -5 !! While running the pattern match, an unknown item was encountered in the compiled pattern. This error could be caused by a bug in PCRE or by overwriting of the compiled pattern. integer(c_int), parameter :: PCRE_ERROR_NOMEMORY = -6 !! If a pattern contains back references, but the ovector that is passed to pcre_exec() is not big enough to remember the referenced substrings, PCRE gets a block of memory at the start of matching to use for this purpose. If the call via pcre_malloc() fails, this error is given. The memory is automatically freed at the end of matching. !! This error is also given if pcre_stack_malloc() fails in pcre_exec(). This can happen only when PCRE has been compiled with --disable-stack-for-recursion. integer(c_int), parameter :: PCRE_ERROR_NOSUBSTRING = -7 !! This error is used by the pcre_copy_substring(), pcre_get_substring(), and pcre_get_substring_list() functions (see below). It is never returned by pcre_exec(). integer(c_int), parameter :: PCRE_ERROR_MATCHLIMIT = -8 !! The backtracking limit, as specified by the match_limit field in a pcre_extra structure (or defaulted) was reached. See the description above. integer(c_int), parameter :: PCRE_ERROR_CALLOUT = -9 !! This error is never generated by pcre_exec() itself. It is provided for use by callout functions that want to yield a distinctive error code. See the pcrecallout documentation for details. integer(c_int), parameter :: PCRE_ERROR_BADUTF8 = -10 !! A string that contains an invalid UTF-8 byte sequence was passed as a subject, and the PCRE_NO_UTF8_CHECK option was not set. If the size of the output vector (ovecsize) is at least 2, the byte offset to the start of the the invalid UTF-8 character is placed in the first element, and a reason code is placed in the second element. The reason codes are listed in the following section. For backward compatibility, if PCRE_PARTIAL_HARD is set and the problem is a truncated UTF-8 character at the end of the subject (reason codes 1 to 5), PCRE_ERROR_SHORTUTF8 is returned instead of PCRE_ERROR_BADUTF8. integer(c_int), parameter :: PCRE_ERROR_BADUTF8_OFFSET = -11 !! The UTF-8 byte sequence that was passed as a subject was checked and found to be valid (the PCRE_NO_UTF8_CHECK option was not set), but the value of startoffset did not point to the beginning of a UTF-8 character or the end of the subject. integer(c_int), parameter :: PCRE_ERROR_PARTIAL = -12 !! The subject string did not match, but it did match partially. See the pcrepartial documentation for details of partial matching. 
integer(c_int), parameter :: PCRE_ERROR_BADPARTIAL = -13 !! This code is no longer in use. It was formerly returned when the PCRE_PARTIAL option was used with a compiled pattern containing items that were not supported for partial matching. From release 8.00 onwards, there are no restrictions on partial matching. integer(c_int), parameter :: PCRE_ERROR_INTERNAL = -14 !! An unexpected internal error has occurred. This error could be caused by a bug in PCRE or by overwriting of the compiled pattern. integer(c_int), parameter :: PCRE_ERROR_BADCOUNT = -15 !! This error is given if the value of the ovecsize argument is negative. integer(c_int), parameter :: PCRE_ERROR_RECURSIONLIMIT = -21 !! The internal recursion limit, as specified by the match_limit_recursion field in a pcre_extra structure (or defaulted) was reached. See the description above. integer(c_int), parameter :: PCRE_ERROR_BADNEWLINE = -23 !! An invalid combination of PCRE_NEWLINE_xxx options was given. integer(c_int), parameter :: PCRE_ERROR_BADOFFSET = -24 !! The value of startoffset was negative or greater than the length of the subject, that is, the value in length. integer(c_int), parameter :: PCRE_ERROR_SHORTUTF8 = -25 !! This error is returned instead of PCRE_ERROR_BADUTF8 when the subject string ends with a truncated UTF-8 character and the PCRE_PARTIAL_HARD option is set. Information about the failure is returned as for PCRE_ERROR_BADUTF8. It is in fact sufficient to detect this case, but this special error code for PCRE_PARTIAL_HARD precedes the implementation of returned information; it is retained for backwards compatibility. integer(c_int), parameter :: PCRE_ERROR_RECURSELOOP = -26 !! This error is returned when pcre_exec() detects a recursion loop within the pattern. Specifically, it means that either the whole pattern or a subpattern has been called recursively for the second time at the same position in the subject string. Some simple patterns that might do this are detected and faulted at compile time, but more complicated cases, in particular mutual recursions between two different subpatterns, cannot be detected until run time. integer(c_int), parameter :: PCRE_ERROR_JIT_STACKLIMIT = -27 !! This error is returned when a pattern that was successfully studied using a JIT compile option is being matched, but the memory available for the just-in-time processing stack is not large enough. See the pcrejit documentation for more details. integer(c_int), parameter :: PCRE_ERROR_BADMODE = -28 !! This error is given if a pattern that was compiled by the 8-bit library is passed to a 16-bit or 32-bit library function, or vice versa. integer(c_int), parameter :: PCRE_ERROR_BADENDIANNESS = -29 !! This error is given if a pattern that was compiled and saved is reloaded on a host with different endianness. The utility function pcre_pattern_to_host_byte_order() can be used to convert such a pattern so that it runs on the new host. integer(c_int), parameter :: PCRE_ERROR_JIT_BADOPTION = -31 !! This error is returned when a pattern that was successfully studied using a JIT compile option is being matched, but the matching mode (partial or complete match) does not correspond to any JIT compilation mode. When the JIT fast path function is used, this error may be also given for invalid options. See the pcrejit documentation for more details. integer(c_int), parameter :: PCRE_ERROR_BADLENGTH = -32 !! This error is given if pcre_exec() is called with a negative value for the length argument. 
integer(c_int), parameter :: PCRE_ERROR_UNSET = -33 !! The requested field by pcre_fullinfo() is not set ! Request types for pcre_fullinfo integer(c_int), parameter :: PCRE_INFO_OPTIONS = 0 integer(c_int), parameter :: PCRE_INFO_SIZE = 1 integer(c_int), parameter :: PCRE_INFO_CAPTURECOUNT = 2 integer(c_int), parameter :: PCRE_INFO_BACKREFMAX = 3 integer(c_int), parameter :: PCRE_INFO_FIRSTBYTE = 4 integer(c_int), parameter :: PCRE_INFO_FIRSTTABLE = 5 integer(c_int), parameter :: PCRE_INFO_LASTLITERAL = 6 integer(c_int), parameter :: PCRE_INFO_NAMEENTRYSIZE = 7 integer(c_int), parameter :: PCRE_INFO_NAMECOUNT = 8 integer(c_int), parameter :: PCRE_INFO_NAMETABLE = 9 integer(c_int), parameter :: PCRE_INFO_STUDYSIZE = 10 integer(c_int), parameter :: PCRE_INFO_DEFAULT_TABLES = 11 integer(c_int), parameter :: PCRE_INFO_OKPARTIAL = 12 integer(c_int), parameter :: PCRE_INFO_JCHANGED = 13 integer(c_int), parameter :: PCRE_INFO_HASCRORLF = 14 integer(c_int), parameter :: PCRE_INFO_MINLENGTH = 15 integer(c_int), parameter :: PCRE_INFO_JIT = 16 integer(c_int), parameter :: PCRE_INFO_JITSIZE = 17 integer(c_int), parameter :: PCRE_INFO_MAXLOOKBEHIND = 18 integer(c_int), parameter :: PCRE_INFO_FIRSTCHARACTER = 19 integer(c_int), parameter :: PCRE_INFO_FIRSTCHARACTERFLAGS = 20 integer(c_int), parameter :: PCRE_INFO_REQUIREDCHAR = 21 integer(c_int), parameter :: PCRE_INFO_REQUIREDCHARFLAGS = 22 integer(c_int), parameter :: PCRE_INFO_MATCHLIMIT = 23 integer(c_int), parameter :: PCRE_INFO_RECURSIONLIMIT = 24 integer(c_int), parameter :: PCRE_INFO_MATCH_EMPTY = 25 ! Public options integer(c_int), parameter :: PCRE_CASELESS = int(z'00000001') integer(c_int), parameter :: PCRE_MULTILINE = int(z'00000002') integer(c_int), parameter :: PCRE_DOTALL = int(z'00000004') integer(c_int), parameter :: PCRE_EXTENDED = int(z'00000008') integer(c_int), parameter :: PCRE_ANCHORED = int(z'00000010') integer(c_int), parameter :: PCRE_DOLLAR_ENDONLY = int(z'00000020') integer(c_int), parameter :: PCRE_EXTRA = int(z'00000040') integer(c_int), parameter :: PCRE_NOTBOL = int(z'00000080') integer(c_int), parameter :: PCRE_NOTEOL = int(z'00000100') integer(c_int), parameter :: PCRE_UNGREEDY = int(z'00000200') integer(c_int), parameter :: PCRE_NOTEMPTY = int(z'00000400') integer(c_int), parameter :: PCRE_UTF8 = int(z'00000800') integer(c_int), parameter :: PCRE_UTF16 = int(z'00000800') integer(c_int), parameter :: PCRE_UTF32 = int(z'00000800') integer(c_int), parameter :: PCRE_NO_AUTO_CAPTURE = int(z'00001000') integer(c_int), parameter :: PCRE_NO_UTF8_CHECK = int(z'00002000') integer(c_int), parameter :: PCRE_NO_UTF16_CHECK = int(z'00002000') integer(c_int), parameter :: PCRE_NO_UTF32_CHECK = int(z'00002000') integer(c_int), parameter :: PCRE_AUTO_CALLOUT = int(z'00004000') integer(c_int), parameter :: PCRE_PARTIAL_SOFT = int(z'00008000') integer(c_int), parameter :: PCRE_PARTIAL = int(z'00008000') integer(c_int), parameter :: PCRE_NEVER_UTF = int(z'00010000') integer(c_int), parameter :: PCRE_DFA_SHORTEST = int(z'00010000') integer(c_int), parameter :: PCRE_NO_AUTO_POSSESS = int(z'00020000') integer(c_int), parameter :: PCRE_DFA_RESTART = int(z'00020000') integer(c_int), parameter :: PCRE_FIRSTLINE = int(z'00040000') integer(c_int), parameter :: PCRE_DUPNAMES = int(z'00080000') integer(c_int), parameter :: PCRE_NEWLINE_CR = int(z'00100000') integer(c_int), parameter :: PCRE_NEWLINE_LF = int(z'00200000') integer(c_int), parameter :: PCRE_NEWLINE_CRLF = int(z'00300000') integer(c_int), parameter :: PCRE_NEWLINE_ANY = 
int(z'00400000') integer(c_int), parameter :: PCRE_NEWLINE_ANYCRLF = int(z'00500000') integer(c_int), parameter :: PCRE_BSR_ANYCRLF = int(z'00800000') integer(c_int), parameter :: PCRE_BSR_UNICODE = int(z'01000000') integer(c_int), parameter :: PCRE_JAVASCRIPT_COMPAT = int(z'02000000') integer(c_int), parameter :: PCRE_NO_START_OPTIMIZE = int(z'04000000') integer(c_int), parameter :: PCRE_NO_START_OPTIMISE = int(z'04000000') integer(c_int), parameter :: PCRE_PARTIAL_HARD = int(z'08000000') integer(c_int), parameter :: PCRE_NOTEMPTY_ATSTART = int(z'10000000') integer(c_int), parameter :: PCRE_UCP = int(z'20000000') integer(c_int), parameter :: PCRE_CONFIG_UTF8 = 0 integer(c_int), parameter :: PCRE_CONFIG_NEWLINE = 1 integer(c_int), parameter :: PCRE_CONFIG_LINK_SIZE = 2 integer(c_int), parameter :: PCRE_CONFIG_POSIX_MALLOC_THRESHOLD = 3 integer(c_int), parameter :: PCRE_CONFIG_MATCH_LIMIT = 4 integer(c_int), parameter :: PCRE_CONFIG_STACKRECURSE = 5 integer(c_int), parameter :: PCRE_CONFIG_UNICODE_PROPERTIES = 6 integer(c_int), parameter :: PCRE_CONFIG_MATCH_LIMIT_RECURSION = 7 integer(c_int), parameter :: PCRE_CONFIG_BSR = 8 integer(c_int), parameter :: PCRE_CONFIG_JIT = 9 integer(c_int), parameter :: PCRE_CONFIG_UTF16 = 10 integer(c_int), parameter :: PCRE_CONFIG_JITTARGET = 11 integer(c_int), parameter :: PCRE_CONFIG_UTF32 = 12 integer(c_int), parameter :: PCRE_CONFIG_PARENS_LIMIT = 13 end module pcre_constants
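The option constants above are bit masks meant to be combined with a bitwise OR (ior in Fortran), and some named options are simply unions of other bits, which can be verified directly from the hexadecimal values. A small illustration, written in Python for brevity with the values copied from the module:

PCRE_CASELESS     = 0x00000001
PCRE_MULTILINE    = 0x00000002
PCRE_NEWLINE_CR   = 0x00100000
PCRE_NEWLINE_LF   = 0x00200000
PCRE_NEWLINE_CRLF = 0x00300000

options = PCRE_CASELESS | PCRE_MULTILINE                         # combine independent flags
assert PCRE_NEWLINE_CRLF == PCRE_NEWLINE_CR | PCRE_NEWLINE_LF    # CRLF is the union of CR and LF
assert options & PCRE_CASELESS != 0                              # test whether a flag is set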
{"hexsha": "19a7b58e801baac8be354f8b6d198cf3a3bf3c23", "size": 13170, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "lib/pcre_constants.f90", "max_stars_repo_name": "14NGiestas/fregex", "max_stars_repo_head_hexsha": "1e20f084fb33b2b9dda8ca887bafae3c847e08c6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-09-16T02:38:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-16T19:11:00.000Z", "max_issues_repo_path": "lib/pcre_constants.f90", "max_issues_repo_name": "14NGiestas/fregex", "max_issues_repo_head_hexsha": "1e20f084fb33b2b9dda8ca887bafae3c847e08c6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/pcre_constants.f90", "max_forks_repo_name": "14NGiestas/fregex", "max_forks_repo_head_hexsha": "1e20f084fb33b2b9dda8ca887bafae3c847e08c6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 86.6447368421, "max_line_length": 608, "alphanum_fraction": 0.7125284738, "num_tokens": 3319}
[STATEMENT] lemma h1b_helper_leq: "(\<forall>((a::real), (b::real), (c::real))\<in>set leq. \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0) \<Longrightarrow> (\<exists>y.\<forall>x<y. (\<forall>(a, b, c)\<in>set leq. a * x\<^sup>2 + b * x + c \<le> 0))" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<forall>(a, b, c)\<in>set leq. \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0 \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>(a, b, c)\<in>set leq. a * x\<^sup>2 + b * x + c \<le> 0 [PROOF STEP] proof - [PROOF STATE] proof (state) goal (1 subgoal): 1. \<forall>(a, b, c)\<in>set leq. \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0 \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>(a, b, c)\<in>set leq. a * x\<^sup>2 + b * x + c \<le> 0 [PROOF STEP] show "(\<forall>(a, b, c)\<in>set leq. \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0) \<Longrightarrow> (\<exists>y.\<forall>x<y. (\<forall>(a, b, c)\<in>set leq. a * x\<^sup>2 + b * x + c \<le> 0))" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<forall>(a, b, c)\<in>set leq. \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0 \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>(a, b, c)\<in>set leq. a * x\<^sup>2 + b * x + c \<le> 0 [PROOF STEP] proof (induct leq) [PROOF STATE] proof (state) goal (2 subgoals): 1. \<forall>(a, b, c)\<in>set []. \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0 \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>(a, b, c)\<in>set []. a * x\<^sup>2 + b * x + c \<le> 0 2. \<And>a leq. \<lbrakk>\<forall>(a, b, c)\<in>set leq. \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0 \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>(a, b, c)\<in>set leq. a * x\<^sup>2 + b * x + c \<le> 0; \<forall>(a, b, c)\<in>set (a # leq). \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0\<rbrakk> \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>(a, b, c)\<in>set (a # leq). a * x\<^sup>2 + b * x + c \<le> 0 [PROOF STEP] case Nil [PROOF STATE] proof (state) this: \<forall>a\<in>set []. case a of (a, ba, c) \<Rightarrow> \<exists>x. \<forall>y<x. a * y\<^sup>2 + ba * y + c \<le> 0 goal (2 subgoals): 1. \<forall>(a, b, c)\<in>set []. \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0 \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>(a, b, c)\<in>set []. a * x\<^sup>2 + b * x + c \<le> 0 2. \<And>a leq. \<lbrakk>\<forall>(a, b, c)\<in>set leq. \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0 \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>(a, b, c)\<in>set leq. a * x\<^sup>2 + b * x + c \<le> 0; \<forall>(a, b, c)\<in>set (a # leq). \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0\<rbrakk> \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>(a, b, c)\<in>set (a # leq). a * x\<^sup>2 + b * x + c \<le> 0 [PROOF STEP] then [PROOF STATE] proof (chain) picking this: \<forall>a\<in>set []. case a of (a, ba, c) \<Rightarrow> \<exists>x. \<forall>y<x. a * y\<^sup>2 + ba * y + c \<le> 0 [PROOF STEP] show ?case [PROOF STATE] proof (prove) using this: \<forall>a\<in>set []. case a of (a, ba, c) \<Rightarrow> \<exists>x. \<forall>y<x. a * y\<^sup>2 + ba * y + c \<le> 0 goal (1 subgoal): 1. \<exists>y. \<forall>x<y. \<forall>a\<in>set []. case a of (a, ba, c) \<Rightarrow> a * x\<^sup>2 + ba * x + c \<le> 0 [PROOF STEP] by auto [PROOF STATE] proof (state) this: \<exists>y. \<forall>x<y. \<forall>a\<in>set []. case a of (a, ba, c) \<Rightarrow> a * x\<^sup>2 + ba * x + c \<le> 0 goal (1 subgoal): 1. 
\<And>a leq. \<lbrakk>\<forall>(a, b, c)\<in>set leq. \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0 \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>(a, b, c)\<in>set leq. a * x\<^sup>2 + b * x + c \<le> 0; \<forall>(a, b, c)\<in>set (a # leq). \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0\<rbrakk> \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>(a, b, c)\<in>set (a # leq). a * x\<^sup>2 + b * x + c \<le> 0 [PROOF STEP] next [PROOF STATE] proof (state) goal (1 subgoal): 1. \<And>a leq. \<lbrakk>\<forall>(a, b, c)\<in>set leq. \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0 \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>(a, b, c)\<in>set leq. a * x\<^sup>2 + b * x + c \<le> 0; \<forall>(a, b, c)\<in>set (a # leq). \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0\<rbrakk> \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>(a, b, c)\<in>set (a # leq). a * x\<^sup>2 + b * x + c \<le> 0 [PROOF STEP] case (Cons q leq) [PROOF STATE] proof (state) this: \<forall>a\<in>set leq. case a of (a, ba, c) \<Rightarrow> \<exists>x. \<forall>y<x. a * y\<^sup>2 + ba * y + c \<le> 0 \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>a\<in>set leq. case a of (a, ba, c) \<Rightarrow> a * x\<^sup>2 + ba * x + c \<le> 0 \<forall>a\<in>set (q # leq). case a of (a, ba, c) \<Rightarrow> \<exists>x. \<forall>y<x. a * y\<^sup>2 + ba * y + c \<le> 0 goal (1 subgoal): 1. \<And>a leq. \<lbrakk>\<forall>(a, b, c)\<in>set leq. \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0 \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>(a, b, c)\<in>set leq. a * x\<^sup>2 + b * x + c \<le> 0; \<forall>(a, b, c)\<in>set (a # leq). \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0\<rbrakk> \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>(a, b, c)\<in>set (a # leq). a * x\<^sup>2 + b * x + c \<le> 0 [PROOF STEP] have ind: " \<forall>a\<in>set (q # leq). case a of (a, ba, c) \<Rightarrow> \<exists>x. \<forall>y<x. a * y\<^sup>2 + ba * y + c \<le> 0" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<forall>a\<in>set (q # leq). case a of (a, ba, c) \<Rightarrow> \<exists>x. \<forall>y<x. a * y\<^sup>2 + ba * y + c \<le> 0 [PROOF STEP] using Cons.prems [PROOF STATE] proof (prove) using this: \<forall>a\<in>set (q # leq). case a of (a, ba, c) \<Rightarrow> \<exists>x. \<forall>y<x. a * y\<^sup>2 + ba * y + c \<le> 0 goal (1 subgoal): 1. \<forall>a\<in>set (q # leq). case a of (a, ba, c) \<Rightarrow> \<exists>x. \<forall>y<x. a * y\<^sup>2 + ba * y + c \<le> 0 [PROOF STEP] by auto [PROOF STATE] proof (state) this: \<forall>a\<in>set (q # leq). case a of (a, ba, c) \<Rightarrow> \<exists>x. \<forall>y<x. a * y\<^sup>2 + ba * y + c \<le> 0 goal (1 subgoal): 1. \<And>a leq. \<lbrakk>\<forall>(a, b, c)\<in>set leq. \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0 \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>(a, b, c)\<in>set leq. a * x\<^sup>2 + b * x + c \<le> 0; \<forall>(a, b, c)\<in>set (a # leq). \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0\<rbrakk> \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>(a, b, c)\<in>set (a # leq). a * x\<^sup>2 + b * x + c \<le> 0 [PROOF STEP] then [PROOF STATE] proof (chain) picking this: \<forall>a\<in>set (q # leq). case a of (a, ba, c) \<Rightarrow> \<exists>x. \<forall>y<x. a * y\<^sup>2 + ba * y + c \<le> 0 [PROOF STEP] have "case q of (a, ba, c) \<Rightarrow> \<exists>x. \<forall>y<x. 
a * y\<^sup>2 + ba * y + c \<le> 0 " [PROOF STATE] proof (prove) using this: \<forall>a\<in>set (q # leq). case a of (a, ba, c) \<Rightarrow> \<exists>x. \<forall>y<x. a * y\<^sup>2 + ba * y + c \<le> 0 goal (1 subgoal): 1. case q of (a, ba, c) \<Rightarrow> \<exists>x. \<forall>y<x. a * y\<^sup>2 + ba * y + c \<le> 0 [PROOF STEP] by simp [PROOF STATE] proof (state) this: case q of (a, ba, c) \<Rightarrow> \<exists>x. \<forall>y<x. a * y\<^sup>2 + ba * y + c \<le> 0 goal (1 subgoal): 1. \<And>a leq. \<lbrakk>\<forall>(a, b, c)\<in>set leq. \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0 \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>(a, b, c)\<in>set leq. a * x\<^sup>2 + b * x + c \<le> 0; \<forall>(a, b, c)\<in>set (a # leq). \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0\<rbrakk> \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>(a, b, c)\<in>set (a # leq). a * x\<^sup>2 + b * x + c \<le> 0 [PROOF STEP] then [PROOF STATE] proof (chain) picking this: case q of (a, ba, c) \<Rightarrow> \<exists>x. \<forall>y<x. a * y\<^sup>2 + ba * y + c \<le> 0 [PROOF STEP] obtain y2 where y2_prop: "case q of (a, ba, c) \<Rightarrow> (\<forall>y<y2. a * y\<^sup>2 + ba * y + c \<le> 0)" [PROOF STATE] proof (prove) using this: case q of (a, ba, c) \<Rightarrow> \<exists>x. \<forall>y<x. a * y\<^sup>2 + ba * y + c \<le> 0 goal (1 subgoal): 1. (\<And>y2. case q of (a, ba, c) \<Rightarrow> \<forall>y<y2. a * y\<^sup>2 + ba * y + c \<le> 0 \<Longrightarrow> thesis) \<Longrightarrow> thesis [PROOF STEP] by auto [PROOF STATE] proof (state) this: case q of (a, ba, c) \<Rightarrow> \<forall>y<y2. a * y\<^sup>2 + ba * y + c \<le> 0 goal (1 subgoal): 1. \<And>a leq. \<lbrakk>\<forall>(a, b, c)\<in>set leq. \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0 \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>(a, b, c)\<in>set leq. a * x\<^sup>2 + b * x + c \<le> 0; \<forall>(a, b, c)\<in>set (a # leq). \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0\<rbrakk> \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>(a, b, c)\<in>set (a # leq). a * x\<^sup>2 + b * x + c \<le> 0 [PROOF STEP] have "\<forall>a\<in>set leq. case a of (a, ba, c) \<Rightarrow> \<exists>x. \<forall>y<x. a * y\<^sup>2 + ba * y + c \<le> 0" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<forall>a\<in>set leq. case a of (a, ba, c) \<Rightarrow> \<exists>x. \<forall>y<x. a * y\<^sup>2 + ba * y + c \<le> 0 [PROOF STEP] using ind [PROOF STATE] proof (prove) using this: \<forall>a\<in>set (q # leq). case a of (a, ba, c) \<Rightarrow> \<exists>x. \<forall>y<x. a * y\<^sup>2 + ba * y + c \<le> 0 goal (1 subgoal): 1. \<forall>a\<in>set leq. case a of (a, ba, c) \<Rightarrow> \<exists>x. \<forall>y<x. a * y\<^sup>2 + ba * y + c \<le> 0 [PROOF STEP] by simp [PROOF STATE] proof (state) this: \<forall>a\<in>set leq. case a of (a, ba, c) \<Rightarrow> \<exists>x. \<forall>y<x. a * y\<^sup>2 + ba * y + c \<le> 0 goal (1 subgoal): 1. \<And>a leq. \<lbrakk>\<forall>(a, b, c)\<in>set leq. \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0 \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>(a, b, c)\<in>set leq. a * x\<^sup>2 + b * x + c \<le> 0; \<forall>(a, b, c)\<in>set (a # leq). \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0\<rbrakk> \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>(a, b, c)\<in>set (a # leq). a * x\<^sup>2 + b * x + c \<le> 0 [PROOF STEP] then [PROOF STATE] proof (chain) picking this: \<forall>a\<in>set leq. case a of (a, ba, c) \<Rightarrow> \<exists>x. 
\<forall>y<x. a * y\<^sup>2 + ba * y + c \<le> 0 [PROOF STEP] have " \<exists>y. \<forall>x<y. \<forall>a\<in>set leq. case a of (a, ba, c) \<Rightarrow> a * x\<^sup>2 + ba * x + c \<le> 0" [PROOF STATE] proof (prove) using this: \<forall>a\<in>set leq. case a of (a, ba, c) \<Rightarrow> \<exists>x. \<forall>y<x. a * y\<^sup>2 + ba * y + c \<le> 0 goal (1 subgoal): 1. \<exists>y. \<forall>x<y. \<forall>a\<in>set leq. case a of (a, ba, c) \<Rightarrow> a * x\<^sup>2 + ba * x + c \<le> 0 [PROOF STEP] using Cons.hyps [PROOF STATE] proof (prove) using this: \<forall>a\<in>set leq. case a of (a, ba, c) \<Rightarrow> \<exists>x. \<forall>y<x. a * y\<^sup>2 + ba * y + c \<le> 0 \<forall>a\<in>set leq. case a of (a, ba, c) \<Rightarrow> \<exists>x. \<forall>y<x. a * y\<^sup>2 + ba * y + c \<le> 0 \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>a\<in>set leq. case a of (a, ba, c) \<Rightarrow> a * x\<^sup>2 + ba * x + c \<le> 0 goal (1 subgoal): 1. \<exists>y. \<forall>x<y. \<forall>a\<in>set leq. case a of (a, ba, c) \<Rightarrow> a * x\<^sup>2 + ba * x + c \<le> 0 [PROOF STEP] by blast [PROOF STATE] proof (state) this: \<exists>y. \<forall>x<y. \<forall>a\<in>set leq. case a of (a, ba, c) \<Rightarrow> a * x\<^sup>2 + ba * x + c \<le> 0 goal (1 subgoal): 1. \<And>a leq. \<lbrakk>\<forall>(a, b, c)\<in>set leq. \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0 \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>(a, b, c)\<in>set leq. a * x\<^sup>2 + b * x + c \<le> 0; \<forall>(a, b, c)\<in>set (a # leq). \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0\<rbrakk> \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>(a, b, c)\<in>set (a # leq). a * x\<^sup>2 + b * x + c \<le> 0 [PROOF STEP] then [PROOF STATE] proof (chain) picking this: \<exists>y. \<forall>x<y. \<forall>a\<in>set leq. case a of (a, ba, c) \<Rightarrow> a * x\<^sup>2 + ba * x + c \<le> 0 [PROOF STEP] obtain y1 where y1_prop: "\<forall>x<y1. \<forall>a\<in>set leq. case a of (a, ba, c) \<Rightarrow> a * x^2 + ba * x + c \<le> 0" [PROOF STATE] proof (prove) using this: \<exists>y. \<forall>x<y. \<forall>a\<in>set leq. case a of (a, ba, c) \<Rightarrow> a * x\<^sup>2 + ba * x + c \<le> 0 goal (1 subgoal): 1. (\<And>y1. \<forall>x<y1. \<forall>a\<in>set leq. case a of (a, ba, c) \<Rightarrow> a * x\<^sup>2 + ba * x + c \<le> 0 \<Longrightarrow> thesis) \<Longrightarrow> thesis [PROOF STEP] by blast [PROOF STATE] proof (state) this: \<forall>x<y1. \<forall>a\<in>set leq. case a of (a, ba, c) \<Rightarrow> a * x\<^sup>2 + ba * x + c \<le> 0 goal (1 subgoal): 1. \<And>a leq. \<lbrakk>\<forall>(a, b, c)\<in>set leq. \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0 \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>(a, b, c)\<in>set leq. a * x\<^sup>2 + b * x + c \<le> 0; \<forall>(a, b, c)\<in>set (a # leq). \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0\<rbrakk> \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>(a, b, c)\<in>set (a # leq). a * x\<^sup>2 + b * x + c \<le> 0 [PROOF STEP] let ?y = "min y1 y2" [PROOF STATE] proof (state) goal (1 subgoal): 1. \<And>a leq. \<lbrakk>\<forall>(a, b, c)\<in>set leq. \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0 \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>(a, b, c)\<in>set leq. a * x\<^sup>2 + b * x + c \<le> 0; \<forall>(a, b, c)\<in>set (a # leq). \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0\<rbrakk> \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>(a, b, c)\<in>set (a # leq). 
a * x\<^sup>2 + b * x + c \<le> 0 [PROOF STEP] have "\<forall>x < ?y. (\<forall>a\<in>set (q #leq). case a of (a, ba, c) \<Rightarrow> a * x^2 + ba * x + c \<le> 0)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<forall>x<min y1 y2. \<forall>a\<in>set (q # leq). case a of (a, ba, c) \<Rightarrow> a * x\<^sup>2 + ba * x + c \<le> 0 [PROOF STEP] using y1_prop y2_prop [PROOF STATE] proof (prove) using this: \<forall>x<y1. \<forall>a\<in>set leq. case a of (a, ba, c) \<Rightarrow> a * x\<^sup>2 + ba * x + c \<le> 0 case q of (a, ba, c) \<Rightarrow> \<forall>y<y2. a * y\<^sup>2 + ba * y + c \<le> 0 goal (1 subgoal): 1. \<forall>x<min y1 y2. \<forall>a\<in>set (q # leq). case a of (a, ba, c) \<Rightarrow> a * x\<^sup>2 + ba * x + c \<le> 0 [PROOF STEP] by fastforce [PROOF STATE] proof (state) this: \<forall>x<min y1 y2. \<forall>a\<in>set (q # leq). case a of (a, ba, c) \<Rightarrow> a * x\<^sup>2 + ba * x + c \<le> 0 goal (1 subgoal): 1. \<And>a leq. \<lbrakk>\<forall>(a, b, c)\<in>set leq. \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0 \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>(a, b, c)\<in>set leq. a * x\<^sup>2 + b * x + c \<le> 0; \<forall>(a, b, c)\<in>set (a # leq). \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0\<rbrakk> \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>(a, b, c)\<in>set (a # leq). a * x\<^sup>2 + b * x + c \<le> 0 [PROOF STEP] then [PROOF STATE] proof (chain) picking this: \<forall>x<min y1 y2. \<forall>a\<in>set (q # leq). case a of (a, ba, c) \<Rightarrow> a * x\<^sup>2 + ba * x + c \<le> 0 [PROOF STEP] show ?case [PROOF STATE] proof (prove) using this: \<forall>x<min y1 y2. \<forall>a\<in>set (q # leq). case a of (a, ba, c) \<Rightarrow> a * x\<^sup>2 + ba * x + c \<le> 0 goal (1 subgoal): 1. \<exists>y. \<forall>x<y. \<forall>a\<in>set (q # leq). case a of (a, ba, c) \<Rightarrow> a * x\<^sup>2 + ba * x + c \<le> 0 [PROOF STEP] by blast [PROOF STATE] proof (state) this: \<exists>y. \<forall>x<y. \<forall>a\<in>set (q # leq). case a of (a, ba, c) \<Rightarrow> a * x\<^sup>2 + ba * x + c \<le> 0 goal: No subgoals! [PROOF STEP] qed [PROOF STATE] proof (state) this: \<forall>(a, b, c)\<in>set leq. \<exists>x. \<forall>y<x. a * y\<^sup>2 + b * y + c \<le> 0 \<Longrightarrow> \<exists>y. \<forall>x<y. \<forall>(a, b, c)\<in>set leq. a * x\<^sup>2 + b * x + c \<le> 0 goal: No subgoals! [PROOF STEP] qed
{"llama_tokens": 7738, "file": "Virtual_Substitution_QE", "length": 37}
# -*- coding: utf-8 -*- # Copyright (c) 2012, Sergio Callegari # All rights reserved. # This file is part of PyDSM. # PyDSM is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # PyDSM is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with PyDSM. If not, see <http://www.gnu.org/licenses/>. # This file includes code ported from the DELSIG Matlab toolbox # (see http://www.mathworks.com/matlabcentral/fileexchange/19) # covered by the following copyright and permission notice # # Copyright (c) 2009 Richard Schreier # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the distribution # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. """ DELSIG helpers routines ======================= """ import numpy as np from math import ceil, sqrt from ._padding import padl, padr from ._rmsGain import rmsGain from ..relab import db __all__ = ["ds_synNTFobj1", "ds_f1f2", "ds_optzeros"] def ds_synNTFobj1(x, p, osr, f0): """ Objective function for synthesizeNTF. """ z = np.exp(2j*np.pi*(f0+0.5/osr*x)) if f0 > 0: z = padl(z, len(p)/2, np.exp(2j*np.pi*f0)) z = np.concatenate((z, z.conj())) if f0 == 0: z = padr(z, len(p), 1) f1, f2 = ds_f1f2(osr, f0) return db(rmsGain((z, p, 1), f1, f2)) def ds_f1f2(osr=64, f0=0, complex_flag=False): """ Lower and higher extremes of the signal band as normalized frequencies Helper function. Parameters ---------- osr : float, optional the oversamping ratio f0 : float, optional normalized center frequency for BP modulators, or 0 for LP modulators. Defaults to 0. complex_flag : bool, optional flag indicating if the modulator is quadrature type. Returns ------- f1f2 : tuple with two entries corresponding to the lower and higher extremes of the signal band. 
""" if complex_flag: f1 = f0-0.5/osr f2 = f0+0.5/osr else: if f0 > 0.25/osr: f1 = f0-0.25/osr f2 = f0+0.25/osr else: f1 = 0 f2 = 0.5/osr return f1, f2 def ds_optzeros(n, opt=1): """ Zeros which minimize the in-band noise of a delta-sigma modulator Helper function for synthesizeNTF, that returns the zeros as normalized angular frequencies Parameters ---------- n : int the number of optimized zeros to return opt : int, optional flag for optimized zeros, defaults to 1 0 -> not optimized 1 -> optimized 2 -> optimized with at least one zero at band-center Returns ------- zeros : ndarray of reals the zeros for the modulator as normalized angular frequencies. Notes ----- The zeros are always located on the complex unit circle. As such, they are returned as frequencies, not as complex values. The zero's frequencies are normalized with respect to the signal bandwidth. See also Sec. 4.3.1 in [1]_ .. [1] Richard Schreier, Gabor C. Temes, "Understanding Delta-Sigma Data Converters," IEEE Press and Wiley Interscience, 2005. """ if opt == 0: optZeros = np.zeros(ceil(n/2.)) else: if n == 1: optZeros = np.asarray([0.]) elif n == 2: if opt == 1: optZeros = np.asarray([sqrt(1./3)]) else: optZeros = np.asarray([0.]) elif n == 3: optZeros = np.asarray([sqrt(3./5), 0.]) elif n == 4: if opt == 1: discr = sqrt(9./49-3./35) tmp = 3./7 optZeros = np.sqrt([tmp+discr, tmp-discr]) else: optZeros = np.asarray([0., sqrt(5./7)]) elif n == 5: discr = sqrt(25./81-5./21) tmp = 5./9 optZeros = np.sqrt([tmp+discr, tmp-discr, 0.]) elif n == 6: if opt == 1: optZeros = np.asarray([0.23862059, 0.66120988, 0.9324696]) else: discr = sqrt(56.)/33 tmp = 7./11 optZeros = np.sqrt([0, tmp+discr, tmp-discr]) elif n == 7: optZeros = np.asarray([0, 0.40584371, 0.74153078, 0.94910785]) elif n == 8: if opt == 1: optZeros = np.asarray([0.18343709, 0.52553345, 0.79666684, 0.96028993]) else: optZeros = np.asarray([0, 0.50563161, 0.79017286, 0.95914731]) elif n == 9: optZeros = np.asarray([0, 0.32425101, 0.61337056, 0.83603082, 0.9681602]) elif n == 10: if opt == 1: optZeros = np.asarray([0.1834370913, 0.5255334458, 0.7966668433, 0.9602899327]) else: optZeros = np.asarray([0, 0.41572267, 0.67208682, 0.86238894, 0.97342121]) elif n == 11: optZeros = np.asarray([0, 0.26953955, 0.51909468, 0.73015137, 0.88706238, 0.97822864]) elif n == 12: if opt == 1: optZeros = np.asarray([0.12523875, 0.36783403, 0.58731921, 0.7699033, 0.90411753, 0.9815607]) else: optZeros = np.asarray([0, 0.35222363, 0.58006251, 0.76647993, 0.90281326, 0.98132047]) elif n == 13: optZeros = np.asarray([0, 0.23045331, 0.44849063, 0.64234828, 0.8015776, 0.91759824, 0.98418306]) elif n == 14: if opt == 1: optZeros = np.asarray([0.10806212, 0.31911586, 0.51525046, 0.68729392, 0.82720185, 0.92843513, 0.98628389]) else: optZeros = np.asarray([0, 0.30524384, 0.50836649, 0.6836066, 0.82537239, 0.92772336, 0.98615167]) else: raise ValueError('Optimized zeros for n>14 are not available.') # Sort the zeros and replicate them. z = np.sort(optZeros) optZeros = np.zeros(n) m = 0 if (n % 2) == 1: optZeros[0] = z[0] z = z[1:] m = m+1 for i in range(len(z)): optZeros[m] = z[i] optZeros[m+1] = -z[i] m = m+2 return optZeros
{"hexsha": "ac61c7830fa29d88a43d364c6a71ed2cb955b9db", "size": 8068, "ext": "py", "lang": "Python", "max_stars_repo_path": "pydsm/delsig/_ds.py", "max_stars_repo_name": "EnjoyLifeFund/macHighSierra-py36-pkgs", "max_stars_repo_head_hexsha": "5668b5785296b314ea1321057420bcd077dba9ea", "max_stars_repo_licenses": ["BSD-3-Clause", "BSD-2-Clause", "MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pydsm/delsig/_ds.py", "max_issues_repo_name": "EnjoyLifeFund/macHighSierra-py36-pkgs", "max_issues_repo_head_hexsha": "5668b5785296b314ea1321057420bcd077dba9ea", "max_issues_repo_licenses": ["BSD-3-Clause", "BSD-2-Clause", "MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pydsm/delsig/_ds.py", "max_forks_repo_name": "EnjoyLifeFund/macHighSierra-py36-pkgs", "max_forks_repo_head_hexsha": "5668b5785296b314ea1321057420bcd077dba9ea", "max_forks_repo_licenses": ["BSD-3-Clause", "BSD-2-Clause", "MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.4786324786, "max_line_length": 78, "alphanum_fraction": 0.5743678731, "include": true, "reason": "import numpy", "num_tokens": 2265}
import contextlib from datetime import datetime, timezone import getpass import io import json import pathlib import uuid import pickle import hashlib import subprocess from os.path import join, exists import numpy as np import sqlalchemy as sqla from sqlalchemy.ext.declarative import declarative_base as sqla_declarative_base from sqlalchemy_utils import database_exists, create_database from . import s3_utils sqlalchemy_base = sqla_declarative_base() DB_DUMP_URL = 'https://vasa.millennium.berkeley.edu:9000/robustness-eval/robustness_evaluation.db' def download_db(): if not exists(join(s3_utils.default_cache_root_path, 'robustness_evaluation.db')): print('downloading database dump...') subprocess.run(['wget', '-P', s3_utils.default_cache_root_path, DB_DUMP_URL, '--no-check-certificate'], check=True) def gen_short_uuid(num_chars=None): num = uuid.uuid4().int alphabet = '23456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz' res = [] while num > 0: num, digit = divmod(num, len(alphabet)) res.append(alphabet[digit]) res2 = ''.join(reversed(res)) if num_chars is None: return res2 else: return res2[:num_chars] def get_logdir_key(model_id): return 'logdir/{}'.format(model_id) def get_checkpoint_data_key(checkpoint_id): return 'checkpoints/{}_data.bytes'.format(checkpoint_id) def get_dataset_data_key(dataset_id): return 'datasets/{}_data.bytes'.format(dataset_id) def get_evaluation_setting_extra_data_key(evaluation_setting_id): return 'evaluation_settings/{}_extra_data.bytes'.format(evaluation_setting_id) def get_evaluation_setting_processed_dataset_key(evaluation_setting_id): return 'evaluation_settings/{}_processed_dataset.bytes'.format(evaluation_setting_id) def get_raw_input_data_key(raw_input_id): return 'raw_inputs/{}_data.bytes'.format(raw_input_id) def get_evaluation_extra_data_key(evaluation_id): return 'evaluations/{}_data.bytes'.format(evaluation_id) def get_evaluation_logits_data_key(evaluation_id): return 'evaluations/{}_logits_data.bytes'.format(evaluation_id) def get_evaluation_chunk_extra_data_key(evaluation_chunk_id): return 'evaluation_chunks/{}_data.bytes'.format(evaluation_chunk_id) def get_evaluation_chunk_logits_data_key(evaluation_chunk_id): return 'evaluation_chunks/{}_logits_data.bytes'.format(evaluation_chunk_id) def get_evaluation_chunk_indices_data_key(evaluation_chunk_id): return 'evaluation_chunks/{}_indices_data.bytes'.format(evaluation_chunk_id) class Model(sqlalchemy_base): __tablename__ = 'models' uuid = sqla.Column(sqla.String, primary_key=True) name = sqla.Column(sqla.String, unique=True) description = sqla.Column(sqla.String) username = sqla.Column(sqla.String) creation_time = sqla.Column(sqla.DateTime(timezone=False), server_default=sqla.sql.func.now()) extra_info = sqla.Column(sqla.JSON) checkpoints = sqla.orm.relationship('Checkpoint', back_populates='model', cascade='all, delete, delete-orphan', foreign_keys='Checkpoint.model_uuid') final_checkpoint_uuid = sqla.Column(sqla.String, sqla.ForeignKey('checkpoints.uuid'), nullable=True) final_checkpoint = sqla.orm.relationship('Checkpoint', foreign_keys=[final_checkpoint_uuid], uselist=False) completed = sqla.Column(sqla.Boolean) hidden = sqla.Column(sqla.Boolean) logdir_filepaths = sqla.Column(sqla.JSON) def __repr__(self): return f'<Model(uuid="{self.uuid}", name="{self.name}")>' def __hash__(self): return hash(hash(self.uuid) + hash(self.name)) def __eq__(self, other): return self.__hash__() == hash(other) class Checkpoint(sqlalchemy_base): __tablename__ = 'checkpoints' uuid = sqla.Column(sqla.String, 
primary_key=True) name = sqla.Column(sqla.String, unique=True) creation_time = sqla.Column(sqla.DateTime(timezone=False), server_default=sqla.sql.func.now()) model_uuid = sqla.Column(sqla.String, sqla.ForeignKey('models.uuid'), nullable=False) model = sqla.orm.relationship('Model', back_populates='checkpoints', foreign_keys=[model_uuid]) evaluations = sqla.orm.relationship('Evaluation', back_populates='checkpoint', cascade='all, delete, delete-orphan', foreign_keys='Evaluation.checkpoint_uuid') training_step = sqla.Column(sqla.BigInteger) epoch = sqla.Column(sqla.Float) username = sqla.Column(sqla.String) extra_info = sqla.Column(sqla.JSON) hidden = sqla.Column(sqla.Boolean) def __repr__(self): return f'<Checkpoint(uuid="{self.uuid}", model_uuid="{self.model_uuid}")>' def __hash__(self): return hash(hash(self.uuid) + hash(self.name)) def __eq__(self, other): return self.__hash__() == hash(other) class Dataset(sqlalchemy_base): __tablename__ = 'datasets' uuid = sqla.Column(sqla.String, primary_key=True) name = sqla.Column(sqla.String, unique=True, nullable=False) description = sqla.Column(sqla.String) username = sqla.Column(sqla.String) creation_time = sqla.Column(sqla.DateTime(timezone=False), server_default=sqla.sql.func.now()) size = sqla.Column(sqla.Integer) # Number of datapoints in the dataset extra_info = sqla.Column(sqla.JSON) evaluation_settings = sqla.orm.relationship('EvaluationSetting', back_populates='dataset', cascade='all, delete, delete-orphan', foreign_keys='EvaluationSetting.dataset_uuid') hidden = sqla.Column(sqla.Boolean) def __repr__(self): return f'<Dataset(uuid="{self.uuid}", name="{self.name}")>' def __hash__(self): return hash(hash(self.uuid) + hash(self.name)) def __eq__(self, other): return self.__hash__() == hash(other) class EvaluationSetting(sqlalchemy_base): __tablename__ = 'evaluation_settings' uuid = sqla.Column(sqla.String, primary_key=True) name = sqla.Column(sqla.String, unique=True, nullable=False) description = sqla.Column(sqla.String) username = sqla.Column(sqla.String) creation_time = sqla.Column(sqla.DateTime(timezone=False), server_default=sqla.sql.func.now()) dataset_uuid = sqla.Column(sqla.String, sqla.ForeignKey('datasets.uuid'), nullable=False) dataset = sqla.orm.relationship('Dataset', back_populates='evaluation_settings', foreign_keys=[dataset_uuid]) evaluations = sqla.orm.relationship('Evaluation', back_populates='setting', cascade='all, delete, delete-orphan', foreign_keys='Evaluation.setting_uuid') raw_inputs = sqla.orm.relationship('RawInput', back_populates='setting', cascade='all, delete, delete-orphan', foreign_keys='RawInput.setting_uuid') extra_info = sqla.Column(sqla.JSON) hidden = sqla.Column(sqla.Boolean) def __repr__(self): return f'<EvaluationSetting(uuid="{self.uuid}", name="{self.name}")>' def __hash__(self): return hash(hash(self.uuid) + hash(self.name)) def __eq__(self, other): return self.__hash__() == hash(other) # For the raw float32 inputs we have for external models class RawInput(sqlalchemy_base): __tablename__ = 'raw_inputs' uuid = sqla.Column(sqla.String, primary_key=True) name = sqla.Column(sqla.String, unique=True) description = sqla.Column(sqla.String) username = sqla.Column(sqla.String) creation_time = sqla.Column(sqla.DateTime(timezone=False), server_default=sqla.sql.func.now()) setting_uuid = sqla.Column(sqla.String, sqla.ForeignKey('evaluation_settings.uuid'), nullable=False) setting = sqla.orm.relationship('EvaluationSetting', back_populates='raw_inputs', foreign_keys=[setting_uuid]) data_shape = 
sqla.Column(sqla.JSON) data_format = sqla.Column(sqla.String) # numpy type evaluations = sqla.orm.relationship('Evaluation', back_populates='raw_input', cascade='all, delete, delete-orphan', foreign_keys='Evaluation.raw_input_uuid') extra_info = sqla.Column(sqla.JSON) hidden = sqla.Column(sqla.Boolean) def __repr__(self): return f'<RawInput(uuid="{self.uuid}", name="{self.name}")>' def __hash__(self): return hash(hash(self.uuid) + hash(self.name)) def __eq__(self, other): return self.__hash__() == hash(other) class Evaluation(sqlalchemy_base): __tablename__ = 'evaluations' uuid = sqla.Column(sqla.String, primary_key=True) name = sqla.Column(sqla.String, unique=True) creation_time = sqla.Column(sqla.DateTime(timezone=False), server_default=sqla.sql.func.now()) checkpoint_uuid = sqla.Column(sqla.String, sqla.ForeignKey('checkpoints.uuid'), nullable=False) checkpoint = sqla.orm.relationship('Checkpoint', back_populates='evaluations', foreign_keys=[checkpoint_uuid]) # TODO: eventually make this nullable=False setting_uuid = sqla.Column(sqla.String, sqla.ForeignKey('evaluation_settings.uuid'), nullable=True) setting = sqla.orm.relationship('EvaluationSetting', back_populates='evaluations', foreign_keys=[setting_uuid]) raw_input_uuid = sqla.Column(sqla.String, sqla.ForeignKey('raw_inputs.uuid'), nullable=True) raw_input = sqla.orm.relationship('RawInput', back_populates='evaluations', foreign_keys=[raw_input_uuid]) chunks = sqla.orm.relationship('EvaluationChunk', back_populates='evaluation', cascade='all, delete, delete-orphan', foreign_keys='EvaluationChunk.evaluation_uuid') username = sqla.Column(sqla.String) extra_info = sqla.Column(sqla.JSON) completed = sqla.Column(sqla.Boolean) hidden = sqla.Column(sqla.Boolean) def __repr__(self): return f'<Evaluation(uuid="{self.uuid}", checkpoint_uuid="{self.checkpoint_uuid}")>' def __hash__(self): return hash(hash(self.uuid) + hash(self.name)) def __eq__(self, other): return self.__hash__() == hash(other) class EvaluationChunk(sqlalchemy_base): __tablename__ = 'evaluation_chunks' uuid = sqla.Column(sqla.String, primary_key=True) creation_time = sqla.Column(sqla.DateTime(timezone=False), server_default=sqla.sql.func.now()) evaluation_uuid = sqla.Column(sqla.String, sqla.ForeignKey('evaluations.uuid'), nullable=False) evaluation = sqla.orm.relationship('Evaluation', back_populates='chunks', foreign_keys=[evaluation_uuid]) username = sqla.Column(sqla.String) extra_info = sqla.Column(sqla.JSON) hidden = sqla.Column(sqla.Boolean) def __repr__(self): return f'<EvaluationChunk(uuid="{self.uuid}", evaluation_uuid="{self.evaluation_uuid}")>' def __hash__(self): return hash(self.uuid) def __eq__(self, other): return self.__hash__() == hash(other) class ModelRepository: def __init__(self, mode=s3_utils.DB_CONNECTION_MODE, sql_verbose=False, download_database=True): self.sql_verbose = sql_verbose if mode == "sqlite": if download_database: download_db() self.db_connection_string = s3_utils.DB_CONNECTION_STRING_SQLITE self.engine = sqla.create_engine(self.db_connection_string, echo=self.sql_verbose) elif mode == "rds": self.db_connection_string = s3_utils.DB_CONNECTION_STRING_RDS self.engine = sqla.create_engine(self.db_connection_string, echo=self.sql_verbose, pool_pre_ping=True) else: assert False if not database_exists(self.engine.url): create_database(self.engine.url) self.sessionmaker = sqla.orm.sessionmaker(bind=self.engine, expire_on_commit=False) self.cache_root_path = s3_utils.default_cache_root_path self.s3wrapper =
s3_utils.DoubleBucketS3Wrapper(bucket_vasa='robustness-eval', bucket_google='imagenet-testbed', cache_root_path=self.cache_root_path, verbose=False) if mode == "sqlite": # if in public mode, prevent any writes to the global bucket (which fail due to permissions error anyways) self.s3wrapper.put = lambda *args, **kwargs: None self.s3wrapper.put_multiple = lambda *args, **kwargs: None self.s3wrapper.upload_file = lambda *args, **kwargs: None self.uuid_length = 10 self.pickle_protocol = 4 def dispose(self): self.engine.dispose() @contextlib.contextmanager def session_scope(self): session = self.sessionmaker() try: yield session session.commit() except: session.rollback() raise finally: session.close() def gen_short_uuid(self): new_id = gen_short_uuid(num_chars=self.uuid_length) # TODO: check that we don't have a collision with the db? return new_id def gen_checkpoint_uuid(self): return gen_short_uuid(num_chars=None) def run_query_with_optional_session(self, query, session=None): if session is None: with self.session_scope() as sess: return query(sess) else: return query(session) def run_get(self, get_fn, session=None, assert_exists=True): def query(sess): result = get_fn(sess) assert len(result) <= 1 if assert_exists: assert len(result) == 1 if len(result) == 0: return None else: return result[0] return self.run_query_with_optional_session(query, session) def get_model(self, *, uuid=None, name=None, session=None, assert_exists=True, load_final_checkpoint=False, load_all_checkpoints=False, load_evaluations=False): if uuid is not None: assert type(uuid) is str if name is not None: assert type(name) is str def get_fn(sess): return self.get_models(uuids=[uuid] if uuid is not None else None, names=[name] if name is not None else None, session=sess, load_final_checkpoint=load_final_checkpoint, load_all_checkpoints=load_all_checkpoints, load_evaluations=load_evaluations, show_hidden=True) return self.run_get(get_fn, session=session, assert_exists=assert_exists) def get_checkpoint(self, uuid=None, *, session=None, assert_exists=True, load_parents=False, load_evaluations=False): if uuid is not None: assert type(uuid) is str def get_fn(sess): return self.get_checkpoints(uuids=[uuid], session=sess, load_parents=load_parents, load_evaluations=load_evaluations, show_hidden=True) return self.run_get(get_fn, session=session, assert_exists=assert_exists) def get_dataset(self, *, uuid=None, name=None, session=None, assert_exists=True, load_evaluation_settings=False): if uuid is not None: assert type(uuid) is str if name is not None: assert type(name) is str def get_fn(sess): return self.get_datasets(uuids=[uuid] if uuid is not None else None, names=[name] if name is not None else None, session=sess, load_evaluation_settings=load_evaluation_settings, show_hidden=True) return self.run_get(get_fn, session=session, assert_exists=assert_exists) def get_evaluation_setting(self, *, uuid=None, name=None, session=None, assert_exists=True, load_parents=False, load_evaluations=False, load_raw_inputs=False): if uuid is not None: assert type(uuid) is str if name is not None: assert type(name) is str def get_fn(sess): return self.get_evaluation_settings(uuids=[uuid] if uuid is not None else None, names=[name] if name is not None else None, session=sess, load_parents=load_parents, load_evaluations=load_evaluations, load_raw_inputs=load_raw_inputs, show_hidden=True) return self.run_get(get_fn, session=session, assert_exists=assert_exists) def get_raw_input(self, uuid=None, *, session=None, assert_exists=True, 
load_parents=False, load_evaluations=False): if uuid is not None: assert type(uuid) is str def get_fn(sess): return self.get_raw_inputs(uuids=[uuid], session=sess, load_parents=load_parents, load_evaluations=load_evaluations, show_hidden=True) return self.run_get(get_fn, session=session, assert_exists=assert_exists) def get_evaluation(self, uuid=None, *, session=None, assert_exists=True, load_parents=False, load_chunks=True): if uuid is not None: assert type(uuid) is str def get_fn(sess): return self.get_evaluations(uuids=[uuid], session=sess, load_parents=load_parents, load_chunks=load_chunks, show_hidden=True) return self.run_get(get_fn, session=session, assert_exists=assert_exists) def get_evaluation_chunk(self, *, uuid=None, session=None, assert_exists=True, load_parents=False): if uuid is not None: assert type(uuid) is str def get_fn(sess): return self.get_evaluation_chunks(uuids=[uuid], session=sess, load_parents=load_parents, show_hidden=True) return self.run_get(get_fn, session=session, assert_exists=assert_exists) def model_uuid_exists(self, uuid, session=None): return self.get_model(uuid=uuid, assert_exists=False, session=session) is not None def checkpoint_uuid_exists(self, uuid, session=None): return self.get_checkpoint(uuid=uuid, assert_exists=False, session=session) is not None def dataset_uuid_exists(self, uuid, session=None): return self.get_dataset(uuid=uuid, assert_exists=False, session=session) is not None def evaluation_setting_uuid_exists(self, uuid, session=None): return self.get_evaluation_setting(uuid=uuid, assert_exists=False, session=session) is not None def raw_input_uuid_exists(self, uuid, session=None): return self.get_raw_input(uuid=uuid, assert_exists=False, session=session) is not None def evaluation_uuid_exists(self, uuid, session=None): return self.get_evaluation(uuid=uuid, assert_exists=False, session=session) is not None def evaluation_chunk_uuid_exists(self, uuid, session=None): return self.get_evaluation_chunk(uuid=uuid, assert_exists=False, session=session) is not None def get_checkpoints(self, uuids=None, *, session=None, load_parents=True, load_evaluations=False, show_hidden=False): cur_options = [] if load_parents: cur_options.append(sqla.orm.subqueryload(Checkpoint.model)) if load_evaluations: cur_options.append(sqla.orm.subqueryload(Checkpoint.evaluations)) filter_list = [] if not show_hidden: filter_list.append(Checkpoint.hidden == False) if uuids is not None: filter_list.append(Checkpoint.uuid.in_(uuids)) def query(sess): return sess.query(Checkpoint).options(cur_options).filter(*filter_list).all() return self.run_query_with_optional_session(query, session) def get_datasets(self, *, uuids=None, names=None, session=None, load_evaluation_settings=True, show_hidden=False): cur_options = [] if load_evaluation_settings: cur_options.append(sqla.orm.subqueryload(Dataset.evaluation_settings)) filter_list = [] if not show_hidden: filter_list.append(Dataset.hidden == False) if uuids is not None: filter_list.append(Dataset.uuid.in_(uuids)) if names is not None: filter_list.append(Dataset.name.in_(names)) def query(sess): return sess.query(Dataset).options(cur_options).filter(*filter_list).all() return self.run_query_with_optional_session(query, session) def get_evaluation_settings(self, *, uuids=None, names=None, session=None, load_parents=True, load_evaluations=False, load_raw_inputs=False, show_hidden=False): cur_options = [] if load_parents: cur_options.append(sqla.orm.subqueryload(EvaluationSetting.dataset)) if load_evaluations: 
cur_options.append(sqla.orm.subqueryload(EvaluationSetting.evaluations).subqueryload(Evaluation.checkpoint).subqueryload(Checkpoint.model)) if load_raw_inputs: cur_options.append(sqla.orm.subqueryload(EvaluationSetting.raw_inputs)) filter_list = [] if not show_hidden: filter_list.append(EvaluationSetting.hidden == False) if uuids is not None: filter_list.append(EvaluationSetting.uuid.in_(uuids)) if names is not None: filter_list.append(EvaluationSetting.name.in_(names)) def query(sess): return sess.query(EvaluationSetting).options(cur_options).filter(*filter_list).all() return self.run_query_with_optional_session(query, session) def get_raw_inputs(self, uuids=None, *, session=None, load_parents=True, load_evaluations=False, show_hidden=False): cur_options = [] if load_parents: cur_options.append(sqla.orm.subqueryload(RawInput.setting).subqueryload(EvaluationSetting.dataset)) if load_evaluations: cur_options.append(sqla.orm.subqueryload(RawInput.evaluations).subqueryload(Evaluation.checkpoint).subqueryload(Checkpoint.model)) filter_list = [] if not show_hidden: filter_list.append(RawInput.hidden == False) if uuids is not None: filter_list.append(RawInput.uuid.in_(uuids)) def query(sess): return sess.query(RawInput).options(cur_options).filter(*filter_list).all() return self.run_query_with_optional_session(query, session) def get_evaluations(self, uuids=None, *, session=None, load_parents=True, load_chunks=True, show_hidden=False): cur_options = [] if load_parents: cur_options.append(sqla.orm.subqueryload(Evaluation.checkpoint).subqueryload(Checkpoint.model)) cur_options.append(sqla.orm.subqueryload(Evaluation.raw_input)) cur_options.append(sqla.orm.subqueryload(Evaluation.setting).subqueryload(EvaluationSetting.dataset)) if load_chunks: cur_options.append(sqla.orm.subqueryload(Evaluation.chunks)) filter_list = [] if not show_hidden: filter_list.append(Evaluation.hidden == False) if uuids is not None: filter_list.append(Evaluation.uuid.in_(uuids)) def query(sess): return sess.query(Evaluation).options(cur_options).filter(*filter_list).all() return self.run_query_with_optional_session(query, session) def get_evaluation_chunks(self, *, uuids=None, session=None, load_parents=False, show_hidden=False): cur_options = [] if load_parents: cur_options.append(sqla.orm.subqueryload(EvaluationChunk.evaluation)) filter_list = [] if not show_hidden: filter_list.append(EvaluationChunk.hidden == False) if uuids is not None: filter_list.append(EvaluationChunk.uuid.in_(uuids)) def query(sess): return sess.query(EvaluationChunk).options(cur_options).filter(*filter_list).all() return self.run_query_with_optional_session(query, session) def get_models(self, *, uuids=None, names=None, session=None, load_parents=True, load_final_checkpoint=True, load_all_checkpoints=False, load_evaluations=False, show_hidden=False): cur_options = [] checkpoint_nodes = [] if load_final_checkpoint: cur_options.append(sqla.orm.subqueryload(Model.final_checkpoint)) checkpoint_nodes.append(cur_options[-1]) if load_all_checkpoints: cur_options.append(sqla.orm.subqueryload(Model.checkpoints)) checkpoint_nodes.append(cur_options[-1]) if load_evaluations: for opt in checkpoint_nodes: opt.subqueryload(Checkpoint.evaluations) filter_list = [] if not show_hidden: filter_list.append(Model.hidden == False) if uuids is not None: filter_list.append(Model.uuid.in_(uuids)) if names is not None: filter_list.append(Model.name.in_(names)) def query(sess): return sess.query(Model).options(cur_options).filter(*filter_list).all() return 
self.run_query_with_optional_session(query, session) def create_model(self, extra_info=None, name=None, description=None, verbose=False, completed=False): with self.session_scope() as session: new_id = self.gen_short_uuid() username = getpass.getuser() new_model = Model(uuid=new_id, name=name, description=description, username=username, extra_info=extra_info, hidden=False, completed=completed, logdir_filepaths={}, final_checkpoint_uuid=None) session.add(new_model) return self.get_model(uuid=new_id, assert_exists=True) def rename_model(self, model_uuid, new_name): with self.session_scope() as session: model = self.get_model(uuid=model_uuid, session=session, assert_exists=True) old_name = model.name model.name = new_name return old_name def hide_model(self, model_uuid): with self.session_scope() as session: model = self.get_model(uuid=model_uuid, session=session, assert_exists=True) model.hidden = True def get_latest_model_checkpoint_data(self, model_uuid, verbose=False, allow_non_final_checkpoint=True): with self.session_scope() as session: model = self.get_model(uuid=model_uuid, session=session, assert_exists=True) if len(model.checkpoints) == 0: return None, None if allow_non_final_checkpoint: cur_checkpoints = sorted(model.checkpoints, key=lambda x: x.training_step) checkpoint_to_load = cur_checkpoints[-1] else: assert model.final_checkpoint is not None checkpoint_to_load = model.final_checkpoint checkpoint_uuid = checkpoint_to_load.uuid key = get_checkpoint_data_key(checkpoint_uuid) if self.s3wrapper.exists(key): data = self.s3wrapper.get(key, verbose=verbose) else: data = None return data, checkpoint_to_load def mark_model_as_completed(self, model_uuid): with self.session_scope() as session: model = self.get_model(uuid=model_uuid, session=session, assert_exists=True) model.completed = True def set_final_model_checkpoint(self, model_uuid, checkpoint_uuid): with self.session_scope() as session: model = self.get_model(uuid=model_uuid, session=session, assert_exists=True) checkpoint = self.get_checkpoint(checkpoint_uuid, session=session, assert_exists=True) assert checkpoint.model_uuid == model_uuid model.final_checkpoint_uuid = checkpoint_uuid def store_logdir(self, model_uuid, logdir, verbose=False): with self.session_scope() as session: model = self.get_model(uuid=model_uuid, session=session, assert_exists=True) logdir_path = pathlib.Path(logdir).resolve() assert logdir_path.is_dir() tmp_filepaths = [x for x in logdir_path.glob('**/*') if x.is_file()] all_data = {} base_key = get_logdir_key(model_uuid) + '/' cur_logdir_files = {} for cur_filepath in tmp_filepaths: cur_filepath_resolved= cur_filepath.resolve() with open(cur_filepath_resolved, 'rb') as f: cur_data = f.read() cur_relative_path = str(cur_filepath.relative_to(logdir_path)) assert cur_relative_path not in cur_logdir_files cur_logdir_files[cur_relative_path] = { 'size': cur_filepath_resolved.stat().st_size, 'mtime': cur_filepath_resolved.stat().st_mtime} cur_key = base_key + cur_relative_path all_data[cur_key] = cur_data self.s3wrapper.put_multiple(all_data, verbose=verbose) model.logdir_filepaths = cur_logdir_files sqla.orm.attributes.flag_modified(model, 'logdir_filepaths') def create_checkpoint(self, *, model_uuid, training_step=None, epoch=None, name=None, data_bytes=None, extra_info=None, verbose=False): with self.session_scope() as session: assert self.model_uuid_exists(model_uuid, session=session) new_id = self.gen_checkpoint_uuid() username = getpass.getuser() new_checkpoint = Checkpoint(uuid=new_id, 
model_uuid=model_uuid, username=username, extra_info=extra_info, name=name, training_step=training_step, epoch=epoch, hidden=False) if data_bytes is not None: key = get_checkpoint_data_key(new_id) self.s3wrapper.put(data_bytes, key, verbose=verbose) session.add(new_checkpoint) return self.get_checkpoint(uuid=new_id, assert_exists=True) def get_checkpoint_data(self, checkpoint_uuid, verbose=False): with self.session_scope() as session: assert self.checkpoint_uuid_exists(checkpoint_uuid, session=session) key = get_checkpoint_data_key(checkpoint_uuid) if self.s3wrapper.exists(key): return self.s3wrapper.get(key, verbose=verbose) else: return None def create_evaluation(self, *, checkpoint_uuid, setting_uuid, name=None, logits_data_bytes=None, extra_data_bytes=None, raw_input_uuid=None, extra_info=None, completed=False, verbose=False): with self.session_scope() as session: assert self.checkpoint_uuid_exists(checkpoint_uuid, session=session) assert self.evaluation_setting_uuid_exists(setting_uuid, session=session) if raw_input_uuid is not None: assert self.raw_input_uuid_exists(raw_input_uuid, session=session) new_id = self.gen_short_uuid() username = getpass.getuser() new_evaluation = Evaluation(uuid=new_id, checkpoint_uuid=checkpoint_uuid, setting_uuid=setting_uuid, raw_input_uuid=raw_input_uuid, username=username, extra_info=extra_info, name=name, completed=completed, hidden=False) if extra_data_bytes is not None: key = get_evaluation_extra_data_key(new_id) self.s3wrapper.put(extra_data_bytes, key, verbose=verbose) if logits_data_bytes is not None: key = get_evaluation_logits_data_key(new_id) self.s3wrapper.put(logits_data_bytes, key, verbose=verbose) session.add(new_evaluation) return self.get_evaluation(uuid=new_id, assert_exists=True) def hide_evaluation(self, evaluation_uuid): with self.session_scope() as session: evaluation = self.get_evaluation(evaluation_uuid, session=session, assert_exists=True) evaluation.hidden = True def rename_evaluation(self, evaluation_uuid, new_name): with self.session_scope() as session: evaluation = self.get_evaluation(evaluation_uuid, session=session, assert_exists=True) old_name = evaluation.name evaluation.name = new_name return old_name def mark_evaluation_as_completed(self, evaluation_uuid): with self.session_scope() as session: evaluation = self.get_evaluation(uuid=evaluation_uuid, session=session, assert_exists=True) evaluation.completed = True def update_evaluation_extra_info(self, evaluation_uuid, new_extra_info): with self.session_scope() as session: evaluation = self.get_evaluation(uuid=evaluation_uuid, session=session, assert_exists=True) evaluation.extra_info = new_extra_info def get_evaluation_extra_data(self, evaluation_uuid, verbose=False): with self.session_scope() as session: assert self.evaluation_uuid_exists(evaluation_uuid, session=session) key = get_evaluation_extra_data_key(evaluation_uuid) if self.s3wrapper.exists(key): return self.s3wrapper.get(key, verbose=verbose) else: return None def get_evaluation_logits_data(self, evaluation_uuid, verbose=False): with self.session_scope() as session: assert self.evaluation_uuid_exists(evaluation_uuid, session=session) key = get_evaluation_logits_data_key(evaluation_uuid) if self.s3wrapper.exists(key): return self.s3wrapper.get(key, verbose=verbose) else: return None def has_evaluation_logits_data(self, evaluation_uuid): with self.session_scope() as session: assert self.evaluation_uuid_exists(evaluation_uuid, session=session) key = get_evaluation_logits_data_key(evaluation_uuid) return 
self.s3wrapper.exists(key) def put_evaluation_extra_data(self, evaluation_uuid, extra_data_bytes, verbose=False): with self.session_scope() as session: assert self.evaluation_uuid_exists(evaluation_uuid, session=session) key = get_evaluation_extra_data_key(evaluation_uuid) self.s3wrapper.put(extra_data_bytes, key, verbose=verbose) def put_evaluation_logits_data(self, evaluation_uuid, logits_data_bytes, verbose=False): with self.session_scope() as session: assert self.evaluation_uuid_exists(evaluation_uuid, session=session) key = get_evaluation_logits_data_key(evaluation_uuid) self.s3wrapper.put(logits_data_bytes, key, verbose=verbose) def create_dataset(self, *, name, size, description=None, data_bytes=None, data_filename=None, # use one of the two - directly uploading from a file can save memory extra_info=None, verbose=False): assert name is not None assert size is not None assert type(size) is int assert data_bytes is None or data_filename is None assert data_bytes is not None or data_filename is not None with self.session_scope() as session: new_id = self.gen_short_uuid() username = getpass.getuser() new_dataset = Dataset(uuid=new_id, name=name, description=description, username=username, size=size, extra_info=extra_info, hidden=False) key = get_dataset_data_key(new_id) if data_bytes is not None: self.s3wrapper.put(data_bytes, key, verbose=verbose) else: assert data_filename is not None self.s3wrapper.upload_file(data_filename, key, verbose=verbose) session.add(new_dataset) return self.get_dataset(uuid=new_id, assert_exists=True) def get_dataset_data(self, dataset_uuid, verbose=False): with self.session_scope() as session: assert self.dataset_uuid_exists(dataset_uuid, session=session) key = get_dataset_data_key(dataset_uuid) if self.s3wrapper.exists(key): return self.s3wrapper.get(key, verbose=verbose) else: return None def download_dataset_data(self, dataset_uuid, target_filename, verbose=False): with self.session_scope() as session: assert self.dataset_uuid_exists(dataset_uuid, session=session) key = get_dataset_data_key(dataset_uuid) if self.s3wrapper.exists(key): return self.s3wrapper.download_file(key, target_filename, verbose=verbose) def rename_dataset(self, dataset_uuid, new_name): with self.session_scope() as session: dataset = self.get_dataset(uuid=dataset_uuid, session=session, assert_exists=True) old_name = dataset.name dataset.name = new_name return old_name def hide_dataset(self, dataset_uuid): with self.session_scope() as session: dataset = self.get_dataset(uuid=dataset_uuid, session=session, assert_exists=True) dataset.hidden = True def create_evaluation_setting(self, *, name, dataset_uuid=None, description=None, extra_info=None, # in case the evaluation settings have a differently processe dataset associated with them processed_dataset_bytes=None, # use one of the two - directly uploading from a file can save memory processed_dataset_filename=None, extra_data_bytes=None, verbose=False): assert name is not None assert processed_dataset_filename is None or processed_dataset_bytes is None with self.session_scope() as session: if dataset_uuid is not None: assert self.dataset_uuid_exists(dataset_uuid, session=session) new_id = self.gen_short_uuid() username = getpass.getuser() new_setting = EvaluationSetting(uuid=new_id, name=name, description=description, username=username, dataset_uuid=dataset_uuid, extra_info=extra_info, hidden=False) if extra_data_bytes is not None: key = get_evaluation_setting_extra_data_key(new_id) self.s3wrapper.put(extra_data_bytes, key, 
verbose=verbose) key = get_evaluation_setting_processed_dataset_key(new_id) if processed_dataset_bytes is not None: self.s3wrapper.put(processed_dataset_bytes, key, verbose=verbose) elif processed_dataset_filename is not None: self.s3wrapper.upload_file(processed_dataset_filename, key, verbose=verbose) session.add(new_setting) return self.get_evaluation_setting(uuid=new_id, assert_exists=True) def hide_evaluation_setting(self, evaluation_setting_uuid): with self.session_scope() as session: evaluation_setting = self.get_evaluation_setting(uuid=evaluation_setting_uuid, session=session, assert_exists=True) evaluation_setting.hidden = True def rename_evaluation_setting(self, evaluation_setting_uuid, new_name): with self.session_scope() as session: evaluation_setting = self.get_evaluation_setting(uuid=evaluation_setting_uuid, session=session, assert_exists=True) old_name = evaluation_setting.name evaluation_setting.name = new_name return old_name def get_evaluation_setting_extra_data(self, evaluation_setting_uuid, verbose=False): with self.session_scope() as session: assert self.evaluation_setting_uuid_exists(evaluation_setting_uuid, session=session) key = get_evaluation_setting_extra_data_key(evaluation_setting_uuid) if self.s3wrapper.exists(key): return self.s3wrapper.get(key, verbose=verbose) else: return None def download_evaluation_setting_processed_dataset_data(self, evaluation_setting_uuid, target_filename, verbose=False): with self.session_scope() as session: assert self.evaluation_setting_uuid_exists(evaluation_setting_uuid, session=session) key = get_evaluation_setting_processed_dataset_key(evaluation_setting_uuid) if self.s3wrapper.exists(key): return self.s3wrapper.download_file(key, target_filename, verbose=verbose) def get_evaluation_setting_processed_dataset_data(self, evaluation_setting_uuid, verbose=False): with self.session_scope() as session: assert self.evaluation_setting_uuid_exists(evaluation_setting_uuid, session=session) key = get_evaluation_setting_processed_dataset_key(evaluation_setting_uuid) if self.s3wrapper.exists(key): return self.s3wrapper.get(key, verbose=verbose) else: return None def create_raw_input(self, *, name, evaluation_setting_uuid, data_shape, data_format, description=None, extra_info=None, data_bytes=None, data_filename=None, # use one of the two - directly uploading from a file can save memory verbose=False): assert name is not None assert evaluation_setting_uuid is not None assert data_bytes is None or data_filename is None assert data_bytes is not None or data_filename is not None assert data_format in ['float32', 'float64', 'uint8'] # add more here if necessary assert type(data_shape) is list for x in data_shape: assert type(x) is int with self.session_scope() as session: assert self.evaluation_setting_uuid_exists(evaluation_setting_uuid, session=session) new_id = self.gen_short_uuid() username = getpass.getuser() new_raw_input = RawInput(uuid=new_id, name=name, description=description, username=username, data_shape=data_shape, data_format=data_format, setting_uuid=evaluation_setting_uuid, extra_info=extra_info, hidden=False) key = get_raw_input_data_key(new_id) if data_bytes is not None: self.s3wrapper.put(data_bytes, key, verbose=verbose) else: assert data_filename is not None self.s3wrapper.upload_file(data_filename, key, verbose=verbose) session.add(new_raw_input) return self.get_raw_input(uuid=new_id, assert_exists=True) def hide_raw_input(self, raw_input_uuid): with self.session_scope() as session: raw_input = 
self.get_raw_input(raw_input_uuid, session=session, assert_exists=True) raw_input.hidden = True def rename_raw_input(self, raw_input_uuid, new_name): with self.session_scope() as session: raw_input = self.get_raw_input(raw_input_uuid, session=session, assert_exists=True) old_name = raw_input.name raw_input.name = new_name return old_name def download_raw_input_data(self, raw_input_uuid, target_filename, verbose=False): with self.session_scope() as session: assert self.raw_input_uuid_exists(raw_input_uuid, session=session) key = get_raw_input_data_key(raw_input_uuid) if self.s3wrapper.exists(key): return self.s3wrapper.download_file(key, target_filename, verbose=verbose) def get_raw_input_data(self, raw_input_uuid, verbose=False): with self.session_scope() as session: assert self.raw_input_uuid_exists(raw_input_uuid, session=session) key = get_raw_input_data_key(raw_input_uuid) if self.s3wrapper.exists(key): return self.s3wrapper.get(key, verbose=verbose) else: return None def create_evaluation_chunk(self, *, evaluation_uuid, logits_data_bytes=None, indices=None, # pass in either indices or indices_bytes indices_bytes=None, extra_data_bytes=None, extra_info=None, verbose=False): if indices is not None: assert indices_bytes is None indices_bytes = pickle.dumps(indices) else: assert indices_bytes is not None with self.session_scope() as session: assert self.evaluation_uuid_exists(evaluation_uuid, session=session) new_id = self.gen_short_uuid() username = getpass.getuser() new_chunk = EvaluationChunk(uuid=new_id, evaluation_uuid=evaluation_uuid, username=username, extra_info=extra_info, hidden=False) if extra_data_bytes is not None: key = get_evaluation_chunk_extra_data_key(new_id) self.s3wrapper.put(extra_data_bytes, key, verbose=verbose) if logits_data_bytes is not None: key = get_evaluation_chunk_logits_data_key(new_id) self.s3wrapper.put(logits_data_bytes, key, verbose=verbose) if indices_bytes is not None: key = get_evaluation_chunk_indices_data_key(new_id) self.s3wrapper.put(indices_bytes, key, verbose=verbose) session.add(new_chunk) return self.get_evaluation_chunk(uuid=new_id, assert_exists=True) def hide_evaluation_chunk(self, evaluation_chunk_uuid): with self.session_scope() as session: evaluation_chunk = self.get_evaluation_chunk(uuid=evaluation_chunk_uuid, session=session, assert_exists=True) evaluation_chunk.hidden = True def get_evaluation_chunk_extra_data(self, evaluation_chunk_uuid, verbose=False): with self.session_scope() as session: assert self.evaluation_chunk_uuid_exists(evaluation_chunk_uuid, session=session) key = get_evaluation_chunk_extra_data_key(evaluation_chunk_uuid) if self.s3wrapper.exists(key): return self.s3wrapper.get(key, verbose=verbose) else: return None def get_evaluation_chunk_logits_data(self, evaluation_chunk_uuid, verbose=False): with self.session_scope() as session: assert self.evaluation_chunk_uuid_exists(evaluation_chunk_uuid, session=session) key = get_evaluation_chunk_logits_data_key(evaluation_chunk_uuid) if self.s3wrapper.exists(key): return self.s3wrapper.get(key, verbose=verbose) else: return None def get_evaluation_chunk_indices_data(self, evaluation_chunk_uuid, verbose=False, unpickle=False): with self.session_scope() as session: assert self.evaluation_chunk_uuid_exists(evaluation_chunk_uuid, session=session) key = get_evaluation_chunk_indices_data_key(evaluation_chunk_uuid) if self.s3wrapper.exists(key): data = self.s3wrapper.get(key, verbose=verbose) if unpickle: return pickle.loads(data) else: return data else: return None
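
The ModelRepository class defined above is the public entry point: it wraps a SQLAlchemy session factory over the sqlite dump (or the RDS instance) together with the S3-backed blob store (DoubleBucketS3Wrapper) that holds checkpoints, logits, and datasets. A minimal read-only usage sketch follows; it is not part of the module and assumes that src/ is on the import path so the package imports as mldb, and that the evaluation-setting name "val" is a placeholder rather than a name guaranteed to exist in the database.

# Hypothetical read-only usage sketch for the ModelRepository defined above.
# Assumes `src/` is on PYTHONPATH so the package imports as `mldb`; the
# setting name "val" is a placeholder.
from mldb.model_repository import ModelRepository

repo = ModelRepository(mode="sqlite")  # fetches the public sqlite dump if it is not cached yet
try:
    # List a few models together with their final checkpoints.
    for model in repo.get_models(load_final_checkpoint=True)[:5]:
        print(model.uuid, model.name, model.completed)

    # Fetch one evaluation setting by name and the evaluations run against it.
    setting = repo.get_evaluation_setting(name="val", assert_exists=False,
                                          load_evaluations=True)
    if setting is not None:
        for evaluation in setting.evaluations:
            # Logits are stored as raw bytes in the blob store; None if nothing was uploaded.
            logits_bytes = repo.get_evaluation_logits_data(evaluation.uuid)
            print(evaluation.uuid, evaluation.checkpoint.model.name,
                  logits_bytes is not None)
finally:
    repo.dispose()

In sqlite mode the wrapper's put, put_multiple, and upload_file methods are replaced with no-ops, so a read-only pattern like this cannot accidentally write to the shared bucket.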
{"hexsha": "f77e5ba401b2a1a42643ec90597574f08944aac4", "size": 50605, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/mldb/model_repository.py", "max_stars_repo_name": "modestyachts/imagenet-testbed", "max_stars_repo_head_hexsha": "f4083a29524fe9a9e029bf34d1476cea5a497132", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 69, "max_stars_repo_stars_event_min_datetime": "2020-07-21T01:17:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T16:31:32.000Z", "max_issues_repo_path": "src/mldb/model_repository.py", "max_issues_repo_name": "modestyachts/imagenet-testbed", "max_issues_repo_head_hexsha": "f4083a29524fe9a9e029bf34d1476cea5a497132", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-12-07T19:17:05.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-23T23:39:22.000Z", "max_forks_repo_path": "src/mldb/model_repository.py", "max_forks_repo_name": "modestyachts/imagenet-testbed", "max_forks_repo_head_hexsha": "f4083a29524fe9a9e029bf34d1476cea5a497132", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-10-31T23:51:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-25T06:15:55.000Z", "avg_line_length": 47.876064333, "max_line_length": 179, "alphanum_fraction": 0.6302736884, "include": true, "reason": "import numpy", "num_tokens": 9599}
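Each getter on the repository above also accepts an optional session argument, so several related lookups can share one transaction through the session_scope() context manager instead of each call opening its own. A small sketch under the same assumptions as before (the model name "resnet50" is a placeholder):

# Hypothetical sketch: several related lookups sharing one session/transaction.
from mldb.model_repository import ModelRepository

repo = ModelRepository(mode="sqlite")
with repo.session_scope() as session:
    model = repo.get_model(name="resnet50", session=session, assert_exists=False)
    if model is not None and model.final_checkpoint_uuid is not None:
        checkpoint = repo.get_checkpoint(model.final_checkpoint_uuid, session=session)
        print(checkpoint.uuid, checkpoint.training_step, checkpoint.epoch)
repo.dispose()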
using CoinbasePro
using Test
using DataFrames

@testset "CoinbasePro" begin
    for file in filter(x -> occursin("test_", x), readdir("."))
        include(file)
    end
end
{"hexsha": "30e24704bf9b293b76002d4319f13475ea2d0187", "size": 159, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "stimakov123/CoinbasePro.jl", "max_stars_repo_head_hexsha": "03be51feb9b4d73236a4fa7f66765fbe2624ccda", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2021-06-26T18:10:55.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-18T21:32:35.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "stimakov123/CoinbasePro.jl", "max_issues_repo_head_hexsha": "03be51feb9b4d73236a4fa7f66765fbe2624ccda", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-03-22T19:34:43.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-18T08:31:39.000Z", "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "stimakov123/CoinbasePro.jl", "max_forks_repo_head_hexsha": "03be51feb9b4d73236a4fa7f66765fbe2624ccda", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2021-03-28T14:06:54.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-17T13:54:55.000Z", "avg_line_length": 14.4545454545, "max_line_length": 57, "alphanum_fraction": 0.7358490566, "num_tokens": 44}