text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
const USE_GPU = false # Use GPU? If this is set false, then no GPU needs to be available
using ParallelStencil
using ParallelStencil.FiniteDifferences2D
# Select the ParallelStencil backend at compile time: CUDA kernels when
# USE_GPU is true, multi-threaded CPU kernels otherwise. Both branches
# initialise a 2D Float64 stencil grid and define @pow/@tanh macros so the
# kernels below can call the same math interface on either backend.
@static if USE_GPU
    @init_parallel_stencil(CUDA, Float64, 2)
    # GPU intrinsics wrapped behind the shared macro names.
    macro pow(args...) esc(:(CUDA.pow($(args...)))) end
    macro tanh(args...) esc(:(CUDA.tanh($(args...)))) end
else
    @init_parallel_stencil(Threads, Float64, 2)
    # CPU fallbacks with the same macro interface as the GPU branch.
    pow(x,y) = x^y
    macro pow(args...) esc(:(pow($(args...)))) end
    macro tanh(args...) esc(:(Base.tanh($(args...)))) end
end
using Plots, Printf, Statistics, LinearAlgebra
# Snapshot porosity and solid-velocity divergence into their "_o" buffers
# (values from the previous physical time step), used by the Crank-Nicolson
# porosity update in compute_update!.
@parallel function update_old!(Phi_o::Data.Array, ∇V_o::Data.Array, Phi::Data.Array, ∇V::Data.Array)
    @all(Phi_o) = @all(Phi)
    @all(∇V_o) = @all(∇V)
    return
end
# Relax the non-linear coefficients and evaluate divergences:
# - EtaC:  effective compaction viscosity μs/ϕ*η2μs with a tanh-regularised
#          compaction/decompaction asymmetry (strength ratio R) switching on
#          the effective pressure (Pf-Pt) over width λPe; under-relaxed by θ_e.
# - K_muf: permeability/fluid-viscosity ratio, Carman-Kozeny-type power law
#          k_μf0*(ϕ/ϕ0)^nperm; under-relaxed by θ_k.
# - Rog:   mixture weight ρfg*ϕ + ρsg*(1-ϕ) minus the background ρgBG.
# - ∇V, ∇qD: divergence of solid velocity and of the Darcy flux.
@parallel function compute_params_∇!(EtaC::Data.Array, K_muf::Data.Array, Rog::Data.Array, ∇V::Data.Array, ∇qD::Data.Array, Phi::Data.Array, Pf::Data.Array, Pt::Data.Array, Vx::Data.Array, Vy::Data.Array, qDx::Data.Array, qDy::Data.Array, μs::Data.Number, η2μs::Data.Number, R::Data.Number, λPe::Data.Number, k_μf0::Data.Number, ϕ0::Data.Number, nperm::Data.Number, θ_e::Data.Number, θ_k::Data.Number, ρfg::Data.Number, ρsg::Data.Number, ρgBG::Data.Number, dx::Data.Number, dy::Data.Number)
    @all(EtaC) = (1.0-θ_e)*@all(EtaC) + θ_e*( μs/@all(Phi)*η2μs*(1.0+0.5*(1.0/R-1.0)*(1.0+@tanh((@all(Pf)-@all(Pt))/λPe))) )
    @all(K_muf) = (1.0-θ_k)*@all(K_muf) + θ_k*( k_μf0*@pow((@all(Phi)/ϕ0), nperm) )
    @all(Rog) = ρfg*@all(Phi) + ρsg*(1.0-@all(Phi)) - ρgBG
    @all(∇V) = @d_xa(Vx)/dx + @d_ya(Vy)/dy
    @all(∇qD) = @d_xa(qDx)/dx + @d_ya(qDy)/dy
    return
end
# Local pseudo-transient step for the fluid pressure (CFL-type limit using the
# maximum neighbouring permeability, reduced by Pfsc) and the residuals of the
# total-pressure (RPt) and fluid-pressure (RPf) equations. RPf keeps a damped
# memory of its previous value via Pfdmp to accelerate PT convergence.
@parallel function compute_RP!(dτPf::Data.Array, RPt::Data.Array, RPf::Data.Array, K_muf::Data.Array, ∇V::Data.Array, ∇qD::Data.Array, Pt::Data.Array, Pf::Data.Array, EtaC::Data.Array, Phi::Data.Array, Pfsc::Data.Number, Pfdmp::Data.Number, min_dxy2::Data.Number, dx::Data.Number, dy::Data.Number)
    @inn(dτPf) = min_dxy2/@maxloc(K_muf)/4.1/Pfsc
    @all(RPt) = - @all(∇V) - (@all(Pt) - @all(Pf))/(@all(EtaC)*(1.0-@all(Phi)))
    @all(RPf) = @all(RPf)*Pfdmp - @all(∇qD) + (@all(Pt) - @all(Pf))/(@all(EtaC)*(1.0-@all(Phi)))
    return
end
# Advance the pressures with their pseudo-transient steps (scalar dτPt for Pt,
# the locally varying dτPf array for Pf) and evaluate the deviatoric stresses.
# The β_n*RPt term implements the "numerical compressibility" of the
# accelerated PT scheme; σxy lives on the staggered (inner) grid.
@parallel function compute_P_τ!(Pt::Data.Array, Pf::Data.Array, τxx::Data.Array, τyy::Data.Array, σxy::Data.Array, RPt::Data.Array, RPf::Data.Array, dτPf::Data.Array, Vx::Data.Array, Vy::Data.Array, ∇V::Data.Array, dτPt::Data.Number, μs::Data.Number, β_n::Data.Number, dx::Data.Number, dy::Data.Number)
    @all(Pt) = @all(Pt) + dτPt *@all(RPt)
    @all(Pf) = @all(Pf) + @all(dτPf)*@all(RPf)
    @all(τxx) = 2.0*μs*( @d_xa(Vx)/dx - 1.0/3.0*@all(∇V) - β_n*@all(RPt) )
    @all(τyy) = 2.0*μs*( @d_ya(Vy)/dy - 1.0/3.0*@all(∇V) - β_n*@all(RPt) )
    @all(σxy) = 2.0*μs*(0.5*( @d_yi(Vx)/dy + @d_xi(Vy)/dx ))
    return
end
# Momentum residuals Rx, Ry (divergence of deviatoric stress minus total
# pressure gradient; Ry additionally carries the buoyancy term Rog) and the
# damped velocity update directions dVxdτ/dVydτ, which accumulate residual
# history weighted by dampX/dampY to accelerate PT convergence.
@parallel function compute_res!(Rx::Data.Array, Ry::Data.Array, dVxdτ::Data.Array, dVydτ::Data.Array, τxx::Data.Array, τyy::Data.Array, σxy::Data.Array, Pt::Data.Array, Rog::Data.Array, dampX::Data.Number, dampY::Data.Number, dx::Data.Number, dy::Data.Number)
    @all(Rx) = @d_xi(τxx)/dx + @d_ya(σxy)/dy - @d_xi(Pt)/dx
    @all(Ry) = @d_yi(τyy)/dy + @d_xa(σxy)/dx - @d_yi(Pt)/dy - @av_yi(Rog)
    @all(dVxdτ) = dampX*@all(dVxdτ) + @all(Rx)
    @all(dVydτ) = dampY*@all(dVydτ) + @all(Ry)
    return
end
# Update the inner velocity points with the damped PT step, evaluate Darcy
# fluxes from Darcy's law (only qDy carries the buoyancy term ρfg-ρgBG),
# and advance porosity with a Crank-Nicolson blend (weight CN) of the old
# and current solid-velocity divergence over the physical step dt.
@parallel function compute_update!(Vx::Data.Array, Vy::Data.Array, qDx::Data.Array, qDy::Data.Array, Phi::Data.Array, dVxdτ::Data.Array, dVydτ::Data.Array, K_muf::Data.Array, Pf::Data.Array, Phi_o::Data.Array, ∇V::Data.Array, ∇V_o::Data.Array, dτV::Data.Number, ρfg::Data.Number, ρgBG::Data.Number, CN::Data.Number, dt::Data.Number, dx::Data.Number, dy::Data.Number)
    @inn(Vx) = @inn(Vx) + dτV*@all(dVxdτ)
    @inn(Vy) = @inn(Vy) + dτV*@all(dVydτ)
    @inn(qDx) = -@av_xi(K_muf)*(@d_xi(Pf)/dx)
    @inn(qDy) = -@av_yi(K_muf)*(@d_yi(Pf)/dy + (ρfg - ρgBG))
    @all(Phi) = @all(Phi_o) + (1.0-@all(Phi))*(CN*@all(∇V_o) + (1.0-CN)*@all(∇V))*dt
    return
end
# Zero-gradient boundary condition in x: copy the first/last interior column
# onto the boundary columns. The kernel is launched over a full 2D range, so
# `ix` is unused and every column of threads redundantly writes the same
# boundary values (harmless).
@parallel_indices (ix,iy) function bc_x!(A::Data.Array)
    A[1 , iy] = A[2 , iy]
    A[end, iy] = A[end-1, iy]
    return
end
# Zero-gradient boundary condition in y: copy the first/last interior row
# onto the boundary rows. `iy` is unused for the same reason as in bc_x!.
@parallel_indices (ix,iy) function bc_y!(A::Data.Array)
    A[ix, 1 ] = A[ix, 2 ]
    A[ix, end] = A[ix, end-1]
    return
end
##################################################
# Main driver: two-phase (porous solid + fluid) hydro-mechanical solver on a
# 2D staggered grid. Each physical time step solves the coupled Stokes/Darcy
# system with an accelerated pseudo-transient (PT) iteration, then advances
# porosity. Results are rendered to ./viz2D_out and a GIF animation.
@views function HydroMech2D()
    # Physics - scales
    ρfg = 1.0 # fluid rho*g
    k_μf0 = 1.0 # reference permeability
    ηC0 = 1.0 # reference bulk viscosity
    # Physics - non-dimensional parameters
    η2μs = 10.0 # bulk/shear viscosity ratio
    R = 500.0 # Compaction/decompaction strength ratio for bulk rheology
    nperm = 3.0 # Carman-Kozeny exponent
    ϕ0 = 0.01 # reference porosity
    ra = 2 # domain aspect ratio (ly = ra*lx); also scales the y grid resolution
    λ0 = 1.0 # standard deviation of initial porosity perturbation
    t_tot = 0.02 # total time
    # Physics - dependent scales
    ρsg = 2.0*ρfg # solid rho*g
    lx = 20.0 # domain size x
    ly = ra*lx # domain size y
    ϕA = 2*ϕ0 # amplitude of initial porosity perturbation
    λPe = 0.01 # effective pressure transition zone
    dt = 1e-5 # physical time-step
    # Numerics
    CN = 0.5 # Crank-Nicolson CN=0.5, Backward Euler CN=0.0
    res = 128
    nx, ny = res-1, ra*res-1 # numerical grid resolutions; should be a multiple of 32-1 for optimal GPU perf
    ε = 1e-5 # non-linear tolerance
    iterMax = 5e3 # max nonlinear iterations
    nout = 200 # error checking frequency
    β_n = 1.0 # numerical compressibility
    Vdmp = 5.0 # velocity damping for momentum equations
    Pfdmp = 0.8 # fluid pressure damping for momentum equations
    Vsc = 2.0 # reduction of PT steps for velocity
    Ptsc = 2.0 # reduction of PT steps for total pressure
    Pfsc = 4.0 # reduction of PT steps for fluid pressure
    θ_e = 9e-1 # relaxation factor for non-linear viscosity
    θ_k = 1e-1 # relaxation factor for non-linear permeability
    dt_red = 1e-3 # reduction of physical timestep
    # Derived physics
    μs = ηC0*ϕ0/η2μs # solid shear viscosity
    λ = λ0*sqrt(k_μf0*ηC0) # initial perturbation width
    ρgBG = ρfg*ϕ0 + ρsg*(1.0-ϕ0) # Background density
    # Derived numerics
    dx, dy = lx/(nx-1), ly/(ny-1) # grid step in x, y
    min_dxy2 = min(dx,dy)^2
    dτV = min_dxy2/μs/(1.0+β_n)/4.1/Vsc # PT time step for velocity
    dτPt = 4.1*μs*(1.0+β_n)/max(nx,ny)/Ptsc # PT time step for total pressure
    dampX = 1.0-Vdmp/nx # damping factors for the momentum residual memory
    dampY = 1.0-Vdmp/ny
    # Array allocations (backend arrays via @zeros; sizes follow the staggering)
    Phi_o = @zeros(nx ,ny )
    Pt = @zeros(nx ,ny )
    Pf = @zeros(nx ,ny )
    Rog = @zeros(nx ,ny )
    ∇V = @zeros(nx ,ny )
    ∇V_o = @zeros(nx ,ny )
    ∇qD = @zeros(nx ,ny )
    dτPf = @zeros(nx ,ny )
    RPt = @zeros(nx ,ny )
    RPf = @zeros(nx ,ny )
    τxx = @zeros(nx ,ny )
    τyy = @zeros(nx ,ny )
    σxy = @zeros(nx-1,ny-1)
    dVxdτ = @zeros(nx-1,ny-2)
    dVydτ = @zeros(nx-2,ny-1)
    Rx = @zeros(nx-1,ny-2)
    Ry = @zeros(nx-2,ny-1)
    Vx = @zeros(nx+1,ny )
    Vy = @zeros(nx ,ny+1)
    qDx = @zeros(nx+1,ny )
    # Initial conditions (built as host arrays, converted to Data.Array below)
    qDy = zeros(nx ,ny+1)
    Phi = ϕ0*ones(nx ,ny )
    Radc = zeros(nx ,ny )
    # Elliptical perturbation region (4:1 in x:y) centred at (lx/2, ly/4).
    Radc .= [(((ix-1)*dx-0.5*lx)/λ/4.0)^2 + (((iy-1)*dy-0.25*ly)/λ)^2 for ix=1:size(Radc,1), iy=1:size(Radc,2)]
    Phi[Radc.<1.0] .= Phi[Radc.<1.0] .+ ϕA
    EtaC = μs./Phi.*η2μs
    K_muf = k_μf0.*(Phi./ϕ0)
    # NOTE(review): `mean.` broadcasts over the scalars of the last column, so
    # ϕ0bc is that column itself — `mean(Phi[:,end])` (no dot) may have been
    # intended; confirm against the upstream miniapp.
    ϕ0bc = mean.(Phi[:,end])
    # Inflow/outflow Darcy flux at the bottom/top boundary rows.
    qDy[:,[1,end]] .= (ρsg.-ρfg).*(1.0.-ϕ0bc).*k_μf0.*(ϕ0bc./ϕ0).^nperm
    Phi = Data.Array(Phi)
    EtaC = Data.Array(EtaC)
    K_muf = Data.Array(K_muf)
    qDy = Data.Array(qDy)
    t = 0.0
    it = 1
    # Preparation of visualisation
    ENV["GKSwstype"]="nul"; if isdir("viz2D_out")==false mkdir("viz2D_out") end; loadpath = "./viz2D_out/"; anim = Animation(loadpath,String[])
    println("Animation directory: $(anim.dir)")
    X, Y, Yv = 0:dx:lx, 0:dy:ly, (-dy/2):dy:(ly+dy/2)
    # Time loop
    while t<t_tot
        @parallel update_old!(Phi_o, ∇V_o, Phi, ∇V)
        err=2*ε; iter=1; niter=0
        # Pseudo-transient iteration: converge the non-linear system for this step.
        while err > ε && iter <= iterMax
            # Start the timer after 10 warm-up iterations (excludes compilation).
            if (iter==11) global wtime0 = Base.time() end
            @parallel compute_params_∇!(EtaC, K_muf, Rog, ∇V, ∇qD, Phi, Pf, Pt, Vx, Vy, qDx, qDy, μs, η2μs, R, λPe, k_μf0, ϕ0, nperm, θ_e, θ_k, ρfg, ρsg, ρgBG, dx, dy)
            @parallel compute_RP!(dτPf, RPt, RPf, K_muf, ∇V, ∇qD, Pt, Pf, EtaC, Phi, Pfsc, Pfdmp, min_dxy2, dx, dy)
            @parallel (1:size(dτPf,1), 1:size(dτPf,2)) bc_x!(dτPf)
            @parallel (1:size(dτPf,1), 1:size(dτPf,2)) bc_y!(dτPf)
            @parallel compute_P_τ!(Pt, Pf, τxx, τyy, σxy, RPt, RPf, dτPf, Vx, Vy, ∇V, dτPt, μs, β_n, dx, dy)
            @parallel compute_res!(Rx, Ry, dVxdτ, dVydτ, τxx, τyy, σxy, Pt, Rog, dampX, dampY, dx, dy)
            @parallel compute_update!(Vx, Vy, qDx, qDy, Phi, dVxdτ, dVydτ, K_muf, Pf, Phi_o, ∇V, ∇V_o, dτV, ρfg, ρgBG, CN, dt, dx, dy)
            # Free-slip/zero-gradient boundaries for velocities and Darcy fluxes.
            @parallel (1:size(Vx,1), 1:size(Vx,2)) bc_y!(Vx)
            @parallel (1:size(Vy,1), 1:size(Vy,2)) bc_x!(Vy)
            @parallel (1:size(qDx,1), 1:size(qDx,2)) bc_y!(qDx)
            @parallel (1:size(qDy,1), 1:size(qDy,2)) bc_x!(qDy)
            if mod(iter,nout)==0
                # `global` exposes the norms at module scope (post-run inspection).
                global norm_Ry, norm_RPf
                norm_Ry = norm(Ry)/length(Ry); norm_RPf = norm(RPf)/length(RPf); err = max(norm_Ry, norm_RPf)
                # @printf("iter = %d, err = %1.3e [norm_Ry=%1.3e, norm_RPf=%1.3e] \n", iter, err, norm_Ry, norm_RPf)
            end
            iter+=1; niter+=1
        end
        # Performance
        wtime = Base.time()-wtime0
        A_eff = (8*2)/1e9*nx*ny*sizeof(Data.Number) # Effective main memory access per iteration [GB] (Lower bound of required memory access: Te has to be read and written: 2 whole-array memaccess; Ci has to be read: : 1 whole-array memaccess)
        wtime_it = wtime/(niter-10) # Execution time per iteration [s]
        T_eff = A_eff/wtime_it # Effective memory throughput [GB/s]
        @printf("it = %d, time = %1.3e sec (@ T_eff = %1.2f GB/s) \n", it, wtime, round(T_eff, sigdigits=2))
        # Visualisation every 5 physical steps
        default(size=(500,700))
        if mod(it,5)==0
            p1 = heatmap(X, Y, Array(Phi)' , aspect_ratio=1, xlims=(X[1],X[end]), ylims=(Y[1],Y[end]), c=:viridis, title="porosity")
            p2 = heatmap(X, Y, Array(Pt-Pf)', aspect_ratio=1, xlims=(X[1],X[end]), ylims=(Y[1],Y[end]), c=:viridis, title="effective pressure")
            p3 = heatmap(X, Yv, Array(qDy)' , aspect_ratio=1, xlims=(X[1],X[end]), ylims=(Yv[1],Yv[end]), c=:viridis, title="vertical Darcy flux")
            p4 = heatmap(X, Yv, Array(Vy)' , aspect_ratio=1, xlims=(X[1],X[end]), ylims=(Yv[1],Yv[end]), c=:viridis, title="vertical velocity")
            display(plot(p1, p2, p3, p4)); frame(anim)
        end
        # Adaptive physical time step limited by the current divergence magnitude.
        dt = dt_red/(1e-10+maximum(abs.(∇V)))
        t = t + dt
        it+=1
    end
    gif(anim, "HydroMech2D.gif", fps = 15)
    return
end
HydroMech2D() # run the simulation when this script is executed
|
{"hexsha": "e7a6e4f1149f7af2fc08fe5519b23784d3ac74bb", "size": 11629, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "miniapps/HydroMech2D.jl", "max_stars_repo_name": "deconvolution/ParallelStencil.jl", "max_stars_repo_head_hexsha": "58dddae5ba46053fa4a942e2a271422d97f6013e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-12T15:49:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-12T15:49:12.000Z", "max_issues_repo_path": "miniapps/HydroMech2D.jl", "max_issues_repo_name": "boriskaus/ParallelStencil.jl", "max_issues_repo_head_hexsha": "a790e954e99f5b558a8022bf850e685f780cbd18", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "miniapps/HydroMech2D.jl", "max_forks_repo_name": "boriskaus/ParallelStencil.jl", "max_forks_repo_head_hexsha": "a790e954e99f5b558a8022bf850e685f780cbd18", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 54.3411214953, "max_line_length": 490, "alphanum_fraction": 0.5520681056, "num_tokens": 4469}
|
# phylo/manipulation.jl
# =====================
#
# Types and methods for phylogenetic trees.
#
# Part of the Bio.Phylo module.
#
# This file contains methods for manipulating the structure of phylogenies.
#
# This file is a part of BioJulia. License is MIT: https://github.com/BioJulia/Bio.jl/blob/master/LICENSE.md
# Internal functions
"""
unconnected_clades(tree::Phylogeny)
**Unexported function for internal use.**
Gets the internal clade vertices which are not connected to either parents or
children.
"""
function unconnected_clades(tree::Phylogeny)
cv = clades(tree)
idx = find(degree(tree.graph, cv) .== 0)
return cv[idx]
end
"""
subtree_roots(tree::Phylogeny)
**Unexported function for internal use.**
Gets clade vertices from the phylogeny which are roots of detached subphylogenies.
Such vertices have children, but no parent, and are not THE root of the
phylogeny (n + 1). Such subtrees are often created after pruning of trees.
"""
function subtree_roots(tree::Phylogeny)
cv = clades(tree)
idx = find(haschildren(tree, cv) & !hasparent(tree, cv))
return cv[idx]
end
"""
disconnect_root(tree::Phylogeny)
**Unexported function for internal use.**
Unconnects the root vertex from all of its children.
"""
function disconnect_root!(tree::Phylogeny)
r = root(tree)
for i in children(tree, r)
destroy_branch!(tree, Edge(r, i))
end
return tree
end
"""
delete!(tree::Phylogeny, vertex::Int, preserve_bl::Bool = false)
Delete a node from a phylogenetic tree.
"""
function Base.delete!(tree::Phylogeny, vertex::Int, preserve_bl::Bool = false)
p = Phylogenies.parent(tree, vertex)
# Delete the connection to parent but remember the branchlength of the
# deleted branch.
parentedge = Edge(p, vertex)
lentoparent = preserve_bl ? branchlength(tree, parentedge) : 0.0
destroy_branch!(tree, parentedge)
for branch in child_branches(tree, vertex)
newbl = branchlength(tree, branch) + lentoparent
add_branch!(tree, Edge(parent, dst(branch)), newbl)
destroy_branch!(tree, Edge(vertex, dst(branch)))
end
return tree
end
|
{"hexsha": "df8136eca19d035feef2b6c6373f6543ef6f6c87", "size": 2152, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/manipulation.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/Phylogenies.jl-875022e3-a016-506a-99d3-0712b6c7904b", "max_stars_repo_head_hexsha": "1eaa2d838ed3eba02a8a51608771e82f14405920", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2017-10-07T22:04:59.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-07T18:45:14.000Z", "max_issues_repo_path": "src/manipulation.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/Phylogenies.jl-875022e3-a016-506a-99d3-0712b6c7904b", "max_issues_repo_head_hexsha": "1eaa2d838ed3eba02a8a51608771e82f14405920", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2017-03-21T12:11:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-25T12:57:09.000Z", "max_forks_repo_path": "src/manipulation.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/Phylogenies.jl-875022e3-a016-506a-99d3-0712b6c7904b", "max_forks_repo_head_hexsha": "1eaa2d838ed3eba02a8a51608771e82f14405920", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2017-04-01T08:46:42.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-09T02:17:28.000Z", "avg_line_length": 27.2405063291, "max_line_length": 108, "alphanum_fraction": 0.7039962825, "num_tokens": 558}
|
"""
Nanocar Avogadro 2 plug-in - LAMMPS setup.
Write LAMMPs simulation input files.
Author: Kutay B. Sezginel
Date: November 2018
"""
import os
import sys
import json
import argparse
from angstrom import Molecule
import numpy as np
import periodictable
from lammps_writer import write_data_file, write_input_file
# Force fields recognised by the plug-in (not referenced elsewhere in this
# chunk — presumably consumed by other plug-in scripts; confirm).
FF_LIST = ['UFF', 'UFF4MOF', 'DREIDING']
# Absolute directory containing this script; used as the default save
# location and to locate surface_info.json.
PLUGIN_DIR = os.path.abspath(os.path.dirname(__file__))
def get_options():
    """Create user interface options.

    Builds the Avogadro user-option schema: box dimensions default to the
    most recently built surface's extents (converted from Angstrom to nm).
    """
    surface = read_surface_info()
    user_options = {
        'box_x': {'label': 'Simulation Box X (nm)',
                  'type': 'float',
                  'default': surface['x'] / 10},
        'box_y': {'label': 'Simulation Box Y (nm)',
                  'type': 'float',
                  'default': surface['y'] / 10},
        'box_z': {'label': 'Simulation Box Z (nm)',
                  'type': 'float',
                  'default': 3.0},
        'timestep': {'label': 'Timestep (fs)',
                     'type': 'float',
                     'default': 1.0},
        'sim_length': {'label': 'Simulation length (ns)',
                       'type': 'float',
                       'default': 1.0},
        'dir': {'label': 'Save directory',
                'type': 'string',
                'default': PLUGIN_DIR},
    }
    return {'userOptions': user_options}
def run_workflow():
    """Run main function - LAMMPS setup.

    Reads the JSON options document Avogadro pipes to stdin and hands it to
    setup_lammps().
    """
    setup_lammps(json.loads(sys.stdin.read()))
def setup_lammps(opts):
    """Write LAMMPS simulation files.

    Builds a Molecule from the cjson structure in `opts`, centers it in the
    requested box, and writes `data.nanocar` plus `in.nanocar` to the chosen
    directory (falling back to the plug-in directory if it does not exist).
    """
    # Read structure information
    coords = np.array(opts['cjson']['atoms']['coords']['3d'])
    atoms = [periodictable.elements[i].symbol for i in opts['cjson']['atoms']['elements']['number']]
    # cjson coords come as a flat [x0, y0, z0, x1, ...] list -> reshape to Nx3.
    nanocar = Molecule(atoms=atoms, coordinates=np.array(coords).reshape((int(len(coords) / 3)), 3))
    # Box sizes were entered in nm; *10 presumably converts to Angstrom for
    # LAMMPS — confirm against lammps_writer's expected units.
    opts['box_x'], opts['box_y'], opts['box_z'] = opts['box_x'] * 10, opts['box_y'] * 10, opts['box_z'] * 10
    nanocar.set_cell([opts['box_x'], opts['box_y'], opts['box_z'], 90, 90, 90])
    nanocar.center([opts['box_x'] / 2, opts['box_y'] / 2, opts['box_z'] / 2])
    if not os.path.isdir(opts['dir']):
        opts['dir'] = PLUGIN_DIR
        print('Directory not found! Using plug-in directory -> %s' % PLUGIN_DIR)
    data_file = os.path.join(opts['dir'], 'data.nanocar')
    write_data_file(data_file, nanocar)
    # Write input file
    surface_info = read_surface_info()
    surface_ids = surface_info['id']
    surface_atoms = surface_ids[1] - surface_ids[0]
    num_atoms = len(nanocar.atoms)
    # Split atom ids into surface vs. molecule ranges depending on whether the
    # surface atoms come first (id 1) or last in the combined structure.
    if surface_ids[0] == 1:
        mol_ids = [num_atoms - surface_atoms, num_atoms]
    else:
        mol_ids = [1, num_atoms - surface_atoms - 1]
    input_file = os.path.join(opts['dir'], 'in.nanocar')
    inp_parameters = {'sim_length': opts['sim_length'], 'ts': opts['timestep'],
                      'mol_ids': mol_ids, 'surface_ids': surface_ids, 'T': 300}
    write_input_file(input_file, nanocar, inp_parameters)
def read_surface_info():
    """Read surface size for the last metal surface built.

    Returns the parsed contents of surface_info.json from the plug-in
    directory, or a zeroed placeholder when no surface has been built yet.
    """
    info_path = os.path.join(PLUGIN_DIR, 'surface_info.json')
    if not os.path.exists(info_path):
        return {'x': 0.0, 'y': 0.0, 'id': [0, 0]}
    with open(info_path, 'r') as f:
        return json.load(f)
if __name__ == "__main__":
parser = argparse.ArgumentParser('LAMMPS!')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--print-options', action='store_true')
parser.add_argument('--run-workflow', action='store_true')
parser.add_argument('--display-name', action='store_true')
parser.add_argument('--menu-path', action='store_true')
parser.add_argument('--lang', nargs='?', default='en')
args = vars(parser.parse_args())
debug = args['debug']
if args['display_name']:
print("LAMMPS setup")
if args['menu_path']:
print("&Build|Nanocar")
if args['print_options']:
print(json.dumps(get_options()))
elif args['run_workflow']:
print(json.dumps(run_workflow()))
|
{"hexsha": "49e8724de2a16d55b63eebb815f331b54f246b57", "size": 4333, "ext": "py", "lang": "Python", "max_stars_repo_path": "lammps_setup.py", "max_stars_repo_name": "mss1451/nanocar-avogadro", "max_stars_repo_head_hexsha": "429a14482a6768da2cecd3ef2546318791516dec", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2019-07-09T06:16:27.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-10T12:56:28.000Z", "max_issues_repo_path": "lammps_setup.py", "max_issues_repo_name": "mss1451/nanocar-avogadro", "max_issues_repo_head_hexsha": "429a14482a6768da2cecd3ef2546318791516dec", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lammps_setup.py", "max_forks_repo_name": "mss1451/nanocar-avogadro", "max_forks_repo_head_hexsha": "429a14482a6768da2cecd3ef2546318791516dec", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-07-08T23:42:18.000Z", "max_forks_repo_forks_event_max_datetime": "2019-07-08T23:42:18.000Z", "avg_line_length": 35.2276422764, "max_line_length": 108, "alphanum_fraction": 0.5905838911, "include": true, "reason": "import numpy", "num_tokens": 1101}
|
import pandas as pd
from todoist.api import TodoistAPI
import os
import sys
import numpy as np
import datetime
def create_api():
    """Build a TodoistAPI client from the TODOIST_TOKEN environment variable.

    Exits the process with status 1 (after writing to stderr) when the
    variable is not set.
    """
    token = os.environ.get('TODOIST_TOKEN')
    if token is None:
        sys.stderr.write("TODOIST_* environment variable not set\n")
        sys.exit(1)
    return TodoistAPI(token)
def transform_act(act_dict):
    """
    Function to Transform TodoistAPI Returned Activity Dictionary
    -----INPUT-----
    act_dict: the list of activity dictionaries returned by
    TodoistAPI.activity.get(); each record may carry a nested 'extra_data'
    dict. The input is NOT modified.
    -----OUTPUT-----
    act_df: the standardized Pandas Dataframe for activity history, with all
    possible variables included
    """
    # Flatten each record's nested 'extra_data' into the top level. Build
    # copies instead of mutating the caller's records (the previous version
    # destructively deleted 'extra_data' from the input), and tolerate
    # records where 'extra_data' is absent or None.
    flattened = []
    for record in act_dict:
        row = {key: value for key, value in record.items() if key != 'extra_data'}
        row.update(record.get('extra_data') or {})
        flattened.append(row)
    act_df = pd.DataFrame(flattened)
    # Guarantee the full standard column set so downstream dtype coercion
    # (df_standardization) never hits a missing column.
    vnames = ['client', 'content', 'due_date', 'event_date', 'event_type',
              'id', 'initiator_id', 'last_content', 'last_due_date',
              'name', 'object_id', 'object_type', 'parent_item_id',
              'parent_project_id']
    for vname in vnames:
        if vname not in act_df.columns:
            act_df[vname] = np.nan
    return act_df
def df_standardization(df):
    """
    Function to Standardize the Downloaded or Loaded Todoist Activity DataFrame
    -----INPUT-----
    df: activity dataframe that has been created with transform_act()
    -----OUTPUT-----
    df: activity Pandas Dataframe with needed dtypes for further analysis
    """
    # Coerce each column group to its analysis dtype; the frame is modified
    # in place and also returned for chaining.
    categorical_cols = ('client', 'event_type', 'object_type')
    datetime_cols = ('due_date', 'event_date', 'last_due_date')
    string_cols = ('content', 'id', 'initiator_id', 'last_content',
                   'name', 'object_id')
    float_cols = ('parent_item_id', 'parent_project_id')
    for col in categorical_cols:
        df[col] = df[col].astype('category')
    for col in datetime_cols:
        df[col] = pd.to_datetime(df[col])
    for col in string_cols:
        df[col] = df[col].astype(str)
    for col in float_cols:
        df[col] = df[col].astype(float)
    return df
def act_fetch_all(until=None):
    """
    Function to Download All Activity History as Todoist Activity
    -----INPUT-----
    until: a datetime.datetime object that signifies the latest time the
    user wants to fetch the history. Defaults to the time of the execution.
    -----OUTPUT-----
    act_df: a standardized Activity DataFrame with all activity history
    -----DEPENDENCY-----
    create_api(): builds the TodoistAPI engine from the environment token
    transform_act(): transforms dictionary to Pandas DataFrame
    df_standardization(): standardizes Pandas DataFrame
    """
    # Bug fix: the old default `until=datetime.datetime.now()` was evaluated
    # once at module import, so every call shared a stale timestamp. A None
    # sentinel defers the evaluation to call time, matching the docstring.
    if until is None:
        until = datetime.datetime.now()
    api_engine = create_api()
    until_param = until.strftime('%Y-%m-%dT%H:%M:%S')
    act_df = df_standardization(transform_act(
        api_engine.activity.get(limit=100, until=until_param)))
    new_len = len(act_df)
    # The API caps each page at 100 events; page backwards in time until a
    # short page signals the full history has been fetched.
    while new_len == 100:
        until = act_df.event_date.min() - datetime.timedelta(seconds=1)
        until_param = until.strftime('%Y-%m-%dT%H:%M:%S')
        new_df = df_standardization(transform_act(
            api_engine.activity.get(limit=100, until=until_param)))
        new_len = len(new_df)
        # DataFrame.append was removed in pandas 2.0; concat preserves the
        # old behavior (original indices kept).
        act_df = pd.concat([act_df, new_df])
    return act_df
def act_fetch_new(since, until=None):
    """
    Function to Update Activity History as Todoist Activity
    -----INPUT-----
    since: datetime object of the latest event_date from loaded records
    until: a datetime.datetime object that signifies the latest time the
    user wants to fetch the history. Defaults to the time of the execution.
    -----OUTPUT-----
    df: the new activity history since last fetch (standardized DataFrame)
    -----DEPENDENCY-----
    create_api(): builds the TodoistAPI engine from the environment token
    transform_act(): transforms dictionary to Pandas DataFrame
    df_standardization(): standardizes Pandas DataFrame
    """
    # Bug fix: `until=datetime.datetime.now()` as a default was evaluated once
    # at module import; the None sentinel makes it reflect the call time.
    if until is None:
        until = datetime.datetime.now()
    api_engine = create_api()
    since_param = since.strftime('%Y-%m-%dT%H:%M:%S')
    until_param = until.strftime('%Y-%m-%dT%H:%M:%S')
    df = df_standardization(transform_act(
        api_engine.activity.get(limit=100, since=since_param,
                                until=until_param)))
    new_len = len(df)
    # Page forwards in time while full (100-event) pages keep arriving.
    while new_len == 100:
        since = df.event_date.max() + datetime.timedelta(seconds=1)
        since_param = since.strftime('%Y-%m-%dT%H:%M:%S')
        new_df = df_standardization(transform_act(
            api_engine.activity.get(limit=100, since=since_param,
                                    until=until_param)))
        new_len = len(new_df)
        # pandas 2.0 removed DataFrame.append; concat is the equivalent.
        df = pd.concat([df, new_df])
    return df
|
{"hexsha": "a4555ab06cd28bdca18b01cc2b1deee6f7203bc4", "size": 5045, "ext": "py", "lang": "Python", "max_stars_repo_path": "todoist_functions.py", "max_stars_repo_name": "ElvinOuyang/todoist-history-analytics", "max_stars_repo_head_hexsha": "f3eb9629c84e3878af39524e8e5694bd3c743973", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-10-04T10:48:45.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-23T23:59:17.000Z", "max_issues_repo_path": "todoist_functions.py", "max_issues_repo_name": "ElvinOuyang/todoist-history-analytics", "max_issues_repo_head_hexsha": "f3eb9629c84e3878af39524e8e5694bd3c743973", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "todoist_functions.py", "max_forks_repo_name": "ElvinOuyang/todoist-history-analytics", "max_forks_repo_head_hexsha": "f3eb9629c84e3878af39524e8e5694bd3c743973", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-10-04T10:51:31.000Z", "max_forks_repo_forks_event_max_datetime": "2020-10-04T10:51:31.000Z", "avg_line_length": 38.8076923077, "max_line_length": 79, "alphanum_fraction": 0.664222002, "include": true, "reason": "import numpy", "num_tokens": 1147}
|
import numpy as np
import tensorflow as tf
from pre_processing import get_data
# Load the pre-split dataset (trials of 22 x 1000 samples, integer labels in
# [0, 4)) and echo array shapes as a sanity check.
X_test, y_test, X_train, y_train = get_data()
print(X_test.shape)
print(y_test.shape)
print(X_train.shape)
print(y_train.shape)
# TF1-style graph construction: placeholders for a batch of 22x1000 inputs
# and integer class labels; labels are one-hot encoded for the loss.
x_in = tf.placeholder(tf.float32, [None, 22, 1000], name="input_x")
y_real = tf.placeholder(tf.int32, [None], name="real_y")
y_temp = tf.one_hot(y_real, 4)
# Add a singleton channel dimension so conv2d receives NHWC data.
input_layer = tf.reshape(x_in, [-1, 22, 1000, 1], name="reshaped_x")
conv1 = tf.layers.conv2d(
    inputs=input_layer,
    filters=32,
    kernel_size=[3, 3],
    padding="same",
    activation=tf.nn.relu)
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
conv2 = tf.layers.conv2d(
    inputs=pool1,
    filters=64,
    kernel_size=[3, 3],
    padding="same",
    activation=tf.nn.relu)
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
# After two 2x2 poolings: 22 -> 11 -> 5 and 1000 -> 500 -> 250, 64 filters,
# hence the 5*250*64 flattened width.
pool2_flat = tf.reshape(pool2, [-1, (5*250*64)])
aff3 = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
logits = tf.layers.dense(inputs=aff3, units=4)
# calculate accuracy
prediction = tf.argmax(logits, 1, output_type=tf.int32)
my_acc = tf.reduce_mean(tf.cast(tf.equal(y_real, prediction), tf.float32))
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_temp, logits=logits))
optimizer = tf.train.AdamOptimizer().minimize(loss)
# NOTE(review): the summary writer is never closed; path says "base_rnn" but
# this graph is a CNN — presumably copied from another experiment; confirm.
writer = tf.summary.FileWriter("./graphs/base_rnn/1")
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    sess.run(tf.local_variables_initializer())
    writer.add_graph(sess.graph)
    # 15 outer rounds; each round draws one random subset of 300 training
    # trials and runs 20 gradient steps on that same subset, then evaluates
    # on the full test set.
    for k in range(15):
        t_size = np.array(range(X_train.shape[0]))
        np.random.shuffle(t_size)
        # NOTE(review): despite its name, `test_mask` selects TRAINING
        # examples; no RNG seed is set, so runs are not reproducible.
        test_mask = t_size[:300]
        print("*****k is:", k)
        for i in range(20):
            inputs = {x_in: X_train[test_mask], y_real: y_train[test_mask]}
            cost, _ = sess.run([loss, optimizer], feed_dict=inputs)
            print("training loss is:", cost)
        inputs = {x_in: X_test, y_real: y_test}
        accu, cost = sess.run([my_acc, loss], feed_dict=inputs)
        print("----test accuracy is:", accu)
        print("test cost is:", cost)
|
{"hexsha": "75e4936c56f865ac94dc7f264bc68964de318fab", "size": 2157, "ext": "py", "lang": "Python", "max_stars_repo_path": "vanilla_cnn.py", "max_stars_repo_name": "charliezjw/Neural-Signal-Decoder", "max_stars_repo_head_hexsha": "fb0df09ba0314724c7c90141bd47cc8fb0201b7a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "vanilla_cnn.py", "max_issues_repo_name": "charliezjw/Neural-Signal-Decoder", "max_issues_repo_head_hexsha": "fb0df09ba0314724c7c90141bd47cc8fb0201b7a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vanilla_cnn.py", "max_forks_repo_name": "charliezjw/Neural-Signal-Decoder", "max_forks_repo_head_hexsha": "fb0df09ba0314724c7c90141bd47cc8fb0201b7a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.1940298507, "max_line_length": 95, "alphanum_fraction": 0.675475197, "include": true, "reason": "import numpy", "num_tokens": 593}
|
"""
Methods to simplify making routine / standard plots.
"""
import numpy as np
import copy
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import astropy.units as u
from astropy.visualization.wcsaxes import WCSAxes
from astropy.coordinates import SkyCoord
# simply importing sunpy.cm will register the sunpy colortables with matplotlib
import sunpy.cm
# determine the default matplotlib backend when the module is loaded, so the
# plotting helpers can restore it after temporarily switching backends
mpl_backend_default = matplotlib.get_backend()
# specify the backend for non-interactive plots (file output without display)
mpl_backend_non_interactive = 'agg'
def plot_image_rs(map_in, xrange=[-1.4, 1.4], yrange=[-1.4, 1.4], log_min=0.5, log_max=3.5,
                  cmap_name=None, outfile=None, dpi=100, save_interactive=False):
    """
    Quick method to plot a map by specifying the x and y range in SOLAR coordinates.
    - map_in: a sunpy Map-like object (must provide rsun_obs, wcs, plot_settings,
      coordinate_frame, plot, world_to_pixel).
    - xrange and yrange are two elements lists or tuples that specify the solar coords in Rs
      - e.g. xrange=[-1.3, 1.3], yrange=[-1.3, 1.3]
    - log_min/log_max: log10 bounds of the image color normalization.
    - if a output file is specified, it will switch to a non-interactive backend and save the file
      without showing the plot (unless save_interactive=True).
    - cmap_name (optional) is a string that specifies a sunpy or matplotlib colormap
    """
    # I don't want to modify the input map at all --> copy the map object just in case
    # NOTE(review): the local name `map` shadows the builtin; harmless here but
    # worth renaming in a refactor.
    map = copy.deepcopy(map_in)
    # info from the map
    rs_obs = map.rsun_obs
    # get the coordinate positions of the x and y ranges (Rs -> arcsec)
    x0 = xrange[0]*rs_obs.value*u.arcsec
    x1 = xrange[1]*rs_obs.value*u.arcsec
    y0 = yrange[0]*rs_obs.value*u.arcsec
    y1 = yrange[1]*rs_obs.value*u.arcsec
    bot_left = SkyCoord(x0, y0, frame=map.coordinate_frame)
    top_right = SkyCoord(x1, y1, frame=map.coordinate_frame)
    # experiment with different styles of plotting the x and y window
    # using "limits" lets you plot outside of the image window, which can be important
    # for aligning images.
    plot_method = 'limits'
    if plot_method == 'submap':
        map = map.submap(bot_left, top_right)
    # setup the optional colormap
    if cmap_name is not None:
        cmap = plt.get_cmap(cmap_name)
        map.plot_settings['cmap'] = cmap
    # Set the map plot min/max (log-scaled normalization)
    pmin = 10.0**(log_min)
    pmax = 10.0**(log_max)
    map.plot_settings['norm'] = colors.LogNorm(pmin, pmax)
    # Change the colormap so undefined values don't show up white
    map.plot_settings['cmap'].set_bad(color='black')
    # if saving a file, don't use the interactive backend
    if outfile is not None and not save_interactive:
        matplotlib.use(mpl_backend_non_interactive)
    # setup the figure
    fig = plt.figure(figsize=(10, 9))
    # Manually specify the axis (vs. getting through map.plot) this way you have more control
    axis = WCSAxes(fig, [0.1, 0.025, 0.95, 0.95], wcs=map.wcs)
    fig.add_axes(axis)  # note that the axes have to be explicitly added to the figure
    # plot the image
    map.plot(axes=axis)
    # example for adjusting the tick spacing (see astropy examples for WCSAxes)
    custom_ticks = False
    if custom_ticks:
        spacing = 500.*u.arcsec
        axis.coords[0].set_ticks(spacing=spacing)
        axis.coords[1].set_ticks(spacing=spacing)
    # if plot is NOT a submap, compute the pixel positions and change the matplotlib limits
    if plot_method == 'limits':
        pp_bot_left = map.world_to_pixel(bot_left)
        pp_top_right = map.world_to_pixel(top_right)
        axis.set_xlim(left=pp_bot_left.x.value, right=pp_top_right.x.value)
        axis.set_ylim(bottom=pp_bot_left.y.value, top=pp_top_right.y.value)
    # plot the colorbar
    plt.colorbar()
    # save the plot (optional); afterwards restore the default backend so later
    # plots in the same session behave interactively again
    if outfile is not None:
        print("Saving image plot to: " + outfile)
        fig.savefig(outfile, dpi=dpi)
        # revert to the default MPL backend
        if not save_interactive:
            plt.close()
            matplotlib.use(mpl_backend_default)
        else:
            plt.show()
    else:
        plt.show()
def plot_image_rs_full(map_in, xrange=[-1.4, 1.4], yrange=[-1.4, 1.4], log_min=0.5, log_max=3.5,
                       cmap_name=None, outfile=None, dpi=100, save_interactive=False):
    """
    Quick method to plot a map by specifying the x and y range in SOLAR coordinates.
    - Unlike plot_image_rs, here the image fills the entire frame, with no outside annotations
      like axes labels or colorbars.
    - xrange and yrange are two element lists or tuples that specify the solar coords in Rs
      - e.g. xrange=[-1.3, 1.3], yrange=[-1.3, 1.3]
    - if an output file is specified, it will switch to a non-interactive backend and save the
      file without showing the plot (unless save_interactive=True).
    - cmap_name (optional) is a string that specifies a sunpy or matplotlib colormap
    ToDo:
    - put white annotations in the corners that describe the image (time, inst, clon, b0)
    - overplot some solar grid lines (e.g. the lat=0 line and/or various clons)
    """
    # Work on a deep copy so the caller's map (and its plot_settings) are never modified.
    # (local renamed from `map` to `smap` to avoid shadowing the builtin)
    smap = copy.deepcopy(map_in)
    # observed solar radius in arcsec, used to convert the Rs ranges to arcsec
    rs_obs = smap.rsun_obs
    # get the coordinate positions of the x and y ranges (corners of the window)
    x0 = xrange[0]*rs_obs.value*u.arcsec
    x1 = xrange[1]*rs_obs.value*u.arcsec
    y0 = yrange[0]*rs_obs.value*u.arcsec
    y1 = yrange[1]*rs_obs.value*u.arcsec
    bot_left = SkyCoord(x0, y0, frame=smap.coordinate_frame)
    top_right = SkyCoord(x1, y1, frame=smap.coordinate_frame)
    # experiment with different styles of plotting the x and y window
    # using "limits" lets you plot outside of the image window, which can be important
    # for aligning images.
    plot_method = 'limits'
    if plot_method == 'submap':
        smap = smap.submap(bot_left, top_right)
    # setup the optional colormap
    if cmap_name is not None:
        cmap = plt.get_cmap(cmap_name)
        smap.plot_settings['cmap'] = cmap
    # Set the map plot min/max: log scaling between 10**log_min and 10**log_max
    pmin = 10.0**(log_min)
    pmax = 10.0**(log_max)
    smap.plot_settings['norm'] = colors.LogNorm(pmin, pmax)
    # Change the colormap so undefined values don't show up white
    # NOTE(review): assumes plot_settings['cmap'] is a Colormap object here — confirm
    # for maps whose default plot_settings store a colormap name string.
    smap.plot_settings['cmap'].set_bad(color='black')
    # if saving a file, don't use the interactive backend
    if outfile is not None and not save_interactive:
        matplotlib.use(mpl_backend_non_interactive)
    # setup the figure
    fig = plt.figure(figsize=(9, 9))
    # Manually specify the axis (vs. getting through map.plot); this way you have more
    # control. The axis spans the whole figure so the image fills the frame.
    axis = WCSAxes(fig, [0.0, 0.0, 1.0, 1.0], wcs=smap.wcs)
    fig.add_axes(axis)  # note that the axes have to be explicitly added to the figure
    # plot the image
    smap.plot(axes=axis)
    # adjust the tick spacing (see astropy examples for WCSAxes)
    custom_ticks = True
    if custom_ticks:
        spacing = smap.rsun_obs
        axis.coords[0].set_ticks(spacing=spacing)
        axis.coords[1].set_ticks(spacing=spacing)
    # if plot is NOT a submap, compute the pixel positions and change the matplotlib limits
    if plot_method == 'limits':
        pp_bot_left = smap.world_to_pixel(bot_left)
        pp_top_right = smap.world_to_pixel(top_right)
        axis.set_xlim(left=pp_bot_left.x.value, right=pp_top_right.x.value)
        axis.set_ylim(bottom=pp_bot_left.y.value, top=pp_top_right.y.value)
    # save the plot (optional)
    if outfile is not None:
        print("Saving image plot to: " + outfile)
        fig.savefig(outfile, dpi=dpi)
        # revert to the default MPL backend
        if not save_interactive:
            plt.close()
            matplotlib.use(mpl_backend_default)
        else:
            plt.show()
    else:
        plt.show()
def plot_alignment(map_in, log_min=0.5, log_max=3.5, cmap_name=None, outfile=None,
                   dpi=100, save_interactive=False):
    """
    Quick subroutine to take a sunpy map and plot a view of each limb to assess alignment.
    - Here I was exploring how to use subplots and their compatibility with astropy WCSAxes.
    - In the end the subplots required a lot of fine tuning.
    - A major issue seems to be that with the subplot axis type I lose some of the WCSAxes
      functionality. Could be I don't understand it well enough, but my conclusion is
      AVOID SUBPLOTS — however, I had gotten this far...
    - Also, plotting the lines over the zoomed in images required manually changing the x_lim
      and y_lims with pixel coordinates. This is pretty crappy --> there's got to be a better way.
    - cmap_name (optional) is a string that specifies a sunpy or matplotlib colormap
    """
    # I don't want to accidentally modify the input map --> copy the map object just in case
    map = copy.deepcopy(map_in)
    # info from the map: observed solar radius (arcsec), used to center each limb window
    rs_obs = map.rsun_obs
    # size of the window for the limb plots
    winsize = 40*u.arcsec

    def _set_log_scaling(map):
        """
        Sub function to set the plot scaling (LogNorm between 10**log_min and 10**log_max).
        Mutates the passed map's plot_settings in place; log_min/log_max/cmap_name are
        captured from the enclosing scope.
        """
        # setup the optional colormap
        if cmap_name is not None:
            cmap = plt.get_cmap(cmap_name)
            map.plot_settings['cmap'] = cmap
        # Set the map plot min/max
        pmin = 10.0**(log_min)
        pmax = 10.0**(log_max)
        map.plot_settings['norm'] = colors.LogNorm(pmin, pmax)
        # Change the colormap so undefined values don't show up white
        map.plot_settings['cmap'].set_bad(color='black')

    # Function to plot limb circles for plotting alignment
    def _plot_limb_circles(map, axis):
        """
        Sub function to plot the limb circle over a sub map.
        Draws three circles at 1.00, 1.01 and 1.02 solar radii so any misalignment
        of the limb against the image is visible.
        """
        # construct the lines for plotting
        rs_obs = map.rsun_obs
        npts = 1000
        angles = np.linspace(0.0, np.pi*2, npts)
        x = np.cos(angles)*rs_obs
        y = np.sin(angles)*rs_obs
        c1 = SkyCoord(x*1.0, y*1.0, frame=map.coordinate_frame)
        c2 = SkyCoord(x*1.01, y*1.01, frame=map.coordinate_frame)
        c3 = SkyCoord(x*1.02, y*1.02, frame=map.coordinate_frame)
        lines = [c1, c2, c3]
        # set their styles (solid, magenta, semi-transparent, thin)
        ls = '-'
        lc = 'm'
        la = 0.5
        lw = 1.5
        # turn off axis labels
        axis.set_xlabel('')
        axis.set_ylabel('')
        # plot the lines
        for line in lines:
            axis.plot_coord(line, color=lc, linestyle=ls, alpha=la, linewidth=lw)
        # set the limits of the plot to be the limits of the image (otherwise it will show the whole line).
        # for more fine tuning you could use the map's world_to_pixel method
        axis.set_xlim(left=0, right=map.data.shape[1])
        axis.set_ylim(bottom=0, top=map.data.shape[0])

    # if saving a file, don't use an interactive backend unless you want to
    if outfile is not None and not save_interactive:
        matplotlib.use(mpl_backend_non_interactive)
    # setup the figure
    fig = plt.figure(figsize=(15, 5))
    # set up the subgrid (python orders it y,x ... ugh)
    subgrid = (2, 6)
    # draw the full sun image in the left third of the figure
    _set_log_scaling(map)
    ax1 = plt.subplot2grid(subgrid, (0, 0), colspan=2, rowspan=2, projection=map)
    map.plot()
    plt.colorbar()
    # manually adjust the position to make it look better (plt.tight_layout doesn't play well with WCSAxes)
    frac = 0.80
    location = (0 + (1 - frac)/3.5, 0 + (1 - frac)/2, 1./3.*frac, frac)  # [left, bottom, width, height]
    ax1.set_position(location)
    # draw the north limb: a window 4*winsize wide by 2*winsize tall centered on (0, +Rs)
    bot_left = SkyCoord(-2*winsize, rs_obs - winsize, frame=map.coordinate_frame)
    top_right = SkyCoord(2*winsize, rs_obs + winsize, frame=map.coordinate_frame)
    submap = map.submap(bot_left, top_right)
    _set_log_scaling(submap)
    ax = plt.subplot2grid(subgrid, (0, 2), colspan=2, projection=submap)
    submap.plot()
    ax.set_title('North Limb')
    # nudge/shrink the subplot manually (see comment above about tight_layout)
    offset_x = 0.01
    offset_y = 0.02
    scale_fac = 0.9
    box = ax.get_position()
    pos = (box.x0 + offset_x, box.y0 + offset_y, box.width*scale_fac, box.height*scale_fac)
    ax.set_position(pos)
    _plot_limb_circles(submap, ax)
    # draw the south limb: same window mirrored to (0, -Rs)
    bot_left = SkyCoord(-2*winsize, -rs_obs - winsize, frame=map.coordinate_frame)
    top_right = SkyCoord(2*winsize, -rs_obs + winsize, frame=map.coordinate_frame)
    submap = map.submap(bot_left, top_right)
    _set_log_scaling(submap)
    ax = plt.subplot2grid(subgrid, (1, 2), colspan=2, projection=submap)
    submap.plot()
    ax.set_title('South Limb')
    offset_x = 0.01
    offset_y = -0.02
    scale_fac = 0.9
    box = ax.get_position()
    pos = (box.x0 + offset_x, box.y0 + offset_y, box.width*scale_fac, box.height*scale_fac)
    ax.set_position(pos)
    _plot_limb_circles(submap, ax)
    # draw the east limb: tall window centered on (-Rs, 0)
    bot_left = SkyCoord(-rs_obs - winsize, -2*winsize, frame=map.coordinate_frame)
    top_right = SkyCoord(-rs_obs + winsize, 2*winsize, frame=map.coordinate_frame)
    submap = map.submap(bot_left, top_right)
    _set_log_scaling(submap)
    ax = plt.subplot2grid(subgrid, (0, 4), rowspan=2, projection=submap)
    submap.plot()
    ax.set_title('East Limb')
    offset_x = 0.01
    offset_y = 0.00
    scale_fac = 0.97
    box = ax.get_position()
    pos = (box.x0 + offset_x, box.y0 + offset_y, box.width*scale_fac, box.height*scale_fac)
    ax.set_position(pos)
    _plot_limb_circles(submap, ax)
    # help(ax)
    # NOTE(review): matplotlib's tick_params has no `spacing` kwarg; for WCSAxes the tick
    # spacing is set via ax.coords[i].set_ticks(spacing=...). Confirm this call does not
    # raise — it looks like a leftover experiment.
    ax.tick_params(spacing=40*u.arcsec)
    # draw the west limb: tall window centered on (+Rs, 0)
    bot_left = SkyCoord(rs_obs - winsize, -2*winsize, frame=map.coordinate_frame)
    top_right = SkyCoord(rs_obs + winsize, 2*winsize, frame=map.coordinate_frame)
    submap = map.submap(bot_left, top_right)
    _set_log_scaling(submap)
    ax = plt.subplot2grid(subgrid, (0, 5), rowspan=2, projection=submap)
    submap.plot()
    ax.set_title('West Limb')
    offset_x = 0.08
    offset_y = 0.00
    scale_fac = 0.97
    box = ax.get_position()
    pos = (box.x0 + offset_x, box.y0 + offset_y, box.width*scale_fac, box.height*scale_fac)
    ax.set_position(pos)
    _plot_limb_circles(submap, ax)
    # save the plot (optional)
    if outfile is not None:
        print("Saving limb plot to: " + outfile)
        fig.savefig(outfile, dpi=dpi)
        # revert to the default MPL backend
        if not save_interactive:
            plt.close()
            matplotlib.use(mpl_backend_default)
        else:
            plt.show()
    else:
        plt.show()
|
{"hexsha": "cf8087c810bdfe7149bd892e0b3bcdb54b694589", "size": 14422, "ext": "py", "lang": "Python", "max_stars_repo_path": "chmap/utilities/plotting/euv_fits_plotting.py", "max_stars_repo_name": "predsci/CHD", "max_stars_repo_head_hexsha": "35f29d1b62861f4ffed57b38d18689b282664bcf", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-06-29T00:23:47.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-17T18:29:05.000Z", "max_issues_repo_path": "chmap/utilities/plotting/euv_fits_plotting.py", "max_issues_repo_name": "predsci/CHD", "max_issues_repo_head_hexsha": "35f29d1b62861f4ffed57b38d18689b282664bcf", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chmap/utilities/plotting/euv_fits_plotting.py", "max_forks_repo_name": "predsci/CHD", "max_forks_repo_head_hexsha": "35f29d1b62861f4ffed57b38d18689b282664bcf", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-08T06:26:18.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-08T06:26:18.000Z", "avg_line_length": 37.2661498708, "max_line_length": 107, "alphanum_fraction": 0.6639162391, "include": true, "reason": "import numpy,import astropy,from astropy", "num_tokens": 3884}
|
-- ---------------------------------------------------------------------
-- Ejercicio. Demostrar que el rango de la función exponencial es el
-- conjunto de los números positivos,
-- ----------------------------------------------------------------------
import analysis.special_functions.exp_log
open set real
-- The range of the real exponential equals the set of positive reals.
-- Forward: any value `exp x` is positive (`exp_pos`).
-- Backward: a positive `y` is attained at `log y`, since `exp (log y) = y` (`exp_log`).
example : range exp = { y | y > 0 } :=
begin
  ext y,
  split,
  { rintros ⟨x, rfl⟩,
    apply exp_pos },
  { intro ypos,
    use log y,
    rw exp_log ypos },
end
-- Prueba
-- ======
/-
⊢ range exp = {y : ℝ | y > 0}
>> ext y,
y : ℝ
⊢ y ∈ range exp ↔ y ∈ {y : ℝ | y > 0}
>> split,
| y : ℝ
| ⊢ y ∈ range exp ↔ y ∈ {y : ℝ | y > 0}
| >> { rintros ⟨x, rfl⟩
| x : ℝ
| ⊢ x.exp ∈ {y : ℝ | y > 0},
| >> apply exp_pos },
y : ℝ
⊢ y ∈ {y : ℝ | y > 0} → y ∈ range exp
>> { intro ypos,
ypos : y ∈ {y : ℝ | y > 0}
⊢ y ∈ range exp
>> use log y,
⊢ y.log.exp = y
>> rw exp_log ypos },
⊢ y.log.exp = y
-/
-- Comentario: Se ha usado el lema
-- + exp_log : 0 < x → log (exp x) = x
-- `x` exists only so the commented-out #check below can be re-enabled easily.
variable (x : ℝ)
-- #check @exp_log x
|
{"author": "jaalonso", "repo": "Matematicas_en_Lean", "sha": "c44e23d87665cb4aa00c813c6bfb3c41ebc83aa8", "save_path": "github-repos/lean/jaalonso-Matematicas_en_Lean", "path": "github-repos/lean/jaalonso-Matematicas_en_Lean/Matematicas_en_Lean-c44e23d87665cb4aa00c813c6bfb3c41ebc83aa8/src/Conjuntos/Rango_de_la_exponencial.lean"}
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Otetaan tarvittavat kirjastot mukaan.
import numpy as np
import math
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from flask import Flask
import dash_daq as daq
import os
# Perustetaan sovellus.
server = Flask(__name__)
server.secret_key = os.environ.get('secret_key','secret')
app = dash.Dash(name = __name__, server = server,prevent_initial_callbacks=True)
app.title = 'Solidautoilu'
def kulutus(etäisyys, nopeus, auto, vakionopeus = 1):
    """
    Compute fuel consumption (litres) for a trip.

    Arguments:
        etäisyys: distance driven in km.
        nopeus: speed in km/h.
        auto: car class 'A', 'B' or 'C' (base consumption 3 / 3.5 / 4 l per 100 km
              at the reference speed); an unknown class raises KeyError, as before.
        vakionopeus: reference speed (km/h) at which the base consumption applies,
              default 1 km/h; consumption grows by factor 1.009 per km/h above it.
    """
    # base consumption in litres per km at the reference speed
    kulutusluokka = {'A': 3/100,
                     'B': 3.5/100,
                     'C': 4/100}[auto]
    kerroin = 1.009
    muutos = nopeus - vakionopeus
    # kerroin**0 == 1, so one expression covers faster, slower and equal speeds.
    # (The original three-way dict keyed on np.sign(muutos) had identical branches.)
    return etäisyys * kulutusluokka * kerroin ** muutos
def aika(etäisyys, nopeus):
    """
    Return the driving time for the given distance as a (hours, minutes) tuple.
    """
    kesto = etäisyys / nopeus
    # whole hours, then the leftover fraction converted to whole minutes
    tunnit = int(kesto)
    minuutit = int((kesto - tunnit) * 60)
    return (tunnit, minuutit)
# Perustetaan aplikaation rakenne.
def serve_layout():
    """
    Build and return the Dash application layout.

    Supplied as a function (assigned to app.layout below) so the layout is rebuilt
    on each page load. All user-facing text is intentionally in Finnish.
    Structure: title + intro paragraphs, car/distance inputs, two speed sliders,
    a simulate button, three result Divs filled by callbacks, and footer links.
    """
    return html.Div(children = [
        html.H1('Solidautoilu',style={'width':'88%', 'margin':20, 'textAlign': 'center'}),
        html.H2('Solidabiksen koodaushaasteeseen tehty autoilumittarisovellus',style={'width':'88%', 'margin':20, 'textAlign': 'center'}),
        html.Br(),
        html.P('Kesälomat lähestyvät ja monien katseet kääntyvät kohti kesämökkejä. Osalla nämä löytyvät lähempää, osalla taas matkustukseen kuluu pitkiäkin aikoja. Monesti tien päällä ollessa tuntuu siltä, että jos hieman vielä kiihdyttäisi, olisi perillä merkittävästi nopeammin… vai olisiko sittenkään? Ovatko voitetut minuutit kasvaneiden matkakustannusten arvoisia? Entä kuinka paljon matkustusajoneuvon tyyppi vaikuttaa tähän?'),
        html.Br(),
        html.P('Tämä autoilumittari-sovellus suorittaa vertailu matka-ajan ja polttoaineen kulutuksen välillä kahden eri valitun nopeuden mukaan: käyttäjä ilmoittaa saman matkustettavan etäisyyden samalla kulkuneuvotyypillä eri nopeuksilla ja sovellus laskee miten paljon nopeammin käyttäjä on perillä ja kuinka paljon enemmän polttoainetta tähän kuluu. Sovellus näyttää molemmista annetuista matkanopeuksista käytetyn ajan ja polttoaineen, sekä näiden kahden eron.'),
        html.Br(),
        html.P('Autojen bensankulutus kasvaa 1.009 kulmakertoimella. Eli jos auton nopeus kasvaa 1km/h, niin bensankulutus kasvaa 1.009 kertaiseksi. Eri autojen bensakulutus 1km/h nopeudella on ilmotettu autojen valintojen vieressä.'),
        html.Br(),
        html.A('Tämä tehtävä on myös vastaus Solidabiksen kesän 2021 koodihaasteeseen.', href='https://koodihaaste.solidabis.com/#/'),
        html.Br(),
        # Row 1: car-type radio buttons (values feed kulutus()) and distance input.
        html.Div(className = 'row',
            children = [
                html.Div(className = 'six columns',children = [
                    html.H2('1. Valitse autosi tyyppi.'),
                    html.P('Alla on lueteltu bensakulutus 1km/h nopeudella.'),
                    dcc.RadioItems(id = 'autot',
                        options = [
                            {'label':'Auto A: 3l / 100km', 'value': 'A'},
                            {'label':'Auto B: 3.5l / 100km', 'value': 'B'},
                            {'label':'Auto C: 4l / 100km', 'value': 'C'}
                        ],
                        value = 'A',
                        labelStyle={'display': 'inline-block'}
                    )
                ]
                ),
                html.Div(className = 'six columns',
                    children = [
                        html.H2('2. Ilmoita ajamasi matka kilometreissä.'),
                        dcc.Input(id = 'matka', type = 'number', min = 1, value = 60, inputMode = 'numeric')
                    ]
                )
            ]
        ),
        # Row 2: the two speed-scenario sliders (10-200 km/h) with live value echoes.
        html.Div(className = 'row',
            children = [
                html.Div(className = 'six columns',children = [
                    html.H2('3. Valitse ensimmäinen nopeusskenaario.'),
                    dcc.Slider(id = 'nopeus1',
                        min = 10,
                        max = 200,
                        step = 1,
                        value = 80,
                        marks = {10:'10 km/h',
                                 30:'30 km/h',
                                 50:'50 km/h',
                                 80:'80 km/h',
                                 100:'100 km/h',
                                 200: '200 km/h'}
                    ),
                    html.Div(id = 'nopeus1_output')
                ]
                ),
                html.Div(className = 'six columns',children = [
                    html.H2('4. Valitse toinen nopeusskenaario.'),
                    dcc.Slider(id = 'nopeus2',
                        min = 10,
                        max = 200,
                        step = 1,
                        value = 100,
                        marks = {10:'10 km/h',
                                 30:'30 km/h',
                                 50:'50 km/h',
                                 80:'80 km/h',
                                 100:'100 km/h',
                                 200: '200 km/h'}
                    ),
                    html.Div(id = 'nopeus2_output')
                ]
                )
            ]
        ),
        # Simulate button: clicking it triggers all the result callbacks below.
        html.Div(
            style={'width':'88%', 'margin':20, 'textAlign': 'center'},
            children=[
                html.Button( '5. Suorita simulaatio', id = 'sim',n_clicks=0)
            ]),
        html.Div(id = 'valittu_matka',
            style={'width':'88%', 'margin':20, 'textAlign': 'center'},
        ),
        # Side-by-side result panes for scenario A and scenario B.
        html.Div(className = 'row', children=[
            html.Div(className = 'six columns', children = [
                html.Div(id = 'speed_1_result', style={'width':'88%', 'margin':20, 'textAlign': 'right'})
            ]),
            html.Div(className = 'six columns', children = [
                html.Div(id = 'speed_2_result', style={'width':'88%', 'margin':20, 'textAlign': 'left'})
            ])
        ]
        ),
        # Comparison of the two scenarios, filled by update_difference().
        html.Div(id = 'difference', className = 'row', style={'width':'88%', 'margin':20, 'textAlign': 'center'}),
        html.Br(),
        html.Label(['Tehnyt: Tuomas Poukkula.']),
        html.A('Katso projekti Githubissa', href='https://github.com/tuopouk/autoilumittari'),
        html.Br(),
        html.A('Seuraa Twitterissä', href='https://twitter.com/TuomasPoukkula'),
        html.Br(),
        html.A(' ja LinkedInissä.', href='https://www.linkedin.com/in/tuomaspoukkula/')
    ]
    )
# Echo the first speed-scenario slider value back to the user.
@app.callback(
    dash.dependencies.Output('nopeus1_output', 'children'),
    [dash.dependencies.Input('nopeus1', 'value')])
def update_output(value):
    return 'Valitsit {} km/h.'.format(value)
# Echo the second speed-scenario slider value back to the user.
# NOTE(review): this redefines `update_output`. Dash registers each function at
# decoration time so both callbacks still work, but distinct names would be clearer.
@app.callback(
    dash.dependencies.Output('nopeus2_output', 'children'),
    [dash.dependencies.Input('nopeus2', 'value')])
def update_output(value):
    return 'Valitsit {} km/h.'.format(value)
# Show the distance that was used for the simulation (refreshed on button click).
@app.callback(
    Output('valittu_matka','children'),
    [Input('sim', 'n_clicks')],
    [
        State('matka','value')
    ]
)
def update_selected_distance(n_click, matka ):
    # n_click is only the trigger; the displayed value comes from the 'matka' input State.
    return html.Div(children = [
        html.P('Ajettava matka: {} km.'.format(matka))
    ])
# Compute and render the result of the first speed scenario on button click.
@app.callback(
    Output('speed_1_result','children'),
    [Input('sim', 'n_clicks')],
    [State('autot','value'),
     State('matka','value'),
     State('nopeus1','value')
    ]
)
def update_speed_1_result(n_click, auto, matka, nopeus):
    # total litres consumed and (hours, minutes) travel time for scenario A
    consumption = kulutus(matka, nopeus, auto)
    tunnit, minuutit = aika(matka, nopeus)
    return html.Div(children = [
        html.H3('Skenaario A:'),
        html.P('Bensaa kuluisi noin {} litraa.'.format(round(consumption,2))),
        html.P('Keskikulutus olisi noin {} litraa / 100 km.'.format(round(100*consumption/matka,2))),
        # the replace() chain strips "0 hours"/"0 minutes" fragments from the Finnish sentence
        html.P('Aikaa kuluisi noin {} tuntia ja {} minuuttia.'.format(int(tunnit), int(minuutit)).replace('0 tuntia ja ', '').replace('ja 0 minuuttia', '').replace('noin 1 tuntia ', 'noin yksi tunti ').replace(' .','.'))
    ])
# Compute and render the result of the second speed scenario on button click.
# (Mirrors update_speed_1_result but reads the 'nopeus2' slider.)
@app.callback(
    Output('speed_2_result','children'),
    [Input('sim', 'n_clicks')],
    [State('autot','value'),
     State('matka','value'),
     State('nopeus2','value')
    ]
)
def update_speed_2_result(n_click, auto, matka, nopeus):
    # total litres consumed and (hours, minutes) travel time for scenario B
    consumption = kulutus(matka, nopeus, auto)
    tunnit, minuutit = aika(matka, nopeus)
    return html.Div(children = [
        html.H3('Skenaario B:'),
        html.P('Bensaa kuluisi noin {} litraa.'.format(round(consumption,2))),
        html.P('Keskikulutus olisi noin {} litraa / 100 km.'.format(round(100*consumption/matka,2))),
        # the replace() chain strips "0 hours"/"0 minutes" fragments from the Finnish sentence
        html.P('Aikaa kuluisi noin {} tuntia ja {} minuuttia.'.format(int(tunnit), int(minuutit)).replace('0 tuntia ja ', '').replace('ja 0 minuuttia', '').replace('noin 1 tuntia ', 'noin yksi tunti ').replace(' .','.'))
    ])
# Compare the two scenarios: fuel difference, average-consumption difference and
# time difference, each phrased according to which scenario wins.
@app.callback(
    Output('difference','children'),
    [Input('sim', 'n_clicks')],
    [State('autot','value'),
     State('matka','value'),
     State('nopeus1','value'),
     State('nopeus2','value')
    ]
)
def update_difference(n_click, auto, matka, nopeus1, nopeus2):
    # recompute both scenarios from the same States the per-scenario callbacks use
    consumption1 = kulutus(matka, nopeus1, auto)
    tunnit1, minuutit1 = aika(matka, nopeus1)
    keskikulutus1 = 100*consumption1/matka
    consumption2 = kulutus(matka, nopeus2, auto)
    tunnit2, minuutit2 = aika(matka, nopeus2)
    keskikulutus2 = 100*consumption2/matka
    # positive difference => scenario A used more / took longer
    kulutus_ero = consumption1 - consumption2
    keskikulutus_ero = keskikulutus1 - keskikulutus2
    aika_ero = (tunnit1 + minuutit1/60) - (tunnit2 + minuutit2/60)
    # math.modf returns (fractional, integral); abs() below handles negative differences
    ero_min, ero_tunnit = math.modf(aika_ero)
    # Each html.P picks one of three phrasings keyed on the sign of the difference.
    # (np.sign returns a float; float keys hash equal to the int keys -1/0/1.)
    return html.Div(children = [
        html.H3('Vertailu:'),
        html.P({-1:'Skenaariossa A bensaa kuluisi noin {} litraa vähemmän.'.format(round(-kulutus_ero, 2)),
                0: 'Molemmissa skenaarioissa bensaa kuluisi suunnilleen yhtä paljon.',
                1:'Skenaariossa B bensaa kuluisi noin {} litraa vähemmän.'.format(round(kulutus_ero, 2))}[np.sign(kulutus_ero)]),
        html.P({-1:'Skenaariossa A keskikulutus 100 kilometriä kohden olisi noin {} litraa vähemmän.'.format(round(-keskikulutus_ero, 2)),
                0: 'Molemmissa skenaarioissa keskikulutus 100 kilometriä kohde olisi suunnilleen yhtä suuri.',
                1:'Skenaariossa B keskikulutus 100 kilometriä kohden olisi noin {} litraa vähemmän.'.format(round(keskikulutus_ero, 2))}[np.sign(keskikulutus_ero)]),
        # NOTE(review): abs(-ero_min) == abs(ero_min); the negation in the -1 branch is redundant.
        html.P({-1:'Skenaario A olisi noin {} tuntia ja {} minuuttia nopeampi.'.format(int(abs(ero_tunnit)), int(abs(-ero_min)*60)),
                0: 'Molemmat skenaariot olisivat suunnilleen yhtä nopeita.',
                1:'Skenaario B olisi noin {} tuntia ja {} minuuttia nopeampi.'.format(int(abs(ero_tunnit)), int(abs(ero_min)*60))}[np.sign(aika_ero)].replace('0 tuntia ja ','').replace('ja 0 minuuttia ', ''))
    ])
# Allow callbacks that reference layout ids not present at startup
# (the layout is served dynamically by serve_layout below).
app.config['suppress_callback_exceptions']=True
# Assign the function itself (not its result) so the layout is rebuilt per page load.
app.layout = serve_layout
# Run the development server when executed directly.
if __name__ == '__main__':
    app.run_server(debug=False)
|
{"hexsha": "c33ae25a0a610ec03ee5d73ce1f4325ee410b6bc", "size": 15119, "ext": "py", "lang": "Python", "max_stars_repo_path": "app.py", "max_stars_repo_name": "tuopouk/solidautoilu", "max_stars_repo_head_hexsha": "c8eba706e6ea04c2958c728d14cc88bd0d47582a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "app.py", "max_issues_repo_name": "tuopouk/solidautoilu", "max_issues_repo_head_hexsha": "c8eba706e6ea04c2958c728d14cc88bd0d47582a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "app.py", "max_forks_repo_name": "tuopouk/solidautoilu", "max_forks_repo_head_hexsha": "c8eba706e6ea04c2958c728d14cc88bd0d47582a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.9970238095, "max_line_length": 484, "alphanum_fraction": 0.4493022025, "include": true, "reason": "import numpy", "num_tokens": 3579}
|
import numpy as np
from pyscf import lo
from pyscf.tools import molden, cubegen
from functools import reduce
from math import sqrt
from pydmfet import tools
from pydmfet.qcwrap.pyscf_rks_ao import rks_ao
class proj_embed:
    """
    Projection-based (level-shift) embedding driver.

    Workflow: localize the occupied orbitals of a converged full-system
    mean-field calculation (make_frozen_orbs), classify them as fragment or
    environment by their Mulliken population on the environment atoms, then
    run a fragment KS calculation in which the frozen environment density is
    projected out with a large level shift `mu` (embedding_potential).
    """
    def __init__(self, mf_full, cluster, Ne_env = None, loc_method = 'PM', pop_method = 'meta_lowdin', pm_exponent=2, mu = 1e5):
        # mf_full: converged mean-field object for the full system (provides mol, xc, rdm1)
        self.mf_full = mf_full
        self.mol = mf_full.mol
        self.xc_func = self.mf_full.xc
        # cluster: per-atom mask (1 = fragment atom); env is its complement
        self.cluster = cluster
        self.env = 1-cluster
        # Ne_env: number of environment electrons; when None, the frozen-orbital
        # count is inferred from localized-orbital populations (see embedding_potential)
        self.Ne_env = Ne_env
        self.loc_method = loc_method        # 'PM' (Pipek-Mezey) or 'BOYS'
        self.pop_method = pop_method        # population scheme passed to PM localizer
        self.pm_exponent = pm_exponent      # PM localization exponent
        self.mu = mu                        # level-shift magnitude for the projector
        # inherit Fermi smearing width from mf_full when it defines one
        self.smear_sigma = 0.0
        smear_sigma = getattr(self.mf_full, "smear_sigma", None)
        if(smear_sigma is not None):
            self.smear_sigma = self.mf_full.smear_sigma
        self.mo_lo = None                   # localized occupied MO coefficients
        self.pop_lo = None                  # env-atom Mulliken population per localized MO
        self.P_env = None                   # frozen environment density matrix
        self.P_ref = self.mf_full.make_rdm1()  # reference full-system density matrix
    def make_frozen_orbs(self, norb = None):
        """
        Localize the `norb` lowest occupied orbitals (default: all doubly occupied)
        and sort them by descending Mulliken population on the environment atoms.

        Returns (mo_lo, pop): localized MO coefficients and the sorted populations
        (1.0 means fully localized on the environment). Also writes 'mo_lo.molden'
        and caches results in self.mo_lo / self.pop_lo.
        """
        mf = self.mf_full
        mol = mf.mol
        loc_method = self.loc_method
        pop_method = self.pop_method
        pm_exponent = self.pm_exponent
        if(norb is None):
            # closed-shell assumption: number of doubly occupied orbitals
            norb = mol.nelectron // 2
        mo_lo = None
        if(loc_method.upper() == 'PM'):
            pm = lo.pipek.PM(mol)
            pm.pop_method = pop_method
            pm.exponent = pm_exponent
            mo_lo = pm.kernel(mf.mo_coeff[:,:norb], verbose=4)
        elif(loc_method.upper() == 'BOYS'):
            boys = lo.boys.Boys(mol)
            mo_lo = boys.kernel(mf.mo_coeff[:,:norb], verbose=4)
        else:
            raise NotImplementedError('loc_method %s' % loc_method)
        # AO overlap matrix, needed for Mulliken populations
        s = mol.intor_symmetric('int1e_ovlp')
        nbas = mol.nao_nr()
        # density matrix of each localized orbital
        dm_lo = np.empty((norb,nbas,nbas))
        for i in range(norb):
            dm_lo[i] = np.outer(mo_lo[:,i],mo_lo[:,i])
        # Mulliken population of each localized orbital on environment atoms only
        pop = np.zeros((norb))
        for i in range(norb):
            for iatom, (b0, b1, p0, p1) in enumerate(mol.offset_nr_by_atom()):
                if(self.env[iatom] == 1):
                    pop[i] += np.trace(np.dot(dm_lo[i,p0:p1,:],s[:,p0:p1]))
        #tools.VecPrint(pop,"Mulliken popupaltion")
        # sort orbitals so the most environment-like come first
        ind = np.argsort(-pop)
        pop = pop[ind]
        mo_lo[:,:norb] = mo_lo[:,ind]
        # NOTE(review): "popupaltion" in the printed label is a typo for "population"
        # (runtime string, left unchanged here).
        tools.VecPrint(pop,"sorted Mulliken popupaltion: 1.0 for fully occupied")
        with open( 'mo_lo.molden', 'w' ) as thefile:
            molden.header(mol, thefile)
            molden.orbital_coeff(mol, thefile, mo_lo)
        self.mo_lo = mo_lo
        self.pop_lo = pop
        return (mo_lo,pop)
    def embedding_potential(self):
        """
        Build the frozen environment density and run the projected fragment
        KS calculation.

        Requires make_frozen_orbs() to have been called (uses self.mo_lo /
        self.pop_lo). Environment orbitals are those with population > 0.4,
        or Ne_env//2 of them when Ne_env was given. Writes density-error and
        fragment/environment density cube files; raises RuntimeError when no
        orbital qualifies as frozen. Returns None.
        """
        mo_lo = self.mo_lo
        pop = self.pop_lo
        mol = self.mf_full.mol
        s = mol.intor_symmetric('int1e_ovlp')
        norb_frozen = 0
        Ne_env = self.Ne_env
        if Ne_env is None:
            # heuristic threshold: >0.4 of the orbital lives on environment atoms
            is_env = pop > 0.4
            norb_frozen = np.sum(is_env)
        elif Ne_env > 1:
            norb_frozen = Ne_env//2
        print("{n:2d} enviroment orbitals kept frozen.".format(n=norb_frozen) )
        if(norb_frozen>0):
            # closed-shell density of the frozen orbitals (factor 2 via P + P^T)
            self.P_env = np.dot(mo_lo[:,:norb_frozen], mo_lo[:,:norb_frozen].T)
            self.P_env = self.P_env + self.P_env.T
        else:
            raise RuntimeError("There is no frozen orbital!")
        # level-shift projector: 0.5*mu * S P_env S pushes environment states up in energy
        proj_op = 0.5*self.mu * reduce(np.dot, (s,self.P_env,s))
        proj_ks = rks_ao(mol, xc_func=self.xc_func, coredm=self.P_env, vext_1e=proj_op, smear_sigma=self.smear_sigma)
        # initial guess: reference density minus the frozen environment part
        proj_ks.kernel(dm0 = self.P_ref-self.P_env)
        P_frag = proj_ks.make_rdm1()
        # diagnostic: how much fragment density leaked into the shifted space
        print ("level shift energy:" , np.trace(np.dot(P_frag, proj_op)) )
        # diagnostic: deviation of fragment+environment density from the reference
        P_diff = P_frag + self.P_env - self.P_ref
        print ('|P_frag + P_bath - P_ref| / N = ', np.linalg.norm(P_diff)/P_diff.shape[0] )
        print ('max(P_frag + P_bath - P_ref) = ', np.amax(np.absolute(P_diff)))
        cubegen.density(mol, "dens_error.cube", P_diff, nx=100, ny=100, nz=100)
        cubegen.density(mol, "dens_frag.cube", P_frag, nx=100, ny=100, nz=100)
        cubegen.density(mol, "dens_env.cube", self.P_env, nx=100, ny=100, nz=100)
        return None
    def correction_energy(self):
        # Placeholder: energy correction not implemented yet.
        return None
|
{"hexsha": "227252aea738b3bfb593a9a8776dddc62224d8c2", "size": 4259, "ext": "py", "lang": "Python", "max_stars_repo_path": "pydmfet/proj_ao/proj_embed.py", "max_stars_repo_name": "fishjojo/pydmfe", "max_stars_repo_head_hexsha": "93cfc655314933d3531b5733521a1f95a044f6cb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-02-26T06:26:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-20T08:58:20.000Z", "max_issues_repo_path": "pydmfet/proj_ao/proj_embed.py", "max_issues_repo_name": "fishjojo/pydmfet", "max_issues_repo_head_hexsha": "93cfc655314933d3531b5733521a1f95a044f6cb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pydmfet/proj_ao/proj_embed.py", "max_forks_repo_name": "fishjojo/pydmfet", "max_forks_repo_head_hexsha": "93cfc655314933d3531b5733521a1f95a044f6cb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.7835820896, "max_line_length": 128, "alphanum_fraction": 0.5848790796, "include": true, "reason": "import numpy", "num_tokens": 1233}
|
/*
* Copyright (c) 2017 Lech Kulina
*
* This file is part of the Realms Of Steel.
* For conditions of distribution and use, see copyright details in the LICENSE file.
*/
#include <boost/bind.hpp>
#include <boost/range.hpp>
#include <Application/Logger.h>
#include "OpenGLErrors.h"
#include "OpenGLIndexBuffer.h"
// Lookup table pairing engine-level index buffer usage flags with their GL enums.
static const struct IndexBufferUsageGLenumMapping {
    ros::IndexBufferUsage first;
    GLenum second;
} indexBufferUsageGLenumMappings[] = {
    {ros::IndexBufferUsage_StreamDraw, GL_STREAM_DRAW},
    {ros::IndexBufferUsage_StreamRead, GL_STREAM_READ},
    {ros::IndexBufferUsage_StreamCopy, GL_STREAM_COPY},
    {ros::IndexBufferUsage_StaticDraw, GL_STATIC_DRAW},
    {ros::IndexBufferUsage_StaticRead, GL_STATIC_READ},
    {ros::IndexBufferUsage_StaticCopy, GL_STATIC_COPY},
    {ros::IndexBufferUsage_DynamicDraw, GL_DYNAMIC_DRAW},
    {ros::IndexBufferUsage_DynamicRead, GL_DYNAMIC_READ},
    {ros::IndexBufferUsage_DynamicCopy, GL_DYNAMIC_COPY}
};

// Translate a usage flag into the matching OpenGL enum via a linear table scan.
// Returns an empty optional when the flag has no mapping.
static ros::GLenumOpt IndexBufferUsage_toGLenum(ros::IndexBufferUsage usage) {
    for (const IndexBufferUsageGLenumMapping* entry = boost::begin(indexBufferUsageGLenumMappings);
         entry != boost::end(indexBufferUsageGLenumMappings); ++entry) {
        if (entry->first == usage) {
            return entry->second;
        }
    }
    return ros::GLenumOpt();
}
// Lookup table pairing engine-level buffer access modes with their GL enums.
static const struct IndexBufferAccessGLenumMapping {
    ros::IndexBufferAccess first;
    GLenum second;
} indexBufferAccessGLenumMappings[] = {
    {ros::IndexBufferAccess_Read, GL_READ_ONLY},
    {ros::IndexBufferAccess_Write, GL_WRITE_ONLY},
    {ros::IndexBufferAccess_ReadWrite, GL_READ_WRITE}
};

// Translate an access mode into the matching OpenGL enum via a linear table scan.
// Returns an empty optional when the mode has no mapping.
static ros::GLenumOpt IndexBufferAccess_toGLenum(ros::IndexBufferAccess access) {
    for (const IndexBufferAccessGLenumMapping* entry = boost::begin(indexBufferAccessGLenumMappings);
         entry != boost::end(indexBufferAccessGLenumMappings); ++entry) {
        if (entry->first == access) {
            return entry->second;
        }
    }
    return ros::GLenumOpt();
}
ros::OpenGLIndexBuffer::OpenGLIndexBuffer()
: handle(0)
, indices(0) {
}
ros::OpenGLIndexBuffer::~OpenGLIndexBuffer() {
free();
}
bool ros::OpenGLIndexBuffer::create() {
free();
glGenBuffers(1, &handle);
return !OpenGL_checkForErrors();
}
bool ros::OpenGLIndexBuffer::bind() {
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, handle);
return !OpenGL_checkForErrors();
}
void ros::OpenGLIndexBuffer::unbind() {
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
}
bool ros::OpenGLIndexBuffer::allocate(U32 indices, const U32* data /*= ROS_NULL */, IndexBufferUsage usage /*= IndexBufferUsage_StaticDraw*/) {
GLenumOpt glUsage = IndexBufferUsage_toGLenum(usage);
if (!glUsage) {
ROS_ERROR(boost::format("Unknown index buffer usage %d") % usage);
return false;
}
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices * sizeof(U32), data, *glUsage);
if (OpenGL_checkForErrors()) {
return false;
}
this->indices = indices;
this->usage = usage;
return true;
}
void ros::OpenGLIndexBuffer::free() {
if (handle) {
glDeleteBuffers(1, &handle);
handle = 0;
}
indices = 0;
usage.reset();
}
bool ros::OpenGLIndexBuffer::isAllocated() const {
return glIsBuffer(handle);
}
ros::U32* ros::OpenGLIndexBuffer::map(IndexBufferAccess access /*= IndexBufferAccess_Write*/) {
GLenumOpt glAccess = IndexBufferAccess_toGLenum(access);
if (!glAccess) {
ROS_ERROR(boost::format("Unknown index buffer access %d") % access);
return ROS_NULL;
}
U32* data = (U32*)glMapBuffer(GL_ELEMENT_ARRAY_BUFFER, *glAccess);
if (!data || OpenGL_checkForErrors()) {
return ROS_NULL;
}
return data;
}
// Release the client-memory mapping of the element-array buffer.
void ros::OpenGLIndexBuffer::unmap() { glUnmapBuffer(GL_ELEMENT_ARRAY_BUFFER); }
|
{"hexsha": "b157dcfd0c5c3bc562ac0664ae2f811fca83908a", "size": 3980, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "engine/source/graphics/OpenGLIndexBuffer.cpp", "max_stars_repo_name": "lechkulina/RealmsOfSteel", "max_stars_repo_head_hexsha": "adeb53295abfa236a273c2641f3f9f4d4c6110e1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "engine/source/graphics/OpenGLIndexBuffer.cpp", "max_issues_repo_name": "lechkulina/RealmsOfSteel", "max_issues_repo_head_hexsha": "adeb53295abfa236a273c2641f3f9f4d4c6110e1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "engine/source/graphics/OpenGLIndexBuffer.cpp", "max_forks_repo_name": "lechkulina/RealmsOfSteel", "max_forks_repo_head_hexsha": "adeb53295abfa236a273c2641f3f9f4d4c6110e1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.5873015873, "max_line_length": 143, "alphanum_fraction": 0.7140703518, "num_tokens": 971}
|
# Upbit Open API
#
# ## REST API for Upbit Exchange - Base URL: [https://api.upbit.com] - Official Upbit API Documents: [https://docs.upbit.com] - Official Support email: [open-api@upbit.com]
#
# OpenAPI spec version: 1.0.0
# Contact: ujhin942@gmail.com
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' CandleDays Class
#'
#' @field market
#' @field candle_date_time_utc
#' @field candle_date_time_kst
#' @field opening_price
#' @field high_price
#' @field low_price
#' @field trade_price
#' @field timestamp
#' @field candle_acc_trade_price
#' @field candle_acc_trade_volume
#' @field prev_closing_price
#' @field change_price
#' @field change_rate
#' @field converted_trade_price
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
CandleDays <- R6::R6Class(
  'CandleDays',
  public = list(
    `market` = NULL,
    `candle_date_time_utc` = NULL,
    `candle_date_time_kst` = NULL,
    `opening_price` = NULL,
    `high_price` = NULL,
    `low_price` = NULL,
    `trade_price` = NULL,
    `timestamp` = NULL,
    `candle_acc_trade_price` = NULL,
    `candle_acc_trade_volume` = NULL,
    `prev_closing_price` = NULL,
    `change_price` = NULL,
    `change_rate` = NULL,
    `converted_trade_price` = NULL,
    # Validate and store each supplied field: market and the two candle
    # date-time fields must be length-1 character; price/volume/rate fields
    # must be length-1 numeric. `timestamp` is stored unchecked (as generated).
    initialize = function(`market`, `candle_date_time_utc`, `candle_date_time_kst`, `opening_price`, `high_price`, `low_price`, `trade_price`, `timestamp`, `candle_acc_trade_price`, `candle_acc_trade_volume`, `prev_closing_price`, `change_price`, `change_rate`, `converted_trade_price`){
      if (!missing(`market`)) {
        stopifnot(is.character(`market`), length(`market`) == 1)
        self$`market` <- `market`
      }
      if (!missing(`candle_date_time_utc`)) {
        stopifnot(is.character(`candle_date_time_utc`), length(`candle_date_time_utc`) == 1)
        self$`candle_date_time_utc` <- `candle_date_time_utc`
      }
      if (!missing(`candle_date_time_kst`)) {
        stopifnot(is.character(`candle_date_time_kst`), length(`candle_date_time_kst`) == 1)
        self$`candle_date_time_kst` <- `candle_date_time_kst`
      }
      if (!missing(`opening_price`)) {
        stopifnot(is.numeric(`opening_price`), length(`opening_price`) == 1)
        self$`opening_price` <- `opening_price`
      }
      if (!missing(`high_price`)) {
        stopifnot(is.numeric(`high_price`), length(`high_price`) == 1)
        self$`high_price` <- `high_price`
      }
      if (!missing(`low_price`)) {
        stopifnot(is.numeric(`low_price`), length(`low_price`) == 1)
        self$`low_price` <- `low_price`
      }
      if (!missing(`trade_price`)) {
        stopifnot(is.numeric(`trade_price`), length(`trade_price`) == 1)
        self$`trade_price` <- `trade_price`
      }
      if (!missing(`timestamp`)) {
        self$`timestamp` <- `timestamp`
      }
      if (!missing(`candle_acc_trade_price`)) {
        stopifnot(is.numeric(`candle_acc_trade_price`), length(`candle_acc_trade_price`) == 1)
        self$`candle_acc_trade_price` <- `candle_acc_trade_price`
      }
      if (!missing(`candle_acc_trade_volume`)) {
        stopifnot(is.numeric(`candle_acc_trade_volume`), length(`candle_acc_trade_volume`) == 1)
        self$`candle_acc_trade_volume` <- `candle_acc_trade_volume`
      }
      if (!missing(`prev_closing_price`)) {
        stopifnot(is.numeric(`prev_closing_price`), length(`prev_closing_price`) == 1)
        self$`prev_closing_price` <- `prev_closing_price`
      }
      if (!missing(`change_price`)) {
        stopifnot(is.numeric(`change_price`), length(`change_price`) == 1)
        self$`change_price` <- `change_price`
      }
      if (!missing(`change_rate`)) {
        stopifnot(is.numeric(`change_rate`), length(`change_rate`) == 1)
        self$`change_rate` <- `change_rate`
      }
      if (!missing(`converted_trade_price`)) {
        stopifnot(is.numeric(`converted_trade_price`), length(`converted_trade_price`) == 1)
        self$`converted_trade_price` <- `converted_trade_price`
      }
    },
    # Return a named list of all non-NULL fields (suitable for jsonlite::toJSON).
    toJSON = function() {
      CandleDaysObject <- list()
      if (!is.null(self$`market`)) {
        CandleDaysObject[['market']] <- self$`market`
      }
      if (!is.null(self$`candle_date_time_utc`)) {
        CandleDaysObject[['candle_date_time_utc']] <- self$`candle_date_time_utc`
      }
      if (!is.null(self$`candle_date_time_kst`)) {
        CandleDaysObject[['candle_date_time_kst']] <- self$`candle_date_time_kst`
      }
      if (!is.null(self$`opening_price`)) {
        CandleDaysObject[['opening_price']] <- self$`opening_price`
      }
      if (!is.null(self$`high_price`)) {
        CandleDaysObject[['high_price']] <- self$`high_price`
      }
      if (!is.null(self$`low_price`)) {
        CandleDaysObject[['low_price']] <- self$`low_price`
      }
      if (!is.null(self$`trade_price`)) {
        CandleDaysObject[['trade_price']] <- self$`trade_price`
      }
      if (!is.null(self$`timestamp`)) {
        CandleDaysObject[['timestamp']] <- self$`timestamp`
      }
      if (!is.null(self$`candle_acc_trade_price`)) {
        CandleDaysObject[['candle_acc_trade_price']] <- self$`candle_acc_trade_price`
      }
      if (!is.null(self$`candle_acc_trade_volume`)) {
        CandleDaysObject[['candle_acc_trade_volume']] <- self$`candle_acc_trade_volume`
      }
      if (!is.null(self$`prev_closing_price`)) {
        CandleDaysObject[['prev_closing_price']] <- self$`prev_closing_price`
      }
      if (!is.null(self$`change_price`)) {
        CandleDaysObject[['change_price']] <- self$`change_price`
      }
      if (!is.null(self$`change_rate`)) {
        CandleDaysObject[['change_rate']] <- self$`change_rate`
      }
      if (!is.null(self$`converted_trade_price`)) {
        CandleDaysObject[['converted_trade_price']] <- self$`converted_trade_price`
      }
      CandleDaysObject
    },
    # Populate fields from a JSON string; only keys present in the JSON are set.
    fromJSON = function(CandleDaysJson) {
      CandleDaysObject <- jsonlite::fromJSON(CandleDaysJson)
      if (!is.null(CandleDaysObject$`market`)) {
        self$`market` <- CandleDaysObject$`market`
      }
      if (!is.null(CandleDaysObject$`candle_date_time_utc`)) {
        self$`candle_date_time_utc` <- CandleDaysObject$`candle_date_time_utc`
      }
      if (!is.null(CandleDaysObject$`candle_date_time_kst`)) {
        self$`candle_date_time_kst` <- CandleDaysObject$`candle_date_time_kst`
      }
      if (!is.null(CandleDaysObject$`opening_price`)) {
        self$`opening_price` <- CandleDaysObject$`opening_price`
      }
      if (!is.null(CandleDaysObject$`high_price`)) {
        self$`high_price` <- CandleDaysObject$`high_price`
      }
      if (!is.null(CandleDaysObject$`low_price`)) {
        self$`low_price` <- CandleDaysObject$`low_price`
      }
      if (!is.null(CandleDaysObject$`trade_price`)) {
        self$`trade_price` <- CandleDaysObject$`trade_price`
      }
      if (!is.null(CandleDaysObject$`timestamp`)) {
        self$`timestamp` <- CandleDaysObject$`timestamp`
      }
      if (!is.null(CandleDaysObject$`candle_acc_trade_price`)) {
        self$`candle_acc_trade_price` <- CandleDaysObject$`candle_acc_trade_price`
      }
      if (!is.null(CandleDaysObject$`candle_acc_trade_volume`)) {
        self$`candle_acc_trade_volume` <- CandleDaysObject$`candle_acc_trade_volume`
      }
      if (!is.null(CandleDaysObject$`prev_closing_price`)) {
        self$`prev_closing_price` <- CandleDaysObject$`prev_closing_price`
      }
      if (!is.null(CandleDaysObject$`change_price`)) {
        self$`change_price` <- CandleDaysObject$`change_price`
      }
      if (!is.null(CandleDaysObject$`change_rate`)) {
        self$`change_rate` <- CandleDaysObject$`change_rate`
      }
      if (!is.null(CandleDaysObject$`converted_trade_price`)) {
        self$`converted_trade_price` <- CandleDaysObject$`converted_trade_price`
      }
    },
    # Serialize to a JSON string.
    # BUG FIX: the generated template used '%d' for the numeric fields, which
    # sprintf() in R rejects for doubles ("invalid format '%d'"), and left the
    # character fields unquoted, producing invalid JSON. '%s' is used for
    # numbers (R coerces via as.character) and string fields are now quoted.
    toJSONString = function() {
      sprintf(
      '{
        "market": "%s",
        "candle_date_time_utc": "%s",
        "candle_date_time_kst": "%s",
        "opening_price": %s,
        "high_price": %s,
        "low_price": %s,
        "trade_price": %s,
        "timestamp": %s,
        "candle_acc_trade_price": %s,
        "candle_acc_trade_volume": %s,
        "prev_closing_price": %s,
        "change_price": %s,
        "change_rate": %s,
        "converted_trade_price": %s
      }',
        self$`market`,
        self$`candle_date_time_utc`,
        self$`candle_date_time_kst`,
        self$`opening_price`,
        self$`high_price`,
        self$`low_price`,
        self$`trade_price`,
        self$`timestamp`,
        self$`candle_acc_trade_price`,
        self$`candle_acc_trade_volume`,
        self$`prev_closing_price`,
        self$`change_price`,
        self$`change_rate`,
        self$`converted_trade_price`
      )
    },
    # Populate all fields from a JSON string unconditionally (missing keys
    # yield NULL assignments, as in the generated original).
    fromJSONString = function(CandleDaysJson) {
      CandleDaysObject <- jsonlite::fromJSON(CandleDaysJson)
      self$`market` <- CandleDaysObject$`market`
      self$`candle_date_time_utc` <- CandleDaysObject$`candle_date_time_utc`
      self$`candle_date_time_kst` <- CandleDaysObject$`candle_date_time_kst`
      self$`opening_price` <- CandleDaysObject$`opening_price`
      self$`high_price` <- CandleDaysObject$`high_price`
      self$`low_price` <- CandleDaysObject$`low_price`
      self$`trade_price` <- CandleDaysObject$`trade_price`
      self$`timestamp` <- CandleDaysObject$`timestamp`
      self$`candle_acc_trade_price` <- CandleDaysObject$`candle_acc_trade_price`
      self$`candle_acc_trade_volume` <- CandleDaysObject$`candle_acc_trade_volume`
      self$`prev_closing_price` <- CandleDaysObject$`prev_closing_price`
      self$`change_price` <- CandleDaysObject$`change_price`
      self$`change_rate` <- CandleDaysObject$`change_rate`
      self$`converted_trade_price` <- CandleDaysObject$`converted_trade_price`
    }
  )
)
|
{"hexsha": "b19f24c1d47be50ed952a5bf875fc05d1b134790", "size": 9901, "ext": "r", "lang": "R", "max_stars_repo_path": "swg_generated/r/R/CandleDays.r", "max_stars_repo_name": "Reclusive-Trader/upbit-client", "max_stars_repo_head_hexsha": "ca1fb02c9d4e22f6d726baf30a455a235ce0324a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 46, "max_stars_repo_stars_event_min_datetime": "2021-01-07T14:53:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T10:11:16.000Z", "max_issues_repo_path": "swg_generated/r/R/CandleDays.r", "max_issues_repo_name": "Reclusive-Trader/upbit-client", "max_issues_repo_head_hexsha": "ca1fb02c9d4e22f6d726baf30a455a235ce0324a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-02-20T05:21:29.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-01T12:53:02.000Z", "max_forks_repo_path": "swg_generated/r/R/CandleDays.r", "max_forks_repo_name": "Reclusive-Trader/upbit-client", "max_forks_repo_head_hexsha": "ca1fb02c9d4e22f6d726baf30a455a235ce0324a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 59, "max_forks_repo_forks_event_min_datetime": "2021-01-07T11:58:10.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-15T06:11:33.000Z", "avg_line_length": 39.7630522088, "max_line_length": 287, "alphanum_fraction": 0.6383193617, "num_tokens": 2695}
|
//
// Copyright (c) 2021 Vinnie Falco (vinnie dot falco at gmail dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// Official repository: https://github.com/boostorg/beast
//
#ifndef BOOST_HTTP_PROTO_IMPL_HEADERS_VIEW_HPP
#define BOOST_HTTP_PROTO_IMPL_HEADERS_VIEW_HPP
#include <boost/http_proto/arrow_proxy.hpp>
#include <boost/assert.hpp>
#include <iterator>
namespace boost {
namespace http_proto {
// Bidirectional iterator over the fields of a headers_view.
// Holds a pointer to the owning view plus a field index; dereferencing
// delegates to headers_view::operator[], which returns value_type by value
// (hence pointer/reference are value_type, via arrow_proxy for operator->).
class headers_view::iterator
{
    headers_view const* h_ = nullptr;   // owning view; nullptr when default-constructed
    std::size_t i_ = 0;                 // current field index

    friend class headers_view;
    friend class headers_view::subrange;

    // Private: only headers_view (a friend) creates positioned iterators.
    iterator(
        headers_view const* h,
        std::size_t i) noexcept
        : h_(h)
        , i_(i)
    {
    }

public:
    using value_type = typename
        headers_view::value_type;
    // Proxy iterator: both pointer and reference are by-value.
    using pointer = value_type;
    using reference = value_type;
    using iterator_category =
        std::bidirectional_iterator_tag;

    iterator() = default;

    bool
    operator==(
        iterator const& other) const
    {
        // Iterators are equal when they refer to the same view and index.
        return
            h_ == other.h_ &&
            i_ == other.i_;
    }

    bool
    operator!=(
        iterator const& other) const
    {
        return !(*this == other);
    }

    value_type
    operator*() const noexcept
    {
        return (*h_)[i_];
    }

    arrow_proxy<value_type>
    operator->() const noexcept
    {
        // arrow_proxy stores the temporary so that it-> works on a by-value element.
        return arrow_proxy<
            value_type>{ (*h_)[i_] };
    }

    iterator&
    operator++()
    {
        // Must not advance past the end (count_ fields in the view).
        BOOST_ASSERT(
            i_ < h_->count_);
        ++i_;
        return *this;
    }

    iterator
    operator++(int)
    {
        auto temp = *this;
        ++(*this);
        return temp;
    }

    iterator&
    operator--()
    {
        // Must not decrement the begin iterator.
        BOOST_ASSERT(
            i_ != 0);
        --i_;
        return *this;
    }

    iterator
    operator--(int)
    {
        auto temp = *this;
        --(*this);
        return temp;
    }
};
//------------------------------------------------
// Iterator to the first field of the view.
auto headers_view::begin() const noexcept -> iterator
{
    return iterator(this, 0);
}
// Iterator one past the last field of the view.
auto headers_view::end() const noexcept -> iterator
{
    return iterator(this, count_);
}
//------------------------------------------------
// Forward iterator over a subrange of fields of a headers_view (e.g. all
// fields sharing a name). operator++ is declared here but defined
// out-of-line (BOOST_HTTP_PROTO_DECL) — presumably it skips to the next
// matching field; the definition is not visible in this header.
class headers_view::subrange::iterator
{
    headers_view const* h_ = nullptr;   // underlying view; nullptr when default-constructed
    std::size_t i_ = 0;                 // index of the current field in the view

    friend class subrange;

    // Private: only subrange (a friend) creates positioned iterators.
    iterator(
        headers_view const* h,
        std::size_t i) noexcept
        : h_(h)
        , i_(i)
    {
    }

public:
    using value_type =
        typename headers_view::value_type;
    // Proxy iterator: both pointer and reference are by-value.
    using pointer = value_type;
    using reference = value_type;
    using iterator_category =
        std::forward_iterator_tag;

    iterator() noexcept = default;

    bool
    operator==(
        iterator const& other) const
    {
        // Equal when referring to the same view and the same field index.
        return
            h_ == other.h_ &&
            i_ == other.i_;
    }

    bool
    operator!=(
        iterator const& other) const
    {
        return !(*this == other);
    }

    value_type
    operator*() const noexcept
    {
        return (*h_)[i_];
    }

    arrow_proxy<value_type>
    operator->() const noexcept
    {
        // arrow_proxy stores the temporary so that it-> works on a by-value element.
        return arrow_proxy<
            value_type>{ **this };
    }

    // Defined out-of-line; advances to the next field in the subrange.
    BOOST_HTTP_PROTO_DECL
    iterator&
    operator++() noexcept;

    iterator
    operator++(int)
    {
        auto temp = *this;
        ++(*this);
        return temp;
    }
};
//------------------------------------------------
// Construct a subrange over the fields of *h beginning at index `first`.
headers_view::subrange::subrange(
    headers_view const* h, std::size_t first) noexcept
    : h_(h), first_(first)
{
}
// Iterator at the first field of the subrange.
auto headers_view::subrange::begin() const noexcept -> iterator
{
    return iterator(h_, first_);
}
// End sentinel: one past the last field of the underlying view.
auto headers_view::subrange::end() const noexcept -> iterator
{
    return iterator(h_, h_->size());
}
//------------------------------------------------
} // http_proto
} // boost
#endif
|
{"hexsha": "cbb6e7fef3bbe4beb437704a2ac035ee2bb41403", "size": 3979, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/boost/http_proto/impl/headers_view.hpp", "max_stars_repo_name": "vinniefalco/http_proto", "max_stars_repo_head_hexsha": "05e124a35d6b1d6addabf2875ebcc4a444d9ec3f", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2021-07-19T12:37:20.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-10T18:21:45.000Z", "max_issues_repo_path": "include/boost/http_proto/impl/headers_view.hpp", "max_issues_repo_name": "vinniefalco/http_proto", "max_issues_repo_head_hexsha": "05e124a35d6b1d6addabf2875ebcc4a444d9ec3f", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-09-28T16:58:02.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-26T20:21:32.000Z", "max_forks_repo_path": "include/boost/http_proto/impl/headers_view.hpp", "max_forks_repo_name": "vinniefalco/http_proto", "max_forks_repo_head_hexsha": "05e124a35d6b1d6addabf2875ebcc4a444d9ec3f", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2021-09-26T18:41:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-25T18:14:30.000Z", "avg_line_length": 16.9319148936, "max_line_length": 79, "alphanum_fraction": 0.5355616989, "num_tokens": 939}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Plots Figure 13.5 which illustrates Hüurzeler and Künsch's method for using CRN
(common random numbers) when evaluating the log-likelihood function. See
Chapter 13 (MLE) for more details.
Note: takes less than one minute.
"""
from __future__ import division, print_function
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.cm as cm
import seaborn as sb
from scipy.optimize import fmin
import particles
from particles import resampling as rs
from particles import state_space_models as ssms
# data: 4th column of the GBP/USD 1997-98 dataset, converted to
# 100x log-returns (the usual percent-return series for StochVol models)
raw_data = np.loadtxt('../../datasets/GBP_vs_USD_9798.txt',
                      skiprows=2, usecols=(3,), comments='(C)')
data = 100. * np.diff(np.log(raw_data))
# ssm model
def ssmod(theta):
    """Build a StochVol state-space model from theta = (mu, rho, sigma)."""
    mu, rho, sigma = theta[0], theta[1], theta[2]
    return ssms.StochVol(mu=mu, rho=rho, sigma=sigma)
def log_joint_density(theta, x):
    """Log joint density log p(x, y | theta) of a state trajectory and the
    (global) observation series `data` under the StochVol model.

    theta -- parameter vector (mu, rho, sigma)
    x -- sequence of states indexed by time; x[t] may be an array of
         particle values (broadcasting handles both cases)
    Returns the accumulated log density.
    """
    mod = ssmod(theta)
    # FIX: the original assigned l = mod.PX0().logpdf(x[0]) before the loop
    # and then unconditionally overwrote it at t == 0 — a dead (and wasted)
    # computation. Initialize with the full t = 0 term instead.
    l = mod.PX0().logpdf(x[0]) + mod.PY(0, None, x[0]).logpdf(data[0])
    for t in range(1, len(x)):
        # transition density plus observation density at time t
        l += (mod.PX(t, x[t - 1]).logpdf(x[t])
              + mod.PY(t, x[t - 1], x[t]).logpdf(data[t]))
    return l
# FK models
def fkmod(theta, T):
    """Bootstrap Feynman-Kac model for the StochVol SSM on the first T observations."""
    return ssms.Bootstrap(ssm=ssmod(theta), data=data[:T])
# Choice of theta_0 and the grid of theta's: vary sigma only, keep mu and rho
# fixed at the reference values used to generate the trajectories.
mu0 = -1.
rho0 = 0.9
sigma0 = 0.3
theta0 = [mu0, rho0, sigma0]
sigmas = sigma0 + np.linspace(-.199, .2, 401)
thetas = [[mu0, rho0, sig] for sig in sigmas]
# Sample sizes T considered; longer series are drawn in darker grey.
Ts = [10, 100, 1000]
colors = {10: 'lightgray', 100: 'gray', 1000: 'black'}
plt.style.use('ggplot')
plt.figure()
for T in Ts:
    print('FFBS for T=%i' % T)
    # Run a bootstrap particle filter at theta0, then draw M=100 complete
    # trajectories by backward sampling (FFBS).
    alg = particles.SMC(fk=fkmod(theta0, T), N=100, store_history=True)
    alg.run()
    trajs = alg.hist.backward_sampling(M=100)
    ll0 = log_joint_density(theta0, trajs)
    ess_ls = []
    for theta in thetas:
        # Importance-weight the trajectories from theta0 to theta and record
        # the effective sample size of the log-weights.
        ll = log_joint_density(theta, trajs)
        ess = rs.essl(ll - ll0)
        ess_ls.append(ess)
    plt.plot(sigmas, ess_ls, label='T=%i' % T, color=colors[T])
plt.xlabel('sigma')
plt.ylabel('ESS')
plt.legend(loc=2)
savefigs = False # change this if you want to save the plot as a PDF
if savefigs:
    plt.savefig('hurzeler_kunsch.pdf')
plt.show()
|
{"hexsha": "9eedd1d9c3018d471d2c1301dd4677c7e3764100", "size": 2283, "ext": "py", "lang": "Python", "max_stars_repo_path": "book/mle/hurzeler_kunsch.py", "max_stars_repo_name": "jonjonw/particles", "max_stars_repo_head_hexsha": "95dfe311e3a2a615fbdfdffa6492cd4391c152ad", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "book/mle/hurzeler_kunsch.py", "max_issues_repo_name": "jonjonw/particles", "max_issues_repo_head_hexsha": "95dfe311e3a2a615fbdfdffa6492cd4391c152ad", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "book/mle/hurzeler_kunsch.py", "max_forks_repo_name": "jonjonw/particles", "max_forks_repo_head_hexsha": "95dfe311e3a2a615fbdfdffa6492cd4391c152ad", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.2413793103, "max_line_length": 79, "alphanum_fraction": 0.6416995182, "include": true, "reason": "import numpy,from scipy", "num_tokens": 712}
|
#ifdef STAND_ALONE
# define BOOST_TEST_MODULE Main
#else
#ifndef _WIN32
# define BOOST_TEST_MODULE Assembler
#endif
#endif
#include <boost/test/unit_test.hpp>
#include "assembler_tests.h"
BOOST_FIXTURE_TEST_SUITE( AssemblerTestSuite, AssemblerFixture )
// Placeholder case so the suite compiles and registers with the runner;
// real assembler tests are still to be written.
BOOST_AUTO_TEST_CASE( DummyTest )
{
    // TODO
}
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "72075cc06e73d2e6af385933f6b359e59fd2676b", "size": 337, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/AssemblerTests/assembler_tests.cpp", "max_stars_repo_name": "faouellet/CHIP-16", "max_stars_repo_head_hexsha": "f85aed30d849052ac057092b124321953de80db6", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/AssemblerTests/assembler_tests.cpp", "max_issues_repo_name": "faouellet/CHIP-16", "max_issues_repo_head_hexsha": "f85aed30d849052ac057092b124321953de80db6", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/AssemblerTests/assembler_tests.cpp", "max_forks_repo_name": "faouellet/CHIP-16", "max_forks_repo_head_hexsha": "f85aed30d849052ac057092b124321953de80db6", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.0476190476, "max_line_length": 64, "alphanum_fraction": 0.7952522255, "num_tokens": 84}
|
[STATEMENT]
lemma bernoulli_1 [simp]: "bernoulli 1 = -1/2"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. bernoulli 1 = - 1 / 2
[PROOF STEP]
by (simp add: bernoulli_Suc)
|
{"llama_tokens": 86, "file": "Bernoulli_Bernoulli", "length": 1}
|
import spglib
import numpy as np
def irreducible_kpoints(structure, kpoints):
    """Use spglib to compute the number of irreducible k-points.

    Keyword arguments:
    structure -- aiida structure object
    kpoints -- aiida kpoint object (must carry 'mesh' and 'offset' attributes)

    Returns:
    nr -- number of irreducible k-points
    mapping -- spglib mapping (full-grid index -> representative grid index)
    grid -- spglib grid (integer grid addresses of the full mesh)
    """
    pymatgen_st = structure.get_pymatgen()
    # Build an integer "atom type" label per site: sites with the same
    # chemical symbol get the same label, as spglib identifies equivalent
    # atoms through these numbers.
    atom_symbols = np.array([str(site.specie) for site in pymatgen_st.sites])
    # FIX: use an integer dtype — spglib's cell tuple expects integral atom
    # type identifiers, not the float64 array np.zeros() gives by default.
    indicators = np.zeros(len(atom_symbols), dtype=int)
    # FIX: sort the symbol set so label assignment is deterministic across
    # runs (set iteration order is arbitrary); the symmetry result is
    # unaffected since labels only encode equivalence.
    for i, symbol in enumerate(sorted(set(atom_symbols))):
        indicators[atom_symbols == symbol] = i
    rpos = [site.frac_coords for site in pymatgen_st.sites]
    cell = (pymatgen_st.lattice.matrix, rpos, indicators)
    mapping, grid = spglib.get_ir_reciprocal_mesh(
        kpoints.attributes['mesh'], cell,
        is_shift=kpoints.attributes['offset'])
    return len(np.unique(mapping)), mapping, grid
|
{"hexsha": "70e562053e228ecf2520bdf351510a6ffbb8195d", "size": 1070, "ext": "py", "lang": "Python", "max_stars_repo_path": "aiida_sirius/helpers/kpoints.py", "max_stars_repo_name": "simonpintarelli/aiida-sirius", "max_stars_repo_head_hexsha": "5dc968cc4a98a5d0b018f54c4c7023b2a2682795", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "aiida_sirius/helpers/kpoints.py", "max_issues_repo_name": "simonpintarelli/aiida-sirius", "max_issues_repo_head_hexsha": "5dc968cc4a98a5d0b018f54c4c7023b2a2682795", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "aiida_sirius/helpers/kpoints.py", "max_forks_repo_name": "simonpintarelli/aiida-sirius", "max_forks_repo_head_hexsha": "5dc968cc4a98a5d0b018f54c4c7023b2a2682795", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.4375, "max_line_length": 94, "alphanum_fraction": 0.6682242991, "include": true, "reason": "import numpy", "num_tokens": 258}
|
export BallOnPlate
"""
BallOnPlate
The single-axis ball on a plate system example with a single actuator.
This system is a 2-state 1-input discrete-time system with bounds on both
the states and the inputs.
This example is from Section 9.6 of [1].
[1] S. Richter, ‘Computational complexity certification of gradient methods for real-time model predictive control’, Doctoral Thesis, ETH Zurich, 2012.
"""
struct BallOnPlate <: ControlBenchmarkProblem end
@addbenchmark BallOnPlate "lin01" "Ball on Plate (Single Axis)" "Single axis ball on plate" Set([:linear, :stable, :discrete])
"""
    controlbenchmark(::BallOnPlate)

Construct the single-axis ball-on-plate benchmark. Returns a named tuple with
`:sys` (a discrete-time `StateSpace` with 0.01 sample time), `:stateBounds`,
and `:inputBounds`.
"""
function controlbenchmark( ::BallOnPlate )
    # Discrete-time dynamics (sample time 0.01)
    A = [ 1.0  0.01;
          0.0  1.0 ]
    B = [ -0.0004, -0.0701 ]
    C = Matrix{Float64}( I, 2, 2 )   # both states measured directly
    D = fill( 0.0, (2, 1) )
    # Box constraints on the two states
    stateBounds = Bounds( [ Bound( -0.2, 0.01 ), Bound( -0.1, 0.1 ) ] )
    # Box constraint on the single input
    inputBounds = Bounds( [ Bound( -0.0524, 0.0524 ) ] )
    return (; :sys => StateSpace( A, B, C, D, 0.01 ),
              :stateBounds => stateBounds,
              :inputBounds => inputBounds )
end
|
{"hexsha": "20d882bec81ad34f4d3238a4de5faa98be6272b8", "size": 1154, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/benchmarks/ballOnPlate.jl", "max_stars_repo_name": "imciner2/ControlBenchmarks.jl", "max_stars_repo_head_hexsha": "f11f4a98bd8ab2923771b72e7f0d4c60302207c8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/benchmarks/ballOnPlate.jl", "max_issues_repo_name": "imciner2/ControlBenchmarks.jl", "max_issues_repo_head_hexsha": "f11f4a98bd8ab2923771b72e7f0d4c60302207c8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/benchmarks/ballOnPlate.jl", "max_forks_repo_name": "imciner2/ControlBenchmarks.jl", "max_forks_repo_head_hexsha": "f11f4a98bd8ab2923771b72e7f0d4c60302207c8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.6444444444, "max_line_length": 151, "alphanum_fraction": 0.6247833622, "num_tokens": 339}
|
import numpy as np
import torch
from tqdm import tqdm
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import pickle
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from copy import deepcopy
from tensorboardX import SummaryWriter
import os
import cProfile
def plot_regression(model, samples, directory, no_plot_samples):
    """Plot posterior predictive curves for a 1-D regression model.

    A random subset of `no_plot_samples` parameter samples is loaded into
    `model` one at a time; each resulting curve is drawn, then the ensemble
    mean +/- one predictive standard deviation is overlaid. Saves
    'HMC_regression.pdf' in `directory` and pickles the plotted arrays.

    model -- network whose parameters are overwritten in-place by each sample
    samples -- list of parameter-tensor lists (model.parameters() order)
    directory -- output directory for the PDF and pickle
    no_plot_samples -- number of samples to subsample for plotting
    """
    plt.figure()
    plt.rcParams.update({'font.size': 14})
    plt.xlabel('$x$')
    plt.ylabel('$y$')
    axes = plt.gca()
    axes.set_xlim([-3, 3])
    axes.set_ylim([-3, 3])
    # NOTE(review): `data_load` is a global not defined anywhere visible in
    # this module — presumably the (x, y) training data loaded elsewhere;
    # confirm before reuse.
    plt.plot(data_load[:, 0], data_load[:, 1], '+k', markersize=16)
    N = 1000
    x_lower = -6
    x_upper = 8
    x_values = np.linspace(x_lower, x_upper, N)
    # BUG FIX: `.cuda(async=True)` is a SyntaxError on Python >= 3.7 because
    # `async` became a reserved keyword; PyTorch renamed the argument to
    # `non_blocking` (same semantics).
    test_inputs = torch.FloatTensor(x_values).cuda(non_blocking=True)
    # subsample for plotting only
    indeces = np.arange(0, len(samples))
    subsampled_indeces = np.random.choice(indeces, no_plot_samples, replace=False)
    subsampled_indeces = subsampled_indeces.astype(int)
    samples = [samples[i] for i in subsampled_indeces]
    # plot every subsampled parameter set as one predictive curve
    no_samp = len(samples)
    all_test_outputs = np.zeros((no_samp, N))
    for i, sample in enumerate(samples):
        for j, param in enumerate(model.parameters()):
            param.data = sample[j]  # fill the model with this sample's weights
        test_outputs = model(test_inputs)
        # back to a flat numpy vector for plotting
        test_outputs = test_outputs.data.cpu().numpy()
        test_outputs = test_outputs.reshape(N)
        plt.plot(x_values, test_outputs, linewidth=1)
        # keep the curve for ensemble mean / s.d. computation below
        all_test_outputs[i, :] = test_outputs
    # Predictive mean and variance: observation noise plus ensemble spread
    # (E[f^2] - E[f]^2).
    mean = np.mean(all_test_outputs, 0)
    variance = model.noise_variance + np.mean(all_test_outputs**2, 0) - mean**2
    plt.plot(x_values, mean, color='b')
    plt.fill_between(x_values, mean + np.sqrt(variance), mean - np.sqrt(variance), color='b', alpha=0.3)
    filepath = os.path.join(directory, 'HMC_regression.pdf')
    plt.savefig(filepath)
    plt.close()
    # pickle everything as numpy arrays for posterity
    inputs_hmc = x_values
    mean_hmc = mean
    sd_hmc = np.sqrt(variance)
    pickle_location = os.path.join(directory, 'plot_hmc_relu')
    outfile = open(pickle_location, 'wb')
    pickle.dump(inputs_hmc, outfile)
    pickle.dump(mean_hmc, outfile)
    pickle.dump(sd_hmc, outfile)
    outfile.close()
def plot_covariance(hidden_sizes, samples, directory):
    """Plot the empirical covariance and correlation matrices of the sampled
    network parameters; saves 'covariance.pdf' and 'correlation.pdf' in
    `directory`.

    hidden_sizes -- hidden layer widths of a 1-input, 1-output MLP
    samples -- list of parameter samples, each an iterable of tensors in
               model.parameters() order
    directory -- output directory for the PDFs
    """
    # Total parameter count for a 1 -> h0 -> ... -> h_last -> 1 network:
    # first weight matrix, then per layer (bias + next weight matrix), then
    # the last hidden bias, the output weights and the single output bias.
    no_params = hidden_sizes[0] # first weight matrix
    for i in range(len(hidden_sizes)-1):
        no_params = no_params + hidden_sizes[i] + hidden_sizes[i]*hidden_sizes[i+1]
    no_params = no_params + hidden_sizes[-1] + hidden_sizes[-1] + 1 # final weight matrix and last 2 biases
    sampled_parameters = np.zeros((no_params, len(samples)))
    for i, sample in enumerate(samples):
        # send parameters to numpy arrays and pack the parameters into a vector
        start_index = 0
        for _, param in enumerate(sample):
            if len(param.size()) > 1:
                param = torch.t(param) # nn.Linear does a transpose for some reason, so undo this
            param = param.cpu().detach().numpy()
            param = param.reshape(-1) # flatten into a vector
            end_index = start_index + param.size
            sampled_parameters[start_index:end_index, i] = param # fill into array
            start_index = start_index + param.size
    # sample mean over the sample axis, kept 2-D so subtraction broadcasts
    sample_mean = np.mean(sampled_parameters, axis=1)
    sample_mean = sample_mean.reshape(-1, 1) # numpy broadcasting thing
    centered_samples = sampled_parameters - sample_mean
    # accumulate outer products of the centered columns
    cov = np.zeros((no_params, no_params))
    for i in range(len(samples)):
        cov = cov + np.outer(centered_samples[:,i], centered_samples[:,i])
    cov = cov/len(samples) # max likelihood estimator of covariance (divides by N, not N-1)
    fig, ax = plt.subplots()
    im = ax.imshow(np.abs(cov) , interpolation='nearest', cmap=cm.Greys_r)
    filepath = os.path.join(directory, 'covariance.pdf')
    fig.savefig(filepath)
    plt.close()
    # correlation matrix: covariance scaled by outer product of std devs
    variance_vector = np.diag(cov)
    sd_vector = np.sqrt(variance_vector)
    outer_prod = np.outer(sd_vector, sd_vector)
    correlations = cov/outer_prod
    fig, ax = plt.subplots()
    im = ax.imshow(correlations , interpolation='nearest')
    fig.colorbar(im)
    filepath = os.path.join(directory, 'correlation.pdf')
    fig.savefig(filepath)
    plt.close()
class MLP(nn.Module):
    """Fully connected network mapping 1 input to 1 output through the given
    hidden layer widths, with ReLU activations between layers. Also provides
    the HMC potential energy `get_U` (negative log joint up to a constant):
    Gaussian likelihood with `noise_variance`, Neal-style Gaussian priors
    (weight variance omega^2 / fan_in, unit-variance biases)."""

    def __init__(self, noise_variance, hidden_sizes, omega):
        super(MLP, self).__init__()
        self.omega = omega                    # prior scale for the weights
        self.noise_variance = noise_variance  # observation noise variance
        self.hidden_sizes = hidden_sizes
        # Chain of linear layers: 1 -> h0 -> h1 -> ... -> 1
        layers = [nn.Linear(1, hidden_sizes[0])]
        for k in range(len(hidden_sizes) - 1):
            layers.append(nn.Linear(hidden_sizes[k], hidden_sizes[k + 1]))
        layers.append(nn.Linear(hidden_sizes[-1], 1))
        self.linears = nn.ModuleList(layers)
        print(self.linears)

    def forward(self, x):
        # Reshape to a column of scalars, then apply each layer; ReLU after
        # every layer except the last (linear output).
        out = x.view(x.size()[0], 1)
        last = len(self.linears) - 1
        for idx, layer in enumerate(self.linears):
            out = layer(out)
            if idx < last:
                out = F.relu(out)
        return out

    def get_U(self, inputs, labels):
        """Potential energy: squared-error term / (2 * noise_variance) plus
        the Gaussian prior penalty on all weights and biases."""
        preds = self.forward(inputs)
        targets = labels.reshape(labels.size()[0], 1)
        prior_term = 0
        for layer in self.linears:
            fan_in = layer.weight.size()[1]
            # Neal's prior: weight precision scales with fan-in; biases have unit variance.
            layer_penalty = 0.5 * (fan_in / (self.omega ** 2)) * torch.sum(layer.weight ** 2) \
                + 0.5 * torch.sum(layer.bias ** 2)
            prior_term = prior_term + layer_penalty
        data_term = (1 / (2 * self.noise_variance)) * torch.sum((targets - preds) ** 2)
        return data_term + prior_term
"""This version of HMC follows https://arxiv.org/pdf/1206.1901.pdf. Identity mass matrix used"""
class HMC_Sampler:
    def __init__(self, inputs, targets, step_size = 0.002, num_steps = 20, no_samples = 20000, burn_in = 1000, thinning = 1):
        """Configure the HMC sampler.

        inputs, targets -- training data tensors passed to model.get_U
        step_size, num_steps -- NOTE(review): HMC_transition indexes these as
            (low, high) pairs via [0]/[1] to randomize per transition, so the
            scalar defaults shown here would fail; callers must pass pairs —
            confirm intended usage.
        no_samples -- number of post-burn-in transitions
        burn_in -- number of discarded initial transitions
        thinning -- keep every `thinning`-th sample
        """
        self.step_size = step_size
        self.num_steps = num_steps
        self.no_samples = no_samples
        self.burn_in = burn_in
        self.inputs = inputs
        self.targets = targets
        self.thinning = thinning
        self.no_accept = 0  # accepted-transition counter, reset every 100 iterations
def get_samples(self, model):
"""run the HMC sampler and save the samples"""
print('Beginning burn-in phase of {} samples'.format(self.burn_in))
# don't save the burn-in samples
for i in tqdm(range(self.burn_in)):
new_parameters, energy = self.HMC_transition(model)
writer.add_scalar('Energy', energy, i)
if i%100 == 0:
if i != 0:
print('Acceptance rate: {}%'.format(self.no_accept))
self.no_accept = 0
for i, param in enumerate(model.parameters()):
param.data = new_parameters[i]
print('Burn-in phase finished, collecting {} samples with thinning of {}.'.format(self.no_samples, self.thinning))
samples = []
for i in tqdm(range(self.no_samples)):
# get new parameters and use them to replace the old ones
new_parameters, energy = self.HMC_transition(model)
writer.add_scalar('Energy', energy, i + self.burn_in)
for j, param in enumerate(model.parameters()):
param.data = new_parameters[j]
# save the new parameters
if i%self.thinning == 0:
samples.append(deepcopy(new_parameters))
# print the acceptance rate
if i%100 == 0:
if i != 0:
print('Acceptance rate: {}%'.format(self.no_accept))
self.no_accept = 0
print('Done collecting samples')
return samples
def HMC_transition(self, model):
"""perform one transition of the markov chain"""
# randomise the step size and number of steps
step_size = np.random.uniform(self.step_size[0], self.step_size[1])
num_steps = np.random.randint(self.num_steps[0], self.num_steps[1] + 1)
saved_params = deepcopy(list(model.parameters())) # list of all the parameter objects - positions
p = [] # list of momenta
for _, param in enumerate(model.parameters()):
param_size = param.size()
# independent standard normal variates for corresponding momenta
p.append(torch.cuda.FloatTensor(param_size).normal_(0, 1))
# get gradients of U wrt parameters q
U = model.get_U(self.inputs, self.targets) # get log posterior (up to constant)
start_U = U.clone() # save the starting potential energy
# save the starting kinetic energy
start_K = torch.cuda.FloatTensor(1).fill_(0) # a zero
for momentum in p:
start_K = start_K + torch.sum(momentum**2)/2
U.backward()
# make half step for momentum at the beginning
for i, momentum in enumerate(p):
momentum += - step_size*list(model.parameters())[i].grad.data/2
# alternate full steps for position and momentum
for i in range(num_steps):
# make a full step for the position
for l, param in enumerate(model.parameters()):
param.data += step_size*p[l]
# zero gradients of U wrt parameters q
for _, param in enumerate(model.parameters()):
param.grad.data.zero_()
# get gradients of U wrt parameters q
U = model.get_U(self.inputs, self.targets) # get log posterior (up to constant)
U.backward()
# make a full step for the momentum, except at end of trajectory <-- check this ################
if not (i == (num_steps-1)):
for j, momentum in enumerate(p):
momentum += - step_size*list(model.parameters())[j].grad.data
# make a half step for momentum at the end
for i, momentum in enumerate(p):
momentum += - step_size*list(model.parameters())[i].grad.data/2
# negate momentum at the end of trajectory to make the proposal symmetric
# can probably skip this without effect
# evaluate potential and kinetic energies at end of trajectory
end_U = model.get_U(self.inputs, self.targets)
end_K = torch.cuda.FloatTensor(1).fill_(0) # a zero
for momentum in p:
end_K = end_K + torch.sum(momentum**2)/2
# zero gradients of U wrt parameters q
for _, param in enumerate(model.parameters()):
param.grad.data.zero_()
# Accept or reject the state at end of trajectory, returning either the position
# at the end of the trajectory or the initial position
if np.random.uniform(0, 1) < torch.exp(start_U - end_U + start_K - end_K).cpu().detach().numpy():
self.no_accept = self.no_accept + 1
return list(model.parameters()), end_U # accept
else:
return saved_params, start_U # reject
if __name__ == "__main__":
    # set RNG for reproducibility
    np.random.seed(0) # 0
    torch.manual_seed(230) # 230
    # hyperparameters
    noise_variance = 0.01 # 0.01
    hidden_sizes = [50]
    omega = 4
    burn_in = 10000
    no_samples = 20000
    no_saved_samples = 1000
    no_plot_samples = 100 #32
    step_size = [0.001, 0.0015]  # [low, high] range sampled per HMC transition
    num_steps = [5, 10]          # [low, high] leapfrog step count range
    directory = './/experiments//ICML_relu_fixed_prior'
    #data_location = './/experiments//2_points_init//prior_dataset.pkl'
    data_location = '..//vision//data//1D_COSINE//1d_cosine_separated.pkl'
    # set up tensorboard
    tensorboard_path = os.path.join(directory, 'tensorboard')
    writer = SummaryWriter(tensorboard_path)
    # model
    net = MLP(noise_variance, hidden_sizes, omega)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Assume that we are on a CUDA machine, then this should print a CUDA device:
    print(device)
    net.to(device)
    for param in net.parameters():
        print(type(param.data), param.size())
    # get dataset: column 0 is x, column 1 is y
    with open(data_location, 'rb') as f:
        data_load = pickle.load(f)
    #data_load = data_load[0] # just the points not the line
    x_train = torch.Tensor(data_load[:,0]).cuda()
    y_train = torch.Tensor(data_load[:,1]).cuda()
    # # profile the code
    # pr = cProfile.Profile()
    # pr.enable()
    # HMC sample: thin so that roughly no_saved_samples are kept
    thinning = int(np.ceil(no_samples/no_saved_samples))
    sampler = HMC_Sampler(inputs = x_train, targets = y_train, step_size = step_size, num_steps = num_steps,
                          no_samples = no_samples, burn_in = burn_in, thinning=thinning)
    samples = sampler.get_samples(net)
    no_saved_samples = len(samples) # the actual number of samples saved
    # pr.disable()
    # pr.print_stats()
    # pickle the samples
    filename = directory + '//HMC_samples'
    outfile = open(filename, 'wb')
    pickle.dump(samples, outfile)
    outfile.close()
    # plot and save plot of network output
    plot_regression(net, samples, directory, no_plot_samples)
    # plot empirical covariance
    plot_covariance(hidden_sizes, samples, directory)
    # save text file with hyperparameters so the run can be reproduced
    file = open(directory + '/hyperparameters.txt','w')
    file.write('noise_variance: {} \n'.format(noise_variance))
    file.write('hidden_sizes: {} \n'.format(hidden_sizes))
    file.write('omega: {} \n'.format(omega))
    file.write('burn_in: {} \n'.format(burn_in))
    file.write('no_samples: {} \n'.format(no_samples))
    file.write('no_saved_samples: {} \n'.format(no_saved_samples))
    file.write('no_plot_samples: {} \n'.format(no_plot_samples))
    file.write('step_size: {} \n'.format(step_size))
    file.write('num_steps: {} \n'.format(num_steps))
    file.close()
|
{"hexsha": "638303071d60a72f36e3b040ad65ad08928940eb", "size": 14899, "ext": "py", "lang": "Python", "max_stars_repo_path": "pytorch/HMC/hmc_sampler.py", "max_stars_repo_name": "andrewfoongyk/cs230-code-examples", "max_stars_repo_head_hexsha": "8e12aa3414bdada6ec6002bedf919a6816ba237c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pytorch/HMC/hmc_sampler.py", "max_issues_repo_name": "andrewfoongyk/cs230-code-examples", "max_issues_repo_head_hexsha": "8e12aa3414bdada6ec6002bedf919a6816ba237c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pytorch/HMC/hmc_sampler.py", "max_forks_repo_name": "andrewfoongyk/cs230-code-examples", "max_forks_repo_head_hexsha": "8e12aa3414bdada6ec6002bedf919a6816ba237c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.3113456464, "max_line_length": 130, "alphanum_fraction": 0.6300422847, "include": true, "reason": "import numpy", "num_tokens": 3586}
|
from typing import Dict
from typing import List
import numpy as np
import pandas as pd
import pytest
from etna.analysis.feature_relevance import StatisticsRelevanceTable
from etna.datasets import TSDataset
from etna.datasets import generate_ar_df
from etna.datasets import generate_periodic_df
from etna.transforms.gale_shapley import BaseGaleShapley
from etna.transforms.gale_shapley import GaleShapleyFeatureSelectionTransform
from etna.transforms.gale_shapley import GaleShapleyMatcher
from etna.transforms.gale_shapley import RegressorGaleShapley
from etna.transforms.gale_shapley import SegmentGaleShapley
@pytest.fixture
def ts_with_large_regressors_number(random_seed) -> TSDataset:
    """TSDataset with 3 segments and 8 exogenous regressors: regressor_1 is a
    clean periodic series, regressors 2-4 are periodic with increasing noise,
    and regressors 5-8 are AR series."""
    df = generate_periodic_df(periods=100, start_time="2020-01-01", n_segments=3, period=7, scale=10)
    exog_df = generate_periodic_df(periods=150, start_time="2020-01-01", n_segments=3, period=7).rename(
        {"target": "regressor_1"}, axis=1
    )
    for i in range(1, 4):
        tmp = generate_periodic_df(periods=150, start_time="2020-01-01", n_segments=3, period=7)
        # noise amplitude grows with i, so the periodic regressors differ
        tmp["target"] += np.random.uniform(low=-i / 5, high=i / 5, size=(450,))
        exog_df = exog_df.merge(tmp.rename({"target": f"regressor_{i + 1}"}, axis=1), on=["timestamp", "segment"])
    for i in range(4, 8):
        tmp = generate_ar_df(periods=150, start_time="2020-01-01", n_segments=3, ar_coef=[1], random_seed=i)
        exog_df = exog_df.merge(tmp.rename({"target": f"regressor_{i + 1}"}, axis=1), on=["timestamp", "segment"])
    ts = TSDataset(df=TSDataset.to_dataset(df), freq="D", df_exog=TSDataset.to_dataset(exog_df))
    return ts
@pytest.fixture
def relevance_matrix() -> pd.DataFrame:
    """4x3 relevance table: rows are segments, columns are regressors."""
    values = {
        "regressor_1": [1, 2, 3, 4],
        "regressor_2": [4, 1, 5, 2],
        "regressor_3": [2, 4, 1, 3],
    }
    return pd.DataFrame(values, index=["segment_1", "segment_2", "segment_3", "segment_4"])
@pytest.fixture
def base_gale_shapley_player() -> BaseGaleShapley:
    """A BaseGaleShapley player named regressor_1 with a fixed segment ranking."""
    base = BaseGaleShapley(name="regressor_1", ranked_candidates=["segment_1", "segment_3", "segment_2", "segment_4"])
    return base
@pytest.fixture
def regressor() -> RegressorGaleShapley:
    """A RegressorGaleShapley player with a fixed segment ranking."""
    reg = RegressorGaleShapley(
        name="regressor_1", ranked_candidates=["segment_1", "segment_3", "segment_2", "segment_4"]
    )
    return reg
@pytest.fixture
def segment() -> SegmentGaleShapley:
    """A SegmentGaleShapley player with a fixed regressor ranking."""
    segment = SegmentGaleShapley(
        name="segment_1", ranked_candidates=["regressor_1", "regressor_2", "regressor_3", "regressor_4"]
    )
    return segment
@pytest.fixture
def matcher() -> GaleShapleyMatcher:
    """A 3x3 matcher: three segments and three regressors, each with a full
    preference ranking over the other side."""
    segments = [
        SegmentGaleShapley(
            name="segment_1",
            ranked_candidates=["regressor_1", "regressor_2", "regressor_3"],
        ),
        SegmentGaleShapley(
            name="segment_2",
            ranked_candidates=["regressor_1", "regressor_3", "regressor_2"],
        ),
        SegmentGaleShapley(
            name="segment_3",
            ranked_candidates=["regressor_2", "regressor_3", "regressor_1"],
        ),
    ]
    regressors = [
        RegressorGaleShapley(
            name="regressor_1",
            ranked_candidates=["segment_3", "segment_1", "segment_2"],
        ),
        RegressorGaleShapley(
            name="regressor_2",
            ranked_candidates=["segment_2", "segment_3", "segment_1"],
        ),
        RegressorGaleShapley(
            name="regressor_3",
            ranked_candidates=["segment_1", "segment_2", "segment_3"],
        ),
    ]
    gsh = GaleShapleyMatcher(segments=segments, regressors=regressors)
    return gsh
@pytest.fixture
def relevance_matrix_big() -> pd.DataFrame:
    """3x7 relevance table: rows are segments, columns are regressors."""
    matrix = np.array(
        [
            [1, 2, 3, 4, 5, 6, 7],
            [6, 1, 3, 4, 7, 5, 2],
            [1, 5, 4, 3, 2, 7, 6],
        ]
    )
    return pd.DataFrame(
        matrix,
        index=[f"segment_{i}" for i in range(1, 4)],
        columns=[f"regressor_{i}" for i in range(1, 8)],
    )
@pytest.mark.parametrize(
    "ascending,expected",
    (
        (
            True,
            {
                "segment_1": ["regressor_1", "regressor_3", "regressor_2"],
                "segment_2": ["regressor_2", "regressor_1", "regressor_3"],
                "segment_3": ["regressor_3", "regressor_1", "regressor_2"],
                "segment_4": ["regressor_2", "regressor_3", "regressor_1"],
            },
        ),
        (
            False,
            {
                "segment_1": ["regressor_2", "regressor_3", "regressor_1"],
                "segment_2": ["regressor_3", "regressor_1", "regressor_2"],
                "segment_3": ["regressor_2", "regressor_1", "regressor_3"],
                "segment_4": ["regressor_1", "regressor_3", "regressor_2"],
            },
        ),
    ),
)
def test_get_ranked_list(relevance_matrix: pd.DataFrame, ascending: bool, expected: Dict[str, List[str]]):
    """Each row of the relevance table should become a per-segment regressor
    ranking, ordered by relevance value in the requested direction."""
    result = GaleShapleyFeatureSelectionTransform._get_ranked_list(table=relevance_matrix, ascending=ascending)
    for key in expected.keys():
        assert key in result
        assert result[key] == expected[key]
@pytest.mark.parametrize(
    "ascending,expected",
    (
        (
            True,
            {
                "regressor_1": ["segment_1", "segment_2", "segment_3", "segment_4"],
                "regressor_2": ["segment_2", "segment_4", "segment_1", "segment_3"],
                "regressor_3": ["segment_3", "segment_1", "segment_4", "segment_2"],
            },
        ),
        (
            False,
            {
                "regressor_1": ["segment_4", "segment_3", "segment_2", "segment_1"],
                "regressor_2": ["segment_3", "segment_1", "segment_4", "segment_2"],
                "regressor_3": ["segment_2", "segment_4", "segment_1", "segment_3"],
            },
        ),
    ),
)
def test_get_ranked_list_regressors(relevance_matrix: pd.DataFrame, ascending: bool, expected: Dict[str, List[str]]):
    """Transposing the relevance table should yield per-regressor segment
    rankings in the requested direction."""
    result = GaleShapleyFeatureSelectionTransform._get_ranked_list(table=relevance_matrix.T, ascending=ascending)
    for key in expected.keys():
        assert key in result
        assert result[key] == expected[key]
@pytest.mark.parametrize(
    "top_k,n_segments,n_regressors,expected",
    (
        (20, 10, 50, 2),
        (27, 10, 40, 3),
        (15, 4, 16, 4),
        (7, 10, 50, 1),
        (30, 5, 20, 1),
    ),
)
def test_compute_gale_shapley_steps_number(top_k: int, n_segments: int, n_regressors: int, expected: int):
    """Number of matching rounds required to select top_k features for the
    given segment and regressor counts."""
    result = GaleShapleyFeatureSelectionTransform._compute_gale_shapley_steps_number(
        top_k=top_k, n_segments=n_segments, n_regressors=n_regressors
    )
    assert result == expected
@pytest.mark.parametrize(
    "ranked_regressors,regressors_to_drop,expected",
    (
        (
            {
                "segment_1": ["regressor_1", "regressor_2", "regressor_3", "regressor_4"],
                "segment_2": ["regressor_3", "regressor_2", "regressor_1", "regressor_4"],
                "segment_3": ["regressor_4", "regressor_3", "regressor_1", "regressor_2"],
            },
            ["regressor_2", "regressor_3"],
            {
                "segment_1": ["regressor_1", "regressor_4"],
                "segment_2": ["regressor_1", "regressor_4"],
                "segment_3": ["regressor_4", "regressor_1"],
            },
        ),
        # dropping every regressor should leave empty rankings
        (
            {
                "segment_1": ["regressor_1", "regressor_2", "regressor_3", "regressor_4"],
                "segment_2": ["regressor_3", "regressor_2", "regressor_1", "regressor_4"],
                "segment_3": ["regressor_4", "regressor_3", "regressor_1", "regressor_2"],
            },
            ["regressor_2", "regressor_3", "regressor_1", "regressor_4"],
            {
                "segment_1": [],
                "segment_2": [],
                "segment_3": [],
            },
        ),
    ),
)
def test_gale_shapley_transform_update_ranking_list(
    ranked_regressors: Dict[str, List[str]], regressors_to_drop: List[str], expected: Dict[str, List[str]]
):
    """Dropped regressors should be removed from every segment's ranking while
    preserving the relative order of the remaining regressors."""
    result = GaleShapleyFeatureSelectionTransform._update_ranking_list(
        segment_regressors_ranking=ranked_regressors, regressors_to_drop=regressors_to_drop
    )
    for key in result:
        assert result[key] == expected[key]
def test_base_update_segment(base_gale_shapley_player: BaseGaleShapley):
    """update_tmp_match should store both the match name and its rank."""
    player = base_gale_shapley_player
    player.update_tmp_match("segment_2")
    # segment_2 sits at index 2 of the fixture's ranked_candidates
    assert (player.tmp_match, player.tmp_match_rank) == ("segment_2", 2)
def test_regressor_check_segment(regressor: RegressorGaleShapley):
    """An unmatched regressor accepts any segment; after a tentative match it
    only accepts segments ranked above the current match."""
    # before any match, every segment is acceptable
    assert regressor.check_segment("segment_4")
    regressor.update_tmp_match("segment_2")
    # segment_1 outranks segment_2 in the fixture's ranking, segment_4 does not
    assert regressor.check_segment("segment_1")
    assert not regressor.check_segment("segment_4")
def test_segment_get_next_candidate(segment: SegmentGaleShapley):
    """A segment proposes to its regressors in ranked order."""
    first = segment.get_next_candidate()
    assert first == "regressor_1"
    segment.update_tmp_match(first)
    # once matched to the top choice, the next candidate is the runner-up
    assert segment.get_next_candidate() == "regressor_2"
def test_gale_shapley_matcher_match(matcher: GaleShapleyMatcher):
    """match() should link segment and regressor both ways, record each side's
    rank of the other, and mark both players unavailable."""
    segment = matcher.segments[0]
    regressor = matcher.regressors[0]
    # both players start free
    assert segment.tmp_match is None
    assert segment.is_available
    assert regressor.tmp_match is None
    assert regressor.is_available
    matcher.match(segment=segment, regressor=regressor)
    # segment_1 ranks regressor_1 first in the fixture -> rank 0
    assert segment.tmp_match == regressor.name
    assert segment.tmp_match_rank == 0
    assert not segment.is_available
    # regressor_1 ranks segment_1 second in the fixture -> rank 1
    assert regressor.tmp_match == segment.name
    assert regressor.tmp_match_rank == 1
    assert not regressor.is_available
def test_gale_shapley_matcher_break_match(matcher: GaleShapleyMatcher):
    """break_match() should fully undo a match, returning both players to the
    initial free state."""
    segment = matcher.segments[0]
    regressor = matcher.regressors[0]
    assert segment.tmp_match is None
    assert segment.is_available
    assert regressor.tmp_match is None
    assert regressor.is_available
    matcher.match(segment=segment, regressor=regressor)
    matcher.break_match(segment=segment, regressor=regressor)
    # after breaking, the state is indistinguishable from before the match
    assert segment.tmp_match is None
    assert segment.is_available
    assert regressor.tmp_match is None
    assert regressor.is_available
@pytest.mark.parametrize(
    "segments,regressors,expected",
    (
        (
            [
                SegmentGaleShapley(name=name, ranked_candidates=list(candidates))
                for name, candidates in (
                    ("segment_1", ("regressor_1", "regressor_2", "regressor_3", "regressor_4")),
                    ("segment_2", ("regressor_1", "regressor_3", "regressor_2", "regressor_4")),
                    ("segment_3", ("regressor_2", "regressor_4", "regressor_1", "regressor_3")),
                    ("segment_4", ("regressor_3", "regressor_1", "regressor_4", "regressor_2")),
                )
            ],
            [
                RegressorGaleShapley(name=name, ranked_candidates=list(candidates))
                for name, candidates in (
                    ("regressor_1", ("segment_2", "segment_1", "segment_3", "segment_4")),
                    ("regressor_2", ("segment_1", "segment_2", "segment_3", "segment_4")),
                    ("regressor_3", ("segment_3", "segment_2", "segment_4", "segment_1")),
                    ("regressor_4", ("segment_3", "segment_1", "segment_4", "segment_2")),
                )
            ],
            {
                "segment_1": "regressor_2",
                "segment_2": "regressor_1",
                "segment_3": "regressor_4",
                "segment_4": "regressor_3",
            },
        ),
        # all segments share the same preference order
        (
            [
                SegmentGaleShapley(
                    name=f"segment_{i}",
                    ranked_candidates=["regressor_1", "regressor_2", "regressor_3", "regressor_4"],
                )
                for i in range(1, 5)
            ],
            [
                RegressorGaleShapley(name=name, ranked_candidates=list(candidates))
                for name, candidates in (
                    ("regressor_1", ("segment_2", "segment_1", "segment_3", "segment_4")),
                    ("regressor_2", ("segment_1", "segment_2", "segment_3", "segment_4")),
                    ("regressor_3", ("segment_3", "segment_2", "segment_4", "segment_1")),
                    ("regressor_4", ("segment_3", "segment_1", "segment_4", "segment_2")),
                )
            ],
            {
                "segment_1": "regressor_2",
                "segment_2": "regressor_1",
                "segment_3": "regressor_3",
                "segment_4": "regressor_4",
            },
        ),
        # more regressors than segments
        (
            [
                SegmentGaleShapley(name=name, ranked_candidates=list(candidates))
                for name, candidates in (
                    ("segment_1", ("regressor_1", "regressor_5", "regressor_2", "regressor_4", "regressor_3")),
                    ("segment_2", ("regressor_5", "regressor_2", "regressor_3", "regressor_4", "regressor_1")),
                    ("segment_3", ("regressor_1", "regressor_2", "regressor_3", "regressor_4", "regressor_5")),
                )
            ],
            [
                RegressorGaleShapley(name=name, ranked_candidates=list(candidates))
                for name, candidates in (
                    ("regressor_1", ("segment_3", "segment_1", "segment_2")),
                    ("regressor_2", ("segment_3", "segment_2", "segment_1")),
                    ("regressor_3", ("segment_3", "segment_1", "segment_2")),
                    ("regressor_4", ("segment_1", "segment_2", "segment_3")),
                    ("regressor_5", ("segment_1", "segment_3", "segment_2")),
                )
            ],
            {
                "segment_1": "regressor_5",
                "segment_2": "regressor_2",
                "segment_3": "regressor_1",
            },
        ),
    ),
)
def test_gale_shapley_result(
    segments: List[SegmentGaleShapley],
    regressors: List[RegressorGaleShapley],
    expected: Dict[str, str],
):
    """Running the matcher end-to-end should reproduce the expected matching."""
    matcher = GaleShapleyMatcher(segments=segments, regressors=regressors)
    matches = matcher()
    for segment_name, regressor_name in expected.items():
        assert segment_name in matches
        assert matches[segment_name] == regressor_name
@pytest.mark.parametrize(
    "segment_regressor_ranking,regressor_segments_ranking,expected",
    (
        (
            {
                "segment_1": ["regressor_1", "regressor_2", "regressor_3", "regressor_4"],
                "segment_2": ["regressor_1", "regressor_3", "regressor_2", "regressor_4"],
                "segment_3": ["regressor_2", "regressor_4", "regressor_1", "regressor_3"],
                "segment_4": ["regressor_3", "regressor_1", "regressor_4", "regressor_2"],
            },
            {
                "regressor_1": ["segment_2", "segment_1", "segment_3", "segment_4"],
                "regressor_2": ["segment_1", "segment_2", "segment_3", "segment_4"],
                "regressor_3": ["segment_3", "segment_2", "segment_4", "segment_1"],
                "regressor_4": ["segment_3", "segment_1", "segment_4", "segment_2"],
            },
            {
                "segment_1": "regressor_2",
                "segment_2": "regressor_1",
                "segment_3": "regressor_4",
                "segment_4": "regressor_3",
            },
        ),
        (
            {
                "segment_1": ["regressor_1", "regressor_2", "regressor_3", "regressor_4"],
                "segment_2": ["regressor_1", "regressor_2", "regressor_3", "regressor_4"],
                "segment_3": ["regressor_1", "regressor_2", "regressor_3", "regressor_4"],
                "segment_4": ["regressor_1", "regressor_2", "regressor_3", "regressor_4"],
            },
            {
                "regressor_1": ["segment_2", "segment_1", "segment_3", "segment_4"],
                "regressor_2": ["segment_1", "segment_2", "segment_3", "segment_4"],
                "regressor_3": ["segment_3", "segment_2", "segment_4", "segment_1"],
                "regressor_4": ["segment_3", "segment_1", "segment_4", "segment_2"],
            },
            {
                "segment_1": "regressor_2",
                "segment_2": "regressor_1",
                "segment_3": "regressor_3",
                "segment_4": "regressor_4",
            },
        ),
        (
            {
                "segment_1": ["regressor_1", "regressor_5", "regressor_2", "regressor_4", "regressor_3"],
                "segment_2": ["regressor_5", "regressor_2", "regressor_3", "regressor_4", "regressor_1"],
                "segment_3": ["regressor_1", "regressor_2", "regressor_3", "regressor_4", "regressor_5"],
            },
            {
                "regressor_1": ["segment_3", "segment_1", "segment_2"],
                "regressor_2": ["segment_3", "segment_2", "segment_1"],
                "regressor_3": ["segment_3", "segment_1", "segment_2"],
                "regressor_4": ["segment_1", "segment_2", "segment_3"],
                "regressor_5": ["segment_1", "segment_3", "segment_2"],
            },
            {
                "segment_1": "regressor_5",
                "segment_2": "regressor_2",
                "segment_3": "regressor_1",
            },
        ),
    ),
)
def test_gale_shapley_transform_gale_shapley_iteration(
    segment_regressor_ranking: Dict[str, List[str]],
    regressor_segments_ranking: Dict[str, List[str]],
    expected: Dict[str, str],
):
    """One Gale-Shapley iteration over the ranking dicts should produce the
    expected segment -> regressor matching.

    The original test computed the matching but never checked it (both the
    return value and the ``expected`` parameter were unused), so it could never
    fail; assert against ``expected`` the same way test_gale_shapley_result does.
    """
    matches = GaleShapleyFeatureSelectionTransform._gale_shapley_iteration(
        segment_regressors_ranking=segment_regressor_ranking, regressor_segments_ranking=regressor_segments_ranking
    )
    for segment_name, regressor_name in expected.items():
        assert segment_name in matches
        assert matches[segment_name] == regressor_name
@pytest.mark.parametrize(
    "matches,n,greater_is_better,expected",
    (
        (
            {
                "segment_1": "regressor_4",
                "segment_2": "regressor_7",
                "segment_3": "regressor_5",
            },
            2,
            False,
            ["regressor_5", "regressor_7"],
        ),
        (
            {
                "segment_1": "regressor_4",
                "segment_2": "regressor_7",
                "segment_3": "regressor_5",
            },
            1,
            True,
            ["regressor_4"],
        ),
        (
            {
                "segment_1": "regressor_3",
                "segment_2": "regressor_2",
                "segment_3": "regressor_1",
            },
            2,
            False,
            ["regressor_1", "regressor_2"],
        ),
        # n equal to the number of matches keeps them all
        (
            {
                "segment_1": "regressor_3",
                "segment_2": "regressor_2",
                "segment_3": "regressor_1",
            },
            3,
            False,
            ["regressor_1", "regressor_2", "regressor_3"],
        ),
    ),
)
def test_gale_shapley_transform_process_last_step(
    matches: Dict[str, str], n: int, greater_is_better: bool, expected: List[str], relevance_matrix_big: pd.DataFrame
):
    """Given a final matching, _process_last_step should keep the n matched
    regressors that are best according to the relevance table, honouring the
    greater_is_better direction."""
    result = GaleShapleyFeatureSelectionTransform._process_last_step(
        matches=matches, relevance_table=relevance_matrix_big, n=n, greater_is_better=greater_is_better
    )
    # order of the selection is not part of the contract
    assert sorted(result) == sorted(expected)
@pytest.mark.parametrize("use_rank", (True, False))
@pytest.mark.parametrize("top_k", (2, 3, 5, 6, 7))
def test_gale_shapley_transform_fit(ts_with_large_regressors_number: TSDataset, top_k: int, use_rank: bool):
    """Smoke test: fit should run without errors across top_k/use_rank combos."""
    df = ts_with_large_regressors_number.df
    transform = GaleShapleyFeatureSelectionTransform(
        relevance_table=StatisticsRelevanceTable(), top_k=top_k, use_rank=use_rank
    )
    transform.fit(df=df)
def test_gale_shapley_transform_fit_transform(ts_with_large_regressors_number: TSDataset):
    """fit_transform with top_k=5 should keep the target plus exactly the five
    selected regressors in the feature level of the columns."""
    df = ts_with_large_regressors_number.df
    transform = GaleShapleyFeatureSelectionTransform(
        relevance_table=StatisticsRelevanceTable(), top_k=5, use_rank=False
    )
    transformed = transform.fit_transform(df=df)
    assert set(transformed.columns.get_level_values("feature")) == {
        "target",
        "regressor_1",
        "regressor_2",
        "regressor_3",
        "regressor_4",
        "regressor_5",
    }
|
{"hexsha": "08fdb04013d927c6798a466f66db41ff0d0fa70b", "size": 21905, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_transforms/test_gale_shapley.py", "max_stars_repo_name": "Carlosbogo/etna", "max_stars_repo_head_hexsha": "b6210f0e79ee92aa9ae8ff4fcfb267be9fb7cc94", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_transforms/test_gale_shapley.py", "max_issues_repo_name": "Carlosbogo/etna", "max_issues_repo_head_hexsha": "b6210f0e79ee92aa9ae8ff4fcfb267be9fb7cc94", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_transforms/test_gale_shapley.py", "max_forks_repo_name": "Carlosbogo/etna", "max_forks_repo_head_hexsha": "b6210f0e79ee92aa9ae8ff4fcfb267be9fb7cc94", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.3805460751, "max_line_length": 118, "alphanum_fraction": 0.5662634102, "include": true, "reason": "import numpy", "num_tokens": 6032}
|
#encoding=utf-8
import os
import json
import argparse
import numpy as np
import torch
import torch.nn as nn
from typing import NamedTuple
from transformers import AdamW, get_linear_schedule_with_warmup
from dataset import MyCLVCGTokenizer
from models import Config as ModelConfig
from models import MyCLVCG_POINTER
from dataset import Dataset
from train import train, inference, print_results
# command-line interface: paths, file names and run-mode flags
parser = argparse.ArgumentParser(description='pretrain.py')
# folder / config paths
parser.add_argument('-input_path', type=str, default='/input/folder/path', help="input folder path")
parser.add_argument('-workspace_path', type=str, default='/output/and/config/folders/path', help="output and config folders path")
parser.add_argument('-cfg_path', type=str, default='/config/file/path', help="config file path")
# config file names (resolved against cfg_path)
parser.add_argument('-model_cfg_file', type=str, default='model.json', help="model config file")
parser.add_argument('-pretrain_cfg_file', type=str, default='pretrain.json', help="pretrain config file")
# data files (resolved against input_path)
parser.add_argument('-img_file', type=str, default=os.path.join('LB','image','res18.pkl'), help="image file")
parser.add_argument('-corpus_file', type=str, default=os.path.join('LB','corpus','LB_train_context.json'), help="train corpus json file")
parser.add_argument('-eval_corpus_file', type=str, default=os.path.join('LB','corpus','LB_dev_context.json'), help="evaluation corpus json file")
parser.add_argument('-vocab_file', type=str, default='vocabulary.json', help="vocabulary json file")
parser.add_argument('-preprocess_dir', type=str, default=os.path.join('LB','preprocessed_data'), help="path of preprocessed files")
# checkpoints (resolved against workspace_path/save_dir)
parser.add_argument('-model_file', type=str, default=None, help="Restoring model file for train")
parser.add_argument('-eval_model_file', type=str, default='best-model.pt', help="Restoring model file for eval")
parser.add_argument('-save_dir', type=str, default='ckpt', help="checkpoint folder")
# run-mode flags
parser.add_argument('-eval', default=False, action='store_true', help="evaluate mod")
parser.add_argument('-parallel', default=False, action='store_true', help="DataParallel")
class PretrainConfig(NamedTuple):
    """Hyperparameters for pretraining."""
    seed: int = 3431  # random seed
    batch_size: int = 4
    predict_batch_size: int = 4
    lr: float = 1e-4  # learning rate (value is a float; was annotated int)
    n_epochs: int = 12  # the number of epochs
    save_steps: int = 10000  # interval (in steps) for saving the model
    print_steps: int = 100  # interval (in steps) for printing timing info
    eval_steps: int = 10000  # interval (in steps) for evaluation
    mask_prob: float = 0.15  # masking probability (value is a float; was annotated int)
    next_sentence_prob: float = 0.5
    p_geom: float = 0.5
    adam_epsilon: float = 1e-6
    gradient_accumulation_steps: int = 10
    max_grad_norm: float = 1.0
    weight_decay: float = 1e-3

    @classmethod
    def load_from_json(cls, file):
        """Build a config from a JSON file; keys present in the file override
        the defaults above."""
        # context manager closes the handle (original leaked an open file)
        with open(file, "r") as f:
            return cls(**json.load(f))
def pretrain():
    """Build configs, data and model from the CLI arguments, then either train
    the MyCLVCG_POINTER model or evaluate a saved checkpoint (-eval flag)."""
    opt = parser.parse_args()
    print(torch.cuda.is_available())
    # load the run and model configurations from json
    pretrain_cfg = PretrainConfig.load_from_json(os.path.join(opt.cfg_path, opt.pretrain_cfg_file))
    model_cfg = ModelConfig.load_from_json(os.path.join(opt.cfg_path, opt.model_cfg_file))
    # resolve all data paths relative to the input folder
    img_file = os.path.join(opt.input_path, opt.img_file)
    corpus_file = os.path.join(opt.input_path, opt.corpus_file)
    eval_corpus_file = os.path.join(opt.input_path, opt.eval_corpus_file)
    vocab_file = os.path.join(opt.input_path, opt.vocab_file)
    preprocess_dir = os.path.join(opt.input_path, opt.preprocess_dir)
    if not os.path.exists(preprocess_dir):
        os.mkdir(preprocess_dir)
    save_dir = os.path.join(opt.workspace_path, opt.save_dir)
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    if opt.model_file is not None:
        model_file = os.path.join(save_dir, opt.model_file)
    else:
        model_file = None
    tokenizer = MyCLVCGTokenizer(vocab_file)
    # dev data is always loaded; its images are reused by the train dataset
    dev_data = Dataset(eval_corpus_file, img_file, preprocess_dir, model_cfg, pretrain_cfg, imgs=None, is_training=False, type ='pretrain')
    dev_data.load_dataset_LB(tokenizer, model_type = 'POINTER')
    dev_data.load_dataloader()
    if opt.eval is False:
        train_data = Dataset(corpus_file, img_file, preprocess_dir, model_cfg, pretrain_cfg, imgs=dev_data.imgs, is_training=True, type ='pretrain')
        train_data.load_dataset_LB(tokenizer, model_type = 'POINTER')
        train_data.load_dataloader()
    device = torch.device("cuda", 0)
    print("model..")
    model = MyCLVCG_POINTER(model_cfg, type="pretrain")
    if opt.eval is False:
        # Train, optionally restoring weights from -model_file
        if model_file is not None:
            model.load_state_dict(torch.load(model_file))
        if opt.parallel:
            model = nn.DataParallel(model,device_ids=[0,1])
        print("Data num:",len(train_data))
        print("Total steps:",int(len(train_data)*pretrain_cfg.n_epochs/pretrain_cfg.batch_size))
        optimizer = AdamW(filter(lambda p: p.requires_grad,model.parameters()), lr=pretrain_cfg.lr, eps=pretrain_cfg.adam_epsilon, weight_decay = pretrain_cfg.weight_decay)
        # warm up for ~1 epoch of steps; schedule over 1.1x the nominal total
        scheduler = get_linear_schedule_with_warmup(optimizer,
                                                    num_warmup_steps= int(len(train_data)/pretrain_cfg.batch_size),
                                                    num_training_steps= int(1.1*len(train_data)*pretrain_cfg.n_epochs/pretrain_cfg.batch_size))
        print("training...")
        train(pretrain_cfg, save_dir, model, train_data, dev_data, optimizer, scheduler, device, opt.parallel, model_type = 'POINTER', type = 'pretrain')
    else:
        # Evaluate a saved checkpoint on the dev data
        checkpoint = os.path.join(save_dir, opt.eval_model_file)
        model.load_state_dict(torch.load(checkpoint))
        if opt.parallel:
            model = nn.DataParallel(model,device_ids=[0,1])
        if torch.cuda.is_available():
            model.to(device)
        model.eval()
        with(torch.no_grad()):
            # NOTE(review): the bare builtin `type` is passed as an argument
            # here -- presumably 'pretrain' was intended; confirm against
            # inference()'s signature
            total_loss, total_cls_loss, total_ns_acc, predictions, ns_predictions, input_ids, masked_lm_labels = inference(pretrain_cfg, model, dev_data, device, opt.parallel, type, model_type = 'POINTER')
            print_results(save_dir, 0, total_loss, total_cls_loss, total_ns_acc, predictions, ns_predictions, input_ids, masked_lm_labels)
if __name__ == '__main__':
    # entry point: parse CLI arguments and run pretraining / evaluation
    pretrain()
|
{"hexsha": "26c15e0db5d82a9e23c14a9241b1561db082d9ab", "size": 6440, "ext": "py", "lang": "Python", "max_stars_repo_path": "pretrain.py", "max_stars_repo_name": "zengzh72/CMVCG", "max_stars_repo_head_hexsha": "d8e308882c8d6af9c402d6c917afea6440d7a2d3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pretrain.py", "max_issues_repo_name": "zengzh72/CMVCG", "max_issues_repo_head_hexsha": "d8e308882c8d6af9c402d6c917afea6440d7a2d3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pretrain.py", "max_forks_repo_name": "zengzh72/CMVCG", "max_forks_repo_head_hexsha": "d8e308882c8d6af9c402d6c917afea6440d7a2d3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.6737588652, "max_line_length": 206, "alphanum_fraction": 0.6951863354, "include": true, "reason": "import numpy", "num_tokens": 1498}
|
# BLS_QCEW.py (flowsa)
# !/usr/bin/env python3
# coding=utf-8
'''
Pulls Quarterly Census of Employment and Wages data in NAICS from Bureau of Labor Statistics
Writes out to various FlowBySector class files for these data items
EMP = Number of employees, Class = Employment
PAYANN = Annual payroll ($1,000), Class = Money
ESTAB = Number of establishments, Class = Other
This script is designed to run with a configuration parameter
--year = 'year' e.g. 2015
'''
import pandas as pd
import numpy as np
import io
import zipfile
from flowsa.common import log, get_all_state_FIPS_2, US_FIPS
from flowsa.flowbyfunctions import assign_fips_location_system
def BLS_QCEW_URL_helper(build_url, config, args):
    """
    Build the list of download URLs for the requested data year.
    :param build_url: URL template containing the '__year__' placeholder
    :param config: source configuration (unused here)
    :param args: dictionary of arguments; 'year' is substituted into the URL
    :return: list containing the single resolved URL
    """
    # Substitute the requested year into the template; QCEW needs one URL only.
    return [build_url.replace('__year__', str(args['year']))]
def bls_qcew_call(url, qcew_response, args):
    """
    Extract the QCEW "singlefile" CSV member(s) from the downloaded zip
    archive and return them as one dataframe.
    :param url: source URL (unused; data comes from qcew_response)
    :param qcew_response: response object whose .content is the zip payload
    :param args: dictionary of arguments (unused)
    :return: dataframe restricted to the columns used downstream
    """
    keep_cols = ['area_fips', 'own_code', 'industry_code', 'year',
                 'annual_avg_estabs', 'annual_avg_emplvl', 'total_annual_wages']
    frames = []
    # The archive holds ~4000 csv files; only members whose name contains
    # "singlefile" carry the combined data we want.
    with zipfile.ZipFile(io.BytesIO(qcew_response.content), "r") as archive:
        for member in archive.namelist():
            if "singlefile" not in member:
                continue
            with archive.open(member) as handle:
                # read everything as strings; types are fixed in the parser
                frames.append(pd.read_csv(handle, header=0, dtype=str))
    combined = pd.concat(frames, sort=False)
    return combined[keep_cols]
def bls_qcew_parse(dataframe_list, args):
    """
    Combine, clean, and reshape the raw QCEW dataframes into the
    flow-by-activity format.
    :param dataframe_list: list of dataframes returned by bls_qcew_call
    :param args: dictionary of arguments; 'year' drives the location
        system assignment
    :return: flow-by-activity dataframe
    """
    # Combine the per-call dataframes
    data = pd.concat(dataframe_list, sort=False)
    # Drop rows whose FIPS contains C / USCMS / USMSA / USNMS (not needed)
    data = data[~data['area_fips'].str.contains('C|USCMS|USMSA|USNMS')].reset_index(drop=True)
    # Map the national total code to the standard US FIPS
    data.loc[data['area_fips'] == 'US000', 'area_fips'] = US_FIPS
    # Cast measure columns to float; identifier columns stay as strings
    measure_cols = [c for c in data.columns
                    if c not in ['area_fips', 'industry_code', 'year']]
    data = data.astype({c: 'float' for c in measure_cols})
    # Keep ownership codes 1, 2, 3, 5 only
    data = data[data.own_code.isin([1, 2, 3, 5])]
    # Sum establishments, employment and wages across ownership codes
    data = data.groupby(['area_fips', 'industry_code', 'year'])[
        ['annual_avg_estabs',
         'annual_avg_emplvl',
         'total_annual_wages']].sum().reset_index()
    # Rename to flow-by-activity field names
    data = data.rename(columns={'area_fips': 'Location',
                                'industry_code': 'ActivityProducedBy',
                                'year': 'Year',
                                'annual_avg_emplvl': 'Number of employees',
                                'annual_avg_estabs': 'Number of establishments',
                                'total_annual_wages': 'Annual payroll'})
    # Zero-pad FIPS codes to five digits
    data['Location'] = data['Location'].apply('{:0>5}'.format)
    # Melt the measure columns into (FlowName, FlowAmount) rows
    data = data.melt(id_vars=["Location", "ActivityProducedBy", "Year"],
                     var_name="FlowName",
                     value_name="FlowAmount")
    # Unit depends on the flow: dollars for payroll, persons ("p") otherwise
    data['Unit'] = np.where(data["FlowName"] == 'Annual payroll', "USD", "p")
    # Assign the class per flow name
    data.loc[data['FlowName'] == 'Number of employees', 'Class'] = 'Employment'
    data.loc[data['FlowName'] == 'Number of establishments', 'Class'] = 'Other'
    data.loc[data['FlowName'] == 'Annual payroll', 'Class'] = 'Money'
    # Location system is determined by the data year
    data = assign_fips_location_system(data, args['year'])
    # Hard-coded metadata and temporary data-quality scores
    data['SourceName'] = 'BLS_QCEW'
    data['DataReliability'] = 5
    data['DataCollection'] = 5
    data['Compartment'] = None
    data['FlowType'] = "ELEMENTARY_FLOW"
    return data
def clean_bls_qcew_fba_for_employment_sat_table(fba_df, **kwargs):
    """
    When creating the employment satellite table for use in useeior,
    modify the flow name to match prior methodology for mapping/impact
    factors.
    :param fba_df: flow-by-activity dataframe
    :param kwargs: forwarded unchanged to clean_bls_qcew_fba
    :return: cleaned dataframe with the 'Jobs' flow name
    """
    cleaned = clean_bls_qcew_fba(fba_df, **kwargs)
    # prior methodology named this flow 'Jobs' rather than 'Number of employees'
    cleaned['FlowName'] = cleaned['FlowName'].replace({'Number of employees': 'Jobs'})
    return cleaned
def clean_bls_qcew_fba(fba_df, **kwargs):
    """
    Clean BLS QCEW data when the FBA is not used for the employment
    satellite table.
    :param fba_df: flow-by-activity dataframe
    :param kwargs: unused; kept for the standard cleaning-function signature
    :return: cleaned dataframe
    """
    # fill in 2 digit sectors reported as 0, then drop NAICS range rows
    cleaned = replace_missing_2_digit_sector_values(fba_df)
    return remove_2_digit_sector_ranges(cleaned)
def replace_missing_2_digit_sector_values(df):
    """
    In the 2015 (and possibly other) dfs, there are instances of values at
    the 3 digit NAICS level while the 2 digit NAICS is reported as 0.
    The 0 values are replaced with summed 3 digit NAICS.
    :param df: flow-by-activity dataframe with 'Location',
        'ActivityProducedBy' and 'FlowAmount' columns
    :return: dataframe with zero-valued 2 digit sectors rebuilt from
        their 3 digit children
    """
    from flowsa.flowbyfunctions import aggregator, fba_default_grouping_fields
    # 2 digit NAICS rows reported as 0
    df_missing = df[(df['ActivityProducedBy'].apply(lambda x: len(x) == 2)) & (df['FlowAmount'] == 0)]
    # location / activity combinations whose totals must be rebuilt
    missing_sectors = df_missing[['Location', 'ActivityProducedBy']].drop_duplicates().values.tolist()
    # non-zero 3 digit NAICS rows that can be summed into the 2 digit totals
    df_subset = df[df['ActivityProducedBy'].apply(lambda x: len(x) == 3) & (df['FlowAmount'] != 0)]
    new_sectors_list = []
    for q, r in missing_sectors:
        c1 = df_subset['Location'] == q
        # 3 digit codes whose leading two digits match the missing sector
        c2 = df_subset['ActivityProducedBy'].apply(lambda x: x[0:2] == r)
        new_sectors_list.append(df_subset[c1 & c2])
    if len(new_sectors_list) != 0:
        new_sectors = pd.concat(new_sectors_list, sort=False, ignore_index=True)
        # drop last digit of naics and aggregate to the 2 digit level
        new_sectors.loc[:, 'ActivityProducedBy'] = new_sectors['ActivityProducedBy'].apply(lambda x: x[0:2])
        new_sectors = aggregator(new_sectors, fba_default_grouping_fields)
        # drop the old location/activity rows in the bls df, then add new rows
        new_sectors_list = new_sectors[['Location', 'ActivityProducedBy']].drop_duplicates().values.tolist()
        rows_list = []
        for q, r in new_sectors_list:
            c1 = df['Location'] == q
            c2 = df['ActivityProducedBy'] == r
            rows_list.append(df[(c1 & c2)])
        rows_to_drop = pd.concat(rows_list, ignore_index=True)
        # anti-join: keep only rows of df that are not in rows_to_drop
        modified_df = pd.merge(df, rows_to_drop, indicator=True, how='outer').query('_merge=="left_only"').drop('_merge', axis=1)
        # append the freshly aggregated 2 digit rows
        # (DataFrame.append was removed in pandas 2.0; pd.concat is equivalent)
        modified_df = pd.concat([modified_df, new_sectors], sort=False)
    else:
        # nothing to rebuild; return an unmodified copy
        modified_df = df.copy()
    return modified_df
def remove_2_digit_sector_ranges(fba_df):
    """
    BLS publishes activity ranges such as '31-33', '44-45', '48-49';
    drop these range rows. The individual 2 digit naics are summed later.
    :param fba_df: flow-by-activity dataframe
    :return: dataframe without NAICS range rows
    """
    # a hyphen in the activity code marks a published range row
    is_range = fba_df['ActivityProducedBy'].str.contains('-')
    return fba_df[~is_range]
def bls_clean_allocation_fba_w_sec(df_w_sec, **kwargs):
    """
    Clean up the bls df with sectors by estimating suppressed data.
    :param df_w_sec: flow-by-activity dataframe with sector columns
    :param kwargs: unused; kept for the standard cleaning-function signature
    :return: cleaned dataframe
    """
    from flowsa.flowbyfunctions import estimate_suppressed_data, sector_disaggregation, sector_aggregation, \
        flow_by_activity_wsec_mapped_fields, add_missing_flow_by_fields, replace_strings_with_NoneType
    # estimate suppressed values on the produced-by sector column
    cleaned = estimate_suppressed_data(df_w_sec, 'SectorProducedBy')
    cleaned = add_missing_flow_by_fields(cleaned, flow_by_activity_wsec_mapped_fields)
    cleaned = replace_strings_with_NoneType(cleaned)
    # sector (dis)aggregation steps intentionally disabled:
    # cleaned = sector_aggregation(cleaned, fba_mapped_default_grouping_fields)
    # cleaned = sector_disaggregation(cleaned, fba_mapped_default_grouping_fields)
    return cleaned
|
{"hexsha": "c2b9d741113511fc4629b53ac1bbc49fd7e88271", "size": 8118, "ext": "py", "lang": "Python", "max_stars_repo_path": "flowsa/BLS_QCEW.py", "max_stars_repo_name": "ealonso-mfa/flowsa", "max_stars_repo_head_hexsha": "d3ccc98a39bacdf23c4bbf090041626286fd7116", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "flowsa/BLS_QCEW.py", "max_issues_repo_name": "ealonso-mfa/flowsa", "max_issues_repo_head_hexsha": "d3ccc98a39bacdf23c4bbf090041626286fd7116", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "flowsa/BLS_QCEW.py", "max_forks_repo_name": "ealonso-mfa/flowsa", "max_forks_repo_head_hexsha": "d3ccc98a39bacdf23c4bbf090041626286fd7116", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.9, "max_line_length": 129, "alphanum_fraction": 0.6494210397, "include": true, "reason": "import numpy", "num_tokens": 2128}
|
[STATEMENT]
lemma delete_edges_correct:
"FiniteGraph.delete_edges (list_graph_to_graph G) (set E) = list_graph_to_graph (delete_edges G E)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. FiniteGraph.delete_edges (list_graph_to_graph G) (set E) = list_graph_to_graph (FiniteListGraph.delete_edges G E)
[PROOF STEP]
unfolding list_graph_to_graph_def FiniteGraph.delete_edges_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lparr>nodes = nodes \<lparr>nodes = set (nodesL G), edges = set (edgesL G)\<rparr>, edges = {(e1, e2). (e1, e2) \<in> edges \<lparr>nodes = set (nodesL G), edges = set (edgesL G)\<rparr> \<and> (e1, e2) \<notin> set E}\<rparr> = \<lparr>nodes = set (nodesL (FiniteListGraph.delete_edges G E)), edges = set (edgesL (FiniteListGraph.delete_edges G E))\<rparr>
[PROOF STEP]
by (auto simp add: delete_edges_as_filter )
|
{"llama_tokens": 354, "file": "Network_Security_Policy_Verification_Lib_FiniteListGraph", "length": 2}
|
/*****************************************************************************
*
* File: sssp_gold.cpp
* Author: Alex Stivala
* Created: February 2011
*
* $Id: sssp_gold.cpp 668 2011-09-08 04:40:08Z astivala $
*
* Get single-source shortest path on CPU for verification.
* Uses Dijkstra's algorithm from the Boost Graph Library.
* (Based on the example/dijkstra-example.cpp code from Boost Graph Library
* manual
* http://www.boost.org/doc/libs/1_38_0/libs/graph/doc/dijkstra_shortest_paths.html)
*
****************************************************************************/
#include <assert.h>
#include <cutil_inline.h> /* CUDA SDK */
#include <boost/config.hpp>
#include <iostream>
#include <fstream>
#include <boost/graph/graph_traits.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/dijkstra_shortest_paths.hpp>
#include "sssp_gold.h"
using namespace boost;
/*
 * sssp_gold() - use Dijkstra's algorithm on CPU (using Boost Graph Library)
* to solve single-source shortest path problem for checking.
*
* Parameters:
 *   adjlist - adjacency list (array of (node1,node2,cost) structs)
* num_nodes - number of nodes
* num_arcs - number of edges (entries in adjlist)
* source - source node
* distances (OUT) - array of shortest costs from source to each node
* predecessors (OUT) - predecessor node on minimum spanning tree for
* each node.
* Return value:
* time spent running dijkstra_shortest_paths() in milliseconds
*/
double sssp_gold(adjlist_entry_t adjlist[], long num_nodes, long num_arcs,
                 long source,
                 double distances[], long predecessors[])
{
  /* Boost typedefs: directed graph with double-valued edge weights */
  typedef adjacency_list < listS, vecS, directedS,
    no_property, property < edge_weight_t, double > > graph_t;
  typedef graph_traits < graph_t >::vertex_descriptor vertex_descriptor;
  typedef std::pair<long, long> Edge;
  unsigned int hTimer;

  /* Convert the flat adjacency list into Boost edge / weight arrays */
  Edge *edges = new Edge[num_arcs];
  double *costs = new double[num_arcs];
  for (long k = 0; k < num_arcs; k++)
  {
    edges[k] = Edge(adjlist[k].from, adjlist[k].to);
    costs[k] = adjlist[k].cost;
  }
  graph_t g(edges, edges + num_arcs, costs, num_nodes);
  std::vector<vertex_descriptor> pred(num_vertices(g));
  std::vector<double> dist(num_vertices(g));
  vertex_descriptor s = vertex(source, g);

  /* Time only the shortest-path computation itself */
  cutilCheckError( cutCreateTimer(&hTimer) );
  cutilCheckError( cutResetTimer(hTimer) );
  cutilCheckError( cutStartTimer(hTimer) );
  dijkstra_shortest_paths(g, s, predecessor_map(&pred[0]).distance_map(&dist[0]));
  cutilCheckError( cutStopTimer(hTimer) );
  double runtime = cutGetTimerValue(hTimer);

  /* Copy results into the caller-supplied output arrays */
  graph_traits < graph_t >::vertex_iterator vi, vend;
  long n = 0;
  for (tie(vi, vend) = vertices(g); vi != vend; ++vi)
  {
    assert(n < num_nodes);
    distances[n] = dist[*vi];
    predecessors[n] = pred[*vi];
    n++;
  }
  delete[] edges;
  delete[] costs;
  return runtime;  /* milliseconds spent in dijkstra_shortest_paths */
}
|
{"hexsha": "4f373c449a7c27a0ba82c21990b612d27bff9787", "size": 3141, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "trunk/cuda_sssp_double/sssp_gold.cpp", "max_stars_repo_name": "stivalaa/traffic_assignment", "max_stars_repo_head_hexsha": "45378558af73feeaf9e6491612c93dba042cb5ec", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2019-01-10T16:18:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-11T04:52:37.000Z", "max_issues_repo_path": "trunk/cuda_sssp_double/sssp_gold.cpp", "max_issues_repo_name": "stivalaa/traffic_assignment", "max_issues_repo_head_hexsha": "45378558af73feeaf9e6491612c93dba042cb5ec", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "trunk/cuda_sssp_double/sssp_gold.cpp", "max_forks_repo_name": "stivalaa/traffic_assignment", "max_forks_repo_head_hexsha": "45378558af73feeaf9e6491612c93dba042cb5ec", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2022-03-02T10:59:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-02T10:59:51.000Z", "avg_line_length": 31.7272727273, "max_line_length": 85, "alphanum_fraction": 0.6542502388, "num_tokens": 784}
|
"""
Code for training deep CNNs.
Takes the model name of the model to train
and runs for the desired number of epochs.
Measures training and validation accuracy and
stores this data to be graphed later.
Implements adaptive learning rate.
"""
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
import glob
import random
import pandas as pd
import numpy as np
import time
# from torch.utils.tensorboard import SummaryWriter
import json
import logging
from basic_model import BasicNet
from resnet import ResNet, Bottleneck, BasicBlock
from vgg import _vgg
from mnas_net import MNASNet
from densenet import DenseNet
from data_loader import NpyDataLoader, BatchedDataset
from evaluation import validate, print_batch_predictions, evaluate
import argparse
import torch.utils.data as data
# ---- command-line interface --------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("data_dir",
                    help="directory to load data from")
parser.add_argument("model",
                    help="model to train, one of: resnet50, resnet34, resnet18, vgg, mnasnet or densenet")
parser.add_argument("checkpoint_dir",
                    help="directory to store weight checkpoints")
parser.add_argument("--epochs", required=False, type=int,
                    help="number of epochs to run for")
parser.add_argument("--epochs_per_val", "-v", required=False, type=int,
                    help="how many epochs per run on validation set")
parser.add_argument("--weight_decay", "-w", required=False, type=float,
                    help="l2 weight decay value for optimiser")
parser.add_argument("--batched", required=False, action="store_true")
args = parser.parse_args()
data_dir = args.data_dir
chosen_model = args.model
# one log file per model name, overwritten ("w") on every run
logging.basicConfig(filename=f'train-{chosen_model}.log', filemode="w", level=logging.DEBUG, format='%(levelname)s:%(asctime)s %(message)s')
# prefer the GPU when one is available
device = (torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu'))
print(f"Training on device {device}.")
logging.info(f"Training on device {device}.")
# ---- dataset / dataloader construction ---------------------------------
train_loader = None
validate_loader = None
classes = None
if args.batched:
    # batch_size=None: the dataset itself is expected to yield full
    # batches — NOTE(review): confirm against BatchedDataset's contract
    train_ds = BatchedDataset(f"{data_dir}/batched/train/")
    val_ds = BatchedDataset(f"{data_dir}/batched/validate/")
    train_loader = data.DataLoader(train_ds,
                                   batch_size=None,
                                   shuffle=True,
                                   )
    validate_loader = data.DataLoader(
        val_ds,
        batch_size=None,
        shuffle=True,
    )
    classes = ["MR", "PT", "CT", "XR"]
else:
    # NpyDataLoader builds its own train/validate loaders and class list
    loader = NpyDataLoader(data_dir, batch_size=128)
    train_loader = loader.train_loader
    validate_loader = loader.validate_loader
    classes = loader.classes
# ---- model construction -------------------------------------------------
start = time.time()
model = None
weight_decay = 0.0
if args.weight_decay:
    weight_decay = args.weight_decay
# Select the architecture by (case-insensitive) name; the ResNet variants
# are built with 4 output classes.
model_name = chosen_model.lower()
if model_name == "resnet50":
    model = ResNet(Bottleneck, [3, 4, 6, 3], 4)
elif model_name == "resnet34":
    model = ResNet(BasicBlock, [3, 4, 6, 3], 4)
elif model_name == "resnet18":
    model = ResNet(BasicBlock, [2, 2, 2, 2], 4)
elif model_name == "basic":
    model = BasicNet()
elif model_name == "vgg":
    model = _vgg("vgg16", "D", True)
elif model_name == "vgg_dropout":
    model = _vgg("vgg16", "D", True, dropout=True)
elif model_name == "mnasnet":
    model = MNASNet(1.0)
elif model_name == "densenet":
    model = DenseNet()
else:
    print(f"Invalid option for model: {chosen_model}")
    # exit with a non-zero status so shell scripts can detect the failure
    # (previously exited with 0, which signals success)
    exit(1)
if args.weight_decay:
    # tag checkpoint/metric file names with the weight decay value
    chosen_model += f"w-{weight_decay}"
print(model)
model = model.to(device=device)
model = nn.DataParallel(model) # distribute across multiple gpus for faster training
loss_func = nn.CrossEntropyLoss().to(device=device)
optimiser = optim.SGD(model.parameters(), lr=1e-2, momentum=0.9, weight_decay=weight_decay)
lr_scheduler = ReduceLROnPlateau(optimiser, "min", verbose=True, patience=3) # adaptive learning rate
# step the plateau scheduler roughly ten times per epoch
lr_scheduler_rate = len(train_loader) // 10
# per-epoch training history, dumped to JSON after training finishes
metrics = {
    "train_loss": [],
    "train_accuracy" : [],
    "validate_loss": [],
    "validate_accuracy": [],
    "train_epoch_time": []
}
num_batches = len(train_loader)
# defaults, overridable from the command line
num_epochs = 10
if args.epochs:
    num_epochs = args.epochs
epochs_per_validation = 2
if args.epochs_per_val:
    epochs_per_validation = args.epochs_per_val
# ---- main training loop -------------------------------------------------
last = time.time()
for epoch in range(0, num_epochs):
    lr_sched_loss = 0.0
    loss_train = 0.0
    i = 0
    correct = 0
    total = 0
    epoch_start = time.time()
    for x, y, path in train_loader:
        # batch_start = time.time()
        i += 1
        # to_dev_start = time.time()
        optimiser.zero_grad()
        x = x.to(device=device, non_blocking=True)
        y = y.to(device=device, non_blocking=True)
        # to_dev_end = time.time()
        # logging.debug(f"\tTo Device Time {to_dev_end-to_dev_start}")
        # pred_start = time.time()
        outputs = model(x)
        # pred_end = time.time()
        # logging.debug(f"\tPrediction Time {pred_end - pred_start}")
        loss = loss_func(outputs, y)
        # loss_start = time.time()
        loss.backward()
        # loss_end = time.time()
        # logging.debug(f"\tLoss backward pass {loss_end - loss_start}")
        # optim_start = time.time()
        optimiser.step()
        loss_train += loss.item()
        lr_sched_loss += loss.item()
        # feed the running mean batch loss to the plateau scheduler
        if i % lr_scheduler_rate == 0 and i != 0:
            lr_scheduler.step(lr_sched_loss / lr_scheduler_rate)
            logging.info(f"LR Sched Loss: {lr_sched_loss}")
            lr_sched_loss = 0.0
        # training accuracy: count argmax predictions that match labels
        _, predicted = torch.max(outputs, dim=1)
        total += y.shape[0]
        correct += int((predicted == y).sum())
        # optim_end = time.time()
        # logging.debug(f"\tOptim time {optim_end - optim_start}")
        batch_end = time.time()
        # progress report every 10 batches
        if i % 10 == 0:
            current = time.time()
            logging.debug(f"Batch time {(current - last) / 10}")
            print(f"Epoch {epoch}: batch: {i} / {num_batches}")
            logging.debug(f"EPOCH {epoch}: BATCH: {i} / {num_batches}")
            last = current
    epoch_end = time.time()
    print(f"Epoch {epoch}, loss {loss_train / len(train_loader)}")
    logging.info(f"Epoch {epoch}, loss {loss_train / len(train_loader)}")
    logging.info(f"Train accuracy: {(correct/total):.2f}")
    print(f"Train accuracy: {(correct/total):.2f}")
    # if epoch % 5 == 0:
    # periodically validate and checkpoint the model weights
    if epoch % epochs_per_validation == 0:
        val_accuracy, val_loss = validate(model, loss_func, validate_loader, device, epoch)
        metrics["validate_accuracy"].append(val_accuracy)
        metrics["validate_loss"].append(val_loss)
        checkpoint_path = f"{args.checkpoint_dir}/{chosen_model}-{epoch}.pt"
        torch.save({
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimiser.state_dict(),
            "model_name": chosen_model,
        }, checkpoint_path)
    metrics["train_loss"].append(loss_train / len(train_loader))
    metrics["train_accuracy"].append(correct / total)
    metrics["train_epoch_time"].append(epoch_end - epoch_start)
# final evaluation on the validation set, then persist the metrics
print_batch_predictions(model, validate_loader, classes, device)
evaluate(model, validate_loader, classes, device, "validate", model_name=chosen_model)
with open(f"./results/{chosen_model}-training-metrics.json", "w") as f:
    json.dump(metrics, f, indent=4)
|
{"hexsha": "c303106042238abe7c89a4bf65f2aaabfe31e312", "size": 7351, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/train/train.py", "max_stars_repo_name": "cdmacfadyen/classify-modality", "max_stars_repo_head_hexsha": "fe4dadbd3a52a59e51c192a6cb16bacf394f4473", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-07-06T12:12:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T12:12:29.000Z", "max_issues_repo_path": "src/train/train.py", "max_issues_repo_name": "cdmacfadyen/classify-modality", "max_issues_repo_head_hexsha": "fe4dadbd3a52a59e51c192a6cb16bacf394f4473", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/train/train.py", "max_forks_repo_name": "cdmacfadyen/classify-modality", "max_forks_repo_head_hexsha": "fe4dadbd3a52a59e51c192a6cb16bacf394f4473", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.0324074074, "max_line_length": 140, "alphanum_fraction": 0.6754183104, "include": true, "reason": "import numpy", "num_tokens": 1837}
|
import unittest
import numpy as np
from pyorick import * # pyorick exposes only intended APIs
# non-APIs which must be exposed for testing
from pyorick import Message, YorickVar, YorickHold, YorickVarCall
from pyorick import (ID_EOL, ID_EVAL, ID_EXEC, ID_GETVAR, ID_SETVAR,
ID_FUNCALL, ID_SUBCALL, ID_GETSLICE, ID_SETSLICE,
ID_GETSHAPE)
import __main__
# nosetests --with-coverage --cover-package=pyorick
class ExampleClass(object):
    """Minimal user-defined class used as an unencodable test fixture."""
    def __init__(self, thing):
        # keep the wrapped payload; equality is delegated to it
        self.thing = thing
    def __eq__(self, other):
        return other.thing == self.thing
def example_func(x):
    """Identity function; exists as an unencodable (function-typed) fixture."""
    return x
# create test fixtures
def setup_data(self):
    """Attach codec test fixtures to a TestCase instance.

    Populates self.scalars, self.arrays, self.strings and self.groups
    (all expected to be encodable) and self.bad (expected NOT to be
    encodable without pickling).
    """
    # various types of scalar objects which must be encodable
    self.scalars = [65, 6.5, 0.+6.5j, True, bytearray(b'B'),
                    np.array(65, dtype=np.short),
                    np.array(65, dtype=np.intc),
                    np.array(6.5, dtype=np.single),
                    'test string', '', ystring0,
                    slice(1,5,2), Ellipsis, ynewaxis, None]
    # various types of array objects which must be encodable
    self.arrays = [s+np.zeros((3,2), dtype=s.__class__)
                   for s in self.scalars[0:4]]
    self.arrays.append(np.array([[66,66],[66,66],[66,66]], dtype=np.uint8))
    self.arrays.extend([s+np.zeros((3,2), dtype=s.dtype)
                        for s in self.scalars[5:8]])
    self.arrays.append([[66,66],[66,66],[66,66]])
    self.arrays.append(bytearray(b'BABABABA'))
    # various types of string array objects which must be encodable
    self.strings = [['', 'test 1', 'another string'],
                    [['', 'test 1', 'another string'],
                     ['test 2', ystring0, 'more']]]
    self.strings.append(np.array(self.strings[1])) # corrupts string0?
    # representable list and dict objects
    self.groups = [(), [], ([], None), self.scalars, {},
                   {'key'+str(k):self.scalars[k]
                    for k in range(len(self.scalars))},
                   [[1,'abc'], {'a':1, 'b':[2,'c',4]}],
                   {'key0':[1,'abc'], 'key1':{'a':1, 'b':2}}]
    # unrepresentable objects
    self.bad = [{'a':[1,2,3], 2:[4,5,6]}, # illegal key type
                example_func, # function
                ExampleClass, # class
                ExampleClass([1,2,3])] # class instance
class TestCodec(unittest.TestCase):
    """Round-trip tests for the pyorick message codec.

    These tests exercise Message encode/decode in-process only; no
    yorick subprocess is started.
    """
    def setUp(self):
        # build the shared scalar/array/string/group/bad fixtures
        setup_data(self)
    def tearDown(self):
        pass
    def test_scalars(self):
        """Check that scalar types can be encoded and decoded."""
        for i in range(len(self.scalars)):
            s = self.scalars[i]
            self.assertTrue(yencodable(s), 'yencodable fails item '+str(i))
            msg = Message(None, s)
            v = msg.decode()
            self.assertEqual(s, v, 'codec failed on item '+str(i))
    def test_arrays(self):
        """Check that array types can be encoded and decoded."""
        for i in range(len(self.arrays)):
            s = self.arrays[i]
            self.assertTrue(yencodable(s), 'yencodable fails item '+str(i))
            msg = Message(None, s)
            v = msg.decode()
            self.assertTrue(np.array_equal(np.array(s), v),
                            'codec failed on item '+str(i))
    def test_strings(self):
        """Check that string types can be encoded and decoded."""
        for i in range(len(self.strings)):
            s = self.strings[i]
            self.assertTrue(yencodable(s), 'yencodable fails item '+str(i))
            msg = Message(None, s)
            v = msg.decode()
            # ndarray fixtures decode to nested lists; normalize before compare
            if isinstance(s, np.ndarray):
                s = s.tolist()
            self.assertEqual(s, v, 'codec failed on item '+str(i))
    def test_groups(self):
        """Check that group types can be encoded and decoded."""
        for i in range(len(self.groups)):
            s = self.groups[i]
            self.assertTrue(yencodable(s), 'yencodable fails item '+str(i))
            msg = Message(None, s)
            v = msg.decode()
            # tuples decode as lists; normalize before compare
            if isinstance(s, tuple):
                s = list(s)
            self.assertEqual(s, v, 'codec failed on item '+str(i))
    def test_bad(self):
        """Check that unencodable types cannot be encoded."""
        for i in range(len(self.bad)):
            s = self.bad[i]
            self.assertFalse(yencodable(s), 'yencodable fails item '+str(i))
            # with pickling disabled, encoding must raise
            ypickling(False)
            with self.assertRaises(PYorickError) as cm:
                msg = Message(None, s)
            self.assertIsInstance(cm.exception, PYorickError,
                                  'codec failed on item '+str(i))
            # with pickling enabled, the same object must round-trip
            ypickling(encode=True, decode=True)
            print('doing {}'.format(i))
            msg = Message(None, s)
            v = msg.decode()
            self.assertEqual(s, v, 'codec failed on item '+str(i))
    def test_active(self):
        """Check codec for active messages."""
        msg = Message(ID_EOL, 4)
        v = msg.decode()
        self.assertTrue(v[0]==ID_EOL and v[1][0]==4, 'ID_EOL broken')
        msg = Message(ID_EVAL, 'hi mom')
        v = msg.decode()
        self.assertTrue(v[0]==ID_EVAL and v[1][0]=='hi mom', 'ID_EVAL broken')
        msg = Message(ID_EXEC, 'hi mom')
        v = msg.decode()
        self.assertTrue(v[0]==ID_EXEC and v[1][0]=='hi mom', 'ID_EXEC broken')
        msg = Message(ID_GETVAR, 'vvv')
        v = msg.decode()
        self.assertTrue(v[0]==ID_GETVAR and v[1][0]=='vvv', 'ID_GETVAR broken')
        msg = Message(ID_GETSHAPE, 'vvv')
        v = msg.decode()
        self.assertTrue(v[0]==ID_GETSHAPE and v[1][0]=='vvv',
                        'ID_GETSHAPE broken')
        msg = Message(ID_SETVAR, 'vvv', 31.7)
        v = msg.decode()
        self.assertTrue(v[0]==ID_SETVAR and v[1][0]=='vvv' and v[1][1]==31.7,
                        'ID_SETVAR broken')
        # funcall and subcall share the same argument/keyword encoding
        for ident in [ID_FUNCALL, ID_SUBCALL]:
            if ident == ID_FUNCALL:
                err = 'ID_FUNCALL broken'
            else:
                err = 'ID_SUBCALL broken'
            msg = Message(ident, 'vvv')
            v = msg.decode()
            self.assertTrue(v[0]==ident and v[1][0]=='vvv' and len(v[1])==1,
                            err+' 1')
            msg = Message(ident, 'vvv', 31.7)
            v = msg.decode()
            self.assertTrue(v[0]==ident and v[1][0]=='vvv' and v[1][1]==31.7,
                            err+' 2')
            msg = Message(ident, 'vvv', wow=-21)
            v = msg.decode()
            self.assertTrue(v[0]==ident and v[1][0]=='vvv' and len(v[1])==1 and
                            v[2]['wow']==-21, err+' 3')
            msg = Message(ident, 'vvv', 31.7, wow=-21)
            v = msg.decode()
            self.assertTrue(v[0]==ident and v[1][0]=='vvv' and v[1][1]==31.7 and
                            v[2]['wow']==-21, err+' 4')
            msg = Message(ident, 'vvv', 31.7, None, wow=-21, zow='abc')
            v = msg.decode()
            self.assertTrue(v[0]==ident and v[1][0]=='vvv' and v[1][1]==31.7 and
                            v[1][2]==None and v[2]['wow']==-21 and
                            v[2]['zow']=='abc', err+' 5')
        msg = Message(ID_GETSLICE, 'vvv')
        v = msg.decode()
        self.assertTrue(v[0]==ID_GETSLICE and v[1][0]=='vvv' and len(v[1])==1,
                        'ID_GETSLICE broken')
        msg = Message(ID_GETSLICE, 'vvv', 42, Ellipsis)
        v = msg.decode()
        self.assertTrue(v[0]==ID_GETSLICE and v[1][0]=='vvv' and v[1][1]==42 and
                        v[1][2]==Ellipsis, 'ID_GETSLICE broken')
        msg = Message(ID_SETSLICE, 'vvv', 'q')
        v = msg.decode()
        self.assertTrue(v[0]==ID_SETSLICE and v[1][0]=='vvv' and v[1][1]=='q',
                        'ID_SETSLICE broken 1')
        msg = Message(ID_SETSLICE, 'vvv', 42, Ellipsis, 'q')
        v = msg.decode()
        self.assertTrue(v[0]==ID_SETSLICE and v[1][0]=='vvv' and v[1][1]==42 and
                        v[1][2]==Ellipsis and v[1][3]=='q',
                        'ID_SETSLICE broken 2')
    def gen_messages(self): # for test_reader
        # yields (description, Message) pairs covering every fixture and
        # every active-message id
        for obj in self.scalars + self.arrays + self.strings + self.groups:
            yield obj, Message(None, obj)
        yield 'ID_EOL', Message(ID_EOL, 4)
        yield 'ID_EVAL', Message(ID_EVAL, 'hi mom')
        yield 'ID_EXEC', Message(ID_EXEC, 'hi mom')
        yield 'ID_GETVAR', Message(ID_GETVAR, 'vvv')
        yield 'ID_GETSHAPE', Message(ID_GETSHAPE, 'vvv')
        yield 'ID_SETVAR', Message(ID_SETVAR, 'vvv', 31.7)
        yield 'ID_FUNCALL', Message(ID_FUNCALL, 'vvv', 31.7, None, wow=-21)
        yield 'ID_SUBCALL', Message(ID_SUBCALL, 'vvv', 31.7, None, wow=-21)
        yield 'ID_GETSLICE', Message(ID_GETSLICE, 'vvv', 42, Ellipsis)
        yield 'ID_SETSLICE', Message(ID_SETSLICE, 'vvv', 42, Ellipsis, 'q')
    def test_reader(self):
        """Check codec readers."""
        for obj, m in self.gen_messages():
            mlen = len(m.packets)
            msg = Message()
            i = 0
            # the reader yields one empty packet per expected packet; fill
            # each from the reference message and check size/type agreement
            for packet in msg.reader():
                em = str(i)+': '+repr(obj)
                self.assertLess(i, mlen, 'reader stopped late on ' + em)
                self.assertEqual(packet.dtype.itemsize,
                                 m.packets[i].dtype.itemsize,
                                 'reader wrong size on ' + em)
                # np.copyto(packet, m.packets[i], casting='safe')
                # following two lines work back to numpy 1.5:
                self.assertTrue(np.can_cast(m.packets[i].dtype, packet.dtype,
                                            casting='safe'),
                                'reader wrong type on '+ em)
                packet[...] = m.packets[i]
                i += 1
            self.assertEqual(i, mlen, 'reader stopped early on ' +
                             str(i)+': '+repr(obj))
class TestProcess(unittest.TestCase):
def setUp(self):
setup_data(self)
self.yo = Yorick()
def tearDown(self):
self.yo.kill()
def test_basic(self):
"""Check variety of simple yorick interface features."""
self.yo("junk=42;")
self.assertEqual(self.yo("=junk"), 42, 'process failed basic 1')
self.assertEqual(self.yo.v.junk, 42, 'process failed basic 2')
self.assertEqual(self.yo.call.junk.v, 42, 'process failed basic 3')
self.assertEqual(self.yo.evaluate("junk"), 42, 'process failed basic 4')
self.assertEqual(self.yo.handles(1), self.yo.call,
'process failed basic 5')
self.assertEqual(self.yo.handles(7), (self.yo.c,self.yo.e,self.yo.v),
'process failed basic 6')
self.assertEqual(self.yo.c[''].bare, self.yo.bare,
'process failed basic 7')
self.assertEqual(self.yo.v['Y_HOME'], self.yo.v.Y_HOME,
'process failed basic 7')
def test_scalars(self):
"""Check that scalar types can be sent and received."""
for i in range(len(self.scalars)):
s = self.scalars[i]
self.yo.v.junk = s
v = self.yo.v.junk
self.assertEqual(s, v, 'process failed on item '+str(i))
def test_arrays(self):
"""Check that array types can be sent and received."""
for i in range(len(self.arrays)):
s = self.arrays[i]
self.yo.c.junk = s
v = self.yo.e.junk.value
self.assertTrue(np.array_equal(np.array(s), v),
'process failed on item '+str(i))
def test_strings(self):
"""Check that string types can be sent and received."""
for i in range(len(self.strings)):
s = self.strings[i]
self.yo.e.junk = s
v = self.yo.c.junk.value
if isinstance(s, np.ndarray):
s = s.tolist()
self.assertEqual(s, v, 'process failed on item '+str(i))
def test_groups(self):
"""Check that group types can be sent and received."""
for i in range(len(self.groups)):
s = self.groups[i]
self.yo.v.junk = s
v = self.yo.value.junk
if isinstance(s, tuple):
s = list(s)
elif not len(s):
s = [] # yorick cannot distinguish {} from []
self.assertEqual(s, v, 'process failed on item '+str(i)+
'\n'+str(s)+'\n'+str(v))
def test_active(self):
"""Check that all requests can be sent and received."""
# exec, eval, getvar, setvar already tested above
x = self.yo.evaluate.where([1,0,-3])
self.assertEqual(np.array(x).tolist(), [1,3],
'process failed on funcall')
self.yo("""
func test(a, b=) {
extern testv;
testv = a - b;
return testv;
}
""")
self.assertEqual(self.yo.e("test({0}, b={1})", [2,1], 1.5).tolist(),
[0.5, -0.5], 'process failed on formatted eval')
f = self.yo.value.test
self.assertTrue(isinstance(f, YorickVar),
'process failed on non-data value return')
self.yo.call.test([1,2], b=1.5)
self.assertEqual(self.yo.v.testv.tolist(), [-0.5, 0.5],
'process failed on subcall with keyword')
self.assertTrue(f([2,1], b=1.5) is None,
'process failed on value subcall semantics')
self.assertEqual(self.yo.v.testv.tolist(), [0.5, -0.5],
'process failed on value subcall with keyword')
self.assertEqual(self.yo.e.test([1,2], b=1.5).tolist(), [-0.5, 0.5],
'process failed on funcall with keyword')
self.assertEqual(self.yo.e.testv[1], -0.5,
'process failed on getslice')
self.assertEqual(self.yo.e.testv[...].tolist(), [-0.5, 0.5],
'process failed on getslice with ellipsis')
self.yo.e.testv[1:2] = [2.0, 3.0]
self.assertEqual(self.yo.v.testv.tolist(), [2.0, 3.0],
'process failed on setslice')
self.yo.c.testv[0:] = [3.0, 2.0]
self.assertEqual(self.yo.v.testv.tolist(), [3.0, 2.0],
'process failed on setslice, python semantics')
i = self.yo.evaluate.testv.info
self.assertEqual(i, (6, 1, 2), 'process failed on getshape')
i = self.yo.evaluate.test.info
self.assertEqual(i, (-1,), 'process failed on getshape')
    def test_hold(self):
        """Check that struct data and held (by-reference) values work."""
        # exec, eval, getvar, setvar already tested above
        #f = self.yo.e.create('~/gh/pyorick/junk')
        #self.yo.e.write(f, 'this is a test')
        #del f
        # Define a yorick struct to test non-array data transfer.
        self.yo("""
        struct PyTest {
          long mema;
          double memb(2,3);
          char memc;
        }
        """)
        struct = self.yo.e.PyTest(mema=-7, memb=[[11,12],[21,22],[31,32]],
                                  memc=65)
        self.assertEqual(struct['mema'], -7, 'string valued index failed')
        # Mixed indexing: member name plus yorick 1-origin array indices.
        self.assertEqual(struct['memb',2,3], 32, 'string mixed index failed')
        # Key2AttrWrapper exposes mapping keys as python attributes.
        s = Key2AttrWrapper(struct)
        self.assertEqual(s.memc, 65, 'Key2AttrWrapper get failed')
        s.memc = 97
        self.assertEqual(s.memc, 97, 'Key2AttrWrapper set failed')
        # .hold leaves the (large) result on the yorick side and returns
        # only a reference to it.
        s = self.yo.e.random.hold(1000, 1001)
        self.assertTrue(isinstance(s, YorickVar), 'hold attribute failed')
        del struct   # checks that deleting held reference works
        self.yo.v.t = self.yo.e.noop(s)
        self.assertEqual(self.yo.e.t.shape, (1001,1000),
                         'passing held reference as argument failed')
        s = s[5,None]  # implicitly deletes object after retrieving one column
        self.assertEqual(s.shape, (1001,), 'indexing held reference failed')
        # '@name' syntax creates a reference to an existing yorick variable.
        s = self.yo.e('@t')
        self.assertTrue(isinstance(s, YorickVar), 'hold @-syntax failed')
        self.assertEqual(s.shape, (1001,1000),
                         'held reference attribute failed')
        del s
        # Release the yorick-side temporary.
        self.yo.v.t = None
    def test_recurse(self):
        """Check that a yorick reply can contain python requests."""
        # recursive() calls back into python (via py) while python is
        # still waiting for the yorick result, exercising nested
        # request/reply handling in both directions.
        self.yo("""
        func recursive(x) {
          extern _recur;
          if (!_recur) { _recur=1; py, "import numpy as np"; }
          y = py("np.array", [x, 1-x]);
          py, "var=", 1+x;
          return py("var") - x;
        }
        """)
        self.yo.c.recursive(2)
        # The yorick-side `py, "var=", 1+x` assignment lands in __main__.
        self.assertEqual(__main__.var, 3, 'recursive request set failed')
        # py("var") - x == (1+x) - x == 1 for any x.
        self.assertEqual(self.yo.e.recursive(2), 1,
                         'recursive request reply value failed')
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
{"hexsha": "b72f35f2d499d31979a90d7183658dee1bb24b52", "size": 16711, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyorick/test_pyorick.py", "max_stars_repo_name": "trmwzm/pyorick", "max_stars_repo_head_hexsha": "d16af9e0856920d74ea38db2fe9c951e26d765ba", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-04-14T03:30:01.000Z", "max_stars_repo_stars_event_max_datetime": "2017-04-14T03:30:01.000Z", "max_issues_repo_path": "pyorick/test_pyorick.py", "max_issues_repo_name": "trmwzm/pyorick", "max_issues_repo_head_hexsha": "d16af9e0856920d74ea38db2fe9c951e26d765ba", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-01-23T13:07:11.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-23T13:07:11.000Z", "max_forks_repo_path": "pyorick/test_pyorick.py", "max_forks_repo_name": "trmwzm/pyorick", "max_forks_repo_head_hexsha": "d16af9e0856920d74ea38db2fe9c951e26d765ba", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-02-02T22:30:55.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-26T15:56:35.000Z", "avg_line_length": 43.6318537859, "max_line_length": 80, "alphanum_fraction": 0.5298306505, "include": true, "reason": "import numpy", "num_tokens": 4298}
|
module SPCSpectra
using Dates
export SPC
"""
    SPC

In-memory representation of one SPC spectra file: the per-subfile
(x, y) data plus axis labels and file metadata.
"""
mutable struct SPC
    # One (xdata, ydata) pair per subfile; xdata can be Float64 if
    # calculated from range specs.
    data::Vector{Tuple{Vector{<:Number}, Vector{Float32}}}
    zdata::Vector{Float32}           # z value (e.g. time) of each subfile
    xlabel::String                   # x-axis unit label
    ylabel::String                   # y-axis unit label
    zlabel::String                   # z-axis unit label
    experimenttype::String           # experiment type (see fexper_op)
    timestamp::DateTime              # acquisition date/time from the header
    param_dict::Dict{String, String} # "key=value" entries from the log block
    params::Vector{String}           # remaining (non key=value) log lines
end
# Adaptation of the python package spc by rohanisaac
# Byte sizes of various parts of the file.
# head_siz = 512
# old_head_siz = 256
# `const` so these globals are type-stable when read inside functions.
const subhead_siz = 32  # size of each subfile header, in bytes
const log_siz = 64      # size of the log block header, in bytes
"Units for x,z,w axes."
const fxtype_op = ["Arbitrary",
"Wavenumber (cm-1)",
"Micrometers (um)",
"Nanometers (nm)",
"Seconds ",
"Minutes", "Hertz (Hz)",
"Kilohertz (KHz)",
"Megahertz (MHz) ",
"Mass (M/z)",
"Parts per million (PPM)",
"Days",
"Years",
"Raman Shift (cm-1)",
"eV",
"XYZ text labels in fcatxt (old 0x4D version only)",
"Diode Number",
"Channel",
"Degrees",
"Temperature (F)",
"Temperature (C)",
"Temperature (K)",
"Data Points",
"Milliseconds (mSec)",
"Microseconds (uSec) ",
"Nanoseconds (nSec)",
"Gigahertz (GHz)",
"Centimeters (cm)",
"Meters (m)",
"Millimeters (mm)",
"Hours"]
"Units y-axis."
const fytype_op = ["Arbitrary Intensity",
"Interferogram",
"Absorbance",
"Kubelka-Munk",
"Counts",
"Volts",
"Degrees",
"Milliamps",
"Millimeters",
"Millivolts",
"Log(1/R)",
"Percent",
"Intensity",
"Relative Intensity",
"Energy",
"",
"Decibel",
"",
"",
"Temperature (F)",
"Temperature (C)",
"Temperature (K)",
"Index of Refraction [N]",
"Extinction Coeff. [K]",
"Real",
"Imaginary",
"Complex"]
"Unit labels for the y axis when `fytype >= 128`; look up with `get(fytype_op2, fytype - 127, ...)`."
const fytype_op2 = ["Transmission",
    "Reflectance",
    "Arbitrary or Single Beam with Valley Peaks",
    "Emission"]
"Experiment-type labels; look up with `get(fexper_op, fexper + 1, \"Unknown\")`."
const fexper_op = ["General SPC",
    "Gas Chromatogram",
    "General Chromatogram",
    "HPLC Chromatogram",
    "FT-IR, FT-NIR, FT-Raman Spectrum or Igram",
    "NIR Spectrum",
    "UV-VIS Spectrum",
    "X-ray Diffraction Spectrum",
    "Mass Spectrum ",
    "NMR Spectrum or FID",
    "Raman Spectrum",
    "Fluorescence Spectrum",
    "Atomic Spectrum",
    "Chromatography Diode Array Spectra"]
"""
    flag_bits(n)

Return the bits of the integer `n` as a `BitVector`, least-significant
bit first, padded to the full bit width of `typeof(n)`.
"""
function flag_bits(n)
    nbits = 8 * sizeof(n)
    return BitVector(digits(n; base = 2, pad = nbits))
end
"Read one little-endian value of type `T` from `stream`."
function read_data(stream::IO, T::DataType)
    return ltoh(read(stream, T))
end

"Read `nbytes` bytes and return them as a `String` with NUL padding stripped."
function read_data(stream::IO, ::Type{String}, nbytes::Integer)
    raw = read(stream, nbytes)
    return strip(String(raw), '\0')
end

"Read a little-endian array with eltype `T` and dimensions `dims...`."
function read_data(stream::IO, T::DataType, dims::Integer...)
    nvals = prod(dims)
    raw = reinterpret(T, read(stream, nvals * sizeof(T)))
    return ltoh.(reshape(raw, Int64.(dims)...))
end

"Read one little-endian value per entry of the type tuple/vector `TT`, in order."
function read_data(stream::IO, TT::Union{NTuple{N, DataType} where N, Vector{DataType}})
    return ltoh.(read.(Ref(stream), TT))
end
"""
SPC(filename::AbstractString)
Construct SPC objects.
"""
function SPC(filename::AbstractString)
content = read(filename)
io = IOBuffer(content)
ftflg, fversn = content[1:2]
# --------------------------------------------
# NEW FORMAT (LSB)
# --------------------------------------------
fversn == 0x4b || return "Reading of file version $(repr(fversn)) not implemented"
ftflg = read_data(io, UInt8)
fversn = read_data(io, UInt8)
fexper = read_data(io, UInt8)
fexp = read_data(io, UInt8)
fnpts = read_data(io, Int32)
ffirst = read_data(io, Float64)
flast = read_data(io, Float64)
fnsub = read_data(io, Int32)
fxtype = read_data(io, UInt8)
fytype = read_data(io, UInt8)
fztype = read_data(io, UInt8)
fpost = read_data(io, UInt8)
fdate = read_data(io, Int32)
fres = read_data(io, String, 9)
fsource = read_data(io, String, 9)
fpeakpt = read_data(io, Int16)
fspare = read_data(io, String, 32)
fcmnt = read_data(io, String, 130)
fcatxt = read_data(io, String, 30)
flogoff = read_data(io, Int32)
fmods = read_data(io, Int32)
fprocs = read_data(io, UInt8)
flevel = read_data(io, UInt8)
fsampin = read_data(io, Int16)
ffactor = read_data(io, Float32)
fmethod = read_data(io, String, 48)
fzinc = read_data(io, Float32)
fwplanes = read_data(io, Int32)
fwinc = read_data(io, Float32)
fwtype = read_data(io, UInt8)
freser = read_data(io, String, 187)
# Flag bits
tsprec, tcgram, tmulti, trandm, tordrd, talabs, txyxys, txvals = flag_bits(ftflg)
# Convert date time to appropriate format
year = fdate >> 20
month = (fdate >> 16) % (2^4)
day = (fdate >> 11) % (2^5)
hour = (fdate >> 6) % (2^5)
minute = fdate % (2^6)
timestamp = DateTime(year, month, day, hour, minute)
# remove multiple spaces
cmnt = replace(fcmnt, r"\s+" => " ")
# figure out type of file
dat_multi = fnsub > 1
dat_fmt = if txyxys
# x values are given
"-xy"
elseif txvals
# only one subfile, which contains the x data
dat_fmt = "x-y"
else
# no x values are given, but they can be generated
dat_fmt = "gx-y"
end
println("$dat_fmt($fnsub)")
x = if ! txyxys
# txyxys don't have global x data
if txvals
# if global x data is given
read_data(io, Float32, fnpts)
else
# otherwise generate them
range(ffirst, flast; length=fnpts) |> collect
end
end
# make a list of subfiles
xydata = []
zdata = Float32[]
z0 = dz = 0f0
# if subfile directory is given
if dat_fmt == "-xy" && fnpts > 0
directory = true
# loop over entries in directory
for i in 1:fnsub
ssfposn, ssfsize, ssftime = read_data(io, (Int32, Int32, Float32))
# add sufile, load defaults for npts and exp
pos = position(io)
seek(io, ssfposn) # io buffer position is zero-based!
xloc, y, z, zinc = subFile(io, 0, 0, true, tsprec, tmulti)
if i == 1
z0 = z
dz = zinc
end
seek(io, pos)
push!(xydata, (isnothing(xloc) ? x : xloc, y))
push!(zdata, z)
end
else
# don't have directory, for each subfile
for i in 1:fnsub
xloc, y, z, zinc = subFile(io, fnpts, fexp, txyxys, tsprec, tmulti)
if i == 1
z0 = z
dz = zinc
end
push!(xydata, (isnothing(xloc) ? x : xloc, y))
push!(zdata, z)
end
end
# if log data exists
# flog offset to log data offset not zero (bytes)
param_dict = Dict{String, String}()
params = String[] # put the rest into a list
if flogoff > 0
log_head_end = flogoff + log_siz
io_log = IOBuffer(content[flogoff+1:log_head_end])
# logstc_str = "<iiiii44s"
logsizd, logsizm, logtxto, logbins, logdsks = read_data(io_log, Int32, 5)
logspar = read_data(io_log, String, 44)
log_pos = flogoff + logtxto
log_end_pos = flogoff + logsizd
# line endings: get rid of any '\r' and then split on '\n'
log_content = split(strip(String(content[log_pos + 1:log_end_pos]), ['\0', '\r', '\n']), r"\r?\n")
# split log data into dictionary based on =
for x in log_content
if occursin("=", x)
# stop it from breaking if there is more than 1 =
key, value = split(x, "=")[1:2]
push!(param_dict, key => strip(value, '\0'))
else
push!(params, x)
end
end
end
labels = [
get(fxtype_op, fxtype + 1, "Unknown"),
get(fytype_op, fytype + 1, get(fytype_op2, fytype - 127, "Unknown")),
get(fxtype_op, fztype + 1, "Unknown")
]
# --------------------------
# check if labels are included as text
# --------------------------
# split it based on '\0' character
# format x, y, z
if talabs
for (i, s) in enumerate(split(fcatxt, '\0', keepempty = false))
isempty(s) || (labels[i] = s)
end
end
if (0x10 & ftflg == 0x0)
fzinc > 0 && (dz == fzinc)
zdata = z0 .+ collect(0:dz:((fnpts - 1) * dz))
end
SPC(xydata, zdata, labels..., get(fexper_op, fexper + 1, "Unknown"), timestamp, param_dict, params)
end
"""
subFile(io::IO, fnpts, fexp, txyxy, tsprec, tmulti)
Process each subfile passed to it, extracts header information and data
information and places them in data members
Data
- x: x-data (optional)
- y: y-data
- y_int: integer y-data if y-data is not floating
"""
function subFile(io::IO, fnpts, fexp, txyxy, tsprec, tmulti)
# extract subheader info
subflgs, subexp, subindx, subtime, subnext, subnois, subnpts, subscan, subwlevel, subresv = read_subheader(io)
pts = txyxy ? subnpts : fnpts
# Choosing exponent
# -----------------
# choose local vs global exponent depending on tmulti
exp = tmulti ? subexp : fexp
# Make sure it is reasonable, if it out of range zero it
(-128 < exp <= 128) || (exp = 0)
# --------------------------
# if x_data present
# --------------------------
x = if txyxy
# x_str = '<' + 'i' * pts
x_raw = read_data(io, Int32, pts)
(2.0f0^(exp - 32)) .* x_raw
else
nothing
end
# --------------------------
# extract y_data
# --------------------------
y = if exp == 128
# Floating y-values
read_data(io, Float32, pts)
else
# integer format
if tsprec
# 16 bit
y_raw16 = read_data(io, Int16, pts)
(2.0f0^(exp - 16)) .* y_raw16
else
# 32 bit, using size of subheader to figure out data type
y_raw = read_data(io, Int32, pts)
(2.0f0^(exp - 32)) .* y_raw
end
end
z = subtime
zinc = subnext - subtime
x, y, z, zinc
end
"""
read_subheader(io::IO)
Return the subheader as a list:
-------
10 item list with the following data members:
[1] subflgs
[2] subexp
[3] subindx
[4] subtime
[5] subnext
[6] subnois
[7] subnpts
[8] subscan
[9] subwlevel
[10] subresv
"""
function read_subheader(io::IO)
subflgs = read_data(io, UInt8)
subexp = read_data(io, UInt8)
subindx = read_data(io, Int16)
subtime = read_data(io, Float32)
subnext = read_data(io, Float32)
subnois = read_data(io, Float32)
subnpts = read_data(io, Int32)
subscan = read_data(io, Int32)
subwlevel = read_data(io, Float32)
subresv = read_data(io, String, 4)
subflgs, subexp, subindx, subtime, subnext, subnois, subnpts, subscan, subwlevel, subresv
end
end # module
|
{"hexsha": "35af6129594621edf11675c290ce45bb5006f5cf", "size": 11801, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/SPCSpectra.jl", "max_stars_repo_name": "hhaensel/SPCSpectra.jl", "max_stars_repo_head_hexsha": "e9cc0bfae3f5507983dfd0e512c1b92e20bd4529", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/SPCSpectra.jl", "max_issues_repo_name": "hhaensel/SPCSpectra.jl", "max_issues_repo_head_hexsha": "e9cc0bfae3f5507983dfd0e512c1b92e20bd4529", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2022-03-27T21:29:39.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T22:32:54.000Z", "max_forks_repo_path": "src/SPCSpectra.jl", "max_forks_repo_name": "hhaensel/SPCSpectra.jl", "max_forks_repo_head_hexsha": "e9cc0bfae3f5507983dfd0e512c1b92e20bd4529", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2022-03-29T15:52:28.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T20:15:49.000Z", "avg_line_length": 30.8120104439, "max_line_length": 123, "alphanum_fraction": 0.5088551818, "num_tokens": 3266}
|
from matplotlib.patches import _Style, FancyArrowPatch
from matplotlib.transforms import IdentityTransform
from matplotlib.path import Path
import numpy as np
class _FancyAxislineStyle(object):
    """Namespace holding the concrete artist classes returned by AxislineStyle."""

    class SimpleArrow(FancyArrowPatch):
        """
        The artist class that will be returned for SimpleArrow style.
        """
        # Arrowstyle string understood by FancyArrowPatch.
        _ARROW_STYLE = "->"

        def __init__(self, axis_artist, line_path, transform,
                     line_mutation_scale):
            self._axis_artist = axis_artist
            self._line_transform = transform
            self._line_path = line_path
            self._line_mutation_scale = line_mutation_scale

            # Path is kept in display coordinates (transformed in draw),
            # hence the IdentityTransform here.
            FancyArrowPatch.__init__(self,
                                     path=self._line_path,
                                     arrowstyle=self._ARROW_STYLE,
                                     arrow_transmuter=None,
                                     patchA=None,
                                     patchB=None,
                                     shrinkA=0.,
                                     shrinkB=0.,
                                     mutation_scale=line_mutation_scale,
                                     mutation_aspect=None,
                                     transform=IdentityTransform(),
                                     )

        def set_line_mutation_scale(self, scale):
            # Effective arrow size = caller's scale x the per-style scale.
            self.set_mutation_scale(scale*self._line_mutation_scale)

        def _extend_path(self, path, mutation_size=10):
            """
            Extend the path to make a room for drawing arrow.
            """
            from matplotlib.bezier import get_cos_sin

            # Direction of the final segment of the path.
            x0, y0 = path.vertices[-2]
            x1, y1 = path.vertices[-1]
            cost, sint = get_cos_sin(x0, y0, x1, y1)

            # Extend by one mutation_size along that direction.
            d = mutation_size * 1.
            x2, y2 = x1 + cost*d, y1+sint*d

            if path.codes is None:
                _path = Path(np.concatenate([path.vertices, [[x2, y2]]]))
            else:
                _path = Path(np.concatenate([path.vertices, [[x2, y2]]]),
                             np.concatenate([path.codes, [Path.LINETO]]))

            return _path

        def set_path(self, path):
            self._line_path = path

        def draw(self, renderer):
            """
            Draw the axis line.
             1) transform the path to the display coordinate.
             2) extend the path to make a room for arrow
             3) update the path of the FancyArrowPatch.
             4) draw
            """
            path_in_disp = self._line_transform.transform_path(self._line_path)
            mutation_size = self.get_mutation_scale() #line_mutation_scale()
            extented_path = self._extend_path(path_in_disp,
                                              mutation_size=mutation_size)
            # NOTE(review): writes a private FancyArrowPatch attribute that
            # its draw() reads -- confirm against the matplotlib version
            # in use.
            self._path_original = extented_path
            FancyArrowPatch.draw(self, renderer)

    class FilledArrow(SimpleArrow):
        """
        The artist class that will be returned for FilledArrow style.
        """
        _ARROW_STYLE = "-|>"
class AxislineStyle(_Style):
    """
    :class:`AxislineStyle` is a container class which defines style classes
    for AxisArtists.

    An instance of any axisline style class is an callable object,
    whose call signature is ::

       __call__(self, axis_artist, path, transform)

    When called, this should return a mpl artist with following
    methods implemented. ::

      def set_path(self, path):
          # set the path for axisline.

      def set_line_mutation_scale(self, scale):
          # set the scale

      def draw(self, renderer):
          # draw
    """

    # Registry mapping style name ("->", "-|>") to the style class;
    # populated below as each style class is defined.
    _style_list = {}

    class _Base(object):
        # The derived classes are required to be able to be initialized
        # w/o arguments, i.e., all its argument (except self) must have
        # the default values.

        def __init__(self):
            """
            initialization.
            """
            super().__init__()

        def __call__(self, axis_artist, transform):
            """
            Given the AxisArtist instance, and transform for the path
            (set_path method), return the mpl artist for drawing the axis line.
            """
            return self.new_line(axis_artist, transform)

    class SimpleArrow(_Base):
        """
        A simple arrow.
        """

        ArrowAxisClass = _FancyAxislineStyle.SimpleArrow

        def __init__(self, size=1):
            """
            *size*
              size of the arrow as a fraction of the ticklabel size.
            """
            self.size = size
            super().__init__()

        def new_line(self, axis_artist, transform):
            # Unit path from (0,0) to (0,1); `transform` maps it onto the
            # actual axis line at draw time.
            linepath = Path([(0,0), (0, 1)])
            axisline = self.ArrowAxisClass(axis_artist, linepath, transform,
                                           line_mutation_scale=self.size)
            return axisline

    _style_list["->"] = SimpleArrow

    class FilledArrow(SimpleArrow):
        # Same geometry as SimpleArrow; only the arrowhead style differs.
        ArrowAxisClass = _FancyAxislineStyle.FilledArrow

    _style_list["-|>"] = FilledArrow
|
{"hexsha": "7c9c0a9711b65fcb85ef6fa15731f54ae487ad1b", "size": 5107, "ext": "py", "lang": "Python", "max_stars_repo_path": "PythonAPI/carissma_project/lib/python3.5/site-packages/mpl_toolkits/axisartist/axisline_style.py", "max_stars_repo_name": "AbdulHoffmann/carla_carissma", "max_stars_repo_head_hexsha": "8d382769ffa02a6c61a22c57160285505f5ff0a4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 445, "max_stars_repo_stars_event_min_datetime": "2019-01-26T13:50:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-18T05:17:38.000Z", "max_issues_repo_path": "venv/lib/python3.7/site-packages/mpl_toolkits/axisartist/axisline_style.py", "max_issues_repo_name": "John1001Song/Big-Data-Robo-Adviser", "max_issues_repo_head_hexsha": "9444dce96954c546333d5aecc92a06c3bfd19aa5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 242, "max_issues_repo_issues_event_min_datetime": "2019-01-29T15:48:27.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T22:09:21.000Z", "max_forks_repo_path": "venv/lib/python3.7/site-packages/mpl_toolkits/axisartist/axisline_style.py", "max_forks_repo_name": "John1001Song/Big-Data-Robo-Adviser", "max_forks_repo_head_hexsha": "9444dce96954c546333d5aecc92a06c3bfd19aa5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 64, "max_forks_repo_forks_event_min_datetime": "2018-04-25T08:51:57.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-29T14:13:57.000Z", "avg_line_length": 31.1402439024, "max_line_length": 79, "alphanum_fraction": 0.5412179362, "include": true, "reason": "import numpy", "num_tokens": 1035}
|
function M = xfm_read(fname)
% M = xfm_read(fname)
%
% Read a 4x4 linear transform matrix M from an MNI .xfm file.
% Leading '%' comment lines are skipped, the 'Linear_Transform'
% keyword is located, and the following three rows are parsed;
% the fourth row is fixed at [0 0 0 1].
%
% Original Author: Bruce Fischl
% CVS Revision Info:
%    $Author: nicks $
%    $Date: 2011/03/02 00:04:13 $
%    $Revision: 1.4 $
%
% Copyright © 2011 The General Hospital Corporation (Boston, MA) "MGH"
%
% Terms and conditions for use, reproduction, distribution and contribution
% are found in the 'FreeSurfer Software License Agreement' contained
% in the file 'LICENSE' found in the FreeSurfer distribution, and here:
%
% https://surfer.nmr.mgh.harvard.edu/fswiki/FreeSurferSoftwareLicense
%
% Reporting: freesurfer@nmr.mgh.harvard.edu
%

fid = fopen(fname) ;
if (fid < 0)
  error(sprintf('could not open file %s', fname));
end

% Skip leading comment lines.  BUGFIX: use short-circuit && together
% with ischar/~isempty -- the original non-short-circuit `&` evaluated
% tline(1) even for an empty line (or the -1 that fgetl returns at
% end-of-file), causing an indexing error.
tline = fgetl(fid) ;
while (ischar(tline) && ~isempty(tline) && (tline(1) == '%'))
  tline = fgetl(fid) ;
end

% Scan forward to the 'Linear_Transform' keyword, failing cleanly at
% end-of-file instead of erroring inside strtok.
tok = strtok(tline);
while (strcmp(tok, 'Linear_Transform') ~= 1)
  tline = fgetl(fid) ;
  if (~ischar(tline))
    fclose(fid) ;
    error(sprintf('no Linear_Transform found in %s', fname));
  end
  tok = strtok(tline);
end

% Parse the three stored rows; the homogeneous row is implicit.
M = zeros(4,4) ; M(4,4) = 1;
for row=1:3
  tline = fgetl(fid) ;  % one row of matrix
  tmp = sscanf(tline, '%f');
  M(row,:) = tmp';
end

fclose(fid) ;
|
{"author": "vistalab", "repo": "vistasoft", "sha": "7f0102c696c091c858233340cc7e1ab02f064d4c", "save_path": "github-repos/MATLAB/vistalab-vistasoft", "path": "github-repos/MATLAB/vistalab-vistasoft/vistasoft-7f0102c696c091c858233340cc7e1ab02f064d4c/external/freesurfer/xfm_read.m"}
|
[STATEMENT]
lemma det_linear_rows_sum:
assumes fS: "finite S"
and a: "a \<in> {0..<n} \<rightarrow> S \<rightarrow> carrier_vec n"
shows "det (mat\<^sub>r n n (\<lambda> i. finsum_vec TYPE('a :: comm_ring_1) n (a i) S)) =
sum (\<lambda>f. det (mat\<^sub>r n n (\<lambda> i. a i (f i))))
{f. (\<forall>i\<in>{0..<n}. f i \<in> S) \<and> (\<forall>i. i \<notin> {0..<n} \<longrightarrow> f i = i)}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. det (mat\<^sub>r n n (\<lambda>i. finsum_vec TYPE('a) n (a i) S)) = (\<Sum>f | (\<forall>i\<in>{0..<n}. f i \<in> S) \<and> (\<forall>i. i \<notin> {0..<n} \<longrightarrow> f i = i). det (mat\<^sub>r n n (\<lambda>i. a i (f i))))
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. det (mat\<^sub>r n n (\<lambda>i. finsum_vec TYPE('a) n (a i) S)) = (\<Sum>f | (\<forall>i\<in>{0..<n}. f i \<in> S) \<and> (\<forall>i. i \<notin> {0..<n} \<longrightarrow> f i = i). det (mat\<^sub>r n n (\<lambda>i. a i (f i))))
[PROOF STEP]
let ?T = "{0..<n}"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. det (mat\<^sub>r n n (\<lambda>i. finsum_vec TYPE('a) n (a i) S)) = (\<Sum>f | (\<forall>i\<in>{0..<n}. f i \<in> S) \<and> (\<forall>i. i \<notin> {0..<n} \<longrightarrow> f i = i). det (mat\<^sub>r n n (\<lambda>i. a i (f i))))
[PROOF STEP]
have fT: "finite ?T"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. finite {0..<n}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
finite {0..<n}
goal (1 subgoal):
1. det (mat\<^sub>r n n (\<lambda>i. finsum_vec TYPE('a) n (a i) S)) = (\<Sum>f | (\<forall>i\<in>{0..<n}. f i \<in> S) \<and> (\<forall>i. i \<notin> {0..<n} \<longrightarrow> f i = i). det (mat\<^sub>r n n (\<lambda>i. a i (f i))))
[PROOF STEP]
have th0: "\<And>x y. mat\<^sub>r n n (\<lambda> i. if i \<in> ?T then x i else y i) = mat\<^sub>r n n (\<lambda> i. x i)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>x y. mat\<^sub>r n n (\<lambda>i. if i \<in> {0..<n} then x i else y i) = mat\<^sub>r n n x
[PROOF STEP]
by (rule eq_rowI, auto)
[PROOF STATE]
proof (state)
this:
mat\<^sub>r n n (\<lambda>i. if i \<in> {0..<n} then ?x i else ?y i) = mat\<^sub>r n n ?x
goal (1 subgoal):
1. det (mat\<^sub>r n n (\<lambda>i. finsum_vec TYPE('a) n (a i) S)) = (\<Sum>f | (\<forall>i\<in>{0..<n}. f i \<in> S) \<and> (\<forall>i. i \<notin> {0..<n} \<longrightarrow> f i = i). det (mat\<^sub>r n n (\<lambda>i. a i (f i))))
[PROOF STEP]
have c: "(\<lambda> _. 0\<^sub>v n) \<in> ?T \<rightarrow> carrier_vec n"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<lambda>_. 0\<^sub>v n) \<in> {0..<n} \<rightarrow> carrier_vec n
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
(\<lambda>_. 0\<^sub>v n) \<in> {0..<n} \<rightarrow> carrier_vec n
goal (1 subgoal):
1. det (mat\<^sub>r n n (\<lambda>i. finsum_vec TYPE('a) n (a i) S)) = (\<Sum>f | (\<forall>i\<in>{0..<n}. f i \<in> S) \<and> (\<forall>i. i \<notin> {0..<n} \<longrightarrow> f i = i). det (mat\<^sub>r n n (\<lambda>i. a i (f i))))
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. det (mat\<^sub>r n n (\<lambda>i. finsum_vec TYPE('a) n (a i) S)) = (\<Sum>f | (\<forall>i\<in>{0..<n}. f i \<in> S) \<and> (\<forall>i. i \<notin> {0..<n} \<longrightarrow> f i = i). det (mat\<^sub>r n n (\<lambda>i. a i (f i))))
[PROOF STEP]
by (rule det_linear_rows_finsum_lemma[OF fS fT c subset_refl a, unfolded th0])
[PROOF STATE]
proof (state)
this:
det (mat\<^sub>r n n (\<lambda>i. finsum_vec TYPE('a) n (a i) S)) = (\<Sum>f | (\<forall>i\<in>{0..<n}. f i \<in> S) \<and> (\<forall>i. i \<notin> {0..<n} \<longrightarrow> f i = i). det (mat\<^sub>r n n (\<lambda>i. a i (f i))))
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1673, "file": "Jordan_Normal_Form_Determinant", "length": 11}
|
# -*- coding: utf-8 -*-
"""
BasicWorm, WormPartition, JSON_Serializer
Credit to Christopher R. Wagner at
http://robotfantastic.org/serializing-python-data-to-json-some-edge-cases.html
for the following six functions:
isnamedtuple
serialize
restore
data_to_json
json_to_data
nested_equal
"""
import numpy as np
import warnings
import copy
import h5py
import matplotlib.pyplot as plt
import json
from collections import namedtuple, Iterable, OrderedDict
from .. import config, utils
from .pre_features import WormParsing
from .video_info import VideoInfo
#%%
class JSON_Serializer():
    """
    A class that can save all of its attributes to a JSON file, or
    load them from a JSON file.
    """
    def __init__(self):
        pass

    def save_to_JSON(self, JSON_path):
        # Serialize every instance attribute as a (name, value) pair.
        payload = data_to_json(list(self.__dict__.items()))

        with open(JSON_path, 'w') as out_handle:
            out_handle.write(payload)

    def load_from_JSON(self, JSON_path):
        with open(JSON_path, 'r') as in_handle:
            payload = in_handle.read()

        # Re-attach each stored (name, value) pair to this instance.
        for member in json_to_data(payload):
            setattr(self, member[0], member[1])
#%%
class UnorderedWorm(JSON_Serializer):
    """
    Encapsulates the notion of worm contour or skeleton data that might have
    been obtained from a computer vision operation

    * We don't assume the contour or skeleton points are evenly spaced,
    but we do assume they are in order as you walk along the skeleton.

    * We DON'T assume that the head and the tail are at points 0 and -1,
    respectively - hence the use of the word "unordered" in the name of this
    class.

    * We don't assume that there is the same number of contour points
    in each frame.  This means we can't use a simple ndarray representation
    for the contour.  Instead, we use a list of single-dimension numpy
    arrays.

    """
    def __init__(self, other=None):
        # BUGFIX: `other` now defaults to None; previously the bare
        # `cls()` calls in the factory methods raised TypeError.
        attributes = ['unordered_contour', 'unordered_skeleton',
                      'head', 'tail', 'video_info']

        if other is None:
            for a in attributes:
                setattr(self, a, None)
        else:
            # Copy constructor
            for a in attributes:
                setattr(self, a, copy.deepcopy(getattr(other, a)))

    @classmethod
    def from_skeleton_factory(cls, skeleton, head=None, tail=None):
        """
        A factory method taking the simplest possible input: just a skeleton.
        Assumes 0th point is head, n-1th point is tail.  No contour.

        Parameters
        ----------
        skeleton : list of ndarray or ndarray
            If ndarray, we are in the simpler "homocardinal" case
            If list of ndarray, each frame can have a varying number of points
        head: ndarray containing the position of the head.
        tail: ndarray containing the position of the tail.

        """
        uow = cls()
        # if len(np.shape(skeleton)) != 3 or np.shape(skeleton)[1] != 2:
        #    raise Exception("Provided skeleton must have "
        #                    "shape (n_points,2,n_frames)")
        # BUGFIX: store under the attribute name declared in __init__
        # (was `uow.skeleton`, which no other member referenced).
        uow.unordered_skeleton = skeleton

        # BUGFIX: head and tail were swapped relative to the documented
        # convention (0th point is head, n-1th point is tail).
        if head is None:
            uow.head = skeleton[0, :, :]
        else:
            uow.head = head

        if tail is None:
            uow.tail = skeleton[-1, :, :]
        else:
            uow.tail = tail

        # TODO: First check for ndarray or list, if ndarray use skeleton.shape
        # if len(np.shape(skeleton)) != 3 or np.shape(skeleton)[1] != 2:
        #    raise Exception("Provided skeleton must have "
        #                    "shape (n_points,2,n_frames)")
        # TODO: We need to handle the list case

        return uow

    @classmethod
    def from_contour_factory(cls, contour, head=None, tail=None):
        # TODO: not yet implemented
        pass

    def ordered_ventral_contour(self):
        """
        Return the vulva side of the ordered heterocardinal contour.

        i.e. with tail at position -1 and head at position 0.

        """
        # TODO
        pass

    def ordered_dorsal_contour(self):
        """
        Return the non-vulva side of the ordered heterocardinal contour.

        i.e. with tail at position -1 and head at position 0.

        """
        # TODO
        pass

    def ordered_skeleton(self):
        """
        Return the ordered skeleton.

        i.e. with tail at position -1 and head at position 0.

        """
        # TODO
        pass
#%%
class BasicWorm(JSON_Serializer):
"""
A worm's skeleton and contour, not necessarily "normalized" to 49 points,
and possibly heterocardinal (i.e. possibly with a varying number of
points per frame). We also try to allow for the following possibilities:
- skeleton specified in the file, but no contour
- in this case the contour should be left blank (TODO)
- contour specified in the file, but no skeleton
- in this case the skeleton is derived using the algorithm
WormParsing.compute_skeleton_and_widths
- both contour and skeleton are specified in the file
- in this case you can use either:
- .h_skeleton, the derived skeleton, or
- ._h_loaded_skeleton, the skeleton from the file.
Attributes
----------
h_skeleton : list, where each element is a numpy array of shape (2,k_i)
Each element of the list is a frame.
Where k_i is the number of skeleton points in frame i.
The first axis of the numpy array, having len 2, is the x and y.
Missing frames should be identified by None.
h_ventral_contour: Same type and shape as skeleton (see above)
The vulva side of the contour.
h_dorsal_contour: Same type and shape as skeleton (see above)
The non-vulva side of the contour.
video_info : An instance of the VideoInfo class.
(contains metadata attributes of the worm video)
"""
def __init__(self, other=None):
attributes = ['_h_skeleton', '_h_ventral_contour',
'_h_dorsal_contour']
if other is None:
for a in attributes:
setattr(self, a, None)
self.video_info = VideoInfo()
else:
# Copy constructor
for a in attributes:
setattr(self, a, copy.deepcopy(getattr(other, a)))
    @classmethod
    def from_schafer_file_factory(cls, data_file_path):
        """
        Load worm data from a Schafer-lab HDF5 file.

        Parameters
        ----------
        data_file_path: str
            Path to an HDF5 file with 'all_vulva_contours',
            'all_non_vulva_contours', 'all_skeletons',
            'is_stage_movement' and 'is_valid' datasets.

        Returns
        -------
        BasicWorm
            Per-frame contours/skeletons loaded (None for invalid
            frames) and video_info.frame_code derived from the validity
            and stage-movement flags.
        """
        bw = cls()

        with h5py.File(data_file_path, 'r') as h:
            # These are all HDF5 'references'
            all_ventral_contours_refs = h['all_vulva_contours'].value
            all_dorsal_contours_refs = h['all_non_vulva_contours'].value
            all_skeletons_refs = h['all_skeletons'].value

            is_stage_movement = utils._extract_time_from_disk(
                h, 'is_stage_movement')
            is_valid = utils._extract_time_from_disk(h, 'is_valid')

            all_skeletons = []
            all_ventral_contours = []
            dorsal_contour = []

            # Dereference the per-frame data; invalid frames become None.
            for valid_frame, iFrame in zip(is_valid, range(is_valid.size)):
                if valid_frame:
                    all_skeletons.append(
                        h[all_skeletons_refs[iFrame][0]].value)
                    all_ventral_contours.append(
                        h[all_ventral_contours_refs[iFrame][0]].value)
                    dorsal_contour.append(
                        h[all_dorsal_contours_refs[iFrame][0]].value)
                else:
                    all_skeletons.append(None)
                    all_ventral_contours.append(None)
                    dorsal_contour.append(None)

            # Video Metadata
            is_stage_movement = is_stage_movement.astype(bool)
            is_valid = is_valid.astype(bool)

        # A kludge, we drop frames in is_stage_movement that are in excess
        # of the number of frames in the video.  It's unclear why
        # is_stage_movement would be longer by 1, which it was in our
        # canonical example.
        is_stage_movement = is_stage_movement[0:len(all_skeletons)]

        # 5. Derive frame_code from the two pieces of data we have,
        # is_valid and is_stage_movement.
        bw.video_info.frame_code = (1 * is_valid +
                                    2 * is_stage_movement +
                                    100 * ~(is_valid | is_stage_movement))

        # Remove any derived skeleton, since we are loading new contours
        # and therefore we'll want any call to .h_skeleton to derive a new one.
        bw.__remove_precalculated_skeleton()

        # Also save the skeleton that was specified in the file, if it exists.
        bw._h_loaded_skeleton = all_skeletons

        # Load the contours that were specified in the file, if they exist.
        bw._h_ventral_contour = all_ventral_contours
        bw._h_dorsal_contour = dorsal_contour

        return bw
    @classmethod
    def from_contour_factory(cls, ventral_contour, dorsal_contour):
        """
        Return a BasicWorm from a normalized ventral_contour and dorsal_contour

        Parameters
        ---------------
        ventral_contour: numpy array of shape (49,2,n), or list of per-frame arrays
        dorsal_contour: numpy array of shape (49,2,n), or list of per-frame arrays

        Returns
        ----------------
        BasicWorm object

        """
        if not isinstance(ventral_contour, (list,tuple)):
            # we need to change the data from a (49,2,n) array to a list of (2,49)
            assert(np.shape(ventral_contour) == np.shape(dorsal_contour))
            assert ventral_contour.shape[1] == 2
            h_ventral_contour = WormParsing._h_array2list(ventral_contour)
            h_dorsal_contour = WormParsing._h_array2list(dorsal_contour)
        else:
            h_ventral_contour = ventral_contour
            h_dorsal_contour = dorsal_contour

        # Here I am checking that the contour missing frames are aligned.
        # I prefer to populate the frame_code in normalized worm.
        # (a frame that is None on one side must be None on the other,
        # since None == None is True but None == array is False)
        assert all( v == d for v,d in zip(h_ventral_contour, h_dorsal_contour) if v is None or d is None)

        # Having converted our normalized contour to a heterocardinal-type
        # contour that just "happens" to have all its frames with the same
        # number of skeleton points, we can just call another factory method
        # and we are done:
        bw = cls()
        bw.h_ventral_contour = h_ventral_contour
        bw.h_dorsal_contour = h_dorsal_contour

        return bw
    @classmethod
    def from_skeleton_factory(cls, skeleton, extrapolate_contour=False):
        """
        Construct a BasicWorm from skeleton data alone.

        Parameters
        ----------
        skeleton: ndarray of shape (49,2,n), or list/tuple of per-frame arrays
        extrapolate_contour: bool
            False (default): contours are left as None.
            True: derive a fake sine-bulge contour (known to be buggy;
            see the inline notes below).
        """
        if not extrapolate_contour:
            '''
            Construct the object using only the skeletons without contours.
            This is a better default because the contour interpolation will produce a fake contour.
            '''
            bw = cls()
            #other option will be to give a list of None, but this make more obvious when there is a mistake
            bw.h_ventral_contour = None
            bw.h_dorsal_contour = None
            if isinstance(skeleton, (list,tuple)):
                bw._h_skeleton = skeleton
            else:
                assert skeleton.shape[1] == 2
                bw._h_skeleton = WormParsing._h_array2list(skeleton)
            return bw
        else:
            """
            Derives a contour from the skeleton
            THIS PART IS BUGGY, THE INTERPOLATION WORKS ONLY IN A LIMITED NUMBER OF CASES
            TODO: right now the method creates the bulge entirely in the y-axis,
            across the x-axis. Instead the bulge should be rotated to
            apply across the head-tail orientation.
            TODO: the bulge should be more naturalistic than the simple sine wave
            currently used.
            """
            # Make ventral_contour != dorsal_contour by making them "bulge"
            # in the middle, in a basic simulation of what a real worm looks like
            bulge_x = np.zeros((config.N_POINTS_NORMALIZED))

            # Create the "bulge"
            x = np.linspace(0, 1, config.N_POINTS_NORMALIZED)
            bulge_y = np.sin(x * np.pi) * 50

            # Shape is (49,2,1):
            bulge_frame1 = np.rollaxis(np.dstack([bulge_x, bulge_y]),
                                       axis=0, start=3)

            # Repeat the bulge across all frames:
            num_frames = skeleton.shape[2]
            bulge_frames = np.repeat(bulge_frame1, num_frames, axis=2)

            # Apply the bulge above and below the skeleton
            ventral_contour = skeleton + bulge_frames
            dorsal_contour = skeleton - bulge_frames

            # Now we are reduced to the contour factory case:
            return BasicWorm.from_contour_factory(ventral_contour, dorsal_contour)
    @property
    def h_ventral_contour(self):
        # Heterocardinal ventral contour: one entry per frame (None for
        # missing frames), or None when no contour data is loaded.
        return self._h_ventral_contour

    @h_ventral_contour.setter
    def h_ventral_contour(self, x):
        # Replacing the contour invalidates any skeleton derived from it.
        self._h_ventral_contour = x
        self.__remove_precalculated_skeleton()
    @property
    def h_dorsal_contour(self):
        # Heterocardinal dorsal contour: one entry per frame (None for
        # missing frames), or None when no contour data is loaded.
        return self._h_dorsal_contour

    @h_dorsal_contour.setter
    def h_dorsal_contour(self, x):
        # Replacing the contour invalidates any skeleton derived from it.
        self._h_dorsal_contour = x
        self.__remove_precalculated_skeleton()
def __remove_precalculated_skeleton(self):
"""
Removes the precalculated self._h_skeleton, if it exists.
This is typically called if we've potentially changed something,
i.e. if we've loaded new values for self.h_ventral_contour or
self.h_non_vulva contour.
In these cases we must be sure to delete h_skeleton, since it is
derived from ventral_contour and dorsal_contour.
It will be recalculated if it's ever asked for.
"""
try:
del(self._h_skeleton)
except AttributeError:
pass
@property
def h_skeleton(self):
"""
If self._h_skeleton has been defined, then return it.
Otherwise, try to extrapolate it from the contour.
Note: This method does not have an obvious use case. The normal
pipeline is to call NormalizedWorm.from_BasicWorm_factory, which will
calculate a skeleton.
"""
try:
return self._h_skeleton
except AttributeError:
# Extrapolate skeleton from contour
# TODO: improve this: for now
self._h_widths, self._h_skeleton = \
WormParsing.compute_skeleton_and_widths(self.h_ventral_contour, self.h_dorsal_contour)
#how can i call _h_widths???
return self._h_skeleton
def plot_frame(self, frame_index):
"""
Plot the contour and skeleton the worm for one of the frames.
Parameters
----------------
frame_index: int
The desired frame # to plot.
"""
vc = self.h_ventral_contour[frame_index]
dc = self.h_dorsal_contour[frame_index]
s = self.h_skeleton[frame_index]
plt.scatter(vc[0, :], vc[1, :])
plt.scatter(dc[0, :], dc[1, :])
plt.scatter(s[0, :], s[1, :])
plt.gca().set_aspect('equal', adjustable='box')
plt.show()
def validate(self):
"""
Validate that self is a well-defined BasicWorm instance.
"""
assert(len(self.h_ventral_contour) == len(self.h_dorsal_contour))
    def __repr__(self):
        # Delegate to the package-wide pretty-printer.
        return utils.print_object(self)
def __eq__(self, other):
"""
Compare this BasicWorm against another.
"""
attribute_list = ['h_ventral_contour', 'h_dorsal_contour',
'h_skeleton', 'video_info']
return utils.compare_attributes(self, other, attribute_list)
#%%
class WormPartition():
    """
    Provides named index ranges ("partitions") over the 49 normalized worm
    points, plus helpers to mask or slice per-point data by partition.
    """
    def __init__(self):
        # These are RANGE values, so the last value is not inclusive
        self.worm_partitions = {'head': (0, 8),
                                'neck': (8, 16),
                                'midbody': (16, 33),
                                'old_midbody_velocity': (20, 29),
                                'hips': (33, 41),
                                'tail': (41, 49),
                                # refinements of ['head']
                                'head_tip': (0, 3),
                                'head_base': (5, 8),  # ""
                                # refinements of ['tail']
                                'tail_base': (41, 44),
                                'tail_tip': (46, 49),  # ""
                                'all': (0, 49),
                                # neck, midbody, and hips
                                'body': (8, 41)}
        # Named groupings of the partitions above.
        self.worm_partition_subsets = {
            'normal': (
                'head', 'neck', 'midbody', 'hips', 'tail'), 'first_third': (
                'head', 'neck'), 'second_third': (
                'midbody',), 'last_third': (
                'hips', 'tail'), 'all': (
                'all',)}

    def get_partition_subset(self, partition_type):
        """
        Return the sub-dict of worm_partitions belonging to the named
        subset of partitions.

        Parameters
        ---------------------------------------
        partition_type: string
            A key of self.worm_partition_subsets, e.g. 'first_third'

        Usage
        ---------------------------------------
        For example, to see the mean of the head and the mean of the neck,
        use the partition subset, 'first_third', like this:
            nw = NormalizedWorm(....)
            width_dict = {k: np.mean(nw.get_partition(k), 0) for k in ('head', 'neck')}
        OR, using self.worm_partition_subsets,
            s = nw.get_paritition_subset('first_third')
            # i.e. s = {'head':(0,8), 'neck':(8,16)}
            width_dict = {k: np.mean(nw.get_partition(k), 0) for k in s.keys()}

        Notes
        ---------------------------------------
        Translated from get.ALL_NORMAL_INDICES in SegwormMatlabClasses /
        +seg_worm / @skeleton_indices / skeleton_indices.m
        """
        # partition_type is assumed to be a key for the dictionary
        # worm_partition_subsets
        p = self.worm_partition_subsets[partition_type]
        # Return only the subset of partitions contained in the particular
        # subset of interest, p.
        return {k: self.worm_partitions[k] for k in p}

    def get_subset_partition_mask(self, name):
        """
        Returns a boolean mask over the 49 points covering every partition
        in the named subset - for working with arrays given a partition.
        """
        keys = self.worm_partition_subsets[name]
        mask = np.zeros(49, dtype=bool)
        for key in keys:
            # OR together the masks of each member partition.
            mask = mask | self.partition_mask(key)
        return mask

    def partition_mask(self, partition_key):
        """
        Returns a boolean numpy array (length 49) that is True inside the
        requested partition's index range.
        """
        mask = np.zeros(49, dtype=bool)
        slice_val = self.worm_partitions[partition_key]
        mask[slice(*slice_val)] = True
        return mask

    def get_partition(self, partition_key, data_key='skeletons',
                      split_spatial_dimensions=False):
        """
        Retrieve partition of a measurement of the worm, that is, across all
        available frames but across only a subset of the 49 points.

        Parameters
        ---------------------------------------
        partition_key: string
            The desired partition. e.g. 'head', 'tail', etc.
        data_key: string (optional)
            The desired measurement (default is 'skeletons')
        split_spatial_dimensions: bool (optional)
            If True, the partition is returned separated into x and y

        Returns
        ---------------------------------------
        A numpy array containing the data requested, cropped to just
        the partition requested (so the shape might be, say, 4xn if data
        is 'angles'), or None when the underlying data is empty.

        Notes
        ---------------------------------------
        Translated from get.ALL_NORMAL_INDICES in SegwormMatlabClasses /
        +seg_worm / @skeleton_indices / skeleton_indices.m
        """
        # We use numpy.split to split a data_dict element into three, cleaved
        # first by the first entry in the duple worm_partitions[partition_key],
        # and second by the second entry in that duple.
        # Taking the second element of the resulting list of arrays, i.e. [1],
        # gives the partitioned component we were looking for.
        part = self.worm_partitions[partition_key]
        worm_attribute_values = getattr(self, data_key)
        if(len(worm_attribute_values) != 0):
            # Let's suppress the warning about zero arrays being reshaped
            # since that's irrelevant since we are only looking at the
            # non-zero array in the middle i.e. the 2nd element i.e. [1]
            with warnings.catch_warnings():
                warnings.simplefilter('ignore', category=FutureWarning)
                partition = np.split(worm_attribute_values,
                                     part)[1]
            if(split_spatial_dimensions):
                return partition[:, 0, :], partition[:, 1, :]
            else:
                return partition
        else:
            return None
#%%
def isnamedtuple(obj):
    """
    Heuristic check if an object is a namedtuple: a tuple carrying the
    `_fields` attribute and a callable `_asdict`.
    """
    if not isinstance(obj, tuple):
        return False
    return hasattr(obj, "_fields") and callable(getattr(obj, "_asdict", None))
def serialize(data):
    """
    Recursively convert `data` into a JSON-serializable structure.

    Plain scalars and lists pass through; other containers are wrapped in
    single-key dicts tagged "py/..." so `restore` can rebuild the original
    Python objects.  Raises TypeError for unsupported types.
    """
    if data is None or isinstance(data, (bool, int, float, str)):
        return data
    if isinstance(data, list):
        return [serialize(item) for item in data]
    # OrderedDict must be checked before plain dict (it is a dict subclass).
    if isinstance(data, OrderedDict):
        pairs = [[serialize(k), serialize(v)] for k, v in data.items()]
        return {"py/collections.OrderedDict": pairs}
    # Namedtuples must be checked before plain tuples.
    if isnamedtuple(data):
        return {"py/collections.namedtuple": {
            "type": type(data).__name__,
            "fields": list(data._fields),
            "values": [serialize(getattr(data, f)) for f in data._fields]}}
    if isinstance(data, dict):
        # All-string keys survive a JSON round trip unchanged; otherwise
        # keep the keys as a tagged pair list.
        if all(isinstance(k, str) for k in data):
            return {k: serialize(v) for k, v in data.items()}
        return {"py/dict": [[serialize(k), serialize(v)]
                            for k, v in data.items()]}
    if isinstance(data, tuple):
        return {"py/tuple": [serialize(item) for item in data]}
    if isinstance(data, set):
        return {"py/set": [serialize(item) for item in data]}
    if isinstance(data, np.ndarray):
        return {"py/numpy.ndarray": {
            "values": data.tolist(),
            "dtype": str(data.dtype)}}
    raise TypeError("Type %s not data-serializable" % type(data))
def restore(dct):
    """
    Inverse of `serialize`: rebuild Python objects from "py/..."-tagged
    dicts.  Intended for use as a json.loads object_hook; untagged dicts
    are returned unchanged.
    """
    # Simple one-call reconstructions.
    simple_tags = (
        ("py/dict", dict),
        ("py/tuple", tuple),
        ("py/set", set),
    )
    for tag, builder in simple_tags:
        if tag in dct:
            return builder(dct[tag])
    if "py/collections.namedtuple" in dct:
        spec = dct["py/collections.namedtuple"]
        return namedtuple(spec["type"], spec["fields"])(*spec["values"])
    if "py/numpy.ndarray" in dct:
        spec = dct["py/numpy.ndarray"]
        return np.array(spec["values"], dtype=spec["dtype"])
    if "py/collections.OrderedDict" in dct:
        return OrderedDict(dct["py/collections.OrderedDict"])
    return dct
def data_to_json(data):
    """Serialize `data` (via `serialize`) and dump it to a JSON string."""
    tagged = serialize(data)
    return json.dumps(tagged)
def json_to_data(s):
    """Parse JSON string `s`, rebuilding tagged objects via `restore`."""
    return json.loads(s, object_hook=restore)
def nested_equal(v1, v2):
    """
    Compare two complex data structures for deep equality.

    Handles numpy arrays as leaf nodes, compares dicts by key (order-
    independent), and compares other iterables element-wise.

    FIXES over the previous version:
    - unequal-length sequences no longer compare equal (zip used to stop
      at the shorter one);
    - dicts with the same items in different insertion order now compare
      equal.
    """
    # Strings are iterable but must be compared atomically.
    if isinstance(v1, str) or isinstance(v2, str):
        return v1 == v2
    if isinstance(v1, np.ndarray) or isinstance(v2, np.ndarray):
        return np.array_equal(v1, v2)
    if isinstance(v1, dict) and isinstance(v2, dict):
        if set(v1.keys()) != set(v2.keys()):
            return False
        return all(nested_equal(v1[k], v2[k]) for k in v1)
    if isinstance(v1, Iterable) and isinstance(v2, Iterable):
        # Materialize so generators get a length check too.
        items1 = list(v1)
        items2 = list(v2)
        if len(items1) != len(items2):
            return False
        return all(nested_equal(a, b) for a, b in zip(items1, items2))
    return v1 == v2
|
{"hexsha": "8a16521c64451b06ae01c0293d0fd203c1fc9597", "size": 24726, "ext": "py", "lang": "Python", "max_stars_repo_path": "open_worm_analysis_toolbox/prefeatures/basic_worm.py", "max_stars_repo_name": "suzil/open-worm-analysis-toolbox", "max_stars_repo_head_hexsha": "ea1d3cddcefb6724e3d531cbced32092ac431411", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 35, "max_stars_repo_stars_event_min_datetime": "2016-03-02T06:32:16.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-17T22:57:22.000Z", "max_issues_repo_path": "open_worm_analysis_toolbox/prefeatures/basic_worm.py", "max_issues_repo_name": "suzil/open-worm-analysis-toolbox", "max_issues_repo_head_hexsha": "ea1d3cddcefb6724e3d531cbced32092ac431411", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 80, "max_issues_repo_issues_event_min_datetime": "2015-12-30T21:55:57.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-13T10:38:47.000Z", "max_forks_repo_path": "open_worm_analysis_toolbox/prefeatures/basic_worm.py", "max_forks_repo_name": "suzil/open-worm-analysis-toolbox", "max_forks_repo_head_hexsha": "ea1d3cddcefb6724e3d531cbced32092ac431411", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 17, "max_forks_repo_forks_event_min_datetime": "2016-02-19T09:25:39.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-17T22:57:33.000Z", "avg_line_length": 33.9176954733, "max_line_length": 108, "alphanum_fraction": 0.5896627032, "include": true, "reason": "import numpy", "num_tokens": 5620}
|
def ConvertBaselineJson(siteUuidList):
    """
    Fetch baseline JSON for each site and reshape the per-user card-id
    columns into one long DataFrame with one row per (user, card).

    Card id values have the form 'cardType-cardFirstSix-cardLastFour';
    the three pieces are split into separate columns.
    """
    userUuidCards = pd.DataFrame()
    for siteUuid in tqdm(siteUuidList):
        SiteuserUuidCards = GetBaselineJson([siteUuid])
        if len(SiteuserUuidCards) == 0:
            # Site had no baseline users; skip it.
            continue
        # One 'cardIds*' column per card slot; melt them into rows.
        columns = [col for col in SiteuserUuidCards if col.startswith('cardIds')]
        melted = SiteuserUuidCards[columns + ['userUuid', 'enrollDate']].melt(id_vars=['userUuid', 'enrollDate'])
        melted = melted.drop(columns=['variable'])
        melted = melted.dropna(subset=['value'])
        # value format: 'TYPE-FIRSTSIX-LASTFOUR'
        melted['cardType'] = melted.value.apply(lambda x: x.split('-')[0])
        melted['cardFirstSix'] = melted.value.apply(lambda x: x.split('-')[1])
        melted['cardLastFour'] = melted.value.apply(lambda x: x.split('-')[2])
        melted['siteUuid'] = siteUuid
        melted['merchantUuid'] = GetSiteInfo(siteUuid)['merchantUuid']
        melted = melted.rename(columns={'value': 'cardId'})
        userUuidCards = userUuidCards.append(melted, sort=False)
    return userUuidCards
def flatten_json(y):
    """
    Flatten nested dicts/lists into a single-level dict whose keys are
    the '_'-joined path segments (list positions become indices).

    e.g. {"a": {"b": 1}, "c": [2]} -> {"a_b": 1, "c_0": 2}
    """
    flat = {}

    def _walk(node, prefix=''):
        # NOTE: exact-type checks (not isinstance) are intentional, to
        # match the original behaviour for dict/list subclasses.
        if type(node) is dict:
            for key, value in node.items():
                _walk(value, prefix + key + '_')
        elif type(node) is list:
            for index, value in enumerate(node):
                _walk(value, prefix + str(index) + '_')
        else:
            # Strip the trailing '_' from the accumulated path.
            flat[prefix[:-1]] = node

    _walk(y)
    return flat
def GetAllVisibleSites():
    """
    Return a list of siteUuids for every site with DEFAULT visibility.

    NOTE(review): reads from a hard-coded, machine-specific CSV path —
    confirm before running elsewhere.
    """
    import pandas as pd
    SiteInfo = pd.read_csv('/Users/alessandroorfei/PycharmProjects/aggregate-incremental/resources/gas_merchant_service.csv')
    SiteInfo = SiteInfo[(SiteInfo.visibility == "DEFAULT")].copy()
    return list(SiteInfo['siteUuid'])
def GetBaselineJson(siteUuidList):
    """
    Download and flatten each site's baseline.json blob from S3.

    Returns one DataFrame with a row per user baseline across all sites.
    """
    s3 = boto3.resource('s3')
    # Accept a bare siteUuid as well as a list.
    if type(siteUuidList) != list:
        siteUuidList = [siteUuidList]
    AllSites = pd.DataFrame()
    for siteUuid in tqdm(siteUuidList):
        merchantUuid = GetSiteInfo(siteUuid)['merchantUuid']
        content_object = s3.Object('data.upside-services.com',
                                   'service-station/' + merchantUuid + '/' + siteUuid + '/data/analysis/baseline.json')
        file_content = content_object.get()['Body'].read().decode('utf-8')
        d = json.loads(file_content)
        SiteuserUuidCards = pd.DataFrame()
        for user in range(0, len(d['userBaselines'])):
            d_flat = flatten_json(d['userBaselines'][user])
            dat = json_normalize(d_flat)
            # NOTE(review): this sets 'siteUuid' on the accumulator BEFORE
            # appending, so the most recently appended rows end up without
            # it — looks like it was meant to be dat['siteUuid']; confirm.
            SiteuserUuidCards['siteUuid'] = siteUuid
            SiteuserUuidCards = SiteuserUuidCards.append(dat, ignore_index=True, sort=False)
        AllSites = AllSites.append(SiteuserUuidCards, sort=False)
    return AllSites
def GetIncremental(siteUuidList, StartDate, EndDate, userUuidList=[]):
    """
    This function returns a dataframe of Incremental data for a siteUuid.

    parameters:
        siteUuidList: site identifiers, e.g. ['e30a6caa-efdd-4d5d-92ad-010d1d158a35']
                      (a bare string is also accepted)
        StartDate: string date, e.g. "2018-04-01"
        EndDate: string date, e.g. "2018-10-31"
        userUuidList: optional whitelist of userUuids to keep

    returns:
        DataFrame with Incremental date converted to datetime

    NOTE(review): upgrades the runbook CLI on every call and uses
    hard-coded local paths — machine-specific; confirm before reuse.
    """
    import os
    import pandas as pd
    os.system('pip2 install --upgrade runbookcli')
    os.chdir('/Users/alessandroorfei/Desktop/')
    if type(siteUuidList) != list:
        siteUuidList = [siteUuidList]
    Incremental = pd.DataFrame()
    for siteUuid in tqdm(siteUuidList):
        # Download this site's incremental CSV via the runbook CLI.
        incremental_downloader = 'runbook get_incremental prod ' + 'incremental_' + siteUuid + '.csv --sites ' + siteUuid
        print(incremental_downloader)
        os.system(incremental_downloader)
        SiteIncremental = pd.read_csv('/Users/alessandroorfei/Desktop/' + 'incremental_' + siteUuid + '.csv')
        SiteIncremental['date'] = pd.to_datetime(SiteIncremental.date)
        # Keep only rows inside the requested (inclusive) date window.
        SiteIncremental = SiteIncremental[(SiteIncremental.date >= pd.to_datetime(StartDate))
                                          & (SiteIncremental.date <= pd.to_datetime(EndDate))].copy()
        # Filter to permitted users
        if userUuidList != []:
            SiteIncremental = SiteIncremental[SiteIncremental.userUuid.isin(userUuidList)].copy()
        # Remove the downloaded temp file.
        os.system("rm /Users/alessandroorfei/Desktop/incremental_" + siteUuid + '.csv')
        Incremental = Incremental.append(SiteIncremental, sort=False)
    return Incremental
def GetMerchantSites(merchantUuidList, visibility="DEFAULT", processorType=[]):
    """
    Return the list of siteUuids belonging to the given merchants,
    filtered by visibility and (optionally) processor type.
    """
    import pandas as pd
    SiteInfo = pd.read_csv('/Users/alessandroorfei/PycharmProjects/aggregate-incremental/resources/gas_merchant_service.csv')
    SiteInfo = SiteInfo[(SiteInfo.merchantUuid.isin(merchantUuidList)) &
                        (SiteInfo.visibility == visibility)].copy()
    if processorType != []:
        # Keep only sites whose processor is in the requested set.
        SiteInfo = SiteInfo[SiteInfo.processorType.isin(processorType)].copy()
    return list(SiteInfo['siteUuid'])
def GetpersonID(df, PersonIdentifiers):
    """
    Build a 'personID' column on `df` by concatenating the string form of
    each column named in `PersonIdentifiers`, and return that column.

    Note: mutates `df` in place (adds/overwrites the 'personID' column).
    """
    df['personID'] = ""
    for identifier_col in PersonIdentifiers:
        df['personID'] += df[identifier_col].astype('str')
    return df['personID']
def GetSiteInfo(siteUuid, visibility = "DEFAULT"):
    """
    Returns a dict object with info on the site.

    Raises AssertionError unless exactly one row in the CSV matches the
    (siteUuid, visibility) pair.
    """
    import pandas as pd
    SiteInfo = pd.read_csv('/Users/alessandroorfei/PycharmProjects/aggregate-incremental/resources/gas_merchant_service.csv')
    SiteInfo = SiteInfo[(SiteInfo.siteUuid == siteUuid) &
                        (SiteInfo.visibility == visibility)].copy()
    assert len(SiteInfo) == 1
    # Single matching row -> plain dict.
    SiteInfo = SiteInfo.to_dict(orient='records')[0]
    return SiteInfo
def GetTransactions(siteUuidList, StartDate, EndDate, sourceTerminal=["All"]):
    """
    This function returns a dataframe of Transaction data for siteUuid.

    parameters:
        siteUuidList: list of site identifiers, e.g. ['e30a6caa-efdd-4d5d-92ad-010d1d158a35']
        StartDate: string date, e.g. "2018-04-01"
        EndDate: string date, e.g. "2018-10-31"
        sourceTerminal: list of terminal types to keep; ["All"] keeps everything

    returns:
        DataFrame with Transaction data plus TranTime and TranDate
    """
    import boto3
    import pandas as pd
    from upside_core.transaction.datalake_dao import TransactionDataLakeDAO
    from pandas.io.json import json_normalize  # package for flattening json in pandas df
    StartDate = pd.to_datetime(StartDate)
    EndDate = pd.to_datetime(EndDate)
    Transactions = pd.DataFrame()
    for siteUuid in tqdm(siteUuidList):
        transaction_dao = TransactionDataLakeDAO(tier='prod', s3_client=boto3.client('s3'))
        SiteTransactions = transaction_dao.get(siteUuid, 'default', StartDate, EndDate)
        # Flatten the nested JSON records into columns.
        SiteTransactions = json_normalize(SiteTransactions)
        SiteTransactions['TranTime'] = pd.to_datetime(SiteTransactions['transactionTimestamp'])
        SiteTransactions['TranDate'] = pd.to_datetime(SiteTransactions.TranTime.dt.date)
        if sourceTerminal != ["All"]:
            # Keep only the requested terminal types.
            SiteTransactions = SiteTransactions[SiteTransactions.sourceTerminal.isin(sourceTerminal)].copy()
        Transactions = Transactions.append(SiteTransactions, sort=False)
    return Transactions
def GraphTranCounts(Transactions, StartDate, EndDate):
    """
    Plot daily transaction counts between StartDate and EndDate, with
    days that have no transactions shown as zero.
    """
    import pandas as pd
    import numpy as np
    import matplotlib.pyplot as plt
    # Count transactions per day, in date order.
    counts = pd.DataFrame(Transactions.TranDate.value_counts().sort_index())
    counts = counts.reset_index()
    counts = counts.rename(columns={'index': 'TranDate',
                                    'TranDate': 'counts'})
    # Full date spine so missing days appear (NaN, then filled with 0).
    # NOTE(review): columns={'TranDate'} passes a set rather than a list;
    # appears to work but a list would be clearer — confirm intent.
    spine = pd.DataFrame(pd.date_range(start=StartDate, end=EndDate), columns={'TranDate'})
    fullcount = pd.merge(spine, counts, how='left', on='TranDate')
    fullcount['counts'] = np.where(fullcount.counts.isnull(), 0, fullcount.counts)
    fullcount = fullcount.set_index('TranDate')
    ax = fullcount.plot()
    ax.set_ylim(ymin=0)
    return
def ImputeInsideTransactions(Transactions, processor, SalesTax):
    """
    Return a Series of inside-purchase amounts (NaN for non-INSIDE rows).

    For the "motiva" processor the total amount is used directly; for all
    others the amount is inferred from the observed tax and the known
    sales-tax rate.  Also stores the result on `Transactions` as the
    'InsideAmount' column (mutates the frame in place).
    """
    import numpy as np
    is_inside = Transactions.sourceTerminal == "INSIDE"
    if processor == "motiva":
        inside_value = Transactions['total.amount']
    else:
        # amount = tax / rate
        inside_value = Transactions['tax.amount'] / SalesTax
    Transactions['InsideAmount'] = np.where(is_inside, inside_value, np.nan)
    return Transactions['InsideAmount']
def MatchOfferAndTx(Incremental, userUuidCards, Transactions):
    """
    Match incremental offer rows to raw transactions in four passes of
    decreasing strictness:

      1. date + site + cardId + amount + terminal
      2. date + site + cardId + terminal (amount relaxed)
      3. date + site + cardId + amount (terminal dropped)
      4. date + site + card first-six/last-four + amount + terminal

    Each pass only attempts the offers/transactions not yet matched, and
    the running match set is de-duplicated on both transactionUuid and
    offerUuid.  Per-pass match rates are printed.

    Returns a DataFrame with columns
    ['siteUuid', 'userUuid', 'offerUuid', 'TranTime', 'transactionUuid'].
    """
    import pandas as pd
    # Attach each user's card ids to their incremental rows.
    Incremental = pd.merge(Incremental,
                           userUuidCards,
                           how='left',
                           on=['siteUuid', 'userUuid'])
    # ## First Pass
    Incremental = Incremental.drop(columns=['merchantUuid'])
    # Pass 1 - matching on: ['TranDate','siteUuid','cardId','total.amount','sourceTerminal']
    TotalMatches = pd.DataFrame()
    MatchingIncPass1 = pd.DataFrame()
    for i in [col for col in Incremental if col.startswith('cardIds_')]:
        # Try each card-slot column in turn as the join key.
        Matched = pd.merge(Incremental,
                           Transactions,
                           how='inner',
                           left_on=['date', 'siteUuid', i, 'totalRevenue', 'sourceTerminal'],
                           right_on=['TranDate', 'siteUuid', 'cardId', 'total.amount', 'sourceTerminal'])
        MatchingIncPass1 = MatchingIncPass1.append(Matched, sort=False)
    # Keep at most one match per transaction and per offer.
    MatchingIncPass1 = MatchingIncPass1.drop_duplicates(subset=['transactionUuid'])
    MatchingIncPass1 = MatchingIncPass1.drop_duplicates(subset=['offerUuid'])
    TotalMatches = TotalMatches.append(MatchingIncPass1, sort=False)
    TotalMatches = TotalMatches.drop_duplicates(subset=['transactionUuid'])
    TotalMatches = TotalMatches.drop_duplicates(subset=['offerUuid'])
    print(len(TotalMatches))
    print(len(Incremental))
    print("Failed to match on Pass 1 ", 1 - len(TotalMatches) / len(Incremental))
    IncNotMatched_Pass1 = Incremental[~Incremental.offerUuid.isin(TotalMatches.offerUuid)].copy()
    TransNotMatched_Pass1 = Transactions[~Transactions.transactionUuid.isin(TotalMatches.transactionUuid)].copy()
    # ## Second Pass
    # Pass 2 - matching on: ['TranDate','siteUuid','cardId','sourceTerminal']
    MatchingIncPass2 = pd.DataFrame()
    for i in [col for col in IncNotMatched_Pass1 if col.startswith('cardIds_')]:
        Matched = pd.merge(IncNotMatched_Pass1,
                           TransNotMatched_Pass1,
                           how='inner',
                           left_on=['date', 'siteUuid', i, 'sourceTerminal'],
                           right_on=['TranDate', 'siteUuid', 'cardId', 'sourceTerminal'])
        MatchingIncPass2 = MatchingIncPass2.append(Matched, sort=False)
    MatchingIncPass2 = MatchingIncPass2.drop_duplicates(subset=['transactionUuid'])
    MatchingIncPass2 = MatchingIncPass2.drop_duplicates(subset=['offerUuid'])
    TotalMatches = TotalMatches.append(MatchingIncPass2, sort=False)
    TotalMatches = TotalMatches.drop_duplicates(subset=['transactionUuid'])
    TotalMatches = TotalMatches.drop_duplicates(subset=['offerUuid'])
    print(len(TotalMatches))
    print(len(Incremental))
    print("Failed to match on Pass 2 ", 1 - len(TotalMatches) / len(Incremental))
    IncNotMatched_Pass2 = Incremental[~Incremental.offerUuid.isin(TotalMatches.offerUuid)].copy()
    TransNotMatched_Pass2 = Transactions[~Transactions.transactionUuid.isin(TotalMatches.transactionUuid)].copy()
    # ## Third Pass
    # Pass 3 - exact match on date, siteUuid, cardId, sale amount
    MatchingIncPass3 = pd.DataFrame()
    IncNotMatched_Pass2 = IncNotMatched_Pass2.drop(columns=['sourceTerminal'])
    for i in [col for col in IncNotMatched_Pass2 if col.startswith('cardIds_')]:
        Matched = pd.merge(IncNotMatched_Pass2,
                           TransNotMatched_Pass2,
                           how='inner',
                           left_on=['date', 'siteUuid', i, 'totalRevenue'],
                           right_on=['TranDate', 'siteUuid', 'cardId', 'total.amount'])
        MatchingIncPass3 = MatchingIncPass3.append(Matched, sort=False)
    MatchingIncPass3 = MatchingIncPass3.drop_duplicates(subset=['transactionUuid'])
    MatchingIncPass3 = MatchingIncPass3.drop_duplicates(subset=['offerUuid'])
    TotalMatches = TotalMatches.append(MatchingIncPass3, sort=False)
    TotalMatches = TotalMatches.drop_duplicates(subset=['transactionUuid'])
    TotalMatches = TotalMatches.drop_duplicates(subset=['offerUuid'])
    print(len(TotalMatches))
    print(len(Incremental))
    print("Failed to match on Pass 3 ", 1 - len(TotalMatches) / len(Incremental))
    IncNotMatched_Pass3 = Incremental[~Incremental.offerUuid.isin(TotalMatches.offerUuid)].copy()
    TransNotMatched_Pass3 = Transactions[~Transactions.transactionUuid.isin(TotalMatches.transactionUuid)].copy()
    # ## Fourth Pass
    # Pass 4 - match on the card's first six / last four digits instead of the full cardId
    MatchingIncPass4 = pd.DataFrame()
    for i in [col for col in IncNotMatched_Pass3 if col.startswith('cardIds_')]:
        # Split 'TYPE-FIRSTSIX-LASTFOUR' card ids into their components.
        Test = IncNotMatched_Pass3[i].str.split(pat="-", expand=True, n=2)
        Test = Test.rename(columns={0: 'cardType_json',
                                    1: 'cardFirstSix',
                                    2: 'cardLastFour'})
        Test = pd.concat([IncNotMatched_Pass3, Test], sort=False, axis=1)
        if len(Test[Test[i].notnull()]) == 0:
            # No cards in this slot; nothing to match.
            continue
        Matched = pd.merge(Test,
                           TransNotMatched_Pass3,
                           how='inner',
                           left_on=['date', 'siteUuid', 'cardFirstSix', 'cardLastFour', 'totalRevenue',
                                    'sourceTerminal'],
                           right_on=['TranDate', 'siteUuid', 'cardFirstSix', 'cardLastFour', 'total.amount',
                                     'sourceTerminal'])
        MatchingIncPass4 = MatchingIncPass4.append(Matched, sort=False)
    MatchingIncPass4 = MatchingIncPass4.drop_duplicates(subset=['transactionUuid'])
    MatchingIncPass4 = MatchingIncPass4.drop_duplicates(subset=['offerUuid'])
    TotalMatches = TotalMatches.append(MatchingIncPass4, sort=False)
    TotalMatches = TotalMatches.drop_duplicates(subset=['transactionUuid'])
    TotalMatches = TotalMatches.drop_duplicates(subset=['offerUuid'])
    print(len(TotalMatches))
    print(len(Incremental))
    print("Failed to match on Pass 4 ", 1 - len(TotalMatches) / len(Incremental))
    # Computed for inspection/parity with earlier passes (currently unused).
    IncNotMatched_Pass4 = Incremental[~Incremental.offerUuid.isin(TotalMatches.offerUuid)].copy()
    TransNotMatched_Pass4 = Transactions[~Transactions.transactionUuid.isin(TotalMatches.transactionUuid)].copy()
    TotalMatches = TotalMatches[['siteUuid', 'userUuid', 'offerUuid', 'TranTime', 'transactionUuid']].copy()
    return TotalMatches
|
{"hexsha": "30be711dbf3a72312c7ac5452375723749dae101", "size": 15037, "ext": "py", "lang": "Python", "max_stars_repo_path": "sample/helpers/helpers.py", "max_stars_repo_name": "aeorfei/aggregate-incremental", "max_stars_repo_head_hexsha": "aa63b6139d8edc335d6d789ff1a3eabea1ae33e5", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sample/helpers/helpers.py", "max_issues_repo_name": "aeorfei/aggregate-incremental", "max_issues_repo_head_hexsha": "aa63b6139d8edc335d6d789ff1a3eabea1ae33e5", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sample/helpers/helpers.py", "max_forks_repo_name": "aeorfei/aggregate-incremental", "max_forks_repo_head_hexsha": "aa63b6139d8edc335d6d789ff1a3eabea1ae33e5", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.0859598854, "max_line_length": 125, "alphanum_fraction": 0.6525902773, "include": true, "reason": "import numpy", "num_tokens": 3519}
|
theory T72
imports Main
begin
(* Auto-generated counterexample-search task over a lattice-ordered
   structure with multiplication, residuals (over/undr) and an involution.
   The lemma asks whether the listed axioms force the right residual to
   distribute over join:  undr(x, join(y, z)) = join(undr(x, y), undr(x, z)).
   Nitpick searches for a counterexample of cardinality 4; `oops`
   abandons the (unproven) lemma afterwards. *)
lemma "(
(\<forall> x::nat. \<forall> y::nat. meet(x, y) = meet(y, x)) &
(\<forall> x::nat. \<forall> y::nat. join(x, y) = join(y, x)) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, meet(y, z)) = meet(meet(x, y), z)) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(x, join(y, z)) = join(join(x, y), z)) &
(\<forall> x::nat. \<forall> y::nat. meet(x, join(x, y)) = x) &
(\<forall> x::nat. \<forall> y::nat. join(x, meet(x, y)) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(x, join(y, z)) = join(mult(x, y), mult(x, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(join(x, y), z) = join(mult(x, z), mult(y, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(x, over(join(mult(x, y), z), y)) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. meet(y, undr(x, join(mult(x, y), z))) = y) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(over(x, y), y), x) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. join(mult(y, undr(y, x)), x) = x) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. mult(meet(x, y), z) = meet(mult(x, z), mult(y, z))) &
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. over(x, meet(y, z)) = join(over(x, y), over(x, z))) &
(\<forall> x::nat. \<forall> y::nat. invo(join(x, y)) = meet(invo(x), invo(y))) &
(\<forall> x::nat. \<forall> y::nat. invo(meet(x, y)) = join(invo(x), invo(y))) &
(\<forall> x::nat. invo(invo(x)) = x)
) \<longrightarrow>
(\<forall> x::nat. \<forall> y::nat. \<forall> z::nat. undr(x, join(y, z)) = join(undr(x, y), undr(x, z)))
"
nitpick[card nat=4,timeout=86400]
oops
end
|
{"author": "inpefess", "repo": "residuated-binars", "sha": "330aaa51b6dbcaa06f8a6d15fe17c1ce58ae2a59", "save_path": "github-repos/isabelle/inpefess-residuated-binars", "path": "github-repos/isabelle/inpefess-residuated-binars/residuated-binars-330aaa51b6dbcaa06f8a6d15fe17c1ce58ae2a59/involution/task4/T72.thy"}
|
from .models import movies, meansize
import numpy as np
import pandas as pd
# Module-level caches populated by recommend().
# NOTE(review): recommend() assigns *local* x and y, so these two lists
# appear unused at module level — confirm before removing.
x = []
# movie_id -> list of ints parsed from the stored '|'-separated bit string
vectors = {}
y = []
# movie_id -> (mean rating, rating count)
ms = {}
def jaccard_sim(x, y):
    """
    Jaccard similarity of two equal-length binary vectors:
    |intersection| / |union|.

    FIXES over the previous version:
    - a length mismatch now raises ValueError instead of silently
      returning None;
    - two all-zero vectors now yield 0.0 instead of a divide-by-zero
      NaN/warning.
    """
    if len(x) != len(y):
        raise ValueError("vectors must have the same length")
    inter = np.logical_and(x, y)
    union = np.logical_or(x, y)
    union_size = float(union.sum())
    if union_size == 0:
        # Both vectors all-zero: define similarity as 0.
        return 0.0
    return inter.sum() / union_size
def comp_distance(x, y, i, id):
    """
    Similarity between two movie vectors (Jaccard).  `i` and `id` are
    unused but kept for signature compatibility with callers.
    """
    return jaccard_sim(x, y)
def getKNN(id, K):
    """
    Return the ids of the K movies most similar to `id`, ranked by
    similarity over the module-level `vectors` dict (id itself excluded).
    """
    scored = []
    for other_id in vectors:
        if other_id != id:
            sim = comp_distance(vectors[other_id], vectors[id], other_id, id)
            scored.append((other_id, sim))
    # Highest similarity first (stable sort, like the original sorted()).
    scored.sort(key=lambda pair: pair[1], reverse=True)
    # Raises IndexError if fewer than K candidates exist, as before.
    return [scored[j][0] for j in range(K)]
def recommend(id):
    """
    Recommend movies similar to movie `id`.

    Loads movie bit-vectors and (mean, size) rating stats from the
    database into the module-level caches, finds the 500 most similar
    movies, then re-ranks them by mean rating + rating count.
    """
    df = pd.DataFrame(list(movies.objects.values_list('vectors','movie_id')))
    df2 = pd.DataFrame(list(meansize.objects.values_list('movie_id','mean','size')))
    # Local y/x shadow the module-level lists of the same names.
    y = df2.values.tolist()
    for i in y:
        # movie_id -> (mean rating, rating count)
        ms[i[0]] = i[1], i[2]
    x = df.values.tolist()
    for i in x:
        # '|'-separated bit string -> list of ints
        vectors[i[1]] = list(map(int,i[0].split('|')))
    K = 500
    neighbors = getKNN(id, K)
    real_neighbors = []
    for i in neighbors:
        # Re-rank score = mean rating + number of ratings.
        real_neighbors.append((i, ms[i][0] + ms[i][1]))
    real_neighbors = list(sorted(real_neighbors, key=lambda x: x[1], reverse=True))
    real=[]
    for i in real_neighbors:
        real.append(i[0])
    return real
|
{"hexsha": "7b67fa27d85fd16b5ccefcacf688f4e98003b7d7", "size": 1355, "ext": "py", "lang": "Python", "max_stars_repo_path": "maps/recommendation.py", "max_stars_repo_name": "OrangeKing/pyRecommender", "max_stars_repo_head_hexsha": "b572c6cf0e4efe6e78f0b42c27316b26206689cd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "maps/recommendation.py", "max_issues_repo_name": "OrangeKing/pyRecommender", "max_issues_repo_head_hexsha": "b572c6cf0e4efe6e78f0b42c27316b26206689cd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-03-18T21:24:52.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T23:33:57.000Z", "max_forks_repo_path": "maps/recommendation.py", "max_forks_repo_name": "OrangeKing/pyRecommender", "max_forks_repo_head_hexsha": "b572c6cf0e4efe6e78f0b42c27316b26206689cd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.9661016949, "max_line_length": 81, "alphanum_fraction": 0.6501845018, "include": true, "reason": "import numpy", "num_tokens": 387}
|
from OkDatabase import OkDatabase
from Database import Database
from InstagramDatabase import InstagramDatabase
import numpy as np
import numpy.lib.recfunctions as recfunctions
class OkInitializer(object):
def add_friends_by_link(self, user_id, reference):
if ('ok.ru/profile/' in reference):
us_split = reference.split('profile/')
ok_id = us_split[1]
# ok_nickname = ok_nickname.replace("/", "")
friends, ok_us_id = self.get_friends(ok_id)
# проверка на ограничения юзера
if (friends != None and len(friends) != 0):
db = OkDatabase()
db.add_ok_user_to_db(user_id, ok_us_id, friends)
print('added to db')
else:
print('private account')
    def get_friends(self, id):
        # Stub.  Callers (add_friends_by_link, which unpacks the result as
        # `friends, ok_us_id`) expect a 2-tuple; NOTE(review): returning a
        # bare 0 would break that unpacking — confirm this is overridden
        # or replaced elsewhere.
        return 0
def get_friends_on_site(self, us_id):
db = Database()
proposed_fr_list = []
name_vk_table_with_friends = db.get_table_name_by_user_id(us_id)
if (name_vk_table_with_friends != None):
friends_on_site_vk = db.get_friends_us_id_by_table_name(name_vk_table_with_friends)
if (friends_on_site_vk != None):
proposed_fr_list = db.check_users_in_confidence_list(friends_on_site_vk, us_id)
inst_db = InstagramDatabase()
proposed_fr_list_inst = []
name_vk_table_with_friends = inst_db.get_table_name_by_user_id(us_id)
if (name_vk_table_with_friends != None):
friends_on_site_inst = inst_db.get_friends_us_id_by_table_name(name_vk_table_with_friends)
if (friends_on_site_inst != None):
proposed_fr_list_inst = inst_db.check_users_in_confidence_list(friends_on_site_inst, us_id)
ok_db = OkDatabase()
proposed_fr_list_ok = []
name_ok_table_with_friends = ok_db.get_table_name_by_user_id(us_id)
if (name_ok_table_with_friends != None):
friends_on_site_ok = ok_db.get_friends_us_id_by_table_name(name_vk_table_with_friends)
if (friends_on_site_ok != None):
proposed_fr_list_ok = ok_db.check_users_in_confidence_list(friends_on_site_ok, us_id)
# outer join proposed_fr_list & proposed_fr_list_inst
if (len(proposed_fr_list) > 0 or len(proposed_fr_list_inst) > 0 or len(proposed_fr_list_ok) > 0):
friends_array = self.get_joined_array(proposed_fr_list, proposed_fr_list_inst, proposed_fr_list_ok)
push_list = []
if (friends_array != None):
if (len(friends_array) > 0):
push_list, us_id = db.push_notifications(us_id, friends_array)
print(friends_array)
return friends_array, push_list, us_id
return None, None, us_id
def get_joined_array(self, arr1, arr2, arr3):
# https://stackoverflow.com/questions/23500754/numpy-how-to-outer-join-arrays
if (len(arr1) > 0):
box1 = arr1
if len(arr2) > 0:
box2 = arr2
else:
box2 = [box1[0]]
if len(arr3) > 0:
box3 = arr3
else:
box3 = [box1[0]]
else:
if len(arr2) > 0:
box2 = arr2
box1 = [box2[0]]
if len(arr3) > 0:
box3 = arr3
else:
box3 = [box2[0]]
else:
if len(arr3) > 0:
box3 = arr3
box2 = [box3[0]]
box1 = [box3[0]]
else:
return None
a3 = np.array(box1, dtype=[('col1', np.int8)])
a2 = np.array(box2, dtype=[('col1', np.int8)])
a1 = np.array(box3, dtype=[('col1', np.int8)])
result = a1
for a in (a2, a3):
cols = list(set(result.dtype.names).intersection(a.dtype.names))
result = recfunctions.join_by(cols, result, a, jointype='outer')
pr_fr_l = []
for item in result:
pr_fr_l.append(item[0])
print(pr_fr_l)
return pr_fr_l
# Module-level database handle; the table-creation call below is a one-off
# setup step that is kept commented out for normal runs.
db = OkDatabase()
# db.create_table_ok()
|
{"hexsha": "b2ed2e6b7618040122f175cc1672d5efc8746a65", "size": 4218, "ext": "py", "lang": "Python", "max_stars_repo_path": "OkInitializer.py", "max_stars_repo_name": "vanavski/vk_inst_ok_mutual_friends", "max_stars_repo_head_hexsha": "6e8e1ff52222656af6186148977d6e0972ac789e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "OkInitializer.py", "max_issues_repo_name": "vanavski/vk_inst_ok_mutual_friends", "max_issues_repo_head_hexsha": "6e8e1ff52222656af6186148977d6e0972ac789e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "OkInitializer.py", "max_forks_repo_name": "vanavski/vk_inst_ok_mutual_friends", "max_forks_repo_head_hexsha": "6e8e1ff52222656af6186148977d6e0972ac789e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.7142857143, "max_line_length": 111, "alphanum_fraction": 0.5806069227, "include": true, "reason": "import numpy", "num_tokens": 996}
|
```python
import holoviews as hv
hv.extension('bokeh')
hv.opts.defaults(hv.opts.Curve(width=500),
hv.opts.Image(width=500, colorbar=True, cmap='Viridis'))
```
```python
import numpy as np
import scipy.signal
import scipy.fft
from IPython.display import Audio
```
# Análisis de señales no estacionarias utilizando espectrogramas
## ¿Qué ocurre con el espectro de una señal si su frecuencia cambia en el tiempo?
Consideremos este ejemplo sencillo de una señal donde la frecuencia cambia abruptamente en un tiempo determinado
$$
s(t) = \begin{cases}\cos(2\pi f_1 t) & t <0 \\ \cos(2\pi f_2 t) & t \geq 0 \end{cases}
$$
Sea por ejemplo $f_1=440$ Hz y $f_2 = 220$ Hz. Si la graficamos:
```python
f1, f2, Fs = 440, 220, 44100
t = np.arange(-0.5, 0.5, step=1/Fs)
N = len(t)
s = np.concatenate((np.cos(2.0*np.pi*f1*t[:N//2]),
np.cos(2.0*np.pi*f2*t[N//2:])))
```
```python
hv.Curve((t, s), 'Tiempo [s]', 'Señal').opts(xlim=(-0.05, 0.05))
```
y si la escuchamos
```python
Audio(s, rate=Fs)
```
Si calculamos la FFT de la señal para obtener su espectro tenemos que
```python
f = scipy.fft.rfftfreq(n=N, d=1./Fs)
S = np.absolute(scipy.fft.rfft(s))
```
```python
hv.Curve((f, S), 'Frecuencia [Hz]', 'Espectro').opts(xlim=(0, 1000))
```
La DFT/FFT nos entrega un "resumen" de todas las frecuencias en la señal
:::{important}
No es posible diferenciar una señal donde ambas frecuencias ocurren al mismo tiempo de otra donde las frecuencias aparecen en tiempos distintos
:::
En general la transformada de Fourier asume que la señal de interés es **estacionaria**. Una señal estacionaria es aquella cuyas propiedades y momentos estadísticos se mantienen en el tiempo
## Frecuencia instantanea
Hasta ahora hemos estudiando señales donde la frecuencia es constante en el tiempo
Definamos la **frecuencia instantanea** como la tasa de cambio del ángulo (fase) en función del tiempo
$$
f(t) = \frac{1}{2\pi} \frac{d \phi (t)}{dt}
$$
Por ejemplo señal sinusoidal con una frecuencia que cambia en el tiempo sería entonces
$$
s(t) = A \cos( \phi(t) ) = A \cos \left(2\pi \int_0^t f(\tau) d\tau + \phi(0) \right)
$$
de donde podemos notar que si la frecuencia fuera constante, es decir $f(t) = f_0$ $\forall t$, entonces $\int_0^t f(\tau) d\tau = t f_0$ y recuperamos $A\cos(2\pi t f_0 + \phi)$
En cualquier caso donde $f(t)$ no es constante estaremos ante una señal no estacionaria
A continuación dos ejemplos de señales donde la frecuencia cambia con el tiempo
- El chirrido: señal cuya frecuencia cambia entre dos valores
- El vibrato: señal que está modulada en frecuencia por otra señal
### Chirrido o *Chirp*
> Loica (Sturnella loyca). Referencia: http://www.conserva.cl/2009/09/sonidos-de-aves-de-chile-loica.html
Un *chirp* es una señal cuya frecuencia varía suavemente entre un primer valor $f_0$ y un segundo valor $f_1$. Por ejemplo esta variación podría seguir una forma lineal
$$
f(t) = f_{0} + (f_{1} - f_{0}) \frac{(t - t_{0})}{(t_{1} - t_{0})},
$$
donde $t_0$ y $t_1$ son los tiempos en que la señal oscila a $f_0$ y $f_1$, respectivamente. También se puede usar una forma no lineal, por ejemplo cuadrática o exponencial
Los *chirp* se usan como modelo en aplicaciones asociadas a radar y sonar. También se han usado para modelar el canto de algunas aves con el objetivo de hacer identificación automática
Podemos crear un chirrido sintético con `scipy` usando
```python
scipy.signal.chirp(t, # Un vector de tiempos
f0, # La frecuencia en el tiempo t=0
t1, # El tiempo en el cual f=f1
f1, # La frecuencia para el tiempo t=t1
method='linear', # Otras opciones disponibles: 'quadratic', 'logarithmic' o 'hyperbolic'
...
)
```
```python
f0, f1, Fs = 4000, 2000, 44100
t = np.arange(0, 0.5, step=1./Fs);
s = 0.1*scipy.signal.chirp(t, f0=f0, f1=f1, t1=t[-1], method='quadratic')
```
En este ejemplo la frecuencia cambia cuadraticamente
```python
hv.Curve((t, f0 + (f1 - f0)*(t/t[-1])**2), 'Tiempo [s]', 'Frecuencia [Hz]')
```
El resultado sonoro se muestra a continuación
```python
Audio(s, rate=Fs, normalize=False)
```
### Frecuencia Modulada (FM)
La FM es una tecnología para guardar información en la frecuencia de una onda electromagnética. Es un tipo de **codificación** que se usa mucho en transmisiones de radio.
- La onda electromagnética se llama **señal portadora**. En radio corresponde a una sinusoide con una frecuencia central en el rango de 88 a los 108 [MHz]
- La información se llama **señal modulada**. En radio corresponde tipicamente a una señal de voz o a una canción, es decir que está en el rango de los 20 [Hz] a 20 [kHz] (rango audible humano)
Una señal en el rango audible puede viajar algunos metros. En cambio, si va codificada en la señal portadora puede viajar cerca de 50 km
El siguiente esquema muestra la operación que realiza una estación de radio que transmite señales
La radio que recibe la señal debe realizar el proceso inverso, es decir decodificar la información a partir de la frecuencia de la señal que está recepcionando
Matemáticamente la señal modulada $s_m(t)$ modifica la frecuencia central $f_c$ de la señal portadora como sigue
$$
\begin{align}
s(t) & = A_c \cos \left(2\pi \int_0^t \left(f_c + K s_m(\tau) \right) d\tau \right) \nonumber \\
&= A_c \cos \left(2\pi f_c t + 2\pi K \int_0^t s_m(\tau) d\tau \right), \nonumber
\end{align}
$$
donde $K$ es el coeficiente de modulación y $s(t)$ es la señal que finalmente viaja por el medio
Cada estación de radio transmite su información $s_m(t)$ usando una frecuencia portadora $f_c$ distinta para no traslaparse
### Vibrato
Un [vibrato](https://es.wikipedia.org/wiki/Vibrato) es un efecto musical que consiste en variar periódicamente el tono de una nota.
Un violinista logra este efecto presionando una cuerda y luego moviendo su dedo de forma regular como muestra la siguiente animación (en cámara lenta)
Podemos considerar el vibrato como un caso particular de modulación de frecuencia. Si consideremos sólo tonos puros podríamos definir $s_m(t) = \cos(2\pi f_m t)$, con lo que nos queda la siguiente señal
$$
s(t) = A_c \cos \left(2\pi f_c t + \frac{K}{f_m} \sin(2\pi f_m t) \right),
$$
De la expresión tenemos que
- $f_c$ es la frecuencia o tono central
- $f_m$ es la velocidad a la que cambia el tono central
- $K/f_m$ es la amplitud del cambio del tono central
Podemos implementar un vibrato usando
```python
A_c, K, f_c, f_m, Fs = 1, 50, 220, 8, 44100
t = np.arange(0, 2, step=1/Fs)
sm = np.cos(2.0*np.pi*f_m*t)
s = A_c*np.cos(2.0*np.pi*f_c*t + (K/f_m)*np.sin(2.0*np.pi*f_m*t))
```
La frecuencia de la portadora (azul) aumenta con la amplitud de la modulada (roja)
```python
p1 = hv.Curve((t, s), 'Tiempo[s]', 'Señal', label='Portadora').opts(alpha=0.75)
p2 = hv.Curve((t, sm), 'Tiempo[s]', 'Señal', label='Modulada')
(p1 * p2).opts(hv.opts.Curve(xlim=(0, 0.2)))
```
```python
Audio(s, rate=Fs)
```
## Representación en tiempo y frecuencia
Para estudiar una señal cuya frecuencia cambia en el tiempo debemos estudiar la evolución temporal de su espectro. La herramienta más utilizada para esto se llama espectrograma
El **espectrograma** es una representación visual de la energía de la señal distribuida tanto en el tiempo y en la frecuencia. Es decir que es una representación bidimensional.
La siguiente imagen muestra un espectrograma de una señal de habla humana, una señal altamente no estacionaria cuya frecuencia puede presentar cambios bruscos
Notar que:
- El eje horizontal representa tiempo (segundos)
- El eje vertical representa frecuencia (Hz)
- Se usa color para representar la intensidad energética
En la imagen se puede apreciar como el contenido energético cambia su distribución de forma notoria en los momentos de respiración (breath) y habla (speech). Muchos algoritmos actuales de reconocimiento de habla (por ejemplo redes neuronales artificiales) operan reconociendo patrones a partir del espectrograma.
:::{important}
A diferencia del espectro, el espectrograma nos permite estudiar los cambios de energía "instantáneos" de la señal
:::
### ¿Cómo se obtiene el espectrograma?
Para calcular el espectrograma se utiliza la *short-time Fourier transform* (STFT). Para el caso de una señal discreta la STFT se define como
$$
S[m, k] = \sum_{n} s[n] w[n-m] e^{-j \frac{2\pi}{N} nk}
$$
Notemos como la STFT depende tanto del tiempo (índice m) como de la frecuencia (índice k)
En la práctica la STFT consiste en
1. multiplicar la señal por una ventana localizada $w[n-m]$
2. calcular la FFT sobre esa ventana
Esto se repite para distintas ventanas como muestra el siguiente diagrama.
En la parte superior de la imagen, la linea azul es la señal y las lineas rojas son las ventanas desplazadas. En la parte inferior se muestra que de cada ventana desplazada se obtiene un espectro. Finalmente el espectrograma consiste en juntar los espectros de amplitud de cada ventana. Notemos que puede haber traslape entre las ventanas
:::{note}
Cuando utilizamos el espectrograma estamos asumiendo que la señal es "localmente" estacionaria. Es decir que es estacionaria dentro de la ventana.
:::
## Espectrograma en Python
Podemos usar la función de `scipy.signal.spectrogram` cuyos parámetros más relevantes son
```python
spectrogram(x, # Señal
fs=1.0, # Frecuencia de muestreo
window=('tukey', 0.25), # Tipo de ventana y parámetros de ventana
nperseg=None, # Ancho de la ventana en número de muestras
noverlap=None, # Cantidad de traslape, por defecto es 1/8 del largo de ventana
...
)
```
Esta función retorna una tupla con
- Un arreglo con las frecuencias del espectrograma de largo M
- Un arreglo con los tiempos de las ventanas de largo N
- Una matriz de MxN con los valores del espectrograma
Calculemos y visualicemos el espectrograma de la señal de ejemplo que vimos al principio de esta lección
```python
f1, f2, Fs = 440, 220, 44100
t = np.arange(0.0, 1.0, step=1/Fs)
N = len(t)
s = np.concatenate((np.cos(2.0*np.pi*f1*t[:N//2]),
np.cos(2.0*np.pi*f2*t[N//2:])))
freqs, times, Sxx = scipy.signal.spectrogram(s, fs=Fs, nperseg=1024)
```
```python
hv.Image((times, freqs, Sxx), kdims=['Tiempo [s]', 'Frecuencia [Hz]']).opts(ylim=(0, 1000))
```
:::{important}
Con el espectrograma podemos visualizar donde (temporalmente) ocurre cada una de las frecuencias de la señal
:::
A continuación profundizaremos en como seleccionar los argumentos del espectrograma utilizando algunos ejemplos
### Argumentos y trade-off del espectrograma
Para calcular el espectrograma debemos seleccionar
- un tipo o función de ventana, por ejemplo Hamming, Tukey o Kaiser
- un ancho de ventana, expresado tipicamente en número de muestras
- un traslape de ventana, expresado tipicamente en número de muestras o en porcentaje de traslape
En la lección de "Fuga espectral" vimos que la resolución frecuencial en el espectro depende fuertemente del número de muestras (ancho) de la ventana y el tipo o función de la ventana. En particular mientras más ancha es la ventana mejor es la resolución en el eje de frecuencia.
Sin embargo si la ventana es demasiado ancha no podremos identificar bien los eventos más breves o rápidos.
:::{warning}
En el espectrograma existe un compromiso (*trade-off*) entre la resolución en el tiempo y la resolución en frecuencia. No se puede mejorar una sin empeorar la otra.
:::
El siguiente esquema ejemplifica esta situación
Más adelante veremos como el largo de la ventana afecta el espectrograma con un ejemplo práctico
### Espectrograma de un vibrato
Implementemos nuevamente el vibrato con frecuencia instantanea
$$
f(t) = f_c + K \cos(2.0\pi f_m t)
$$
```python
A_c, K, f_c, f_m, Fs = 1, 25, 440, 8, 44100
t = np.arange(0, 1, step=1/Fs)
sm = np.cos(2.0*np.pi*f_m*t)
s = A_c*np.cos(2.0*np.pi*f_c*t + (K/f_m)*np.sin(2.0*np.pi*f_m*t))
window = ('kaiser', 6.)
```
A continuación se muestran tres espectrogramas con distinto largo de ventana. La linea roja punteada corresponde $f(t)$ en función de $t$. En todos los espectrogramas se usó una ventana de Kaiser con $\beta=6$
```python
def plot_spectrogram(Nw):
    """Plot the spectrogram of the global vibrato signal `s` for a window of `Nw` samples.

    Overlays the analytic instantaneous frequency f_c + K*sm as a dashed red curve.
    """
    display(f'Ventana de {1000*Nw/Fs:0.4f} [ms]')
    # noverlap is documented as an integer sample count; Nw//1.5 yields a
    # float (floor division by a float), so cast explicitly instead of
    # relying on scipy's implicit conversion.
    freqs, times, Sxx = scipy.signal.spectrogram(s, fs=Fs, nperseg=Nw, noverlap=int(Nw // 1.5), window=window)
    formula = hv.Curve((t, f_c + K*sm)).opts(color='r', line_dash='dashed')
    spectrogram = hv.Image((times, freqs, Sxx), kdims=['Tiempo [s]', 'Frecuencia [Hz]']).opts(ylim=(300, 600))
    return spectrogram * formula
```
```python
plot_spectrogram(512)
```
**Ventana muy angosta:** Resolución temporal superior (eje horizontal) pero gran dispersión en la frecuencia (eje vertical)
```python
plot_spectrogram(8192)
```
**Ventana muy ancha:** Resolución frecuencial superior (eje vertical) pero gran dispersión en el tiempo (eje horizontal)
```python
plot_spectrogram(2048)
```
**Compromiso:** La ventana de 46 [ms] parece presentar un mejor compromiso entre resolución temporal y frecuencial.
:::{important}
El mejor compromiso depende de la tasa de cambio temporal y frecuencial de la señal en particular. No existe una receta para escoger el tamaño de ventana. Lo mejor es siempre probar y estudiar los resultados.
:::
### Espectrograma de una señal de voz
Consideremos la siguiente señal de voz humana y su espectrograma
Utilizaremos la librería `librosa` para cargar el archivo de audio en memoria
```python
import librosa
data, Fs = librosa.load("../../data/123.ogg")
time = np.arange(0.0, len(data)/Fs, step=1/Fs)
display(Audio(data, rate=Fs))
hv.Curve((time, data), 'Tiempo [s]', 'Señal')
```
```python
Nw = 2048
freqs, times, Sxx = scipy.signal.spectrogram(data, fs=Fs, window=('kaiser', 6),
nperseg=Nw, noverlap=Nw//1.5)
```
```python
hv.Image((times, freqs, 10*np.log10(Sxx+1e-10)),
kdims=['Tiempo [s]', 'Frecuencia [Hz]']).opts(ylim=(0, 2000))
```
Si comparamos con la señal de audio que graficamos antes podemos observar que
- Cada vocal tiene un tono y una distribución de energía distintos
- A diferencia de una señal sintética la voz humana es rica en armónicos
- A diferencia de una señal sintética hay ruido blanco que contamina el espectrograma
Notar que no graficamos la energía si no su logaritmo en base diez. Aplicar `log10` es muy usual para visualizar el espectro en señales de habla ya que su energía varía en un rango muy amplio
```python
```
|
{"hexsha": "acf1df2a22d1303e03211cb43638a1b17cd497a0", "size": 24264, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "lectures/unit2/lecture1.ipynb", "max_stars_repo_name": "phuijse/UACH-INFO183", "max_stars_repo_head_hexsha": "0e1b6bef0bd80cda2753bd11e62016268f2de638", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2018-08-27T23:53:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-16T23:31:05.000Z", "max_issues_repo_path": "lectures/unit2/lecture1.ipynb", "max_issues_repo_name": "phuijse/UACH-INFO183", "max_issues_repo_head_hexsha": "0e1b6bef0bd80cda2753bd11e62016268f2de638", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lectures/unit2/lecture1.ipynb", "max_forks_repo_name": "phuijse/UACH-INFO183", "max_forks_repo_head_hexsha": "0e1b6bef0bd80cda2753bd11e62016268f2de638", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2019-01-04T17:43:55.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-07T16:07:18.000Z", "avg_line_length": 31.4708171206, "max_line_length": 347, "alphanum_fraction": 0.5783877349, "converted": true, "num_tokens": 4509}
|
# Mutable binary tree node holding a value of type T and optional children.
# NOTE(review): `Nullable` was removed from Base in Julia 1.0; this code
# targets Julia 0.6 (or requires the Nullables.jl compatibility package).
mutable struct Tree{T}
    value::T
    lchild::Nullable{Tree{T}}
    rchild::Nullable{Tree{T}}
end
# Replace the value of every node of `t` with `v`, in place; return `t`.
# Fixes the original, which referenced the bare names `lchild`/`rchild`
# (undefined locals — a runtime UndefVarError on first call) and recursed
# into the non-existent `replaceall` instead of `replaceall!`.
function replaceall!(t::Tree{T}, v::T) where T
    t.value = v
    isnull(t.lchild) || replaceall!(get(t.lchild), v)
    isnull(t.rchild) || replaceall!(get(t.rchild), v)
    return t
end
|
{"hexsha": "0af70095a881310b09bdc4ca797dc566d92e694c", "size": 279, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "lang/Julia/parametric-polymorphism.jl", "max_stars_repo_name": "ethansaxenian/RosettaDecode", "max_stars_repo_head_hexsha": "8ea1a42a5f792280b50193ad47545d14ee371fb7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lang/Julia/parametric-polymorphism.jl", "max_issues_repo_name": "ethansaxenian/RosettaDecode", "max_issues_repo_head_hexsha": "8ea1a42a5f792280b50193ad47545d14ee371fb7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lang/Julia/parametric-polymorphism.jl", "max_forks_repo_name": "ethansaxenian/RosettaDecode", "max_forks_repo_head_hexsha": "8ea1a42a5f792280b50193ad47545d14ee371fb7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.4615384615, "max_line_length": 48, "alphanum_fraction": 0.6379928315, "num_tokens": 88}
|
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
import timeit
from sklearn.linear_model import Ridge
from efficient_learning.model import *
# Path (relative to the working directory) of the pickled feature DataFrame.
FILE = 'data/movie_reviews_use.pkl'
### load data
def load_data():
    """Load the pickled movie-review features/labels and split 75/25 train/test."""
    frame = pd.read_pickle(FILE)
    features = np.array(frame["document_features"].tolist())
    labels = np.array(frame["labels"].tolist())
    # random_state is pinned so every benchmark below sees the same split.
    x_train, x_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.25, random_state=64)
    return x_train, x_test, y_train, y_test
if __name__ == '__main__':
    # Benchmark: AUC and wall-clock time of repeated full refits vs.
    # incremental LS-SVM updates on the movie-review features.
    X_tr, X_te, Y_tr, Y_te = load_data()
    ## sklearn Ridge
    # NOTE(review): this loop refits on the FULL training set every
    # iteration, unlike the "Full LS-SVM" loop below which fits X_tr[:i].
    # Presumably X_tr[:i], Y_tr[:i] was intended for an apples-to-apples
    # timing comparison — confirm before trusting these numbers.
    start_time = timeit.default_timer()
    clf = Ridge(alpha=0.001)
    for i in range(1, X_tr.shape[0]):
        clf.fit(X_tr, Y_tr)
    print("Sklearn Ridge AUC : {}".format(roc_auc_score(Y_te, clf.predict(X_te))))
    print("Total time: {} s".format(timeit.default_timer() - start_time))
    print("#" * 10)
    ## LS-SVM
    # Full retrain from scratch on every prefix of the training data.
    start_time = timeit.default_timer()
    clf = LSSVM(lambda_p=0.001)
    for i in range(1, X_tr.shape[0]):
        clf.fit(X_tr[:i], Y_tr[:i])
    clf.fit(X_tr, Y_tr)
    print("Full LS-SVM : {}".format(roc_auc_score(Y_te, clf.predict(X_te))))
    print("Total time: {} s".format(timeit.default_timer() - start_time))
    print("#" * 10)
    ## Incremental LS-SVM SMW
    # Warm-start on two samples, then add one sample at a time.
    start_time = timeit.default_timer()
    clf = LSSVM(lambda_p=0.001)
    clf.fit(X_tr[:2], Y_tr[:2])
    for i in range(2, X_tr.shape[0]):
        clf.fit_update(np.expand_dims(X_tr[i], axis=0), Y_tr[i])
    print("Incremental LS-SVM SMW : {}".format(roc_auc_score(Y_te, clf.predict(X_te))))
    print("Total time: {} s".format(timeit.default_timer() - start_time))
    print("#" * 10)
    ## Incremental LS-SVM Poly
    # Same incremental protocol with the polynomial-update variant.
    start_time = timeit.default_timer()
    clf = LSSVMPOLY(lambda_p=0.001)
    clf.fit(X_tr[:2], Y_tr[:2])
    for i in range(2, X_tr.shape[0]):
        clf.fit_update(np.expand_dims(X_tr[i], axis=0), Y_tr[i])
    print("Incremental LS-SVM Poly : {}".format(roc_auc_score(Y_te, clf.predict(X_te))))
    print("Total time: {} s".format(timeit.default_timer() - start_time))
    print("#" * 10)
    ## Incremental LS-SVM SVD
    # Same incremental protocol with the SVD-update variant.
    start_time = timeit.default_timer()
    clf = LSSVMSVD(lambda_p=0.001)
    clf.fit(X_tr[:2], Y_tr[:2])
    for i in range(2, X_tr.shape[0]):
        clf.fit_update(np.expand_dims(X_tr[i], axis=0), Y_tr[i])
    print("Incremental LS-SVM SVD : {}".format(roc_auc_score(Y_te, clf.predict(X_te))))
    print("Total time: {} s".format(timeit.default_timer() - start_time))
    print("#" * 10)
|
{"hexsha": "0d1d99799ac5871d76284ca3962673bde5d3a8bc", "size": 2413, "ext": "py", "lang": "Python", "max_stars_repo_path": "bench_speed.py", "max_stars_repo_name": "tkanchin/adaptive_reg_active_learning", "max_stars_repo_head_hexsha": "2526eace737a372a3e196cbe2c3f40afa62d2304", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bench_speed.py", "max_issues_repo_name": "tkanchin/adaptive_reg_active_learning", "max_issues_repo_head_hexsha": "2526eace737a372a3e196cbe2c3f40afa62d2304", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bench_speed.py", "max_forks_repo_name": "tkanchin/adaptive_reg_active_learning", "max_forks_repo_head_hexsha": "2526eace737a372a3e196cbe2c3f40afa62d2304", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.1625, "max_line_length": 85, "alphanum_fraction": 0.6912556983, "include": true, "reason": "import numpy", "num_tokens": 745}
|
From mathcomp Require Import all_ssreflect.
Require Import Util Types Term Typing.
(* Renaming lemma: typing is stable under variable renaming. If [r] maps
   every position of context [G] to a position of [G'] holding the same
   type (the [nth] side condition), then [rename r t] keeps type [T]. *)
Lemma typed_rename M G t T :
  typed M G t T ->
  forall G' r,
  (forall d x, nth d G x = nth d G' (r x)) ->
  typed M G' (rename r t) T.
Proof.
  induction 1 => /= ? ? Hnth; eauto.
  - apply /typed_var => // ?.
    by rewrite -Hnth H.
  - apply /typed_abs; eauto.
    by apply /IHtyped => ? [ | ? ] /=.
  - apply /typed_let; eauto.
    + by rewrite value_rename.
    + by apply /IHtyped => ? [ | ? ] /=.
Qed.
(* Substitution lemma: substituting [s] for the term variables of [t]
   preserves typing, provided each substituted term [s x] can be typed at
   every renaming-instantiation [typ_open s'] of its context type (the
   existential [L] collects type variables the instantiation must avoid).
   The proof's long [maximum (env_enum_fv ...)] expressions pick a fresh
   bound above all free type variables of [M], [G'] and [T]. *)
Lemma typed_subst M G t T :
  typed M G t T ->
  forall s G',
  ( forall x T, (forall d, nth d G x = T) ->
    exists (L : seq _), forall s',
    (forall x, exists2 y, s' x = typ_fvar y & y \notin L) ->
    typed M G' (s x) (typ_open s' T) ) ->
  typed M G' (subst s t) T.
Proof.
  induction 1 => /= s0 G' Hnth; eauto.
  - move: (Hnth _ _ H) => [ L Hs' ].
    rewrite -(env_subst_fvar M) -(env_subst_fvar G') -(typ_subst_fvar T)
      -!(env_subst_ext (fun x => if x <= maximum (env_enum_fv M (env_enum_fv G' (typ_enum_fv T L))) then typ_fvar x else s (x - maximum (env_enum_fv M (env_enum_fv G' (typ_enum_fv T L)))).-1) typ_fvar) => [ | ? Hin | ? Hin ].
    + rewrite -(typ_subst_ext (fun x => if x <= maximum (env_enum_fv M (env_enum_fv G' (typ_enum_fv T L))) then typ_fvar x else s (x - maximum (env_enum_fv M (env_enum_fv G' (typ_enum_fv T L)))).-1)) => [ | ? Hin ].
      { rewrite -(typ_open_ext (typ_subst (fun x => if x <= maximum (env_enum_fv M (env_enum_fv G' (typ_enum_fv T L))) then typ_fvar x else s (x - maximum (env_enum_fv M (env_enum_fv G' (typ_enum_fv T L)))).-1) \o (typ_fvar \o addn (maximum (env_enum_fv M (env_enum_fv G' (typ_enum_fv T L)))).+1))) => [ | ? /typ_bv_subst_elim [ ? | [ z ] ] /= ].
        - rewrite -typ_subst_open_distr => [ | z ? ? ].
          + apply /typed_subst_typ => [ | z ? ].
            * apply Hs' => y. repeat eexists. apply /negP => Hin.
              have : (maximum (env_enum_fv M (env_enum_fv G' (typ_enum_fv T L)))).+1 + y <= (maximum (env_enum_fv M (env_enum_fv G' (typ_enum_fv T L)))).
              { apply /maximum_sup.
                by rewrite !env_enum_fv_inE_aux typ_enum_fv_inE_aux Hin !orbT. }
              by rewrite addSn ltnNge leq_addr.
            * by rewrite 3!fun_if /= H0 if_same.
          + by rewrite 3!fun_if /= H0 if_same.
        - by rewrite addSn ltnNge leq_addr /= subSKn addnC addnK.
        - by rewrite 3!fun_if /= H0 if_same. }
      by rewrite maximum_sup // !env_enum_fv_inE_aux typ_enum_fv_inE_aux Hin !orbT.
    + by rewrite maximum_sup // !env_enum_fv_inE_aux typ_enum_fv_inE_aux Hin !orbT.
    + by rewrite maximum_sup // !env_enum_fv_inE_aux typ_enum_fv_inE_aux Hin.
  - apply /typed_abs; eauto.
    apply /IHtyped => [ [ ? /(_ (typ_fvar 0)) /= -> | ? ? /= /Hnth [ L ? ] ] ].
    + exists [::] => ? Hs. apply /typed_var => //= x. by case (Hs x) => ? ->.
    + exists L => ? ?. apply /typed_rename; eauto.
  - apply /typed_let; eauto.
    + exact /value_subst.
    + apply /IHtyped => [ [ ? /(_ (typ_fvar 0)) /= -> | ? ? /= /Hnth [ L' ? ] ] ].
      * exists L => ? Hs. apply /typed_var => //= x. by case (Hs x) => ? ->.
      * exists L' => ? ?. apply /typed_rename; eauto.
Qed.
(* Single-variable specialization of [typed_subst]: substituting [t0] for
   de Bruijn index 0 (the beta/let case), where [t0] can be typed at every
   instantiation of the bound type [T0] avoiding the variables in [L]. *)
Corollary typed_subst_single M G t t0 T T0 :
  typed M (T0 :: G) t T ->
  forall (L : seq _),
  (forall s',
    (forall x, exists2 y, s' x = typ_fvar y & y \notin L) ->
    typed M G t0 (typ_open s' T0)) ->
  typed M G (subst (scons t0 var) t) T.
Proof.
  move => /typed_subst Hsubst ? ?.
  apply /Hsubst => [ [ ? /(_ typ_unit) <- /= | /= ? ? ? ] ]; eauto.
  exists [::] => ? Hs.
  apply /typed_var => // x.
  by case (Hs x) => ? ->.
Qed.
(* Subject reduction with a mutable store: if the store [H] is typed by
   [M] and [t] steps to [t'] (relation [cbv]) while updating the store to
   [H'], then there is an extended store typing [M'] — agreeing with [M]
   on all old locations — under which [t'] keeps type [T] and [H'] is
   still well typed. The allocation case extends [M] with [rcons]; the
   assignment case updates [H] in place via [set_nth]. *)
Lemma preservation :
  forall H M G,
  size H = size M ->
  (forall i, typed M G (nth trm_unit H i) (nth typ_unit M i)) ->
  forall t H' t',
  cbv H t H' t' ->
  forall T,
  typed M G t T ->
  exists M', typed M' G t' T /\
  (forall x T, (forall d, nth d M x = T) -> forall d, nth d M' x = T) /\
  size H' = size M' /\
  (forall i, typed M' G (nth trm_unit H' i) (nth typ_unit M' i)).
Proof.
  induction 3; inversion 1; subst; eauto.
  - inversion H7. subst.
    exists M. repeat split; eauto.
    apply /(typed_subst_single _ _ _ _ _ _ H11 [::]) => ? Hs.
    rewrite typ_open_bvar_eq => // ?.
    by rewrite H10.
  - move: (IHcbv _ H7) => [ M' [ ? [ ? [ ? ? ] ] ] ].
    exists M'. repeat split; eauto.
    apply /typed_app; eauto.
    apply /(typed_weaken_store M); eauto.
  - move: (IHcbv _ H10) => [ M' [ ? [ ? [ ? ? ] ] ] ].
    exists M'. repeat split; eauto.
    apply /typed_app; eauto.
    apply /(typed_weaken_store M); eauto.
  - have ? : forall x T,
      (forall d, nth d M x = T) ->
      (forall d, nth d (rcons M T0) x = T).
    { move => x T Hnth d.
      by move: nth_rcons (leqP (size M) x)
        (Hnth (if x == size M then T0 else d)) => -> [ /(nth_default _) -> | ]. }
    exists (rcons M T0).
    (repeat split; eauto) => [ | | l ].
    + apply /typed_loc => /= [ | ? ].
      * apply /typed_closed. eauto.
      * by rewrite nth_rcons H0 ltnn eqxx.
    + by rewrite !size_rcons H0.
    + rewrite !nth_rcons -H0.
      case (ltngtP (size H) l) => ? //;
      apply /(typed_weaken_store M); eauto.
  - move: (IHcbv _ H6) => [ ? [ ? [ ? [ ? ? ] ] ] ]. eauto 6.
  - inversion H6. subst.
    rewrite -(H2 trm_unit) -(H9 typ_unit). eauto.
  - move: (IHcbv _ H6) => [ ? [ ? [ ? [ ? ? ] ] ] ]. eauto 6.
  - inversion H7. subst.
    exists M. (repeat split; eauto) => [ | l' ].
    + rewrite size_set_nth -H0.
      apply /maxn_idPr.
      case (leqP (size H) l) => // Hleq.
      move: (H10 typ_unit).
      by rewrite -(H10 (typ_fvar 0)) !nth_default -?H0.
    + rewrite nth_set_nth /=.
      case (@eqP _ l' l) => [ -> | ]; eauto.
      by rewrite H10.
  - move: (IHcbv _ H7) => [ M' [ ? [ ? [ ? ? ] ] ] ].
    exists M'. repeat split; eauto.
    apply /typed_update; eauto.
    apply /typed_weaken_store; eauto.
  - move: (IHcbv _ H10) => [ M' [ ? [ ? [ ? ? ] ] ] ].
    exists M'. repeat split; eauto.
    apply /typed_update; eauto.
    apply /typed_weaken_store; eauto.
  - exists M. repeat split; eauto.
    exact /(typed_subst_single _ _ _ _ _ _ H10 L).
  - by move: (cbv_value _ _ _ _ H2 H6).
Qed.
(* Canonical forms: a closed value of arrow type is a lambda abstraction. *)
Lemma canonical_arrow : forall v M T1 T2,
  value v ->
  typed M [::] v (typ_arrow T1 T2) ->
  exists t, v = trm_abs t.
Proof. case => //; inversion 2. eauto. Qed.
(* Canonical forms: a closed value of reference type is a store location. *)
Lemma canonical_ref : forall v M T,
  value v ->
  typed M [::] v (typ_ref T) ->
  exists l, v = trm_loc l.
Proof. case => //; inversion 2. eauto. Qed.
(* Progress: a closed, well-typed term under a well-typed store [H] is
   either a value or can take a [cbv] step. The dereference case supplies
   the stored term [nth trm_unit H l] as the read result, justifying that
   [l] is in range via the store typing. *)
Lemma progress H M :
  size H = size M ->
  (forall i, typed M [::] (nth trm_unit H i) (nth typ_unit M i)) ->
  forall t T,
  typed M [::] t T ->
  value t \/ exists H' t', cbv H t H' t'.
Proof.
  move => Hsize Hstore t.
  induction t; inversion 1; subst; eauto.
  - move: (H2 typ_unit).
    by rewrite -(H2 (typ_bvar 0)) !nth_default.
  - move: (IHt1 _ H4) => [ /((canonical_arrow _ _ _ _)^~H4) [ ? -> ] | [ ? [ ? ? ] ] ]; eauto.
    move: (IHt2 _ H6) => [ ? | [ ? [ ? ? ] ] ]; eauto.
  - move: (IHt _ H3) => [ ? | [ ? [ ? ? ] ] ]; eauto.
  - move: (IHt _ H3) => [ /((canonical_ref _ _ _)^~H3) [ l ? ] | [ ? [ ? ? ] ] ]; subst; eauto.
    inversion H3. subst.
    right. repeat eexists. apply /(cbv_deref _ _ (nth trm_unit H l)) => ?.
    case (leqP (size H) l).
    + rewrite Hsize => ?.
      move: (H6 typ_unit).
      by rewrite -(H6 (typ_fvar 0)) !(@nth_default _ _ M).
    + exact /set_nth_default.
  - move: (IHt1 _ H4) => [ /((canonical_ref _ _ _)^~H4) [ l ? ] | [ ? [ ? ? ] ] ]; subst; eauto.
    inversion H4. subst.
    move: (IHt2 _ H6) => [ ? | [ ? [ ? ? ] ] ]; eauto.
Qed.
|
{"author": "fetburner", "repo": "type-infer", "sha": "11192dd3f7385e98a33cb58ee53a47f882533618", "save_path": "github-repos/coq/fetburner-type-infer", "path": "github-repos/coq/fetburner-type-infer/type-infer-11192dd3f7385e98a33cb58ee53a47f882533618/theories/TypeSafety.v"}
|
from __future__ import absolute_import, division, print_function
from ctd import DataFrame, derive_cnv, lp_filter
from ctd.utilities import Path
import numpy as np
import pytest
data_path = Path(__file__).parent.joinpath('data')


def load_spiked_ctd(name):
    """Load a CTD cast fixture file from the test data directory.

    Plain helper, NOT a pytest fixture: every test below calls it directly
    with a filename argument, and calling a fixture function directly is an
    error since pytest 4 ("Fixtures are not meant to be called directly").
    """
    return DataFrame.from_cnv(data_path.joinpath(name))
# Split.
def test_split_return_tuple():
    # split() is expected to hand back a (downcast, upcast) pair.
    parts = load_spiked_ctd('CTD-spiked-unfiltered.cnv.bz2').split()
    assert isinstance(parts, tuple)
def test_split_cnv():
    """Downcast and upcast sizes must add up to the full cast size."""
    cast = load_spiked_ctd('CTD-spiked-unfiltered.cnv.bz2')
    downcast, upcast = cast.split()
    assert downcast.index.size + upcast.index.size == cast.index.size
# Despike.
def test_despike():
    """Despiking should null out spikes and leave other samples untouched."""
    # Downcast of the conductivity channel only.
    downcast = load_spiked_ctd('CTD-spiked-filtered.cnv.bz2')['c0S/m'].split()[0]
    despiked = downcast.despike(n1=2, n2=20, block=500)
    flagged = despiked.isnull()
    untouched = (downcast[~flagged] == despiked[~flagged]).all()
    assert flagged.any()
    assert untouched
# Filter.
def test_lp_filter():
    """Low-pass filtering the raw index should roughly match the SBE-filtered one."""
    filter_kw = {
        'sample_rate': 24.0,
        'time_constant': 0.15,
    }
    expected = load_spiked_ctd('CTD-spiked-filtered.cnv.bz2').index.values
    raw = load_spiked_ctd('CTD-spiked-unfiltered.cnv.bz2').index.values
    smoothed = lp_filter(raw, **filter_kw)
    # Caveat: a coarse comparison only (one decimal place).
    np.testing.assert_almost_equal(smoothed, expected, decimal=1)
# Pressure check.
def test_press_check():
    """press_check should null pressure reversals and preserve the rest."""
    temperature = load_spiked_ctd('CTD-spiked-unfiltered.cnv.bz2')['t090C']
    checked = temperature.press_check()
    flagged = checked.isnull()
    preserved = (temperature[~flagged] == checked[~flagged]).all()
    assert flagged.any()
    assert preserved
def test_bindata():
    """Binned downcast must have a uniform index spacing equal to delta."""
    delta = 1.
    downcast = load_spiked_ctd('CTD-spiked-filtered.cnv.bz2')['t090C'].split()[0]
    binned = downcast.bindata(delta=delta)
    assert (np.unique(np.diff(binned.index.values)) == delta)
def test_derive_cnv():
    """derive_cnv should add exactly the expected derived columns."""
    cast = load_spiked_ctd('CTD-spiked-unfiltered.cnv.bz2')
    cast.lat = cast['latitude'].mean()
    cast.lon = cast['longitude'].mean()
    derived = derive_cnv(cast)
    added = set(derived).symmetric_difference(cast.columns)
    assert sorted(added) == ['CT', 'SA', 'SP', 'SR', 'sigma0_CT', 'z']
|
{"hexsha": "2c69ddb397ab4f3edd8eee74573a627ced8f713e", "size": 2288, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_processing.py", "max_stars_repo_name": "brorfred/python-ctd", "max_stars_repo_head_hexsha": "86e039ec1ab406283713d2bab8ef47dce1905d9c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_processing.py", "max_issues_repo_name": "brorfred/python-ctd", "max_issues_repo_head_hexsha": "86e039ec1ab406283713d2bab8ef47dce1905d9c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_processing.py", "max_forks_repo_name": "brorfred/python-ctd", "max_forks_repo_head_hexsha": "86e039ec1ab406283713d2bab8ef47dce1905d9c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.9024390244, "max_line_length": 78, "alphanum_fraction": 0.6818181818, "include": true, "reason": "import numpy", "num_tokens": 638}
|
"""Use Wordshoal model to estimate ideology from Senate speeches.
Wordshoal [1] fits speeches in two stages. First, for speaker i at debate t,
the count of word j is denoted by y_{ijt} and is distributed:
y_{ijt} ~ Pois(exp(nu_{it} + lambda_{jt} + psi_{it} * b_{jt})),
where psi_{it} is the ideal point for debate t, b_{jt} is the polarity of
word j for debate t, and nu_{it} and lambda_{jt} are debate-specific speaker-
and word-intercepts.
Wordshoal includes a second stage factor model to aggregate ideal points across
debates. Factorizing the fitted psi_{it}, the model posits:
psi_{it} ~ Normal(alpha_t + x_i * beta_t, tau_i),
where x_i is now the aggregated ideal point and beta_t is the aggregated
polarity. The model includes Gaussian priors on the real-valued parameters, and
a Gamma prior on tau_i.
We fit both stages using variational inference with reparameterization
gradients, using a Gaussian variational family for all parameters except for
tau_i, where we use a log-normal due to the positivity constraint. We note that
this is different from the inference procedure used in [1].
#### References
[1] Benjamin E. Lauderdale and Alexander Herzog. Measuring Political Positions
from Legislative Speech. In _Political Analysis_, 2016.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import time
# Dependency imports
from absl import flags
import numpy as np
import scipy.sparse as sparse
import tensorflow as tf
import tensorflow_probability as tfp
# Command-line configuration for training hyperparameters and data selection.
flags.DEFINE_float("learning_rate",
                   default=0.01,
                   help="Adam learning rate.")
flags.DEFINE_integer("max_steps",
                     default=20000,
                     help="Number of training steps to run.")
flags.DEFINE_integer("num_samples",
                     default=1,
                     help="Number of samples for ELBO approximation.")
flags.DEFINE_enum("data",
                  default="senate-speech-comparisons",
                  enum_values=["senate-speech-comparisons"],
                  help="Data set used.")
# Fixed: the two implicitly concatenated help strings were missing a
# separating space ("...we usethe full data set...").
flags.DEFINE_integer("batch_size",
                     default=1024,
                     help="Batch size. Used only for stage 1, because we use "
                          "the full data set for a batch in stage 2.")
flags.DEFINE_integer("senate_session",
                     default=113,
                     help="Senate session.")
flags.DEFINE_integer("print_steps",
                     default=100,
                     help="Number of steps to print and save results.")
flags.DEFINE_integer("seed",
                     default=123,
                     help="Random seed to be used.")
FLAGS = flags.FLAGS
def standardize(x):
  """Return `x` shifted to zero mean and scaled by its standard deviation."""
  centered = x - np.mean(x)
  return centered / np.std(x)
def build_input_pipeline(data_dir, batch_size, random_state):
  """Load data and build iterator for minibatches.

  Args:
    data_dir: The directory where the data is located. There must be five
      files inside the rep: `counts.npz`, `author_indices.npy`,
      `debate_indices.npy`, `author_map.txt`, and `vocabulary.txt`.
    batch_size: The batch size to use for training.
    random_state: A NumPy `RandomState` object, used to shuffle the data.

  Returns:
    A tuple `(iterator, vocabulary, author_map, num_words, num_authors,
    num_debates, dataset_size, author_indices, debate_indices)` where
    `iterator` yields `(counts_row, author_index, debate_index)` batches
    and the two index arrays are the full, unshuffled per-speech indices.
  """
  # Sparse speech-by-word count matrix; rows are densified lazily per batch.
  counts = sparse.load_npz(os.path.join(data_dir, "counts.npz"))
  dataset_size, num_words = counts.shape
  author_indices = np.load(
      os.path.join(data_dir, "author_indices.npy")).astype(np.int32)
  # Indices are assumed 0-based and dense, so max + 1 is the count -- TODO
  # confirm against the preprocessing script.
  num_authors = np.max(author_indices + 1)
  debate_indices = np.load(
      os.path.join(data_dir, "debate_indices.npy")).astype(np.int32)
  num_debates = np.max(debate_indices + 1)
  author_map = np.loadtxt(os.path.join(data_dir, "author_map.txt"),
                          dtype=str,
                          delimiter="\n")

  def get_row_py_func(idx):
    # Wrap the Python densification of a single sparse row as a TF op.
    def get_row_python(idx_py):
      batch_counts = np.squeeze(np.array(counts[idx_py].todense()), axis=0)
      return batch_counts

    # NOTE(review): declares a tf.float32 output -- assumes the stored
    # counts are float32; verify against the data pipeline.
    py_func = tf.py_func(get_row_python, [idx], tf.float32, stateful=False)
    py_func.set_shape((num_words,))
    return py_func

  # Shuffle once up front; `repeat()` below cycles the same shuffled order.
  indices = random_state.permutation(dataset_size)
  shuffled_author_indices = author_indices[indices]
  shuffled_debate_indices = debate_indices[indices]
  dataset = tf.data.Dataset.from_tensor_slices(
      (indices, shuffled_author_indices, shuffled_debate_indices))
  dataset = dataset.map(lambda index, author, debate: (
      get_row_py_func(index),
      author,
      debate))
  batches = dataset.repeat().batch(batch_size).prefetch(batch_size)
  iterator = batches.make_one_shot_iterator()
  vocabulary = np.loadtxt(os.path.join(data_dir, "vocabulary.txt"),
                          dtype=str,
                          delimiter="\n")
  return (iterator, vocabulary, author_map, num_words, num_authors,
          num_debates, dataset_size, author_indices, debate_indices)
def print_polarities(polarity_mean, vocabulary):
  """Sort words by polarity for Tensorboard.

  Args:
    polarity_mean: The mean of the polarity variational parameter, a NumPy
      matrix with shape [num_words, num_debates].
    vocabulary: A list of the vocabulary with shape [num_words].

  Returns:
    polarities: A NumPy string array with the most negative and most
      positive words for each printed debate.
  """
  num_words_to_print = 10
  # Clamp to the number of debates actually present.  The original
  # hard-coded 50 and crashed with an IndexError in the loop below
  # whenever num_debates < 50 (slicing clamps, but indexing does not).
  num_debates_to_print = min(50, polarity_mean.shape[1])
  # Shapes after transpose: [num_debates_to_print, <=num_words_to_print].
  most_negative_words = np.argsort(
      polarity_mean[:, :num_debates_to_print],
      axis=0)[:num_words_to_print].T
  most_positive_words = np.argsort(
      -polarity_mean[:, :num_debates_to_print],
      axis=0)[:num_words_to_print].T
  polarities = []
  for debate_idx in range(num_debates_to_print):
    negative_start_string = "Negative {}:".format(debate_idx)
    negative_row = [vocabulary[word] for word in
                    most_negative_words[debate_idx]]
    negative_row_string = ", ".join(negative_row)
    negative_string = " ".join([negative_start_string, negative_row_string])
    positive_start_string = "Positive {}:".format(debate_idx)
    positive_row = [vocabulary[word] for word in
                    most_positive_words[debate_idx]]
    positive_row_string = ", ".join(positive_row)
    positive_string = " ".join([positive_start_string, positive_row_string])
    polarities.append(" \n".join([negative_string, positive_string]))
  return np.array(polarities)
def print_ideal_points(stage_1_author_factor_loc, author_map):
  """Return author names ordered by ideal point, comma-separated, for Tensorboard."""
  order = np.argsort(stage_1_author_factor_loc)
  return ", ".join(author_map[order])
def get_log_prior(samples, prior='normal', scale=1.):
  """Compute the log prior density of samples, summed over non-sample axes.

  Args:
    samples: A `Tensor` with shape `[num_samples, :]` or
      `[num_samples, :, num_debates]`.
    prior: Either "normal" (zero mean, std `scale`) or "gamma"
      (concentration 1, rate 1).
    scale: Standard deviation of the normal prior; ignored for "gamma".

  Returns:
    log_prior: A `Tensor` with shape `[num_samples]`, with the log priors
      summed across all non-leading dimensions.

  Raises:
    ValueError: If `prior` is unrecognized or `samples` is not 2-D or 3-D.
  """
  if prior == "normal":
    prior_distribution = tfp.distributions.Normal(loc=0., scale=scale)
  elif prior == "gamma":
    prior_distribution = tfp.distributions.Gamma(concentration=1., rate=1.)
  else:
    raise ValueError("Unrecognized prior distribution.")
  rank = len(samples.shape)
  if rank == 2:
    sum_axes = [1]
  elif rank == 3:
    sum_axes = [1, 2]
  else:
    raise ValueError("Incorrect shape for log prior samples.")
  return tf.reduce_sum(prior_distribution.log_prob(samples), axis=sum_axes)
def get_stage_1_elbo(counts,
                     author_indices,
                     debate_indices,
                     author_factor_distribution,
                     author_intercept_distribution,
                     word_factor_distribution,
                     word_intercept_distribution,
                     dataset_size,
                     batch_size,
                     num_samples=1):
  """Approximate first stage ELBO using reparameterization.

  ELBO = E_q[log p(latents) + log p(counts | latents) - log q(latents)],
  estimated with `num_samples` Monte-Carlo draws from each variational
  distribution.

  Args:
    counts: A matrix with shape `[batch_size, num_words]`.
    author_indices: An int-vector with shape `[batch_size]`.
    debate_indices: An int-vector with shape `[batch_size]`.
    author_factor_distribution: A real `Distribution` object with parameter
      shape `[num_authors, num_debates]`.
    author_intercept_distribution: A real `Distribution` object with parameter
      shape `[num_authors, num_debates]`.
    word_factor_distribution: A real `Distribution` object with parameter shape
      `[num_words, num_debates]`.
    word_intercept_distribution: A real `Distribution` object with parameter
      shape `[num_words, num_debates]`.
    dataset_size: The number of rows in the total data set (used to calculate
      log-likelihood scaling factor).
    batch_size: Batch size (used to calculate log-likelihood scaling factor).
    num_samples: Number of Monte-Carlo samples.

  Returns:
    elbo: A scalar representing a Monte-Carlo sample of the ELBO. This value is
      averaged across samples and summed across batches.
  """
  # Reparameterized draws; each has shape [num_samples, <param shape>].
  ideal_point_samples = author_factor_distribution.sample(num_samples)
  author_intercept_samples = author_intercept_distribution.sample(
      num_samples)
  polarity_samples = word_factor_distribution.sample(num_samples)
  word_intercept_samples = word_intercept_distribution.sample(num_samples)
  # From [1]:
  """
  We place normal priors with mean 0 on all of the sets of the parameters in
  the model, with standard deviation 1 for the debate-specific positions psi
  and 5 for the other model parameters.
  """
  ideal_point_log_prior = get_log_prior(ideal_point_samples)
  author_intercept_log_prior = get_log_prior(author_intercept_samples,
                                             scale=5.)
  polarity_log_prior = get_log_prior(polarity_samples, scale=5.)
  word_intercept_log_prior = get_log_prior(word_intercept_samples, scale=5.)
  log_prior = (ideal_point_log_prior +
               author_intercept_log_prior +
               polarity_log_prior +
               word_intercept_log_prior)
  # Entropy terms: -E_q[log q], one scalar per Monte-Carlo sample.
  ideal_point_entropy = -tf.reduce_sum(
      author_factor_distribution.log_prob(ideal_point_samples),
      axis=[1, 2])
  author_intercept_entropy = -tf.reduce_sum(
      author_intercept_distribution.log_prob(author_intercept_samples),
      axis=[1, 2])
  polarity_entropy = -tf.reduce_sum(
      word_factor_distribution.log_prob(polarity_samples),
      axis=[1, 2])
  word_intercept_entropy = -tf.reduce_sum(
      word_intercept_distribution.log_prob(word_intercept_samples),
      axis=[1, 2])
  entropy = (ideal_point_entropy +
             author_intercept_entropy +
             polarity_entropy +
             word_intercept_entropy)
  # Pair (author, debate) indices per batch row so gather_nd can pick the
  # per-(i, t) parameters out of the [num_authors, num_debates] samples.
  indices_2d = tf.concat(
      [author_indices[:, tf.newaxis], debate_indices[:, tf.newaxis]],
      axis=1)
  # Transposes move the sample axis last for gather_nd, then back to front;
  # results have shape [num_samples, batch_size].
  selected_ideal_points = tf.transpose(
      tf.gather_nd(tf.transpose(ideal_point_samples, [1, 2, 0]), indices_2d),
      [1, 0])
  selected_author_intercepts = tf.transpose(
      tf.gather_nd(tf.transpose(author_intercept_samples, [1, 2, 0]),
                   indices_2d),
      [1, 0])
  # Word parameters depend only on the debate index; shape
  # [num_samples, batch_size, num_words] after the transpose.
  selected_polarities = tf.transpose(
      tf.gather(polarity_samples, debate_indices, axis=2),
      [0, 2, 1])
  selected_word_intercepts = tf.transpose(
      tf.gather(word_intercept_samples, debate_indices, axis=2),
      [0, 2, 1])
  # Poisson rate per (sample, speech, word):
  # exp(nu_it + lambda_jt + psi_it * b_jt).
  rate = tf.exp(
      selected_author_intercepts[:, :, tf.newaxis] +
      selected_word_intercepts +
      selected_ideal_points[:, :, tf.newaxis] *
      selected_polarities)
  count_distribution = tfp.distributions.Poisson(rate=rate)
  count_log_likelihood = count_distribution.log_prob(counts)
  count_log_likelihood = tf.reduce_sum(count_log_likelihood, axis=[1, 2])
  # Adjust for the fact that we're only using a minibatch.
  count_log_likelihood = count_log_likelihood * (dataset_size / batch_size)
  elbo = log_prior + count_log_likelihood + entropy
  elbo = tf.reduce_mean(elbo)
  tf.summary.scalar("stage_1_elbo/elbo", elbo)
  tf.summary.scalar("stage_1_elbo/log_prior", tf.reduce_mean(log_prior))
  tf.summary.scalar("stage_1_elbo/count_log_likelihood",
                    tf.reduce_mean(count_log_likelihood))
  tf.summary.scalar("stage_1_elbo/entropy", tf.reduce_mean(entropy))
  return elbo
def get_stage_2_elbo(flat_fitted_author_factor,
                     ideal_point_distribution,
                     debate_factor_distribution,
                     debate_intercept_distribution,
                     variance_distribution,
                     author_indices,
                     debate_indices,
                     num_samples=1):
  """Approximate second stage ELBO using reparameterization.

  Args:
    flat_fitted_author_factor: A tensor with shape `[dataset_size]`, containing
      the fitted per-debate ideal points from stage 1. This variable does not
      receive gradients, so it is no longer being trained.
    ideal_point_distribution: A real `Distribution` object with parameter
      shape `[num_authors]`.
    debate_factor_distribution: A real `Distribution` object with parameter
      shape `[num_debates]`.
    debate_intercept_distribution: A real `Distribution` object with parameter
      shape `[num_debates]`.
    variance_distribution: A positive `Distribution` object with parameter
      shape `[num_authors]`.
    author_indices: A vector with shape `[dataset_size]`.
    debate_indices: A vector with shape `[dataset_size]`.
    num_samples: Number of Monte-Carlo samples.

  Returns:
    elbo: A scalar representing a Monte-Carlo sample of the ELBO. This value is
      averaged across samples and summed across batches.
  """
  # Reparameterized draws, each with shape [num_samples, <param shape>].
  ideal_point_samples = ideal_point_distribution.sample(num_samples)
  debate_factor_samples = debate_factor_distribution.sample(num_samples)
  debate_intercept_samples = debate_intercept_distribution.sample(num_samples)
  variance_samples = variance_distribution.sample(num_samples)
  # Following the prior distributions in [1].
  ideal_point_log_prior = get_log_prior(ideal_point_samples)
  debate_factor_log_prior = get_log_prior(debate_factor_samples, scale=0.5)
  debate_intercept_log_prior = get_log_prior(debate_intercept_samples,
                                             scale=0.5)
  variance_log_prior = get_log_prior(variance_samples, prior='gamma')
  log_prior = (ideal_point_log_prior +
               debate_factor_log_prior +
               debate_intercept_log_prior +
               variance_log_prior)
  # Entropy terms: -E_q[log q], one scalar per Monte-Carlo sample.
  ideal_point_entropy = -tf.reduce_sum(
      ideal_point_distribution.log_prob(ideal_point_samples),
      axis=1)
  debate_factor_entropy = -tf.reduce_sum(
      debate_factor_distribution.log_prob(debate_factor_samples),
      axis=1)
  debate_intercept_entropy = -tf.reduce_sum(
      debate_intercept_distribution.log_prob(debate_intercept_samples),
      axis=1)
  variance_entropy = -tf.reduce_sum(
      variance_distribution.log_prob(variance_samples),
      axis=1)
  entropy = (ideal_point_entropy +
             debate_factor_entropy +
             debate_intercept_entropy +
             variance_entropy)
  # Expand per-author/per-debate samples to per-speech vectors; shapes
  # become [num_samples, dataset_size].
  selected_debate_intercepts = tf.gather(debate_intercept_samples,
                                         debate_indices,
                                         axis=1)
  selected_ideal_points = tf.gather(ideal_point_samples,
                                    author_indices,
                                    axis=1)
  selected_debate_factors = tf.gather(debate_factor_samples,
                                      debate_indices,
                                      axis=1)
  selected_variance_samples = tf.gather(variance_samples,
                                        author_indices,
                                        axis=1)
  # Factor model: psi_it ~ Normal(alpha_t + x_i * beta_t, tau_i).
  output_mean = (selected_debate_intercepts +
                 selected_ideal_points *
                 selected_debate_factors)
  # NOTE(review): the "variance" samples are passed directly as the Normal
  # `scale` (a standard deviation) despite the name -- confirm intended.
  output_scale = selected_variance_samples
  author_factor_distribution = tfp.distributions.Normal(loc=output_mean,
                                                        scale=output_scale)
  author_factor_log_likelihood = author_factor_distribution.log_prob(
      flat_fitted_author_factor)
  author_factor_log_likelihood = tf.reduce_sum(author_factor_log_likelihood,
                                               axis=1)
  elbo = log_prior + author_factor_log_likelihood + entropy
  elbo = tf.reduce_mean(elbo)
  tf.summary.scalar("stage_2_elbo/elbo", elbo)
  tf.summary.scalar("stage_2_elbo/log_prior", tf.reduce_mean(log_prior))
  tf.summary.scalar("stage_2_elbo/author_factor_log_likelihood",
                    tf.reduce_mean(author_factor_log_likelihood))
  tf.summary.scalar("stage_2_elbo/entropy", tf.reduce_mean(entropy))
  return elbo
def main(argv):
  """Build the two-stage Wordshoal graph, train both stages, save results."""
  print("Starting Senate Session {}".format(FLAGS.senate_session))
  del argv
  tf.set_random_seed(FLAGS.seed)
  random_state = np.random.RandomState(FLAGS.seed)
  project_dir = os.path.abspath(
      os.path.join(os.path.dirname(__file__), os.pardir))
  source_dir = os.path.join(project_dir, "data/{}/wordshoal/{}".format(
      FLAGS.data, FLAGS.senate_session))
  data_dir = os.path.join(source_dir, "clean")
  save_dir = os.path.join(source_dir, "wordshoal-fits")
  # Start each run from a clean log directory.
  if tf.gfile.Exists(save_dir):
    tf.logging.warn("Deleting old log directory at {}".format(save_dir))
    tf.gfile.DeleteRecursively(save_dir)
  tf.gfile.MakeDirs(save_dir)
  param_save_dir = os.path.join(save_dir, "params/")
  if not tf.gfile.Exists(param_save_dir):
    tf.gfile.MakeDirs(param_save_dir)
  np.warnings.filterwarnings('ignore')  # suppress scipy.sparse warnings.
  (iterator, vocabulary, author_map, num_words, num_authors,
   num_debates, dataset_size, all_author_indices,
   all_debate_indices) = build_input_pipeline(data_dir,
                                              FLAGS.batch_size,
                                              random_state)
  counts, batch_author_indices, batch_debate_indices = iterator.get_next()
  # Stage 1 variational parameters: a Gaussian (loc, scale) pair for each
  # latent matrix; scales are parameterized by logits through softplus.
  stage_1_author_factor_loc = tf.get_variable(
      "stage_1_author_factor_loc",
      shape=[num_authors, num_debates],
      dtype=tf.float32)
  stage_1_author_intercept_loc = tf.get_variable(
      "stage_1_author_intercept_loc",
      shape=[num_authors, num_debates],
      dtype=tf.float32)
  stage_1_word_factor_loc = tf.get_variable(
      "stage_1_word_factor_loc",
      shape=[num_words, num_debates],
      dtype=tf.float32)
  stage_1_word_intercept_loc = tf.get_variable(
      "stage_1_word_intercept_loc",
      shape=[num_words, num_debates],
      dtype=tf.float32)
  stage_1_author_factor_scale_logit = tf.get_variable(
      "stage_1_author_factor_scale_logit",
      shape=[num_authors, num_debates],
      dtype=tf.float32)
  stage_1_author_intercept_scale_logit = tf.get_variable(
      "stage_1_author_intercept_scale_logit",
      shape=[num_authors, num_debates],
      dtype=tf.float32)
  stage_1_word_factor_scale_logit = tf.get_variable(
      "stage_1_word_factor_scale_logit",
      shape=[num_words, num_debates],
      dtype=tf.float32)
  stage_1_word_intercept_scale_logit = tf.get_variable(
      "stage_1_word_intercept_scale_logit",
      shape=[num_words, num_debates],
      dtype=tf.float32)
  # Softplus keeps scales positive while logits remain unconstrained.
  stage_1_author_factor_scale = tf.nn.softplus(
      stage_1_author_factor_scale_logit)
  stage_1_author_intercept_scale = tf.nn.softplus(
      stage_1_author_intercept_scale_logit)
  stage_1_word_factor_scale = tf.nn.softplus(
      stage_1_word_factor_scale_logit)
  stage_1_word_intercept_scale = tf.nn.softplus(
      stage_1_word_intercept_scale_logit)
  tf.summary.histogram("stage_1_params/author_factor_loc",
                       stage_1_author_factor_loc)
  tf.summary.histogram("stage_1_params/author_intercept_loc",
                       stage_1_author_intercept_loc)
  tf.summary.histogram("stage_1_params/word_factor_loc",
                       stage_1_word_factor_loc)
  tf.summary.histogram("stage_1_params/word_intercept_loc",
                       stage_1_word_intercept_loc)
  tf.summary.histogram("stage_1_params/author_intercept_scale",
                       stage_1_author_intercept_scale)
  tf.summary.histogram("stage_1_params/author_factor_scale",
                       stage_1_author_factor_scale)
  tf.summary.histogram("stage_1_params/word_factor_scale",
                       stage_1_word_factor_scale)
  tf.summary.histogram("stage_1_params/word_intercept_scale",
                       stage_1_word_intercept_scale)
  stage_1_author_factor_distribution = tfp.distributions.Normal(
      loc=stage_1_author_factor_loc,
      scale=stage_1_author_factor_scale)
  stage_1_author_intercept_distribution = tfp.distributions.Normal(
      loc=stage_1_author_intercept_loc,
      scale=stage_1_author_intercept_scale)
  stage_1_word_factor_distribution = tfp.distributions.Normal(
      loc=stage_1_word_factor_loc,
      scale=stage_1_word_factor_scale)
  stage_1_word_intercept_distribution = tfp.distributions.Normal(
      loc=stage_1_word_intercept_loc,
      scale=stage_1_word_intercept_scale)
  stage_1_elbo = get_stage_1_elbo(counts,
                                  batch_author_indices,
                                  batch_debate_indices,
                                  stage_1_author_factor_distribution,
                                  stage_1_author_intercept_distribution,
                                  stage_1_word_factor_distribution,
                                  stage_1_word_intercept_distribution,
                                  dataset_size,
                                  FLAGS.batch_size,
                                  num_samples=FLAGS.num_samples)
  stage_1_loss = -stage_1_elbo
  stage_1_optim = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
  # Explicit var_list keeps stage 2 variables frozen during stage 1.
  stage_1_train_op = stage_1_optim.minimize(
      stage_1_loss,
      var_list=[stage_1_author_factor_loc, stage_1_author_intercept_loc,
                stage_1_word_factor_loc, stage_1_word_intercept_loc,
                stage_1_author_factor_scale_logit,
                stage_1_author_intercept_scale_logit,
                stage_1_word_factor_scale_logit,
                stage_1_word_intercept_scale_logit])
  # Stage 2 variational parameters (per-author / per-debate vectors).
  stage_2_ideal_point_loc = tf.get_variable(
      "stage_2_ideal_point_loc",
      shape=[num_authors],
      dtype=tf.float32)
  stage_2_debate_factor_loc = tf.get_variable(
      "stage_2_debate_factor_loc",
      shape=[num_debates],
      dtype=tf.float32)
  stage_2_debate_intercept_loc = tf.get_variable(
      "stage_2_debate_intercept_loc",
      shape=[num_debates],
      dtype=tf.float32)
  stage_2_variance_loc = tf.get_variable(
      "stage_2_variance_loc",
      shape=[num_authors],
      dtype=tf.float32)
  stage_2_ideal_point_scale_logit = tf.get_variable(
      "stage_2_ideal_point_scale_logit",
      shape=[num_authors],
      dtype=tf.float32)
  stage_2_debate_factor_scale_logit = tf.get_variable(
      "stage_2_debate_factor_scale_logit",
      shape=[num_debates],
      dtype=tf.float32)
  stage_2_debate_intercept_scale_logit = tf.get_variable(
      "stage_2_debate_intercept_scale_logit",
      shape=[num_debates],
      dtype=tf.float32)
  stage_2_variance_scale_logit = tf.get_variable(
      "stage_2_variance_scale_logit",
      shape=[num_authors],
      dtype=tf.float32)
  stage_2_ideal_point_scale = tf.nn.softplus(stage_2_ideal_point_scale_logit)
  stage_2_debate_factor_scale = tf.nn.softplus(
      stage_2_debate_factor_scale_logit)
  stage_2_debate_intercept_scale = tf.nn.softplus(
      stage_2_debate_intercept_scale_logit)
  stage_2_variance_scale = tf.nn.softplus(stage_2_variance_scale_logit)
  tf.summary.histogram("stage_2_params/ideal_point_loc",
                       stage_2_ideal_point_loc)
  tf.summary.histogram("stage_2_params/debate_factor_loc",
                       stage_2_debate_factor_loc)
  tf.summary.histogram("stage_2_params/debate_intercept_loc",
                       stage_2_debate_intercept_loc)
  tf.summary.histogram("stage_2_params/variance_loc",
                       stage_2_variance_loc)
  tf.summary.histogram("stage_2_params/ideal_point_scale",
                       stage_2_ideal_point_scale)
  tf.summary.histogram("stage_2_params/debate_factor_scale",
                       stage_2_debate_factor_scale)
  tf.summary.histogram("stage_2_params/debate_intercept_scale",
                       stage_2_debate_intercept_scale)
  tf.summary.histogram("stage_2_params/variance_scale",
                       stage_2_variance_scale)
  stage_2_ideal_point_distribution = tfp.distributions.Normal(
      loc=stage_2_ideal_point_loc,
      scale=stage_2_ideal_point_scale)
  stage_2_debate_factor_distribution = tfp.distributions.Normal(
      loc=stage_2_debate_factor_loc,
      scale=stage_2_debate_factor_scale)
  stage_2_debate_intercept_distribution = tfp.distributions.Normal(
      loc=stage_2_debate_intercept_loc,
      scale=stage_2_debate_intercept_scale)
  # LogNormal keeps tau_i positive (see module docstring).
  stage_2_variance_distribution = tfp.distributions.LogNormal(
      loc=stage_2_variance_loc,
      scale=stage_2_variance_scale)
  fitted_author_factor = stage_1_author_factor_loc
  # We don't want to factorize the entire [num_authors, num_debates] matrix.
  # Not all authors speak at every debate, so we only factorize the non-missing
  # values, rather than the random values.
  # Also, we use the full batch in the second stage because it fits into
  # memory. To do this, we need to use `all_author_indices` and
  # `all_debate_indices` rather than `batch_author_indices` and
  # `batch_debate_indices`.
  indices_2d = tf.concat(
      [all_author_indices[:, np.newaxis], all_debate_indices[:, np.newaxis]],
      axis=1)
  # We flatten, so `flat_fitted_author_factor` has shape `[dataset_size]`.
  flat_fitted_author_factor = tf.gather_nd(fitted_author_factor, indices_2d)
  # We stop the gradient on the fitted author factor to make sure it is not
  # being trained in the second stage.
  stage_2_elbo = get_stage_2_elbo(tf.stop_gradient(flat_fitted_author_factor),
                                  stage_2_ideal_point_distribution,
                                  stage_2_debate_factor_distribution,
                                  stage_2_debate_intercept_distribution,
                                  stage_2_variance_distribution,
                                  all_author_indices,
                                  all_debate_indices,
                                  num_samples=FLAGS.num_samples)
  stage_2_loss = -stage_2_elbo
  stage_2_optim = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
  stage_2_train_op = stage_2_optim.minimize(
      stage_2_loss,
      var_list=[stage_2_ideal_point_loc, stage_2_debate_factor_loc,
                stage_2_debate_intercept_loc, stage_2_variance_loc,
                stage_2_ideal_point_scale_logit,
                stage_2_debate_factor_scale_logit,
                stage_2_debate_intercept_scale_logit,
                stage_2_variance_scale_logit])
  # Human-readable Tensorboard text summaries via py_func wrappers.
  polarities = tf.py_func(
      functools.partial(print_polarities, vocabulary=vocabulary),
      [stage_1_word_factor_loc],
      tf.string,
      stateful=False)
  tf.summary.text("polarities", polarities)
  ideal_point_list = tf.py_func(
      functools.partial(print_ideal_points, author_map=author_map),
      [stage_2_ideal_point_loc],
      tf.string, stateful=False)
  tf.summary.text("ideal_points", ideal_point_list)
  summary = tf.summary.merge_all()
  init = tf.global_variables_initializer()
  with tf.Session() as sess:
    summary_writer = tf.summary.FileWriter(save_dir, sess.graph)
    sess.run(init)
    # Train Stage 1
    for step in range(FLAGS.max_steps):
      start_time = time.time()
      (_, stage_1_elbo_val) = sess.run([stage_1_train_op, stage_1_elbo])
      duration = time.time() - start_time
      if step % FLAGS.print_steps == 0:
        print("Stage 1: Step: {:>3d} ELBO: {:.3f} ({:.3f} sec)".format(
            step, stage_1_elbo_val, duration))
        summary_str = sess.run(summary)
        summary_writer.add_summary(summary_str, step)
        summary_writer.flush()
    # Train Stage 2
    for step in range(FLAGS.max_steps):
      start_time = time.time()
      (_, stage_2_elbo_val) = sess.run([stage_2_train_op, stage_2_elbo])
      duration = time.time() - start_time
      if step % FLAGS.print_steps == 0:
        print("Stage 2: Step: {:>3d} ELBO: {:.3f} ({:.3f} sec)".format(
            step, stage_2_elbo_val, duration))
        summary_str = sess.run(summary)
        summary_writer.add_summary(summary_str, step)
        summary_writer.flush()
      # Periodically checkpoint the fitted variational parameters as .npy.
      if step % 500 == 0:
        (stage_1_author_factor_loc_val, stage_1_author_factor_scale_val,
         stage_2_ideal_point_loc_val, stage_2_ideal_point_scale_val,
         stage_2_debate_factor_loc_val, stage_2_debate_factor_scale_val,
         stage_2_variance_loc_val, stage_2_variance_scale_val) = sess.run([
             stage_1_author_factor_loc, stage_1_author_factor_scale,
             stage_2_ideal_point_loc, stage_2_ideal_point_scale,
             stage_2_debate_factor_loc, stage_2_debate_factor_scale,
             stage_2_variance_loc, stage_2_variance_scale])
        np.save(os.path.join(param_save_dir, "stage_1_author_factor_loc"),
                stage_1_author_factor_loc_val)
        np.save(os.path.join(param_save_dir, "stage_1_author_factor_scale"),
                stage_1_author_factor_scale_val)
        np.save(os.path.join(param_save_dir, "stage_2_ideal_point_loc"),
                stage_2_ideal_point_loc_val)
        np.save(os.path.join(param_save_dir, "stage_2_ideal_point_scale"),
                stage_2_ideal_point_scale_val)
        np.save(os.path.join(param_save_dir, "stage_2_debate_factor_loc"),
                stage_2_debate_factor_loc_val)
        np.save(os.path.join(param_save_dir, "stage_2_debate_factor_scale"),
                stage_2_debate_factor_scale_val)
        np.save(os.path.join(param_save_dir, "stage_2_variance_loc"),
                stage_2_variance_loc_val)
        np.save(os.path.join(param_save_dir, "stage_2_variance_scale"),
                stage_2_variance_scale_val)
if __name__ == "__main__":
  # tf.app.run parses absl FLAGS and then invokes main(argv).
  tf.app.run()
|
{"hexsha": "7b045b3690750bddb8d30d382de39193ef8bdfe3", "size": 30366, "ext": "py", "lang": "Python", "max_stars_repo_path": "model_comparison/wordshoal.py", "max_stars_repo_name": "n-longuetmarx/tbip", "max_stars_repo_head_hexsha": "c6f137167aec8075c2ae98183cdf4c5e7dbc700a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 31, "max_stars_repo_stars_event_min_datetime": "2020-05-14T02:30:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T13:39:29.000Z", "max_issues_repo_path": "model_comparison/wordshoal.py", "max_issues_repo_name": "n-longuetmarx/tbip", "max_issues_repo_head_hexsha": "c6f137167aec8075c2ae98183cdf4c5e7dbc700a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2020-04-30T20:06:47.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-09T23:45:06.000Z", "max_forks_repo_path": "model_comparison/wordshoal.py", "max_forks_repo_name": "n-longuetmarx/tbip", "max_forks_repo_head_hexsha": "c6f137167aec8075c2ae98183cdf4c5e7dbc700a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2020-05-25T03:25:42.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-14T17:12:21.000Z", "avg_line_length": 42.4699300699, "max_line_length": 80, "alphanum_fraction": 0.6880063229, "include": true, "reason": "import numpy,import scipy", "num_tokens": 6976}
|
import datetime
import warnings
import numpy as np
from estimagic.differentiation.derivatives import first_derivative
from estimagic.exceptions import get_traceback
from estimagic.logging.database_utilities import append_row
from estimagic.utilities import hash_array
# User-facing error messages raised when the criterion or derivative fails at
# parameter values the optimizer has already used, so a penalty substitution
# would be inconsistent with earlier evaluations.
DERIVATIVE_ERROR_MESSAGE = (
    "Error during derivative evaluation at parameters at which the criterion has "
    "already been evaluated and used by the optimizer before. Thus it is not possible "
    "to simply replace the criterion function by a penalty function."
)
CRITERION_ERROR_MESSAGE = (
    "Error during criterion evaluation at parameters at which the derivative has "
    "already been evaluated and used by the optimizer before. Thus it is not possible "
    "to simply replace the criterion function by a penalty function."
)
# Template with three format slots: algorithm name, required entry, and the
# entries actually returned.  Fixed the typo "enties" -> "entries".
NO_PRIMARY_MESSAGE = (
    "The primary criterion entry of the {} algorithm is {} but the output of your "
    "criterion function only contains the entries:\n{}"
)
def internal_criterion_and_derivative_template(
    x,
    *,
    task,
    direction,
    criterion,
    params,
    reparametrize_from_internal,
    convert_derivative,
    algorithm_info,
    derivative,
    criterion_and_derivative,
    numdiff_options,
    database,
    database_path,
    log_options,
    error_handling,
    error_penalty,
    first_criterion_evaluation,
    cache,
    cache_size,
):
    """Template for the internal criterion and derivative function.
    The internal criterion and derivative function only has the arguments x and task
    and algorithm_info. The other arguments will be partialed in by estimagic at some
    point. Algorithm_info and possibly even task will be partialed in by the algorithm.
    That is the reason why this function is called a template.
    Args:
        x (np.ndarray): 1d numpy array with internal parameters.
        task (str): One of "criterion", "derivative" and "criterion_and_derivative".
        direction (str): One of "maximize" or "minimize"
        criterion (callable): (partialed) user provided criterion function that takes a
            parameter dataframe as only argument and returns a scalar, an array like
            object or a dictionary. See :ref:`criterion`.
        params (pd.DataFrame): see :ref:`params`
        reparametrize_from_internal (callable): Function that takes x and returns a
            numpy array with the values of the external parameters.
        convert_derivative (callable): Function that takes the derivative of criterion
            at the external version of x and x and returns the derivative
            of the internal criterion.
        algorithm_info (dict): Dict with the following entries:
            "primary_criterion_entry": One of "value", "contributions" and
            "root_contributions"
            "parallelizes": Bool that indicates if the algorithm calls the internal
            criterion function in parallel. If so, caching is disabled.
            "needs_scaling": bool
            "name": string
        derivative (callable, optional): (partialed) user provided function that
            calculates the first derivative of criterion. For most algorithm, this is
            the gradient of the scalar output (or "value" entry of the dict). However
            some algorithms (e.g. bhhh) require the jacobian of the "contributions"
            entry of the dict. You will get an error if you provide the wrong type of
            derivative.
        criterion_and_derivative (callable): Function that returns criterion
            and derivative as a tuple. This can be used to exploit synergies in the
            evaluation of both functions. The fist element of the tuple has to be
            exactly the same as the output of criterion. The second has to be exactly
            the same as the output of derivative.
        numdiff_options (dict): Keyword arguments for the calculation of numerical
            derivatives. See :ref:`first_derivative` for details. Note that the default
            method is changed to "forward" for speed reasons.
        database (sqlalchemy.MetaData): Bound MetaData object.
        database_path (pathlib.Path): Path to the database.
        log_options (dict): Additional keyword arguments to configure the logging.
        error_handling (str): Either "raise" or "continue". Note that "continue" does
            not absolutely guarantee that no error is raised but we try to handle as
            many errors as possible in that case without aborting the optimization.
        error_penalty (dict): Dict with the entries "constant" (float) and "slope"
            (float). If the criterion or derivative raise an error and error_handling is
            "continue", return ``constant + slope * norm(params - start_params)`` where
            ``norm`` is the euclidean distance as criterion value and adjust the
            derivative accordingly. This is meant to guide the optimizer back into a
            valid region of parameter space (in direction of the start parameters).
            Note that the constant has to be high enough to ensure that the penalty is
            actually a bad function value. The default constant is 2 times the criterion
            value at the start parameters. The default slope is 0.1.
        first_criterion_evaluation (dict): Dictionary with entries "internal_params",
            "external_params", "output".
        cache (dict): Dictionary used as cache for criterion and derivative evaluations.
        cache_size (int): Number of evaluations that are kept in cache. Default 10.
    Returns:
        float, np.ndarray or tuple: If task=="criterion" it returns the output of
        criterion which can be a float or 1d numpy array. If task=="derivative" it
        returns the first derivative of criterion, which is a numpy array.
        If task=="criterion_and_derivative" it returns both as a tuple.
    """
    # Least-squares ("root_contributions") optimizers minimize a sum of squares by
    # construction, so maximization is rejected up front.
    if algorithm_info["primary_criterion_entry"] == "root_contributions":
        if direction == "maximize":
            msg = (
                "Optimizers that exploit a least squares structure like {} can only be "
                "used for minimization."
            )
            raise ValueError(msg.format(algorithm_info["name"]))
    # Look up earlier evaluations at this x and work out what still must be computed.
    x_hash = hash_array(x)
    cache_entry = cache.get(x_hash, {})
    to_dos = _determine_to_dos(task, cache_entry, derivative, criterion_and_derivative)
    caught_exceptions = []
    new_criterion, new_derivative, new_external_derivative = None, None, None
    # Map the internal parameter vector to the external params DataFrame.
    current_params = params.copy()
    external_x = reparametrize_from_internal(x)
    current_params["value"] = external_x
    if to_dos == []:
        pass
    elif "numerical_criterion_and_derivative" in to_dos:
        # No closed-form derivative is available: differentiate the criterion
        # numerically (this also yields the criterion value as a by-product).
        def func(x):
            external_x = reparametrize_from_internal(x)
            p = params.copy()
            p["value"] = external_x
            return criterion(p)
        options = numdiff_options.copy()
        options["key"] = algorithm_info["primary_criterion_entry"]
        options["f0"] = cache_entry.get("criterion", None)
        options["return_func_value"] = True
        try:
            derivative_dict = first_derivative(func, x, **options)
            new_derivative = {
                algorithm_info["primary_criterion_entry"]: derivative_dict["derivative"]
            }
            new_criterion = derivative_dict["func_value"]
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception as e:
            caught_exceptions.append(get_traceback())
            # The optimizer already used the criterion at this x; replacing it with
            # a penalty now would be inconsistent, so abort instead.
            if "criterion" in cache_entry:
                raise Exception(DERIVATIVE_ERROR_MESSAGE) from e
    elif "criterion_and_derivative" in to_dos:
        try:
            new_criterion, new_external_derivative = criterion_and_derivative(
                current_params
            )
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception as e:
            caught_exceptions.append(get_traceback())
            if "criterion" in cache_entry:
                raise Exception(DERIVATIVE_ERROR_MESSAGE) from e
    else:
        if "criterion" in to_dos:
            try:
                new_criterion = criterion(current_params)
            except (KeyboardInterrupt, SystemExit):
                raise
            except Exception as e:
                caught_exceptions.append(get_traceback())
                if "derivative" in cache_entry:
                    raise Exception(CRITERION_ERROR_MESSAGE) from e
        if "derivative" in to_dos:
            try:
                new_external_derivative = derivative(current_params)
            except (KeyboardInterrupt, SystemExit):
                raise
            except Exception as e:
                caught_exceptions.append(get_traceback())
                if "criterion" in cache_entry:
                    raise Exception(DERIVATIVE_ERROR_MESSAGE) from e
    # Convert a user-provided (external) derivative into the internal parameter space.
    if new_derivative is None and new_external_derivative is not None:
        if not isinstance(new_external_derivative, dict):
            new_external_derivative = {
                algorithm_info["primary_criterion_entry"]: new_external_derivative
            }
        new_derivative = {
            k: convert_derivative(v, internal_values=x)
            for k, v in new_external_derivative.items()
        }
    if caught_exceptions:
        if error_handling == "continue":
            # Replace the failed evaluation by a penalty pointing back towards the
            # start parameters; the error itself is surfaced as a warning.
            new_criterion, new_derivative = _penalty_and_derivative(
                x, first_criterion_evaluation, error_penalty, algorithm_info
            )
            warnings.warn("\n\n".join(caught_exceptions))
        else:
            raise Exception("\n\n".join(caught_exceptions))
    # Caching is disabled for parallelizing algorithms because the plain dict used
    # as cache is not synchronized across workers.
    if not algorithm_info["parallelizes"] and cache_size >= 1:
        _cache_new_evaluations(new_criterion, new_derivative, x_hash, cache, cache_size)
    new_criterion = _check_and_harmonize_criterion_output(
        cache_entry.get("criterion", new_criterion), algorithm_info
    )
    new_derivative = _check_and_harmonize_derivative(
        cache_entry.get("derivative", new_derivative), algorithm_info
    )
    if (new_criterion is not None or new_derivative is not None) and database:
        _log_new_evaluations(
            new_criterion=new_criterion,
            new_derivative=new_derivative,
            external_x=external_x,
            caught_exceptions=caught_exceptions,
            database=database,
            database_path=database_path,
            log_options=log_options,
        )
    res = _get_output_for_optimizer(
        new_criterion, new_derivative, task, algorithm_info, direction
    )
    return res
def _determine_to_dos(task, cache_entry, derivative, criterion_and_derivative):
"""Determine which functions have to be evaluated at the new parameters.
Args:
task (str): One of "criterion", "derivative", "criterion_and_derivative"
cache_entry (dict): Possibly empty dict.
derivative (Callable or None): Only used to determine if a closed form
derivative is available.
criterion_and_derivative (callable or None): Only used to determine if this
function is available.
Returns:
list: List of functions that have to be evaluated. Possible values are:
- [] if nothing has to be done
- ["criterion_and_derivative"]
- ["numerical_criterion_and_derivative"]
- ["criterion", "derivative"]
- ["criterion"]
- ["derivative"]
"""
criterion_needed = "criterion" in task and "criterion" not in cache_entry
derivative_needed = "derivative" in task and "derivative" not in cache_entry
to_dos = []
if criterion_and_derivative is not None and criterion_needed and derivative_needed:
to_dos.append("criterion_and_derivative")
elif (
derivative is None
and criterion_and_derivative is not None
and derivative_needed
):
to_dos.append("criterion_and_derivative")
elif derivative is None and derivative_needed:
to_dos.append("numerical_criterion_and_derivative")
else:
if derivative_needed:
to_dos.append("derivative")
if criterion_needed:
to_dos.append("criterion")
return to_dos
def _penalty_and_derivative(x, first_eval, error_penalty, algorithm_info):
    """Return the penalty value and derivative for the algorithm's primary entry.

    Dispatches on ``algorithm_info["primary_criterion_entry"]`` and shapes the
    penalty accordingly ("value": scalar, "contributions"/"root_contributions":
    arrays of the output dimension observed at the first evaluation).
    """
    constant = error_penalty["constant"]
    slope = error_penalty["slope"]
    x0 = first_eval["internal_params"]
    primary = algorithm_info["primary_criterion_entry"]
    if primary == "value":
        return (
            _penalty_value(x, constant, slope, x0),
            _penalty_value_derivative(x, constant, slope, x0),
        )
    if primary == "contributions":
        dim_out = len(first_eval["output"][primary])
        return (
            _penalty_contributions(x, constant, slope, x0, dim_out),
            _penalty_contributions_derivative(x, constant, slope, x0, dim_out),
        )
    if primary == "root_contributions":
        dim_out = len(first_eval["output"][primary])
        return (
            _penalty_root_contributions(x, constant, slope, x0, dim_out),
            _penalty_root_contributions_derivative(x, constant, slope, x0, dim_out),
        )
    raise ValueError()
def _penalty_value(x, constant, slope, x0, dim_out=None):
return constant + slope * np.linalg.norm(x - x0)
def _penalty_contributions(x, constant, slope, x0, dim_out):
contrib = (constant + slope * np.linalg.norm(x - x0)) / dim_out
return np.ones(dim_out) * contrib
def _penalty_root_contributions(x, constant, slope, x0, dim_out):
contrib = np.sqrt((constant + slope * np.linalg.norm(x - x0)) / dim_out)
return np.ones(dim_out) * contrib
def _penalty_value_derivative(x, constant, slope, x0, dim_out=None):
return slope * (x - x0) / np.linalg.norm(x - x0)
def _penalty_contributions_derivative(x, constant, slope, x0, dim_out):
row = slope * (x - x0) / (dim_out * np.linalg.norm(x - x0))
return np.full((dim_out, len(x)), row)
def _penalty_root_contributions_derivative(x, constant, slope, x0, dim_out):
    """Jacobian of the root-contributions penalty via the chain rule."""
    diff = x - x0
    inner_deriv = slope * diff / np.linalg.norm(diff)
    outer_deriv = 0.5 / np.sqrt(dim_out * _penalty_value(x, constant, slope, x0))
    return np.tile(outer_deriv * inner_deriv, (dim_out, 1))
def _cache_new_evaluations(new_criterion, new_derivative, x_hash, cache, cache_size):
cache_entry = cache.get(x_hash, {}).copy()
if len(cache) >= cache_size:
# list(dict) returns keys in insertion order: https://tinyurl.com/o464nrz
oldest_entry = list(cache)[0]
del cache[oldest_entry]
if new_criterion is not None:
cache_entry["criterion"] = new_criterion
if new_derivative is not None:
cache_entry["derivative"] = new_derivative
cache[x_hash] = cache_entry
def _check_and_harmonize_criterion_output(output, algorithm_info):
algo_name = algorithm_info.get("name", "your algorithm")
primary = algorithm_info["primary_criterion_entry"]
if output is not None:
if np.isscalar(output):
output = {"value": float(output)}
if not isinstance(output, dict):
raise ValueError("The output of criterion must be a scalar or dict.")
if "contributions" not in output and "root_contributions" in output:
output["contributions"] = output["root_contributions"] ** 2
if "value" not in output and "contributions" in output:
output["value"] = output["contributions"].sum()
if primary not in output:
raise ValueError(
NO_PRIMARY_MESSAGE.format(algo_name, primary, list(output))
)
return output
def _check_and_harmonize_derivative(derivative, algorithm_info):
primary = algorithm_info["primary_criterion_entry"]
if not isinstance(derivative, dict) and derivative is not None:
derivative = {primary: derivative}
if derivative is None:
pass
else:
if primary not in derivative:
raise ValueError(
"If derivative returns a dict and you use an optimizer that works with "
f"{primary}, the derivative dictionary also must contain {primary} "
"as a key."
)
if "value" in derivative and np.atleast_2d(derivative["value"]).shape[0] != 1:
raise ValueError("The derivative of a scalar optimizer must be a 1d array.")
if "contributions" in derivative:
if len(derivative["contributions"].shape) != 2:
raise ValueError(
"The derivative of an optimizer that exploits a sum "
"structure must be a 2d array."
)
if "root_contributions" in derivative:
if len(derivative["root_contributions"].shape) != 2:
raise ValueError(
"The derivative of an optimizer that exploits a least squares "
"structure must be a 2d array."
)
return derivative
def _log_new_evaluations(
    new_criterion,
    new_derivative,
    external_x,
    caught_exceptions,
    database,
    database_path,
    log_options,
):
    """Write the new evaluations and additional information into the database.
    Note: There are some seemingly unnecessary type conversions because sqlalchemy
    can fail silently when called with numpy dtypes instead of the equivalent python
    types.
    """
    data = {
        "params": external_x,
        "timestamp": datetime.datetime.now(),
        "valid": True,
    }
    if new_derivative is not None:
        data["internal_derivative"] = new_derivative
    if caught_exceptions:
        divider = "\n" + "=" * 80 + "\n"
        data["exceptions"] = divider.join(caught_exceptions)
        data["valid"] = False
    if new_criterion is not None:
        data.update(new_criterion)
        # plain float, not numpy scalar -- see note above
        data["value"] = float(data["value"])
    if "suffix" in log_options:
        table_name = "optimization_iterations" + "_" + log_options["suffix"]
    else:
        table_name = "optimization_iterations"
    fast_logging = log_options.get("fast_logging", False)
    append_row(data, table_name, database, database_path, fast_logging)
def _get_output_for_optimizer(
new_criterion, new_derivative, task, algorithm_info, direction
):
primary = algorithm_info["primary_criterion_entry"]
if "criterion" in task:
crit = new_criterion[primary]
crit = crit if np.isscalar(crit) else np.array(crit)
crit = crit if direction == "minimize" else -crit
if "derivative" in task:
deriv = np.array(new_derivative[primary])
deriv = deriv if direction == "minimize" else -deriv
if task == "criterion_and_derivative":
res = (crit, deriv)
elif task == "criterion":
res = crit
elif task == "derivative":
res = deriv
else:
raise ValueError()
return res
|
{"hexsha": "831bf2d514bd14abaa184ece9260db339d2e0da6", "size": 19039, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/estimagic/optimization/internal_criterion_template.py", "max_stars_repo_name": "OpenSourceEconomics/estimagic", "max_stars_repo_head_hexsha": "85163b4cdc601d60d654c6ca1f42b9db17a130a3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 83, "max_stars_repo_stars_event_min_datetime": "2019-09-26T04:44:03.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T20:24:02.000Z", "max_issues_repo_path": "src/estimagic/optimization/internal_criterion_template.py", "max_issues_repo_name": "OpenSourceEconomics/estimagic", "max_issues_repo_head_hexsha": "85163b4cdc601d60d654c6ca1f42b9db17a130a3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 243, "max_issues_repo_issues_event_min_datetime": "2019-06-25T18:15:53.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-26T09:17:44.000Z", "max_forks_repo_path": "src/estimagic/optimization/internal_criterion_template.py", "max_forks_repo_name": "OpenSourceEconomics/estimagic", "max_forks_repo_head_hexsha": "85163b4cdc601d60d654c6ca1f42b9db17a130a3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 23, "max_forks_repo_forks_event_min_datetime": "2019-07-03T11:16:55.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-07T00:57:38.000Z", "avg_line_length": 38.8551020408, "max_line_length": 88, "alphanum_fraction": 0.6604338463, "include": true, "reason": "import numpy", "num_tokens": 4063}
|
# stolen from http://nbviewer.jupyter.org/github/BVLC/caffe/blob/master/examples/brewing-logreg.ipynb
import time
start_time = time.time()
import os
caffe_home = os.environ['CAFFE_HOME']
import sys
sys.path.insert(0, os.path.join(caffe_home, 'python'))
import caffe
import h5py
import numpy as np
import pickle
import scipy.io
import shutil
import sklearn
import tempfile
import fnmatch
# Command-line arguments: GPU device id, number of output categories, and the
# working directory that holds the HDF5 train/test data.
gpu_to_use = int(sys.argv[1])
nCategories = int(sys.argv[2])
dirname = sys.argv[3]
# PART 1: define the top_k function
def top_k(features, labels, k):
    """Fraction of examples whose true label is among the k highest-scored classes.

    features: (n_examples, n_classes) score matrix; labels: length-n_examples
    array of integer class indices.
    """
    n_examples = len(labels)
    hits = np.zeros(n_examples)
    for idx in range(n_examples):
        ranked = np.argsort(features[idx, :])
        # the last k positions of the argsort are the k best-scored classes
        hits[idx] = int(labels[idx] in ranked[-k:])
    return np.mean(hits)
# PART 2: write out the data for Caffe
## write the training data
# Collect every '*train*.h5' file in the working directory: list each path in
# train.txt (consumed by the HDF5Data layer) and keep an open h5py handle so the
# example count can be summed later. NOTE: `file` shadows the py2 builtin.
train_f = []
with open(os.path.join(dirname, 'train.txt'), 'w') as f:
    for file in os.listdir(dirname):
        if fnmatch.fnmatch(file, '*train*.h5'):
            file_name = os.path.join(dirname,file)
            f.write(file_name + '\n')
            train_f += [h5py.File(file_name,'r')]
## write the testing data - fixed in size
# The test set is a single fixed file, test.h5; its handle stays open for PART 6.
test_filename = os.path.join(dirname, 'test.h5')
with open(os.path.join(dirname, 'test.txt'), 'w') as f:
    f.write(test_filename + '\n')
test_f = h5py.File(test_filename,'r')
# PART 3: Define the network
from caffe import layers as L
from caffe import params as P
def logreg(hdf5, batch_size):
    """Build a caffe NetSpec for multinomial logistic regression.

    Pipeline: HDF5 data -> log transform -> batch norm + scale -> dropout ->
    inner product with `nCategories` (module global) outputs -> softmax loss.
    Returns the network as a caffe proto message.
    """
    # read in the data
    n = caffe.NetSpec()
    n.data, n.label = L.HDF5Data(batch_size=batch_size, source=hdf5, ntop=2)
    # a bit of preprocessing - helpful!
    # base=-1 selects the natural logarithm in caffe's Log layer -- TODO confirm
    n.log = L.Log(n.data, base=-1, scale=1, shift=1)
    n.norm = L.BatchNorm(n.log,use_global_stats=False)
    n.scaled = L.Scale(n.norm, bias_term=True)
    # the actual regression - the core of what we want to do!
    n.dropout = L.Dropout(n.scaled, dropout_ratio=0.5)
    n.ip = L.InnerProduct(n.dropout, num_output=nCategories, weight_filler=dict(type='xavier'))
    # don't mess with these. They don't affect learning.
    n.prob = L.Softmax(n.ip)
    n.accuracy1 = L.Accuracy(n.prob, n.label)
    if nCategories > 5:
        n.accuracy5 = L.Accuracy(n.prob, n.label, top_k = 5)
    n.loss = L.SoftmaxWithLoss(n.ip, n.label)
    return n.to_proto()
# Serialize the train and test network definitions (batch size 100 for both).
train_net_path = os.path.join(dirname, 'logreg_auto_train.prototxt')
with open(train_net_path, 'w') as f:
    f.write(str(logreg(os.path.join(dirname, 'train.txt'), 100)))
test_net_path = os.path.join(dirname, 'logreg_auto_test.prototxt')
with open(test_net_path, 'w') as f:
    f.write(str(logreg(os.path.join(dirname, 'test.txt'), 100)))
# PART 4: Define the solver
from caffe.proto import caffe_pb2
# for 1,000 categories, should be base_lr = 0.001, and weight_decay = 5e-3
def solver(train_net_path, test_net_path, n_examples, batch_size):
    """Build a caffe SolverParameter for SGD training.

    Args:
        train_net_path: path to the training-net prototxt.
        test_net_path: path to the test-net prototxt.
        n_examples: total number of training examples (sizes the run length).
        batch_size: training batch size.

    Returns:
        caffe_pb2.SolverParameter: fully populated solver configuration.
    """
    min_max_iter = 10000
    target_epochs = 100
    # n_iters should be 10000 iters or 100 epochs, whichever comes later.
    # Floor division (//) keeps these integers under both Python 2 and 3; the
    # plain / reproduced py2 floor semantics only by accident and would feed
    # floats into the integer protobuf fields under py3.
    n_iters = max(target_epochs * n_examples // batch_size, min_max_iter)
    s = caffe_pb2.SolverParameter()
    s.train_net = train_net_path             # where train network is
    s.test_net.append(test_net_path)         # where test network is
    s.test_interval = n_iters // 100         # test after every 1 epochs
    s.test_iter.append(500)                  # Test 500 batches each time we test.
    s.max_iter = n_iters                     # the number of training iterations
    s.base_lr = 1e-6                         # the initial learning rate for SGD.
    s.lr_policy = 'step'                     # lr <- lr*gamma every stepsize iters
    s.gamma = 0.9
    s.stepsize = n_iters // 25
    s.momentum = 0.9                         # weighted avg of current and previous gradients
    s.weight_decay = 0                       # regularizes learning to help prevent overfitting
    s.display = min(n_iters // 100, 1000)    # display outputs every so often
    s.snapshot = n_iters                     # snapshot at the end
    s.snapshot_prefix = os.path.join(dirname, 'train')
    s.solver_mode = caffe_pb2.SolverParameter.GPU
    return s
# Write the solver definition; n_examples is the total row count across all
# open training HDF5 files.
solver_path = os.path.join(dirname, 'logreg_solver.prototxt')
with open(solver_path, 'w') as f:
    f.write(str(solver(train_net_path, test_net_path, sum([len(f['data']) for f in train_f]), 100)))
# PART 5: run the solver
# print() calls (instead of py2 print statements) behave identically under
# Python 2 and keep the script valid under Python 3.
print('GPU used: {:d}'.format(gpu_to_use))
caffe.set_mode_gpu()
caffe.set_device(gpu_to_use)
# NOTE: rebinds the name `solver` (previously the builder function); the
# function is not called again, so this is harmless.
solver = caffe.get_solver(solver_path)
solver.solve()
# PART 6: test the solver
labels = test_f['label'].value
batch_size = solver.test_nets[0].blobs['data'].num
test_iters = int(len(labels) / batch_size)
features = np.zeros((len(labels),nCategories))
# Forward the test net batch by batch and collect the softmax outputs; any
# trailing partial batch beyond test_iters full batches is left as zeros.
for i in range(test_iters):
    solver.test_nets[0].forward()
    start = i*batch_size
    stop = min(len(labels),start + batch_size)
    features[start:stop,:] = solver.test_nets[0].blobs['prob'].data[0:(stop-start),:]
# PART 7: save results
os.remove(os.path.join(dirname, 'train.txt'))
# NOTE(review): training data was matched with pattern '*train*.h5' above, but
# only 'train.h5' is removed here -- confirm other matched files should remain.
os.remove(os.path.join(dirname, 'train.h5'))
os.remove(os.path.join(dirname, 'test.txt'))
os.remove(os.path.join(dirname, 'test.h5'))
result_file = os.path.join(dirname, 'results.mat')
top_1 = top_k(features,labels,1)
top_5 = top_k(features,labels,5)
scipy.io.savemat(result_file,{'features' : features,
                              'top_1' : top_1,
                              'top_5' : top_5})
# PART 8: report the results
stop_time = time.time()
print('top-1: {:.3f}'.format(top_1))
print('top-5: {:.3f}'.format(top_5))
print('time to train and test classifier: {:.2f}s'.format(stop_time-start_time))
|
{"hexsha": "605f10445c9bf8973333b9ba12185eeae43df359", "size": 5525, "ext": "py", "lang": "Python", "max_stars_repo_path": "classification/mnlr.py", "max_stars_repo_name": "joshrule/matlab-utils", "max_stars_repo_head_hexsha": "8780791dd42d4afa3b7a3c2884ec42fae3f8face", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "classification/mnlr.py", "max_issues_repo_name": "joshrule/matlab-utils", "max_issues_repo_head_hexsha": "8780791dd42d4afa3b7a3c2884ec42fae3f8face", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "classification/mnlr.py", "max_forks_repo_name": "joshrule/matlab-utils", "max_forks_repo_head_hexsha": "8780791dd42d4afa3b7a3c2884ec42fae3f8face", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8766233766, "max_line_length": 101, "alphanum_fraction": 0.6747511312, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1526}
|
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Module for a distribution network.
"""
# Standard library
import sys
from itertools import product
from math import sqrt

# Third-party
import networkx as nx
import yaml
from graphillion import GraphSet

# Local
from dnet.configset import ConfigSet
from dnet.unionfind import UnionFind
from dnet.util import flatten, is_tree
class Graph(object):
    """Is a dual representation graph of the distribution network.

    Vertices stand for junctions between sections and edges for switches; the
    translation tables below map between the two representations.
    """
    def __init__(self):
        self.graph = None            # networkx.Graph built from `edges` (set in _build_graph)
        self.edges = []              # list of (vertex, vertex) pairs, one per switch
        self.roots = set()           # vertex ids that correspond to substation sections
        self._switch2edge = {}       # switch name -> edge tuple
        self._edge2switch = {}       # edge tuple -> switch name
        self._section2vertex = {}    # section name -> vertex id
        self._vertex2sections = {}   # vertex id -> tuple of joined section names
class Node(object):
    """Represents a ZDD node.

    Parsed from one whitespace-separated line of a ZDD dump of the form
    ``"<id> <var> <lo> <hi>"``.
    """
    def __init__(self, line):
        # The parameter was previously named `str`, shadowing the builtin;
        # renamed (all in-file call sites pass it positionally).
        n, v, l, h = line.split()
        self.n = n       # node id
        self.v = int(v)  # variable (level) index
        self.l = l       # id of the low (0-) child
        self.h = h       # id of the high (1-) child
class SearchSpace(object):
    """Represents a search space for the optimization.

    Holds a weighted digraph over ZDD nodes that `Network.optimize` searches
    with Dijkstra between `start` and `end`.
    """
    def __init__(self):
        self.graph = nx.DiGraph()  # populated incrementally during optimization
        self.start = None          # id of the entry node
        self.end = None            # id of the terminal node
class Network(object):
"""Represents a distribution network.
"""
def __init__(self, file_or_dir, format=None):
if format == 'fukui-tepco':
from dnet.converter import FukuiTepcoConverter
obj = yaml.load(FukuiTepcoConverter(file_or_dir).convert())
else:
obj = yaml.load(open(file_or_dir))
self.nodes = obj['nodes']
self.sections = obj['sections']
self.switches = obj['switches']
self._switch_set = set(self.switches) # for fast membership query
for s in self.sections.values():
l = s['load']
z = s['impedance']
s['load'] = []
s['impedance'] = []
for i in range(Network.NUM_PHASES):
s['load'].append(l[2*i] + l[2*i + 1]*1j)
s['impedance'].append(z[2*i] + z[2*i + 1]*1j)
if [l for s in self.sections.values() for l in s['load'] if l.real < 0]:
msg = 'Warning: it is assumed that section loads are non-negative'
sys.stderr.write(msg + '\n')
self._neighbor_cache = {}
self.graph = self._build_graph()
self.search_space = SearchSpace()
self._elec_feasible_configs = {}
def enumerate(self, topology_constraints_only=None, suspicious_cut=[]):
gs = self._enumerate_forests(suspicious_cut)
if not topology_constraints_only:
for root in self._get_root_sections():
if root not in self._elec_feasible_configs:
self._elec_feasible_configs[root] = self._enumerate_trees(root)
gs &= self._elec_feasible_configs[root]
return ConfigSet(self, gs)
    def loss(self, config, is_optimal=False):
        """Return the total resistive loss of a configuration.

        Args:
            config: iterable of closed switches describing the configuration.
            is_optimal: if True, additionally return a theoretical lower bound
                (optimal split of the total load over the root sections plus the
                actual non-root losses) as the second tuple element.

        Returns:
            float, or (loss, bound) tuple when is_optimal is True.
        """
        loss = 0
        for s in self._get_root_sections():
            loss += self._calc_loss(s, set(config), set())
        if not is_optimal:
            return loss
        else:
            lower_bound = 0 # theoretical lower bound in root sections
            for i in range(Network.NUM_PHASES):
                total_loads = 0.0
                for s in self.sections:
                    total_loads += self.sections[s]['load'][i]
                # parallel-resistor style split of the total load over all roots
                resistance_sum = 0.0
                for s in self._get_root_sections():
                    resistance_sum += 1 / self.sections[s]['impedance'][i].real
                for root in self._get_root_sections():
                    resistance = self.sections[root]['impedance'][i].real
                    current = total_loads / (resistance * resistance_sum)
                    lower_bound += self._do_calc_loss(current, resistance)
            comp_loss = 0 # loss without root sections
            for s in self._get_root_sections():
                comp_loss += self._calc_loss(s, set(config), set(), no_root=True)
            return loss, lower_bound + comp_loss
    def optimize(self, gs):
        """Search `gs` for a loss-minimizing configuration of closed switches.

        Loads the ZDD dump of `gs` into `self._zdd`, rebuilds a search-space
        digraph per component (via `self._rebuild`, defined elsewhere), and runs
        Dijkstra from the ZDD root to the terminal 'T'.

        Returns:
            Sorted list of switches to close.
        """
        comps = self._find_components()
        # Sentinel bottom/top terminals, indexed one past the last switch level.
        self._zdd = { 'B': Node('B %d B B ' % (len(self.switches) + 1)),
                      'T': Node('T %d T T ' % (len(self.switches) + 1)) }
        for line in gs.dumps().split('\n'):
            if line.startswith('.'):
                break
            n = Node(line)
            self._zdd[n.n] = n
        # the last parsed node of the dump is used as the search entry point
        self.search_space.start = n.n
        entries = set([self.search_space.start])
        for comp in comps:
            entries = self._rebuild(entries, comp)
        self.search_space.end = 'T'
        path = nx.dijkstra_path(self.search_space.graph, self.search_space.start,
                                self.search_space.end)
        # Union the per-arc switch sets along the cheapest path.
        closed_switches = []
        for i in range(len(path) - 1):
            x, y = path[i], path[i + 1]
            closed_switches.extend(list(self.search_space.graph[x][y]['config']))
        return sorted(list(set(closed_switches)))
    def unrestorable_cuts(self, max_cut_size):
        """Find section cuts of at most `max_cut_size` with no feasible restoration.

        Candidate cuts are derived from minimal hitting sets of the feasible
        configurations (via `self._cut_from_hit`, defined elsewhere); a cut is
        unrestorable when enumeration under it yields no configuration.

        Returns:
            List of tuples of section groups forming unrestorable cuts.
        """
        unrestorable_cuts = set()
        graph = self.graph.graph
        max_degree = max([graph.degree(v) for v in graph.nodes()])
        configs = self.enumerate()
        hitting_sets = configs.hitting().minimal()
        for k in range(1, max_cut_size * (max_degree - 1) + 1):
            for hitting_set in hitting_sets.len(k):
                for suspicious_cut in self._cut_from_hit(hitting_set):
                    suspicious_cut = sorted(list(set(suspicious_cut)))
                    if len(suspicious_cut) <= max_cut_size and \
                       len(self.enumerate(suspicious_cut=suspicious_cut)) == 0:
                        unrestorable_cut = tuple([self._to_section(v) for v in suspicious_cut])
                        unrestorable_cuts.add(unrestorable_cut)
        return list(unrestorable_cuts)
    def _has_same_topology(self, other):
        # True if both networks share nodes and switches (section data may differ).
        return self.nodes == other.nodes and self.switches == other.switches
    def _to_edge(self, switch):
        # switch name -> dual-graph edge (vertex pair)
        return self.graph._switch2edge[switch]
    def _to_switch(self, edge):
        # dual-graph edge (vertex pair) -> switch name
        return self.graph._edge2switch[edge]
    def _to_section(self, vertex):
        # dual-graph vertex -> tuple of joined section names
        return self.graph._vertex2sections[vertex]
    def _to_config(self, forest):
        # list of dual-graph edges -> list of switch names
        return [self._to_switch(e) for e in forest]
    def _to_forest(self, config):
        # list of switch names -> list of dual-graph edges
        return [self._to_edge(s) for s in config]
def _get_root_sections(self):
root_sections = set()
for s in self.sections:
if self.sections[s]['substation']:
root_sections.add(s)
return root_sections
    def _find_neighbors(self, s):
        """Return all elements that share a node with `s` (memoized).

        `self.nodes` is a list of element groups; every element that co-occurs
        with `s` in some group, except `s` itself, counts as a neighbor.
        """
        if s not in self._neighbor_cache:
            neighbors = flatten([n for n in self.nodes if s in n])
            self._neighbor_cache[s] = set(neighbors) - set([s])
        return self._neighbor_cache[s]
    def _build_tree(self, root, closed_switches, processed_elems):
        """Recursively collect the (parent, child) section branches fed from `root`.

        Walks the network alternating sections and switches: a single neighbor
        must be a switch (followed only if it is closed), several neighbors form
        a junction of sections. `processed_elems` guards against walking back.

        Returns:
            List of (section, section) branch tuples.
        """
        branches = []
        neighbors = self._find_neighbors(root) - processed_elems
        if len(neighbors) == 1:
            s = neighbors.pop()
            assert s in self._switch_set
            if s in closed_switches:
                # hop over the closed switch to the section on its far side
                t = (self._find_neighbors(s) - set([root])).pop()
                branches.append((root, t))
                ps = processed_elems | set([root, s, t])
                bs = self._build_tree(t, closed_switches - set([s]), ps)
                branches.extend(bs)
        elif len(neighbors) > 1: # junction
            for s in neighbors:
                assert s in self.sections, (root, neighbors, s)
                branches.append((root, s))
            for s in neighbors:
                ps = processed_elems | set([root]) | neighbors
                bs = self._build_tree(s, closed_switches.copy(), ps)
                branches.extend(bs)
        return branches
    def _calc_current(self, root, branches):
        """Accumulate per-phase section currents for the tree rooted at `root`.

        Each section's load is propagated upwards along `branches` to every
        ancestor, so `current[s]` holds the total downstream load carried by
        section `s` for each phase.

        Returns:
            Dict mapping section id -> list of per-phase complex currents.
        """
        n_phases = Network.NUM_PHASES
        current = { root: [0, 0, 0] }
        for branch in branches:
            s, t = branch
            load = self.sections[t]['load']
            if t not in current:
                current[t] = [0, 0, 0]
            current[t] = [current[t][i] + load[i] for i in range(n_phases)]
            # walk up towards the root, adding t's load to every ancestor
            while True:
                if s not in current:
                    current[s] = [0, 0, 0]
                current[s] = [current[s][i] + load[i] for i in range(n_phases)]
                upper_branch = [b for b in branches if b[1] == s]
                assert len(upper_branch) <= 1
                if len(upper_branch) == 1:
                    s, t = upper_branch[0]
                else:
                    break
        # finally add the root's own load
        load = self.sections[root]['load']
        current[root] = [current[root][i] + load[i] for i in range(n_phases)]
        return current
    def _calc_loss(self, root, closed_switches, barrier, no_root=False):
        """Sum the resistive losses over the tree fed from `root`.

        Args:
            root: feeding section id.
            closed_switches: set of closed switches defining the tree.
            barrier: elements excluded from the tree walk.
            no_root: if True, skip substation sections in the sum.

        Returns:
            float: total |I|^2 * R loss over all phases and sections.
        """
        branches = self._build_tree(root, closed_switches, barrier)
        assert is_tree(branches), 'loop found'
        sections = set([root] + flatten(branches))
        current = self._calc_current(root, branches)
        loss = 0.0
        for s in sections:
            if no_root and self.sections[s]['substation']:
                continue
            for i in range(Network.NUM_PHASES):
                j = current[s][i]
                r = self.sections[s]['impedance'][i].real
                loss += self._do_calc_loss(j, r)
        return loss
def _do_calc_loss(self, current, resistance):
assert not isinstance(resistance, complex)
return abs(current)**2 * resistance
    def _build_graph(self):
        """Construct the dual graph: one edge per switch, one vertex per junction.

        Junctions of sections are merged onto their lexicographically smallest
        member, which serves as the canonical section of the vertex. Also
        registers the edge universe with graphillion's GraphSet.

        Returns:
            Graph: the populated dual representation.
        """
        graph = Graph()
        sorted_sections = []
        for s in self.switches:
            # sections directly adjacent to this switch
            ns = set()
            for t in self._find_neighbors(s):
                if t in self.sections:
                    ns.add(t)
            neighbors = set()
            is_root = False
            for t in sorted(ns):
                # collect the junction around t and canonicalize to its smallest member
                junctions = set([t])
                for u in self._find_neighbors(t):
                    if u in self.sections:
                        junctions.add(u)
                    if u in self.sections and u < t:
                        t = u
                neighbors.add(t)
                if t not in sorted_sections:
                    sorted_sections.append(t)
                    v = sorted_sections.index(t) + 1
                    graph._section2vertex[t] = v
                    graph._vertex2sections[v] = tuple(junctions)
            # each switch joins exactly two junction vertices
            e = tuple([sorted_sections.index(t) + 1 for t in sorted(neighbors)])
            assert len(e) == 2
            graph.edges.append(e)
            graph._switch2edge[s] = e
            graph._edge2switch[e] = s
        assert len(graph.edges) == len(self.switches)
        for s in self.sections:
            if self.sections[s]['substation']:
                # canonicalize the substation section to its junction representative
                for t in self._find_neighbors(s):
                    if t < s:
                        s = t
                graph.roots.add(sorted_sections.index(s) + 1)
        assert len(graph.roots) == len(self._get_root_sections())
        GraphSet.set_universe(graph.edges, traversal='as-is')
        graph.graph = nx.Graph(graph.edges)
        return graph
def _enumerate_forests(self, suspicious_cut):
    """Enumerate rooted spanning forests via GraphSet.graphs.

    Vertices in `suspicious_cut` are forced to be isolated (degree 0);
    roots may take any degree; every other vertex must be connected
    (degree >= 1).  One vertex group per root keeps roots in separate
    trees.  Uses Python 2 `xrange` -- this module is Python 2 code.
    """
    vg = [[r] for r in self.graph.roots]
    dc = {}
    l = len(self.graph.graph.nodes())
    for v in self.graph.graph.nodes():
        if v in suspicious_cut: dc[v] = 0
        elif v in self.graph.roots: dc[v] = xrange(l)
        else: dc[v] = xrange(1, l)
    return GraphSet.graphs(vertex_groups=vg, degree_constraints=dc,
                           no_loop=True)
def _find_neighbor_switches(self, s, processed_sections):
    """Return the set of switches reachable from `s` without crossing
    another switch.

    Recursion walks through section nodes, stopping as soon as a switch
    is reached; `processed_sections` guards against revisiting.  The
    copy() on each recursive call keeps sibling branches independent.
    """
    switches = set()
    if s in self._switch_set:
        # starting at a switch: expand through its adjacent sections
        for t in self._find_neighbors(s) - processed_sections:
            assert t in self.sections
            processed_sections.add(t)
            for u in self._find_neighbor_switches(t, processed_sections.copy()):
                switches.add(u)
    else:
        # starting at a section: adjacent switches terminate the walk,
        # adjacent sections are expanded recursively
        processed_sections.add(s)
        for t in self._find_neighbors(s) - processed_sections:
            if t in self._switch_set:
                switches.add(t)
            else:
                for u in self._find_neighbor_switches(t, processed_sections.copy()):
                    switches.add(u)
    return switches - set([s])
def _find_surrounding_switches(self, root, closed_switches):
if len(closed_switches) > 0:
switches = set()
for s in closed_switches:
for t in self._find_neighbor_switches(s, set()):
switches.add(t)
return switches - closed_switches
else:
return self._find_neighbor_switches(root, set())
def _find_border_switches(self, root):
assert self.sections[root]['substation']
border = set()
for r in self._get_root_sections() - set([root]):
for s in self._find_neighbor_switches(r, set()):
border.add(s)
return border
def _satisfies_electric_constraints(self, root, closed_switches):
    """Check current and voltage-drop limits for the tree fed from `root`.

    Returns False when the configuration is not radial, when any phase
    current at the root exceeds Network.MAX_CURRENT, or when the voltage
    at any leaf falls outside Network.VOLTAGE_RANGE.

    NOTE(review): indentation reconstructed from flattened source; the
    while-loop walks from each leaf up to the root, accumulating per-phase
    voltage drop (half the section impedance at the leaf itself, full
    impedance for every section above it).
    """
    branches = self._build_tree(root, closed_switches, set())
    if not is_tree(branches):
        return False
    current = self._calc_current(root, branches)
    for i in range(Network.NUM_PHASES):
        if abs(current[root][i]) > Network.MAX_CURRENT:
            return False
    assert len(current) == len(set(flatten(branches)))
    # leaves = sections that never appear as a branch parent
    leaves = set(flatten(branches)) - set([b[0] for b in branches])
    for s in leaves:
        voltage_drop = []
        for i in range(Network.NUM_PHASES):
            j = current[s][i]
            z = self.sections[s]['impedance'][i]
            # half impedance: load assumed at the section midpoint
            voltage_drop.append(j * z / 2)
        bs = [b for b in branches if b[1] == s]
        assert len(bs) == 1
        s, t = bs[0]  # rebind s to the parent section and climb
        while True:
            for i in range(Network.NUM_PHASES):
                j = current[s][i]
                z = self.sections[s]['impedance'][i]
                voltage_drop[i] += j * z
            upper_branch = [b for b in branches if b[1] == s]
            assert len(upper_branch) <= 1
            if len(upper_branch) == 1:
                s, t = upper_branch[0]
            else:
                break  # reached the root
        v = voltage_drop
        v0 = Network.SENDING_VOLTAGE
        vl, vh = Network.VOLTAGE_RANGE
        for i in range(Network.NUM_PHASES):
            if abs(v0 - v[i]) < vl or vh < abs(v0 - v[i]):
                return False
    return True
def _find_trees(self, closed_switches, open_switches):
    """GraphSet of edge sets that include every closed switch's edge and
    exclude every open switch's edge."""
    constraints = {'include': [self._to_edge(s) for s in closed_switches],
                   'exclude': [self._to_edge(s) for s in open_switches]}
    return GraphSet(constraints)
def _do_enumerate_trees(self, root, closed_switches, fixed_switches):
    """Recursively enumerate feasible feeder trees hanging from `root`.

    At each level one frontier switch `s` is fixed and both branches are
    explored: (1) keep `s` open, (2) close `s`.  Branch 2 is only
    expanded when the electric constraints still hold, which prunes all
    supersets of an infeasible closed set.
    """
    gs = GraphSet()
    sur_switches = self._find_surrounding_switches(root, closed_switches)
    unfixed_switches = sur_switches - fixed_switches
    if len(unfixed_switches) == 0:
        return gs
    s = sorted(unfixed_switches)[0]
    fixed_switches.add(s)
    # branch 1: s stays open
    gs |= self._do_enumerate_trees(root, closed_switches.copy(), fixed_switches.copy())
    # branch 2: close s and continue only if the tree remains feasible
    closed_switches.add(s)
    if self._satisfies_electric_constraints(root, closed_switches):
        sur_switches = self._find_surrounding_switches(root, closed_switches)
        gs |= self._find_trees(closed_switches, sur_switches)
        gs |= self._do_enumerate_trees(root, closed_switches.copy(), fixed_switches.copy())
    return gs
def _enumerate_trees(self, root):
    """Enumerate every feasible tree for one substation `root`.

    Seeds the result with the empty closed set (when feasible), then
    recurses; switches bordering other substations are pre-fixed so that
    each root's enumeration stays within its own territory.
    """
    gs = GraphSet()
    if self._satisfies_electric_constraints(root, set()):
        sur_switches = self._find_surrounding_switches(root, set())
        gs = self._find_trees(set(), sur_switches)
    border_switches = self._find_border_switches(root)
    return gs | self._do_enumerate_trees(root, set(), border_switches)
def _find_components(self):
    """Partition non-root switches/sections into independent components
    using union-find, and sanity-check switch ordering.

    NOTE(review): indentation reconstructed from flattened source.
    """
    switches = set(self.switches)
    sections = set(self.sections.keys())
    roots = self._get_root_sections()
    uf = UnionFind()
    uf.insert_objects(switches | sections - roots)
    for s in sorted(switches | sections - roots):
        neighbors = set()
        # union s with co-members of every node that contains s,
        # skipping nodes that touch a root section
        for n in [m for m in self.nodes if s in m]:
            if [t for t in n if t in roots] == []:
                for t in n:
                    neighbors.add(t)
        for t in sorted(neighbors - set([s])):
            uf.union(s, t)
    # group switches (plus their neighbors) by union-find representative,
    # numbering components in first-seen order
    i = 1
    comps = {}
    for s in self.switches:
        c = uf.find(s)
        if c not in comps:
            comps[c] = (i, set())
            i += 1
        comps[c][1].add(s)
        for t in self._find_neighbors(s):
            comps[c][1].add(t)
    assert sum([len(c[1]) for c in comps.values()]) == len(switches | sections - roots)
    comps = [comps[c][1] for c in sorted(comps, key=lambda c: comps[c][0])]
    # verify global switch order: every switch of a later component must
    # come after the highest-indexed switch of the previous component
    s = None
    for c in comps:
        switches = [t for t in c if t in self._switch_set]
        assert s is None or self.switches.index(s) < min([self.switches.index(t) for t in switches]), \
            'switches must be ordered by independent components'
        # track the max-index switch of this component for the next check
        s = switches[0]
        for t in switches:
            if self.switches.index(t) > self.switches.index(s):
                s = t
    # NOTE(review): negative ids appear to denote root/virtual sections
    # here -- confirm against the data format.
    assert len([t for s in self.sections if s < 0
                for t in self._find_neighbors(s) if t in self._switch_set]) == 0, \
        'root sections must be connected to a junction, not a switch'
    return comps
def _find_configs(self, n, comp, closed_switches):
n = self._zdd[n]
configs = []
if n.v > len(self.switches) or self.switches[n.v - 1] not in comp:
configs.append((closed_switches, n.n))
else:
if n.l <> 'B':
configs2 = self._find_configs(n.l, comp, closed_switches.copy())
configs.extend(configs2)
assert n.h <> 'B'
closed_switches.add(self.switches[n.v - 1])
configs.extend(self._find_configs(n.h, comp, closed_switches.copy()))
return configs
def _calc_component_loss(self, comp_roots, closed_switches):
loss = 0
for root, barrier in comp_roots:
loss += self._calc_loss(root, closed_switches, barrier)
return loss
def _rebuild(self, entries, comp):
    """Expand one component of the search-space graph.

    For every entry ZDD node, enumerate its switch configurations within
    `comp`, compute (and memoize) the component loss for each, and add a
    weighted edge to the search-space graph, keeping only the cheaper
    weight when an edge already exists.

    NOTE(review): indentation reconstructed from flattened source.
    """
    # find this component's feeder roots: sections adjacent to a
    # substation section, together with their barrier section sets
    comp_roots = []
    for s in comp:
        if s in self.sections:
            for t in self._find_neighbors(s):
                if t in self.sections and self.sections[t]['substation']:
                    assert not self.sections[s]['substation']
                    barrier = set()
                    for u in self._find_neighbors(s):
                        if u in self.sections:
                            barrier.add(u)
                    comp_roots.append((s, barrier))
                    break
    next_entries = set()
    loss_cache = {}
    for n in entries:
        for closed_switches, m in self._find_configs(n, comp, set()):
            next_entries.add(m)
            # memoize per closed-switch set; key is order-independent
            key = ','.join([str(s) for s in sorted(closed_switches)])
            if key in loss_cache:
                loss = loss_cache[key]
            else:
                loss = self._calc_component_loss(comp_roots, closed_switches)
                loss_cache[key] = loss
            # only overwrite an existing edge if the new loss is not worse
            if not(n in self.search_space.graph \
                   and m in self.search_space.graph[n] \
                   and loss > self.search_space.graph[n][m]['weight']):
                self.search_space.graph.add_edge(n, m, weight=loss,
                                                 config=closed_switches)
    return next_entries
def _cut_from_hit(self, hitting_set):
hitting_edge_set = [self._to_edge(sw) for sw in hitting_set]
for hitting_vertex_set in product(*hitting_edge_set):
yield hitting_vertex_set
# Operational limits used by the electric-constraint checks above.
# NOTE(review): units inferred from names -- confirm: current in amperes,
# voltages in volts; /sqrt(3) converts a line voltage to phase voltage.
MAX_CURRENT = 300  # max allowed phase current at a feeder root
SENDING_VOLTAGE = 6600 / sqrt(3)  # phase voltage at the sending end
VOLTAGE_RANGE = (6300 / sqrt(3), 6900 / sqrt(3))  # acceptable phase-voltage band
NUM_PHASES = 3  # three-phase network
|
{"hexsha": "9c459b3b9bae85c4a3a9072edb40e0c9cf816be3", "size": 21443, "ext": "py", "lang": "Python", "max_stars_repo_path": "dnet/network.py", "max_stars_repo_name": "ZiiCee/dnet", "max_stars_repo_head_hexsha": "2777ef76a6f74142652ef6ca1b89f6a45f2b57f6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-02-13T14:44:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-12T02:30:16.000Z", "max_issues_repo_path": "dnet/network.py", "max_issues_repo_name": "ZiiCee/dnet", "max_issues_repo_head_hexsha": "2777ef76a6f74142652ef6ca1b89f6a45f2b57f6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dnet/network.py", "max_forks_repo_name": "ZiiCee/dnet", "max_forks_repo_head_hexsha": "2777ef76a6f74142652ef6ca1b89f6a45f2b57f6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-02-13T14:44:28.000Z", "max_forks_repo_forks_event_max_datetime": "2019-02-13T14:44:28.000Z", "avg_line_length": 39.6358595194, "max_line_length": 107, "alphanum_fraction": 0.560322716, "include": true, "reason": "import networkx", "num_tokens": 4859}
|
# stdlib imports
from collections import OrderedDict
from xml.dom import minidom
import sys
from urllib.request import urlopen
import warnings
from datetime import datetime
import os.path
import json
# third party imports
import numpy as np
import pandas as pd
from obspy.io.quakeml.core import Unpickler
from obspy.clients.fdsn import Client
from impactutils.time.ancient_time import HistoricTime
from openpyxl import load_workbook
import requests
from scipy.special import erfc, erfcinv
# local imports
from libcomcat.classes import VersionOption
# constants
CATALOG_SEARCH_TEMPLATE = 'https://earthquake.usgs.gov/fdsnws/event/1/catalogs'
CONTRIBUTORS_SEARCH_TEMPLATE = 'https://earthquake.usgs.gov/fdsnws/event/1/contributors'
TIMEOUT = 60  # seconds; applied to every urlopen() call in this module
TIMEFMT1 = '%Y-%m-%dT%H:%M:%S'  # ISO timestamp, whole seconds
TIMEFMT2 = '%Y-%m-%dT%H:%M:%S.%f'  # ISO timestamp with fractional seconds
DATEFMT = '%Y-%m-%d'
COUNTRYFILE = 'ne_10m_admin_0_countries.shp'  # Natural Earth country borders
# where is the PAGER fatality model found?
FATALITY_URL = 'https://raw.githubusercontent.com/usgs/pager/master/losspager/data/fatality.xml'
ECONOMIC_URL = 'https://raw.githubusercontent.com/usgs/pager/master/losspager/data/economy.xml'
def get_phase_dataframe(detail, catalog='preferred'):
    """Return a Pandas DataFrame consisting of Phase arrival data.
    Args:
        detail (DetailEvent): DetailEvent object.
        catalog (str): Source network ('us','ak', etc. ,or 'preferred'.)
    Returns:
        DataFrame: Pandas DataFrame containing columns:
            - Channel: Network.Station.Channel.Location (NSCL) style station
                       description. ("--" indicates missing information)
            - Distance: Distance (kilometers) from epicenter to station.
            - Azimuth: Azimuth (degrees) from epicenter to station.
            - Phase: Name of the phase (Pn,Pg, etc.)
            - Arrival Time: Pick arrival time (UTC).
            - Status: "manual" or "automatic".
            - Residual: Arrival time residual.
            - Weight: Arrival weight.
            - Agency: Agency ID.
        Returns None when the quakeml content cannot be downloaded.
    Raises:
        AttributeError: If input DetailEvent does not have a phase-data product
            for the input catalog.
    """
    if catalog is None:
        catalog = 'preferred'
    columns = ['Channel', 'Distance', 'Azimuth',
               'Phase', 'Arrival Time', 'Status',
               'Residual', 'Weight', 'Agency']
    phasedata = detail.getProducts('phase-data', source=catalog)[0]
    quakeurl = phasedata.getContentURL('quakeml.xml')
    try:
        fh = urlopen(quakeurl, timeout=TIMEOUT)
        data = fh.read()
        fh.close()
    except Exception:
        # network/content failure is reported as None, matching callers
        return None
    unpickler = Unpickler()
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=UserWarning)
        # local renamed from `catalog` to avoid shadowing the parameter
        quake_catalog = unpickler.loads(data)
    catevent = quake_catalog.events[0]
    # Accumulate rows in a list and build the frame once:
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0.
    rows = []
    for pick in catevent.picks:
        phaserow = _get_phaserow(pick, catevent)
        if phaserow is None:
            continue
        rows.append(phaserow)
    df = pd.DataFrame(rows, columns=columns)
    return df
def _get_phaserow(pick, catevent):
    """Return a dictionary containing Phase data matching that found on ComCat event page.
    Example: https://earthquake.usgs.gov/earthquakes/eventpage/us2000ahv0#origin
    (Click on the Phases tab).
    Args:
        pick (Pick): Obspy Catalog Pick object.
        catevent (Event): Obspy Catalog Event object.
    Returns:
        dict: Containing fields:
            - Channel: NSCL-style channel string.
            - Distance: Distance (km) from station to origin.
            - Azimuth: Azimuth (deg.) from epicenter to station.
            - Phase: Name of the phase (Pn,Pg, etc.)
            - Arrival Time: Pick arrival time (UTC).
            - Status: "manual" or "automatic".
            - Residual: Arrival time residual.
            - Weight: Arrival weight.
            - Agency: Agency ID.
        Returns None when no arrival references this pick.
    """
    arrival = get_arrival(catevent, pick.resource_id)
    if arrival is None:
        return None
    # assemble the dataframe row directly from pick/arrival attributes
    return {'Channel': stringify(pick.waveform_id),
            'Distance': arrival.distance,
            'Azimuth': arrival.azimuth,
            'Phase': arrival.phase,
            'Arrival Time': pick.time.datetime,
            'Status': pick.evaluation_mode,
            'Residual': arrival.time_residual,
            'Weight': arrival.time_weight,
            'Agency': arrival.creation_info.agency_id}
def stringify(waveform):
    """Turn waveform object into NSCL-style station code.
    Args:
        waveform (Waveform): Obspy Catalog Waveform object.
    Returns:
        str: NSCL-style string representation; missing codes become "--".
    """
    parts = []
    for attr in ('network_code', 'station_code', 'channel_code', 'location_code'):
        value = getattr(waveform, attr)
        parts.append('--' if value is None else value)
    return '%s.%s.%s.%s' % tuple(parts)
def get_arrival(event, pickid):
    """Find the arrival object in a Catalog Event corresponding to input pick id.
    Args:
        event (Event): Obspy Catalog Event object.
        pickid (str): Pick ID string.
    Returns:
        Arrival: Obspy Catalog arrival object, or None when `pickid` is
        None or no origin references it.
    """
    # Fix: the None guard used to sit AFTER the search loop, where it was
    # effectively dead code; check it up front instead.
    if pickid is None:
        return None
    for origin in event.origins:
        idlist = [arr.pick_id for arr in origin.arrivals]
        if pickid in idlist:
            return origin.arrivals[idlist.index(pickid)]
    # no origin references this pick
    return None
def get_magnitude_data_frame(detail, catalog, magtype):
    """Return a Pandas DataFrame consisting of magnitude data.
    Args:
        detail (DetailEvent): DetailEvent object.
        catalog (str): Source catalog ('us','ak', etc. ,or 'preferred'.)
        magtype (str): Magnitude type (mb, ml, etc.)
    Returns:
        DataFrame: Pandas DataFrame containing columns:
            - Channel: Network.Station.Channel.Location (NSCL) style station
                       description. ("--" indicates missing information)
            - Type: Magnitude type.
            - Amplitude: Amplitude of seismic wave at each station (m).
            - Period: Period of seismic wave at each station (s).
            - Status: "manual" or "automatic".
            - Magnitude: Locally determined magnitude.
            - Weight: Magnitude weight.
        Returns None when the quakeml content cannot be downloaded.
    Raises:
        AttributeError if input DetailEvent does not have a phase-data product
            for the input catalog.
    """
    # Fix: original had a duplicated assignment (`columns = columns = [...]`).
    columns = ['Channel', 'Type', 'Amplitude',
               'Period', 'Status', 'Magnitude',
               'Weight']
    phasedata = detail.getProducts('phase-data', source=catalog)[0]
    quakeurl = phasedata.getContentURL('quakeml.xml')
    try:
        fh = urlopen(quakeurl, timeout=TIMEOUT)
        data = fh.read()
        fh.close()
    except Exception:
        return None
    unpickler = Unpickler()
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=UserWarning)
        # renamed to avoid shadowing the `catalog` parameter
        quake_catalog = unpickler.loads(data)
    catevent = quake_catalog.events[0]  # match this to input catalog
    # Accumulate rows in a list: DataFrame.append was removed in pandas 2.0,
    # and building from a list also makes the no-match case a valid empty
    # frame (the original raised KeyError on df[columns] when empty).
    rows = []
    for magnitude in catevent.magnitudes:
        if magnitude.magnitude_type != magtype:
            continue
        for contribution in magnitude.station_magnitude_contributions:
            smag = contribution.station_magnitude_id.get_referred_object()
            amp = smag.amplitude_id.get_referred_object()
            waveid = amp.waveform_id
            row = {}
            row['Channel'] = '%s.%s.%s.%s' % (waveid.network_code,
                                              waveid.station_code,
                                              waveid.channel_code,
                                              waveid.location_code)
            row['Type'] = smag.station_magnitude_type
            row['Amplitude'] = amp.generic_amplitude
            row['Period'] = amp.period
            row['Status'] = amp.evaluation_mode
            row['Magnitude'] = smag.mag
            row['Weight'] = contribution.weight
            rows.append(row)
    df = pd.DataFrame(rows, columns=columns)
    return df
def get_detail_data_frame(events, get_all_magnitudes=False,
                          get_tensors='preferred',
                          get_focals='preferred',
                          get_moment_supplement=False,
                          verbose=False):
    """Extract the detailed event information into a pandas DataFrame.
    Args:
        events (list): List of SummaryEvent objects as returned by search() function.
        get_all_magnitudes (bool): Boolean indicating whether to return all
            magnitudes in results for each event.
        get_tensors (str): String option of 'none', 'preferred', or 'all'.
        get_focals (str): String option of 'none', 'preferred', or 'all'.
        get_moment_supplement (bool): Indicates whether derived origin and
            double-couple/source time information should be extracted
            (when available.)
        verbose (bool): Write progress messages to stderr.
    Returns:
        DataFrame: Pandas DataFrame with one row per event, and all
            relevant information in columns.
    """
    # Fix: empty input used to hit log10(0) and then a KeyError when
    # reindexing columns on an empty frame.
    if not len(events):
        return pd.DataFrame()
    elist = []
    ic = 0
    # report roughly every 10% of events, clamped to [1, 100]; the
    # original could produce inc == 0.1 for small inputs, making the
    # progress modulo below meaningless.
    inc = max(1, min(100, int(np.power(10, np.floor(np.log10(len(events))) - 1))))
    if verbose:
        sys.stderr.write(
            'Getting detailed event info - reporting every %i events.\n' % inc)
    for event in events:
        try:
            detail = event.getDetailEvent()
        except Exception:
            print('Failed to get detailed version of event %s' % event.id)
            continue
        edict = detail.toDict(get_all_magnitudes=get_all_magnitudes,
                              get_tensors=get_tensors,
                              get_moment_supplement=get_moment_supplement,
                              get_focals=get_focals)
        elist.append(edict)
        if ic % inc == 0 and verbose:
            msg = 'Getting detailed information for %s, %i of %i events.\n'
            sys.stderr.write(msg % (event.id, ic, len(events)))
        ic += 1
    if not elist:
        # every fetch failed; avoid KeyError on the column reindex below
        return pd.DataFrame()
    df = pd.DataFrame(elist)
    # put the canonical identification columns first
    first_columns = ['id', 'time', 'latitude',
                     'longitude', 'depth', 'magnitude']
    all_columns = df.columns
    rem_columns = [col for col in all_columns if col not in first_columns]
    new_columns = first_columns + rem_columns
    df = df[new_columns]
    return df
def get_summary_data_frame(events):
    """Take the results of a search and extract the summary event information in a pandas DataFrame.
    Args:
        events (list): List of SummaryEvent objects as returned by search()
            function.
    Returns:
        DataFrame: Pandas DataFrame with one row per event, and columns:
            - id (string) Authoritative ComCat event ID.
            - time (datetime) Authoritative event origin time.
            - latitude (float) Authoritative event latitude.
            - longitude (float) Authoritative event longitude.
            - depth (float) Authoritative event depth.
            - magnitude (float) Authoritative event magnitude.
    """
    rows = [event.toDict() for event in events]
    return pd.DataFrame(rows)
def get_pager_data_frame(detail, get_losses=False,
                         get_country_exposures=False,
                         get_all_versions=False):
    """Extract PAGER results for an event as a DataFrame.
    Args:
        detail (DetailEvent): Detailed information for a given event.
        get_losses (bool): Indicates whether to retrieve predicted fatalities
            and dollar losses and uncertainties.
        get_country_exposures (bool): Indicates whether to retrieve per-country
            shaking exposures.
        get_all_versions (bool): Indicates whether to retrieve PAGER results for
            all versions.  NOTE(review): this flag is not referenced in the
            body (all versions are always fetched) -- preserved as-is.
    Returns:
        (DataFrame): DataFrame whose columns will vary depending on input:
            (all):
            id - ComCat Event ID
            location - Location string for event.
            time - Date/time of event.
            latitude - Event latitude (dd)
            longitude - Event longitude (dd)
            depth - Event depth (km)
            magnitude - Event magnitude.
            mmi1 - Estimated population exposed to shaking at MMI intensity 1.
            ...
            mmi10 - Estimated population exposed to shaking at MMI intensity 10.
        Returns None when the event has no losspager product.
    """
    default_columns = ['id', 'location', 'time',
                       'latitude', 'longitude',
                       'depth', 'magnitude', 'country',
                       'pager_version',
                       'mmi1', 'mmi2',
                       'mmi3', 'mmi4',
                       'mmi5', 'mmi6',
                       'mmi7', 'mmi8',
                       'mmi9', 'mmi10']
    if not detail.hasProduct('losspager'):
        return None
    columns = default_columns
    if get_losses:
        columns = default_columns + ['predicted_fatalities',
                                     'fatality_sigma',
                                     'predicted_dollars',
                                     'dollars_sigma']
    # Accumulate plain dict rows; DataFrame.append was removed in pandas 2.0.
    rows = []
    for pager in detail.getProducts('losspager', version=VersionOption.ALL):
        total_row = {}
        default = {}
        default['id'] = detail.id
        default['location'] = detail.location
        default['time'] = detail.time
        default['latitude'] = detail.latitude
        default['longitude'] = detail.longitude
        default['depth'] = detail.depth
        default['magnitude'] = detail.magnitude
        default['pager_version'] = pager.version
        total_row.update(default)
        total_row['country'] = 'Total'
        country_rows = {}
        if len(pager.getContentsMatching('exposures.json')):
            total_row, country_rows = _get_json_exposure(total_row,
                                                         pager,
                                                         get_country_exposures,
                                                         default)
            if get_losses:
                loss_json = pager.getContentBytes(
                    'losses.json')[0].decode('utf-8')
                jdict = json.loads(loss_json)
                empfat = jdict['empirical_fatality']
                # get the list of country codes
                ccodes = [cfat['country_code']
                          for cfat in empfat['country_fatalities']]
                gfat, geco = get_g_values(ccodes)
                # total fatalities and their sigma
                total_row['predicted_fatalities'] = empfat['total_fatalities']
                gfat_total, geco_total = _get_total_g(pager)
                total_row['fatality_sigma'] = get_sigma(
                    empfat['total_fatalities'], gfat_total)
                # total economic losses and their sigma
                emploss = jdict['empirical_economic']
                total_row['predicted_dollars'] = emploss['total_dollars']
                total_row['dollars_sigma'] = get_sigma(
                    emploss['total_dollars'], geco_total)
                if get_country_exposures:
                    for country_fat in empfat['country_fatalities']:
                        fat = country_fat['fatalities']
                        ccode = country_fat['country_code']
                        # in at least one case (not sure why) PAGER results
                        # have fatalities per country but not exposures.
                        if ccode not in country_rows:
                            country_rows[ccode] = {}
                            country_rows[ccode].update(default)
                            country_rows[ccode]['country'] = ccode
                            for i in range(1, 11):
                                country_rows[ccode]['mmi%i' % i] = np.nan
                        country_rows[ccode]['predicted_fatalities'] = fat
                        country_rows[ccode]['fatality_sigma'] = get_sigma(
                            fat, gfat[ccode])
                    for country_eco in emploss['country_dollars']:
                        eco = country_eco['us_dollars']
                        ccode = country_eco['country_code']
                        country_rows[ccode]['predicted_dollars'] = eco
                        # BUG FIX: dollar sigma must use the ECONOMIC G value;
                        # the original read gfat[ccode] (fatality G) here.
                        country_rows[ccode]['dollars_sigma'] = get_sigma(
                            eco, geco[ccode])
        else:  # event does not have JSON content
            total_row = _get_xml_exposure(total_row, pager, get_losses)
        rows.append(total_row)
        for ccode, country_row in country_rows.items():
            rows.append(country_row)
    df = pd.DataFrame(rows, columns=columns)
    # countries with zero fatalities don't report, so fill in with zeros
    if get_losses:
        df['predicted_fatalities'] = df['predicted_fatalities'].fillna(value=0)
        df['fatality_sigma'] = df['fatality_sigma'].fillna(value=0)
        df['predicted_dollars'] = df['predicted_dollars'].fillna(value=0)
        df['dollars_sigma'] = df['dollars_sigma'].fillna(value=0)
    return df
def _invphi(input):
"""Inverse phi function.
Args:
input (float or ndarray): Float (scalar or array) value.
Returns:
float: invphi(input)
"""
return -1 * np.sqrt(2) * erfcinv(input/0.5)
def _get_total_g(pager):
"""Retrieve the G norm value for the aggregated losses.
Args:
pager (Product): PAGER ComCat Product.
Returns:
tuple: (Aggregated Fatality G value, Aggregated Economic G value)
"""
alert_json = pager.getContentBytes(
'alerts.json')[0].decode('utf-8')
jdict = json.loads(alert_json)
gfat = jdict['fatality']['gvalue']
geco = jdict['economic']['gvalue']
return (gfat, geco)
def _get_xml_exposure(total_row, pager, get_losses):
"""Retrieve aggregated exposure from events prior to new PAGER release.
Args:
total_row (dict): Dictionary to be filled in with exposures.
pager (Product): PAGER ComCat Product.
get_losses (bool): If losses are desired, fill in values with NaN.
Returns:
dict: Filled in total_row.
"""
exposure_xml = pager.getContentBytes('pager.xml')[0].decode('utf-8')
root = minidom.parseString(exposure_xml)
pager = root.getElementsByTagName('pager')[0]
if get_losses:
total_row['predicted_fatalities'] = np.nan
total_row['predicted_dollars'] = np.nan
for node in pager.childNodes:
if node.localName != 'exposure':
continue
mmistr = 'mmi%i' % (int(float(node.getAttribute('dmax'))))
total_row[mmistr] = int(node.getAttribute('exposure'))
total_row['ccode'] = 'Total'
root.unlink()
return total_row
def _get_json_exposure(total_row, pager, get_country_exposures, default):
"""Retrieve aggregated/country exposures from events after new PAGER release.
Args:
total_row (dict): Dictionary to be filled in with exposures.
pager (Product): PAGER ComCat Product.
get_country_exposures (bool): Extract exposures for each affected country.
Returns:
tuple: (total_row, country_rows)
"""
exposure_json = pager.getContentBytes('exposures.json')[0].decode('utf-8')
jdict = json.loads(exposure_json)
exp = jdict['population_exposure']['aggregated_exposure']
total_row['mmi1'] = exp[0]
total_row['mmi2'] = exp[1]
total_row['mmi3'] = exp[2]
total_row['mmi4'] = exp[3]
total_row['mmi5'] = exp[4]
total_row['mmi6'] = exp[5]
total_row['mmi7'] = exp[6]
total_row['mmi8'] = exp[7]
total_row['mmi9'] = exp[8]
total_row['mmi10'] = exp[9]
country_rows = {}
if get_country_exposures:
for country in jdict['population_exposure']['country_exposures']:
country_row = {}
ccode = country['country_code']
country_row.update(default)
country_row['country'] = ccode
exp = country['exposure']
country_row['mmi1'] = exp[0]
country_row['mmi2'] = exp[1]
country_row['mmi3'] = exp[2]
country_row['mmi4'] = exp[3]
country_row['mmi5'] = exp[4]
country_row['mmi6'] = exp[5]
country_row['mmi7'] = exp[6]
country_row['mmi8'] = exp[7]
country_row['mmi9'] = exp[8]
country_row['mmi10'] = exp[9]
country_rows[ccode] = country_row
return (total_row, country_rows)
def get_sigma(loss, gvalue):
    """Calculate sigma value for a given loss value and G statistic.
    Args:
        loss (float): Fatality or economic loss value.
        gvalue (float): G statistic for model.
    Returns:
        float: One sigma value.
    """
    # a zero loss is floored at 0.5 so log(loss) is defined
    effective_loss = 0.5 if loss == 0 else loss
    one_sigma_percent = 0.6827
    return round(np.exp(gvalue * _invphi(one_sigma_percent)
                        + np.log(effective_loss)))
def _parse_g_models(xml_text, ccodes):
    """Parse a PAGER model XML document into {country code: G value}."""
    root = minidom.parseString(xml_text)
    models = root.getElementsByTagName(
        'models')[0].getElementsByTagName('model')
    gvalues = {}
    for model in models:
        ccode = model.getAttribute('ccode')
        if ccode in ccodes:
            gvalues[ccode] = float(model.getAttribute('evalnormvalue'))
    root.unlink()
    return gvalues


def get_g_values(ccodes):
    """Retrieve G values for given country codes from PAGER repository.
    Args:
        ccodes (list): Sequence of two-letter country codes.
    Returns:
        tuple: (Dictionary of fatality G values, Dictionary of economic G values)
    """
    # the two model files share one schema; parse with a common helper
    res = requests.get(FATALITY_URL)
    fatmodels = _parse_g_models(res.text, ccodes)
    res.close()
    res = requests.get(ECONOMIC_URL)
    ecomodels = _parse_g_models(res.text, ccodes)
    res.close()  # fix: the second response was never closed
    return (fatmodels, ecomodels)
def get_impact_data_frame(detail, effect_types=None, loss_types=None,
                          loss_extents=None, all_sources=False, include_contributing=False,
                          source='preferred', version=VersionOption.PREFERRED):
    """Return a Pandas DataFrame consisting of impact data.
    Args:
        detail (DetailEvent): DetailEvent object.
        effect_types (list): List of requested effect types. Default is None.
        loss_types (list): List of requested loss types. Default is None.
        loss_extents (list): List of requested loss extents. Default is None.
        all_sources (bool): Include all sources including those that are
            not the most recent or authoritative. Default is False.
        include_contributing (bool): Include contributing features, not
            just the total summary. Default is False.
        source (str): Default is 'preferred'. Can be any one of:
            - 'preferred' Get version(s) of products from preferred source.
            - 'all' Get version(s) of products from all sources.
            - Any valid source network for this type of product ('us','ak',etc.)
        version (VersionOption): Product version. Default is VersionOption.PREFERRED.
    Returns:
        dataframe: Dataframe of the impact information.
    Raises:
        Exception: If the impact.json file cannot be read, or when a
            requested effect/loss/extent value is not valid.
    """
    # Define spreadsheet columns and equivalent geojson keys
    columns = ['Source Network', 'ID', 'EventID', 'Time',
               'Magnitude', 'EffectType', 'LossType',
               'LossExtent', 'LossValue', 'LossMin',
               'LossMax', 'CollectionTime',
               'CollectionAuthor', 'CollectionSource',
               'Authoritative', 'Lat', 'Lon',
               'LossQuantifier', 'Comment']
    geojson_equivalent = {'EffectType': 'effect-type',
                          'LossType': 'loss-type',
                          'LossExtent': 'loss-extent',
                          'LossValue': 'loss-value',
                          'LossMin': 'loss-min',
                          'LossMax': 'loss-max',
                          'CollectionTime': 'collection-time',
                          'CollectionAuthor': 'collection-author',
                          'CollectionSource': 'collection-source',
                          'Authoritative': 'authoritative',
                          'LossQuantifier': 'loss-quantifier',
                          'Comment': 'comment'
                          }
    # Define valid parameters
    valid_effects = ['all', 'coal bump', 'dam failure', 'faulting', 'fire',
                     'geyser activity', 'ground cracking', 'landslide', 'lights', 'liquefaction',
                     'mine blast', 'mine collapse', 'odors', 'other', 'rockburst',
                     'sandblows', 'seiche', 'shaking', 'subsidence', 'tsunami',
                     'undifferentiated', 'uplift', 'volcanic activity']
    valid_loss_extents = ['damaged', 'damaged or destroyed', 'destroyed',
                          'displaced', 'injured', 'killed', 'missing']
    valid_loss_types = ['bridges', 'buildings', 'dollars', 'electricity',
                        'livestock', 'people', 'railroads', 'roads', 'telecommunications', 'water']
    # Convert scalar arguments to lists
    if isinstance(effect_types, str):
        effect_types = [effect_types]
    if isinstance(loss_types, str):
        loss_types = [loss_types]
    if isinstance(loss_extents, str):
        loss_extents = [loss_extents]
    # Set defaults if no user input and validate options
    if effect_types is None:
        effect_types = valid_effects
    else:
        for effect in effect_types:
            if effect not in valid_effects:
                raise Exception('%r is not a valid effect type.' % effect)
    if loss_types is None:
        loss_types = valid_loss_types
        loss_types += ['']
    else:
        # BUG FIX: validate the user-supplied loss_types; the original
        # iterated valid_loss_types here, so bad input never raised.
        for loss in loss_types:
            if loss not in valid_loss_types:
                raise Exception('%r is not a valid loss type.' % loss)
    if loss_extents is None:
        loss_extents = valid_loss_extents
        loss_extents += ['']
    else:
        for extent in loss_extents:
            if extent not in valid_loss_extents:
                raise Exception('%r is not a valid loss extent.' % extent)
    # Get the product(s)
    impacts = detail.getProducts('impact', source=source, version=version)
    table = OrderedDict()
    for col in columns:
        table[col] = []
    # Each product append to the OrderedDict
    for impact in impacts:
        # Attempt to read the json file
        impact_url = impact.getContentURL('impact.json')
        # Look for previous naming scheme
        if impact_url is None:
            impact_url = impact.getContentURL('.geojson')
        try:
            fh = urlopen(impact_url, timeout=TIMEOUT)
            file_text = fh.read().decode("utf-8")
            impact_data = json.loads(file_text)
            fh.close()
        except Exception:
            raise Exception('Unable to read impact.json for %s '
                            'which includes the file(s): %r' % (impact, impact.contents))
        features = impact_data['features']
        main_properties = {}
        # Find the summary/total feature (flagged by 'impact-totals') and
        # emit one table row per impact-total entry.
        for feature in features:
            if 'impact-totals' in feature['properties']:
                main_properties['Time'] = feature['properties']['time']
                main_properties['ID'] = feature['properties']['id']
                main_properties['Source Network'] = feature['properties']['eventsource']
                main_properties['EventID'] = main_properties['Source Network'] + main_properties['ID']
                main_properties['Magnitude'] = feature['properties']['magnitude']
                for impact_total in feature['properties']['impact-totals']:
                    for column in columns:
                        # for totals the lat/lon fields are always empty
                        if column == 'Lat':
                            table['Lat'] += ['']
                        elif column == 'Lon':
                            table['Lon'] += ['']
                        elif column not in geojson_equivalent:
                            table[column] += [main_properties[column]]
                        else:
                            key = geojson_equivalent[column]
                            if key in impact_total:
                                table[column] += [impact_total[key]]
                            else:
                                table[column] += ['']
                break
        # drop the totals feature so only contributing features remain
        features.remove(feature)
        # Get contributing feature lines
        if include_contributing:
            for feature in features:
                for column in columns:
                    if column == 'Lat':
                        lat = feature['geometry']['coordinates'][1]
                        table['Lat'] += [lat]
                    elif column == 'Lon':
                        lon = feature['geometry']['coordinates'][0]
                        table['Lon'] += [lon]
                    elif column not in geojson_equivalent:
                        table[column] += [main_properties[column]]
                    else:
                        key = geojson_equivalent[column]
                        if key in feature['properties']:
                            table[column] += [feature['properties'][key]]
                        else:
                            table[column] += ['']
    # Create the dataframe and filter to the requested categories
    df = pd.DataFrame.from_dict(table)
    df = df[(df.LossExtent.isin(loss_extents))]
    df = df[(df.LossType.isin(loss_types))]
    df = df[(df.EffectType.isin(effect_types))]
    # Keep only authoritative / most recent sources unless asked otherwise
    if not all_sources:
        df = df[(df.Authoritative == 1)]
    if not all_sources and len(df) > 1:
        df = _get_most_recent(df, effect_types, loss_extents, loss_types)
    return df
def _get_most_recent(df, effect_types, loss_extents, loss_types):
"""Get the most recent (most "trusted") source.
Args:
effect_types (list): List of requested effect types.
loss_types (list): List of requested loss types.
loss_extents (list): List of requested loss extents.
Returns:
dataframe: Dataframe without older sources.
"""
drop_list = []
for effect in effect_types:
for loss in loss_types:
for extent in loss_extents:
boolean_df = df[(df.EffectType == effect) & (df.LossType == loss) & (df.LossExtent == extent)]
if len(boolean_df) > 0:
max_date = max(boolean_df['CollectionTime'])
idx = df.index[(df.EffectType == effect) & (df.LossType == loss) & (df.LossExtent == extent) & (df.CollectionTime != max_date)].tolist()
drop_list += idx
df = df.drop(set(drop_list))
return df
|
{"hexsha": "eae627e1b33a0a6378837947c05a364fb23acd01", "size": 32916, "ext": "py", "lang": "Python", "max_stars_repo_path": "libcomcat/dataframes.py", "max_stars_repo_name": "NLTGit/libcomcat", "max_stars_repo_head_hexsha": "82b5517bd7e5461a8effd6e50b18ada05a912b71", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "libcomcat/dataframes.py", "max_issues_repo_name": "NLTGit/libcomcat", "max_issues_repo_head_hexsha": "82b5517bd7e5461a8effd6e50b18ada05a912b71", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "libcomcat/dataframes.py", "max_forks_repo_name": "NLTGit/libcomcat", "max_forks_repo_head_hexsha": "82b5517bd7e5461a8effd6e50b18ada05a912b71", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.4373464373, "max_line_length": 156, "alphanum_fraction": 0.5728521084, "include": true, "reason": "import numpy,from scipy", "num_tokens": 7124}
|
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 27 19:14:01 2021
Figure 3.2: The comparison of the shrinkage parameter, MSE and Riemannian dist-
ance for Ledoit-Wolf shrinkage and grid search.
"""
from matplotlib import pyplot as plt
import numpy as np
from pyriemann.utils.distance import distance_riemann
from utilities import estimate_covariance, generate_covariance, MSE, shrinkage_regularization
# Global matplotlib styling applied to every figure in this script:
# enlarge legend titles and tick labels for readability at dpi=200.
plt.rcParams['legend.title_fontsize'] = 'x-large'
plt.rc('xtick', labelsize='x-large')
plt.rc('ytick', labelsize='x-large')
def ledoit_wolf_parameter(x):
    """
    Compute the Ledoit-Wolf shrinkage parameter for the given data.

    Thin wrapper around ``shrinkage_regularization`` that discards the
    regularized covariance matrix and keeps only the shrinkage coefficient.

    Parameters
    ----------
    x : ndarray of shape (n_samples, n_channels)
        Input array.

    Returns
    -------
    float
        The shrinkage parameter; the range of the return value is [0, 1].
    """
    _, rho = shrinkage_regularization(x, location = 'tangent space', z_score = True)
    return rho
def grid_search_parameter(x, target):
    """
    Find the MSE-minimizing shrinkage parameter for ``x`` via grid search.

    Parameters
    ----------
    x : ndarray of shape (n_samples, n_channels)
        Input array.
    target : ndarray of shape (n_channels, n_channels)
        The true covariance matrix (population).

    Returns
    -------
    float
        The parameter that minimizes the metric.
    """
    sample_cov = estimate_covariance(x)
    return grid_search(MSE, 0.01, sample_cov, target)
def grid_search(metric, stepsize, estimator, target):
    """
    Find the shrinkage parameter that minimizes the metric given the
    estimator and the target.

    The candidate grid covers [0, 1] in steps of ``stepsize``.

    Parameters
    ----------
    metric : function
        The metric that is used for the grid search.
    stepsize : float
        The stepsize in the grid; 1/stepsize should be (close to) an
        integer so the grid ends exactly at 1.
    estimator : ndarray of shape (n_channels, n_channels)
        The estimator of the covariance matrix (sample).
    target : ndarray of shape (n_channels, n_channels)
        The true covariance matrix (population).

    Returns
    -------
    float
        The parameter that minimizes the metric.
    """
    # round() guards against floating-point truncation: if 1/stepsize
    # evaluates to e.g. 99.999..., int(1/stepsize + 1) would yield one grid
    # point too few and silently change the grid spacing.
    n_points = int(round(1.0 / stepsize)) + 1
    grid = np.linspace(0, 1, n_points)
    scores = [metric(shrink(estimator, param), target) for param in grid]
    return np.argmin(scores) * stepsize
def shrink(scm, rho):
    """
    Shrink the sample covariance matrix towards a scaled identity target,
    as in the Ledoit-Wolf estimator.

    Parameters
    ----------
    scm : ndarray of shape (n_channels, n_channels)
        The estimated sample covariance matrix.
    rho : float
        The shrinkage parameter.

    Returns
    -------
    ndarray of shape (n_channels, n_channels)
        The improved estimator (1 - rho) * scm + rho * nu * I, where nu is
        the average eigenvalue trace(scm) / n_channels.
    """
    n_channels = scm.shape[0]
    nu = np.trace(scm) / n_channels
    identity_target = nu * np.identity(n_channels)
    return (1 - rho) * scm + rho * identity_target
# Data generation
def generate_epochs(n_epochs, n_samples, n_channels, target):
    """
    Sample ``n_epochs`` epochs from a zero-mean multivariate normal
    distribution with covariance ``target``.

    Parameters
    ----------
    n_epochs : int
        The number of epochs that are sampled.
    n_samples : int
        The number of samples (timepoints) per epoch.
    n_channels : int
        The number of channels per epoch.
    target : ndarray of shape (n_channels, n_channels)
        The generated population covariance matrix.

    Returns
    -------
    list of ndarray of shape (n_samples, n_channels)
        The simulated data, one array per epoch.
    """
    mu = np.zeros(n_channels)
    epochs = []
    for _ in range(n_epochs):
        epochs.append(np.random.multivariate_normal(mu, target, n_samples))
    return epochs
# Observation generation
def get_summary_statistics(technique, iteration, **kwargs):
    """
    Compute the mean and the Standard Error of the Mean (SEM) of the
    values that ``technique`` produces over a single iteration.

    Parameters
    ----------
    technique : function
        The technique for which the summary statistics are calculated.
    iteration : [ndarray of shape (n_samples, n_channels)]
        A list with the simulated data.
    **kwargs : unknown
        Extra arguments forwarded to ``technique`` for every epoch.

    Returns
    -------
    mean : float
        The mean value over the iteration given the technique.
    sem : float
        The SEM over the iteration given the technique.
    """
    values = [technique(epoch, **kwargs) for epoch in iteration]
    mean = np.mean(values)
    sem = np.std(values) / np.sqrt(len(iteration))
    return mean, sem
def get_summary_statistics_metric(metric, iterations):
    """
    Compute the mean and the Standard Error of the Mean (SEM) of ``metric``
    for three covariance estimators over every iteration: the Ledoit-Wolf
    shrinkage estimate (tangent-space variant with z-scoring), the
    grid-search shrinkage estimate, and the plain sample covariance matrix.

    NOTE(review): the i-th iteration is scored against the module-level
    ``targets[i]``; ``targets`` must be defined before this is called.

    Parameters
    ----------
    metric : function
        The metric for which the summary statistics are calculated.
    iterations : [[ndarray of shape (n_samples, n_channels)]]
        The simulated data, one list of epochs per iteration.

    Returns
    -------
    mean_lw, mean_grid, mean_scm : ndarray
        Per-iteration mean scores for the Ledoit-Wolf, grid-search and
        sample-covariance estimators, respectively.
    sem_lw, sem_grid, sem_scm : ndarray
        Per-iteration SEMs for the same three estimators.
    """
    n_iter = len(iterations)
    mean_lw, sem_lw = np.zeros(n_iter), np.zeros(n_iter)
    mean_grid, sem_grid = np.zeros(n_iter), np.zeros(n_iter)
    mean_scm, sem_scm = np.zeros(n_iter), np.zeros(n_iter)
    for i, iteration in enumerate(iterations):
        n_ep = len(iteration)
        scores_lw = np.zeros(n_ep)
        scores_grid = np.zeros(n_ep)
        scores_scm = np.zeros(n_ep)
        for j, data in enumerate(iteration):
            scm = estimate_covariance(data)
            # The shrinkage value returned alongside the matrix is unused here.
            lw_matrix, _ = shrinkage_regularization(data.T, location = 'tangent space', z_score = True)
            scores_lw[j] = metric(lw_matrix, targets[i])
            grid_matrix = shrink(scm, grid_search_parameter(data, targets[i]))
            scores_grid[j] = metric(grid_matrix, targets[i])
            scores_scm[j] = metric(scm, targets[i])
        root_n = np.sqrt(n_ep)
        mean_lw[i], sem_lw[i] = np.mean(scores_lw), np.std(scores_lw) / root_n
        mean_grid[i], sem_grid[i] = np.mean(scores_grid), np.std(scores_grid) / root_n
        mean_scm[i], sem_scm[i] = np.mean(scores_scm), np.std(scores_scm) / root_n
    return mean_lw, mean_grid, mean_scm, sem_lw, sem_grid, sem_scm
# ---- Experiment hyperparameters ----
np.random.seed(42)  # fixed seed so the figure is reproducible
n_iterations = 20   # number of simulated target covariance matrices
n_epochs = 100      # epochs sampled per target
n_channels = 31     # matrix dimension (EEG channels)
n_samples = 100     # timepoints per epoch
# Generate the data: one random target covariance per iteration, plus the
# epochs simulated from it.
targets = [generate_covariance(n_channels) for i in range(n_iterations)]
iterations = [generate_epochs(n_epochs, n_samples, n_channels, t) for t in targets]
# Calculate the summary statistics for the shrinkage parameter values
summary_statistics_lw = [get_summary_statistics(ledoit_wolf_parameter, i) for i in iterations]
summary_statistics_grid = [get_summary_statistics(grid_search_parameter, iterations[i], target = targets[i]) for i in range(len(iterations))]
# Unwrap the lists of (mean, sem) pairs into parallel tuples
mean_lw, sem_lw = zip(*summary_statistics_lw)
mean_grid, sem_grid = zip(*summary_statistics_grid)
# Plot the observations as grouped bars with SEM error bars.
# NOTE(review): the hard-coded 20 in np.linspace below must equal
# n_iterations — confirm if the hyperparameters ever change.
plt.figure(figsize=(20, 4), dpi = 200)
width = 0.4
plt.bar(np.linspace(1,len(iterations), 20) - width/2, mean_lw, yerr = sem_lw, label = 'Ledoit-Wolf', capsize = 3, width = width)
plt.bar(np.linspace(1,len(iterations), 20) + width/2, mean_grid, yerr = sem_grid, label = 'Shrinkage parameter with minimum MSE', capsize = 3, width = width)
plt.ylim(0,max([max(mean_lw), max(mean_grid)]) * 1.4)
plt.legend(title = 'Method', loc = 'upper right', fontsize = 'x-large')
plt.ylabel('Mean shrinkage parameter value', fontsize = 'xx-large')
plt.xlabel('Target covariance index', fontsize = 'xx-large')
plt.title('The mean shrinkage parameter per simulated target covariance matrix\n{} features & {} samples'.format(n_channels, n_samples), fontsize = 'xx-large')
plt.show()
# Calculate the summary statistics for the MSE
mean_lw, mean_grid, mean_scm , sem_lw, sem_grid, sem_scm = get_summary_statistics_metric(MSE, iterations)
# Plot the observations (three bars per target: LW, SCM, grid search)
plt.figure(figsize=(20, 4), dpi = 200)
width = 0.3
plt.bar(np.linspace(1,len(iterations), 20), mean_lw, yerr = sem_lw, label = 'Ledoit-Wolf', capsize = 2, width = width, align='center')
plt.bar(np.linspace(1,len(iterations), 20)- width, mean_scm, yerr = sem_scm, label = 'Sample covariance matrix', capsize = 2, width = width, align='center', color='green')
plt.bar(np.linspace(1,len(iterations), 20)+ width, mean_grid, yerr = sem_grid, label = 'Shrinkage parameter with minimum MSE', capsize = 2, width = width,align='center')
plt.ylim(0,max([max(mean_lw), max(mean_grid), max(mean_scm)]) * 1.4)
plt.legend(title = 'Method',loc = 'upper right', fontsize = 'x-large')
plt.ylabel('Mean Squared Error', fontsize = 'xx-large')
plt.xlabel('Target covariance index', fontsize = 'xx-large')
plt.title('The mean MSE per simulated target covariance matrix\n{} features & {} samples'.format(n_channels, n_samples), fontsize = 'xx-large')
plt.show()
# Calculate the summary statistics for the Riemannian distance
mean_lw, mean_grid, mean_scm , sem_lw, sem_grid, sem_scm = get_summary_statistics_metric(distance_riemann, iterations)
# Plot the observations (same layout as the MSE figure)
plt.figure(figsize=(20, 4), dpi = 200)
width = 0.3
plt.bar(np.linspace(1,len(iterations), 20), mean_lw, yerr = sem_lw, label = 'Ledoit-Wolf', capsize = 2, width = width, align='center')
plt.bar(np.linspace(1,len(iterations), 20)- width, mean_scm, yerr = sem_scm, label = 'Sample covariance matrix', capsize = 2, width = width, align='center', color='green')
plt.bar(np.linspace(1,len(iterations), 20)+ width, mean_grid, yerr = sem_grid, label = 'Shrinkage parameter with minimum MSE', capsize = 2, width = width,align='center')
plt.ylim(0,max([max(mean_lw), max(mean_grid), max(mean_scm)]) * 1.1)
plt.legend(title = 'Method', loc = 'upper right', fontsize = 'x-large')
plt.ylabel('Riemannian distance', fontsize = 'xx-large')
plt.xlabel('Target covariance index', fontsize = 'xx-large')
plt.title('The mean Riemannian distance per simulated target covariance matrix\n{} features & {} samples'.format(n_channels, n_samples), fontsize = 'xx-large')
plt.show()
|
{"hexsha": "b6b49e875e003e98c06ce5ea6416064467cfcfe7", "size": 11337, "ext": "py", "lang": "Python", "max_stars_repo_path": "figures/figure_3_2.py", "max_stars_repo_name": "Racemuis/epoch-based-covariance-estimates", "max_stars_repo_head_hexsha": "9ae2faba05c043031bfdb69c6937492d18c7db1e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "figures/figure_3_2.py", "max_issues_repo_name": "Racemuis/epoch-based-covariance-estimates", "max_issues_repo_head_hexsha": "9ae2faba05c043031bfdb69c6937492d18c7db1e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "figures/figure_3_2.py", "max_forks_repo_name": "Racemuis/epoch-based-covariance-estimates", "max_forks_repo_head_hexsha": "9ae2faba05c043031bfdb69c6937492d18c7db1e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.3645833333, "max_line_length": 172, "alphanum_fraction": 0.6598747464, "include": true, "reason": "import numpy", "num_tokens": 2732}
|
Require Import Setoid.
Require Import Statement.
(* Demonic sequential composition of relational specifications: (P ⊟ Q)
   relates s to s' when (a) some intermediate state sx witnesses P then Q,
   and (b) every P-successor of s has at least one Q-successor — i.e. the
   second step cannot get stuck from any reachable intermediate state. *)
Definition fn { T U V : Type } (P : T >> U) (Q : U >> V) : T >> V :=
fun s s' => (exists sx, P s sx /\ Q sx s') /\ (forall sx, P s sx -> exists s', Q sx s').
(* Right-associative infix notation for fn. *)
Notation "P ⊟ Q" := (fn P Q) (at level 90, right associativity, format "P ⊟ Q").
(* ⊟ is associative: both groupings describe the same relation. *)
Theorem assoc : forall { T U V W : Type } (P : T >> U) (Q : U >> V) (R : V >> W) s s', (P ⊟ Q ⊟ R) s s' <-> ((P ⊟ Q) ⊟ R) s s'.
Proof.
intros T U V W P Q R s s'.
split.
(* Left-to-right: re-associate the existential witnesses. *)
{ intros ((sx,(HHp,((sy,(HHq,HHr)),HHqr))),HHpqr).
split.
{ exists sy; split; auto.
firstorder.
}
{ firstorder. }
}
(* Right-to-left: additionally rebuild totality of the (Q ⊟ R) step. *)
{ intros ((sy,(((sx,(HHp,HHq)),HHpq),HHr)),HHpqr).
split.
{ exists sx; split; auto.
firstorder.
}
{ intros sz HHp'.
destruct (HHpq _ HHp') as (sz',HHq').
destruct (HHpqr sz') as (sz'',HHr').
{ firstorder. }
{ exists sz''; firstorder. }
}
}
Qed.
(* ⊟ is monotone in its right argument with respect to implication. *)
Theorem right_monotonic : forall (T U V : Type) (P : T >> U) (Q1 Q2 : U >> V),
(forall s s', Q1 s s' -> Q2 s s') -> (forall s s', (P ⊟ Q1) s s' -> (P ⊟ Q2) s s').
Proof.
intros T U V P Q1 Q2 HHq1q2 s s' ((sx,(HHpq1,HHpq1')),HHpq1'').
split.
{ destruct (HHpq1'' _ HHpq1) as (sx',HHq1).
exists sx; split; auto.
}
{ intros sy HHp.
destruct (HHpq1'' _ HHp) as (sy',HHq1).
exists sy'; auto.
}
Qed.
(* ⊟ respects pointwise equivalence in its left argument. *)
Theorem left_extensionality : forall { T U V : Type } (P1 P2 : T >> U) (Q : U >> V),
(forall s s', P1 s s' <-> P2 s s') -> (forall s s', (P1 ⊟ Q) s s' <-> (P2 ⊟ Q) s s').
Proof. firstorder. Qed.
(* ⊟ respects pointwise equivalence in its right argument; both directions
   follow from right_monotonic. *)
Theorem right_extensionality : forall { T U V : Type } (P : T >> U) (Q1 Q2 : U >> V),
(forall s s', Q1 s s' <-> Q2 s s') -> (forall s s', (P ⊟ Q1) s s' <-> (P ⊟ Q2) s s').
Proof.
intros.
split; apply right_monotonic; firstorder.
Qed.
(* Combined congruence: ⊟ respects pointwise equivalence in both arguments. *)
Theorem extensionality : forall { T U V : Type } (P1 P2 : T >> U) (Q1 Q2 : U >> V),
(forall s s', P1 s s' <-> P2 s s') -> (forall s s', Q1 s s' <-> Q2 s s') -> (forall s s', (P1 ⊟ Q1) s s' <-> (P2 ⊟ Q2) s s').
Proof.
intros; split; intros HH.
{ rewrite (left_extensionality _ _ _ H) in HH.
rewrite (right_extensionality _ _ _ H0) in HH.
auto.
}
{ rewrite (left_extensionality _ _ _ H).
rewrite (right_extensionality _ _ _ H0).
auto.
}
Qed.
(* The identity relation (fun s s' => s = s') is a left unit for ⊟. *)
Theorem left_identity_neutrality : forall { T U : Type } (Q : T >> U), (forall s s', ((fun s s' => s = s') ⊟ Q) s s' <-> Q s s').
Proof.
split.
{ intros ((sx,(HH11,HH12)),HH2).
subst sx; firstorder.
}
{ intros; split.
{ eauto. }
{ intros sx HH; subst sx; eauto. }
}
Qed.
(* The identity relation is also a right unit for ⊟. *)
Theorem right_identity_neutrality : forall { T U : Type } (Q : T >> U), (forall s s', (Q ⊟ (fun s s' => s = s')) s s' <-> Q s s').
Proof.
split.
{ intros ((sx,(HH11,HH12)),HH2).
subst sx; firstorder.
}
{ intros; split; eauto. }
Qed.
|
{"author": "bsall", "repo": "AMToPR-ICFEM-2019", "sha": "980d6d6ef5c9ad72a6b2cbd4fa549bc4705f0bed", "save_path": "github-repos/coq/bsall-AMToPR-ICFEM-2019", "path": "github-repos/coq/bsall-AMToPR-ICFEM-2019/AMToPR-ICFEM-2019-980d6d6ef5c9ad72a6b2cbd4fa549bc4705f0bed/src/theory/DemonicComposition.v"}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import tflib as tl
# Layer aliases: slim/tflib wrappers with activation_fn=None so that the
# normalizer and activation can be composed per-network via functools.partial.
conv = partial(slim.conv2d, activation_fn=None)
dconv = partial(slim.conv2d_transpose, activation_fn=None)
fc = partial(tl.flatten_fully_connected, activation_fn=None)
relu = tf.nn.relu
lrelu = tf.nn.leaky_relu
# Batch norm with a learned scale; updates_collections=None applies the
# moving-average updates in place instead of deferring them to a collection.
batch_norm = partial(slim.batch_norm, scale=True, updates_collections=None)
def conv_mnist():
    """Return (Enc, Dec) graph builders for a small convolutional
    encoder/decoder on 28x28 single-channel images (MNIST-sized).
    Both builders reuse variables via tf.AUTO_REUSE on repeated calls."""
    def Enc(img, z_dim, dim=64, use_bn=False, is_training=True, sigma=False):
        # Encoder: two strided 5x5 convs (28->14->7), FC(1024), then a linear
        # head producing the z_dim latent mean. If sigma is True, a second
        # head returns log(sigma^2), biased so sigma starts near 0.1.
        bn = partial(batch_norm, is_training=is_training) if use_bn else None
        conv_bn_lrelu = partial(conv, normalizer_fn=bn, activation_fn=lrelu)
        conv_lrelu = partial(conv, normalizer_fn=None, activation_fn=lrelu)
        with tf.variable_scope('Enc', reuse=tf.AUTO_REUSE):
            y = conv_lrelu(img, dim, 5, 2)
            y = conv_bn_lrelu(y, dim * 2, 5, 2)
            y = lrelu(fc(y, 1024, normalizer_fn=bn))
            z_mu = fc(y, z_dim)
            if sigma:
                z_log_sigma_sq = fc(y, z_dim, biases_initializer=tf.constant_initializer(2. * np.log(0.1)))
                return z_mu, z_log_sigma_sq
            else:
                return z_mu
    def Dec(z, dim=64, channels=1, use_bn=False, is_training=True):
        # Decoder: FC(1024) -> FC(7*7*2dim) -> reshape -> two strided 5x5
        # deconvs back to 28x28, with a tanh output in [-1, 1].
        bn = partial(batch_norm, is_training=is_training) if use_bn else None
        dconv_relu = partial(dconv, normalizer_fn=None, activation_fn=relu)
        with tf.variable_scope('Dec', reuse=tf.AUTO_REUSE):
            y = relu(fc(z, 1024))
            y = relu(fc(y, 7 * 7 * dim * 2, normalizer_fn=bn))
            y = tf.reshape(y, [-1, 7, 7, dim * 2])
            y = dconv_relu(y, dim * 1, 5, 2)
            img = tf.tanh(dconv(y, channels, 5, 2))
            return img
    return Enc, Dec
def conv_32():
    """Return (Enc, Dec) graph builders for a convolutional encoder/decoder
    on 32x32 RGB images. Variables are reused via tf.AUTO_REUSE."""
    def Enc(img, z_dim, dim=64, use_bn=False, is_training=True, sigma=False):
        # Encoder: three strided 5x5 convs (32->16->8->4), then a linear head
        # for the latent mean; optional second head for log(sigma^2), biased
        # so sigma starts near 0.1.
        bn = partial(batch_norm, is_training=is_training) if use_bn else None
        conv_bn_lrelu = partial(conv, normalizer_fn=bn, activation_fn=lrelu)
        conv_lrelu = partial(conv, normalizer_fn=None, activation_fn=lrelu)
        with tf.variable_scope('Enc', reuse=tf.AUTO_REUSE):
            y = conv_lrelu(img, dim, 5, 2)
            y = conv_bn_lrelu(y, dim * 2, 5, 2)
            y = conv_bn_lrelu(y, dim * 4, 5, 2)
            z_mu = fc(y, z_dim)
            if sigma:
                z_log_sigma_sq = fc(y, z_dim, biases_initializer=tf.constant_initializer(2. * np.log(0.1)))
                return z_mu, z_log_sigma_sq
            else:
                return z_mu
    def Dec(z, dim=64, channels=3, use_bn=False, is_training=True):
        # Decoder: FC to a 4x4 feature map, then three strided 5x5 deconvs
        # back to 32x32, with a tanh output in [-1, 1].
        bn = partial(batch_norm, is_training=is_training) if use_bn else None
        dconv_bn_relu = partial(dconv, normalizer_fn=bn, activation_fn=relu)
        dconv_relu = partial(dconv, normalizer_fn=None, activation_fn=relu)
        with tf.variable_scope('Dec', reuse=tf.AUTO_REUSE):
            y = relu(fc(z, 4 * 4 * dim * 4))
            y = tf.reshape(y, [-1, 4, 4, dim * 4])
            y = dconv_bn_relu(y, dim * 2, 5, 2)
            y = dconv_relu(y, dim * 1, 5, 2)
            img = tf.tanh(dconv(y, channels, 5, 2))
            return img
    return Enc, Dec
def resnet_32():
    """Return (Enc, Dec) graph builders for a residual encoder/decoder on
    32x32 images, built from pre-activation residual blocks."""
    def res_block(inputs, out_channels, scale=None, use_bn=False, is_training=True, enc_first=False):
        # Pre-activation residual block with optional up/down scaling.
        # scale: 'up' unpools both branches before the convs; 'down' pools
        # the summed output; None keeps spatial size.
        # enc_first: skip the pre-activation on the very first encoder block
        # (the raw image should not be passed through relu/bn).
        assert scale in ['up', 'down', None]
        bn = partial(batch_norm, is_training=is_training) if use_bn else None
        conv1 = partial(conv, num_outputs=out_channels, kernel_size=1, stride=1)
        conv3 = partial(conv, num_outputs=out_channels, kernel_size=3, stride=1)
        skip, res = inputs, inputs
        if not enc_first:
            res = relu(bn(res)) if use_bn else relu(res)
        if scale == 'up':
            skip, res = tl.unpool(skip), tl.unpool(res)
        # Project the skip branch with a 1x1 conv whenever the channel count
        # or the spatial size changes, so the two branches stay addable.
        if out_channels != inputs.shape[-1] or scale:
            skip = conv1(skip)
        res = conv3(conv3(res, normalizer_fn=bn, activation_fn=relu))
        outputs = skip + res
        if scale == 'down':
            outputs = tl.pool(outputs)
        return outputs
    def Enc(img, z_dim, dim=512, use_bn=False, is_training=True, sigma=False):
        # Encoder: two down-scaling blocks (32->16->8) and two same-size
        # blocks, then a linear head for the latent mean; optional second
        # head for log(sigma^2), biased so sigma starts near 0.1.
        rb = partial(res_block, use_bn=use_bn, is_training=is_training)
        with tf.variable_scope('Enc', reuse=tf.AUTO_REUSE):
            y = rb(img, dim, 'down', enc_first=True)
            y = rb(y, dim, 'down')
            y = rb(y, dim)
            y = relu(rb(y, dim))
            z_mu = fc(y, z_dim)
            if sigma:
                z_log_sigma_sq = fc(y, z_dim, biases_initializer=tf.constant_initializer(2. * np.log(0.1)))
                return z_mu, z_log_sigma_sq
            else:
                return z_mu
    def Dec(z, dim=512, channels=3, use_bn=False, is_training=True):
        # Decoder: FC to a 4x4 map, three up-scaling blocks (4->8->16->32),
        # then a 3x3 conv with tanh output in [-1, 1].
        rb = partial(res_block, use_bn=use_bn, is_training=is_training)
        with tf.variable_scope('Dec', reuse=tf.AUTO_REUSE):
            y = fc(z, 4 * 4 * dim * 4)
            y = tf.reshape(y, [-1, 4, 4, dim * 4])
            y = rb(y, dim, 'up')
            y = rb(y, dim, 'up')
            y = rb(y, dim, 'up')
            img = tf.tanh(conv(relu(y), channels, 3, 1))
            return img
    return Enc, Dec
def conv_64():
    """Return (Enc, Dec) graph builders for a convolutional encoder/decoder
    on 64x64 RGB images. Variables are reused via tf.AUTO_REUSE."""
    def Enc(img, z_dim, dim=64, use_bn=False, is_training=True, sigma=False):
        # Encoder: four strided 5x5 convs (64->32->16->8->4), then a linear
        # head for the latent mean; optional second head for log(sigma^2),
        # biased so sigma starts near 0.1.
        bn = partial(batch_norm, is_training=is_training) if use_bn else None
        conv_bn_lrelu = partial(conv, normalizer_fn=bn, activation_fn=lrelu)
        conv_lrelu = partial(conv, normalizer_fn=None, activation_fn=lrelu)
        with tf.variable_scope('Enc', reuse=tf.AUTO_REUSE):
            y = conv_lrelu(img, dim, 5, 2)
            y = conv_lrelu(y, dim * 2, 5, 2)
            y = conv_bn_lrelu(y, dim * 4, 5, 2)
            y = conv_bn_lrelu(y, dim * 8, 5, 2)
            z_mu = fc(y, z_dim)
            if sigma:
                z_log_sigma_sq = fc(y, z_dim, biases_initializer=tf.constant_initializer(2. * np.log(0.1)))
                return z_mu, z_log_sigma_sq
            else:
                return z_mu
    def Dec(z, dim=64, channels=3, use_bn=False, is_training=True):
        # Decoder: FC to a 4x4 feature map, then four strided 5x5 deconvs
        # back to 64x64, with a tanh output in [-1, 1].
        bn = partial(batch_norm, is_training=is_training) if use_bn else None
        dconv_bn_relu = partial(dconv, normalizer_fn=bn, activation_fn=relu)
        dconv_relu = partial(dconv, normalizer_fn=None, activation_fn=relu)
        with tf.variable_scope('Dec', reuse=tf.AUTO_REUSE):
            y = relu(fc(z, 4 * 4 * dim * 8))
            y = tf.reshape(y, [-1, 4, 4, dim * 8])
            y = dconv_bn_relu(y, dim * 4, 5, 2)
            y = dconv_bn_relu(y, dim * 2, 5, 2)
            y = dconv_relu(y, dim * 1, 5, 2)
            img = tf.tanh(dconv(y, channels, 5, 2))
            return img
    return Enc, Dec
|
{"hexsha": "67ebb1549897a7ffdc95bf0afbef64276c05164b", "size": 6818, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/models.py", "max_stars_repo_name": "zj10/PGA", "max_stars_repo_head_hexsha": "aa908554925540f8c79c5228cde7fb306fe8868a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2019-06-26T00:45:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-26T20:39:55.000Z", "max_issues_repo_path": "src/models.py", "max_issues_repo_name": "zj10/PGA", "max_issues_repo_head_hexsha": "aa908554925540f8c79c5228cde7fb306fe8868a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-05-16T18:22:46.000Z", "max_issues_repo_issues_event_max_datetime": "2019-05-27T22:26:57.000Z", "max_forks_repo_path": "src/models.py", "max_forks_repo_name": "zj10/PGA", "max_forks_repo_head_hexsha": "aa908554925540f8c79c5228cde7fb306fe8868a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.3431952663, "max_line_length": 107, "alphanum_fraction": 0.5959225579, "include": true, "reason": "import numpy", "num_tokens": 1900}
|
section "Affine Sets"
theory Affine
imports Linear_Algebra
begin
(* Scalar multiplication distributes over if; a special case of if_distrib. *)
lemma if_smult: "(if P then x else (y::real)) *\<^sub>R v = (if P then x *\<^sub>R v else y *\<^sub>R v)"
  by (fact if_distrib)
(* A summand guarded by equality with an element x outside the index set
   never fires, so the sum collapses to sum Q s; four symmetric variants. *)
lemma sum_delta_notmem:
  assumes "x \<notin> s"
  shows "sum (\<lambda>y. if (y = x) then P x else Q y) s = sum Q s"
    and "sum (\<lambda>y. if (x = y) then P x else Q y) s = sum Q s"
    and "sum (\<lambda>y. if (y = x) then P y else Q y) s = sum Q s"
    and "sum (\<lambda>y. if (x = y) then P y else Q y) s = sum Q s"
  apply (rule_tac [!] sum.cong)
  using assms
  apply auto
  done
(* Alias: every linearly independent set (in this setting) is finite. *)
lemmas independent_finite = independent_imp_finite
(* The span of a subset d of the standard basis is exactly the set of
   vectors whose coordinates vanish outside d. *)
lemma span_substd_basis:
  assumes d: "d \<subseteq> Basis"
  shows "span d = {x. \<forall>i\<in>Basis. i \<notin> d \<longrightarrow> x\<bullet>i = 0}"
    (is "_ = ?B")
proof -
  have "d \<subseteq> ?B"
    using d by (auto simp: inner_Basis)
  moreover have s: "subspace ?B"
    using subspace_substandard[of "\<lambda>i. i \<notin> d"] .
  ultimately have "span d \<subseteq> ?B"
    using span_mono[of d "?B"] span_eq_iff[of "?B"] by blast
  moreover have *: "card d \<le> dim (span d)"
    using independent_card_le_dim[of d "span d"] independent_substdbasis[OF assms]
      span_superset[of d]
    by auto
  moreover from * have "dim ?B \<le> dim (span d)"
    using dim_substandard[OF assms] by auto
  ultimately show ?thesis
    using s subspace_dim_equal[of "span d" "?B"] subspace_span[of d] by auto
qed
(* Any independent set B can be mapped by a linear injection onto a subset d
   of the standard basis of the same cardinality, carrying span B onto the
   corresponding sub-standard coordinate subspace. *)
lemma basis_to_substdbasis_subspace_isomorphism:
  fixes B :: "'a::euclidean_space set"
  assumes "independent B"
  shows "\<exists>f d::'a set. card d = card B \<and> linear f \<and> f ` B = d \<and>
    f ` span B = {x. \<forall>i\<in>Basis. i \<notin> d \<longrightarrow> x \<bullet> i = 0} \<and> inj_on f (span B) \<and> d \<subseteq> Basis"
proof -
  have B: "card B = dim B"
    using dim_unique[of B B "card B"] assms span_superset[of B] by auto
  have "dim B \<le> card (Basis :: 'a set)"
    using dim_subset_UNIV[of B] by simp
  from obtain_subset_with_card_n[OF this] obtain d :: "'a set" where d: "d \<subseteq> Basis" and t: "card d = dim B"
    by auto
  let ?t = "{x::'a::euclidean_space. \<forall>i\<in>Basis. i \<notin> d \<longrightarrow> x\<bullet>i = 0}"
  have "\<exists>f. linear f \<and> f ` B = d \<and> f ` span B = ?t \<and> inj_on f (span B)"
  proof (intro basis_to_basis_subspace_isomorphism subspace_span subspace_substandard span_superset)
    show "d \<subseteq> {x. \<forall>i\<in>Basis. i \<notin> d \<longrightarrow> x \<bullet> i = 0}"
      using d inner_not_same_Basis by blast
  qed (auto simp: span_substd_basis independent_substdbasis dim_substandard d t B assms)
  with t \<open>card B = dim B\<close> d show ?thesis by auto
qed
subsection \<open>Affine set and affine hull\<close>
(* A set is affine when it is closed under affine combinations of any two
   of its points (coefficients summing to 1). *)
definition\<^marker>\<open>tag important\<close> affine :: "'a::real_vector set \<Rightarrow> bool"
  where "affine s \<longleftrightarrow> (\<forall>x\<in>s. \<forall>y\<in>s. \<forall>u v. u + v = 1 \<longrightarrow> u *\<^sub>R x + v *\<^sub>R y \<in> s)"
(* One-parameter reformulation: v is determined as 1 - u. *)
lemma affine_alt: "affine s \<longleftrightarrow> (\<forall>x\<in>s. \<forall>y\<in>s. \<forall>u::real. (1 - u) *\<^sub>R x + u *\<^sub>R y \<in> s)"
  unfolding affine_def by (metis eq_diff_eq')
(* Basic closure properties of affineness. *)
lemma affine_empty [iff]: "affine {}"
  unfolding affine_def by auto
lemma affine_sing [iff]: "affine {x}"
  unfolding affine_alt by (auto simp: scaleR_left_distrib [symmetric])
lemma affine_UNIV [iff]: "affine UNIV"
  unfolding affine_def by auto
lemma affine_Inter [intro]: "(\<And>s. s\<in>f \<Longrightarrow> affine s) \<Longrightarrow> affine (\<Inter>f)"
  unfolding affine_def by auto
lemma affine_Int[intro]: "affine s \<Longrightarrow> affine t \<Longrightarrow> affine (s \<inter> t)"
  unfolding affine_def by auto
(* Affineness is preserved by uniform scaling of the set. *)
lemma affine_scaling: "affine s \<Longrightarrow> affine (image (\<lambda>x. c *\<^sub>R x) s)"
  apply (clarsimp simp add: affine_def)
  apply (rule_tac x="u *\<^sub>R x + v *\<^sub>R y" in image_eqI)
  apply (auto simp: algebra_simps)
  done
(* The affine hull of any set is affine (intersection of all affine
   supersets), and a set equals its affine hull iff it is affine. *)
lemma affine_affine_hull [simp]: "affine(affine hull s)"
  unfolding hull_def
  using affine_Inter[of "{t. affine t \<and> s \<subseteq> t}"] by auto
lemma affine_hull_eq[simp]: "(affine hull s = s) \<longleftrightarrow> affine s"
  by (metis affine_affine_hull hull_same)
(* Hyperplanes are affine. *)
lemma affine_hyperplane: "affine {x. a \<bullet> x = b}"
  by (simp add: affine_def algebra_simps) (metis distrib_right mult.left_neutral)
subsubsection\<^marker>\<open>tag unimportant\<close> \<open>Some explicit formulations\<close>
text "Formalized by Lars Schewe."
(* Characterization: a set is affine iff it is closed under arbitrary finite
   affine combinations (coefficients summing to 1), not just pairwise ones.
   The hard direction is an induction on the size of the index set. *)
lemma affine:
  fixes V::"'a::real_vector set"
  shows "affine V \<longleftrightarrow>
         (\<forall>S u. finite S \<and> S \<noteq> {} \<and> S \<subseteq> V \<and> sum u S = 1 \<longrightarrow> (\<Sum>x\<in>S. u x *\<^sub>R x) \<in> V)"
proof -
  (* Pairwise closure follows from the finite case with a two-element set. *)
  have "u *\<^sub>R x + v *\<^sub>R y \<in> V" if "x \<in> V" "y \<in> V" "u + v = (1::real)"
    and *: "\<And>S u. \<lbrakk>finite S; S \<noteq> {}; S \<subseteq> V; sum u S = 1\<rbrakk> \<Longrightarrow> (\<Sum>x\<in>S. u x *\<^sub>R x) \<in> V" for x y u v
  proof (cases "x = y")
    case True
    then show ?thesis
      using that by (metis scaleR_add_left scaleR_one)
  next
    case False
    then show ?thesis
      using that *[of "{x,y}" "\<lambda>w. if w = x then u else v"] by auto
  qed
  (* Conversely, finite combinations reduce to pairwise ones by peeling off
     one point with coefficient \<noteq> 1 and rescaling the remainder. *)
  moreover have "(\<Sum>x\<in>S. u x *\<^sub>R x) \<in> V"
    if *: "\<And>x y u v. \<lbrakk>x\<in>V; y\<in>V; u + v = 1\<rbrakk> \<Longrightarrow> u *\<^sub>R x + v *\<^sub>R y \<in> V"
    and "finite S" "S \<noteq> {}" "S \<subseteq> V" "sum u S = 1" for S u
  proof -
    define n where "n = card S"
    consider "card S = 0" | "card S = 1" | "card S = 2" | "card S > 2" by linarith
    then show "(\<Sum>x\<in>S. u x *\<^sub>R x) \<in> V"
    proof cases
      assume "card S = 1"
      then obtain a where "S={a}"
        by (auto simp: card_Suc_eq)
      then show ?thesis
        using that by simp
    next
      assume "card S = 2"
      then obtain a b where "S = {a, b}"
        by (metis Suc_1 card_1_singletonE card_Suc_eq)
      then show ?thesis
        using *[of a b] that
        by (auto simp: sum_clauses(2))
    next
      assume "card S > 2"
      then show ?thesis using that n_def
      proof (induct n arbitrary: u S)
        case 0
        then show ?case by auto
      next
        case (Suc n u S)
        (* Since the coefficients sum to 1 but S has > 2 elements, some
           coefficient differs from 1; peel that point off. *)
        have "sum u S = card S" if "\<not> (\<exists>x\<in>S. u x \<noteq> 1)"
          using that unfolding card_eq_sum by auto
        with Suc.prems obtain x where "x \<in> S" and x: "u x \<noteq> 1" by force
        have c: "card (S - {x}) = card S - 1"
          by (simp add: Suc.prems(3) \<open>x \<in> S\<close>)
        have "sum u (S - {x}) = 1 - u x"
          by (simp add: Suc.prems sum_diff1 \<open>x \<in> S\<close>)
        with x have eq1: "inverse (1 - u x) * sum u (S - {x}) = 1"
          by auto
        (* The rescaled combination over S - {x} lies in V, either by the
           induction hypothesis or by the two-element base case. *)
        have inV: "(\<Sum>y\<in>S - {x}. (inverse (1 - u x) * u y) *\<^sub>R y) \<in> V"
        proof (cases "card (S - {x}) > 2")
          case True
          then have S: "S - {x} \<noteq> {}" "card (S - {x}) = n"
            using Suc.prems c by force+
          show ?thesis
          proof (rule Suc.hyps)
            show "(\<Sum>a\<in>S - {x}. inverse (1 - u x) * u a) = 1"
              by (auto simp: eq1 sum_distrib_left[symmetric])
          qed (use S Suc.prems True in auto)
        next
          case False
          then have "card (S - {x}) = Suc (Suc 0)"
            using Suc.prems c by auto
          then obtain a b where ab: "(S - {x}) = {a, b}" "a\<noteq>b"
            unfolding card_Suc_eq by auto
          then show ?thesis
            using eq1 \<open>S \<subseteq> V\<close>
            by (auto simp: sum_distrib_left distrib_left intro!: Suc.prems(2)[of a b])
        qed
        (* Recombine x with the rescaled remainder via pairwise closure. *)
        have "u x + (1 - u x) = 1 \<Longrightarrow>
          u x *\<^sub>R x + (1 - u x) *\<^sub>R ((\<Sum>y\<in>S - {x}. u y *\<^sub>R y) /\<^sub>R (1 - u x)) \<in> V"
          by (rule Suc.prems) (use \<open>x \<in> S\<close> Suc.prems inV in \<open>auto simp: scaleR_right.sum\<close>)
        moreover have "(\<Sum>a\<in>S. u a *\<^sub>R a) = u x *\<^sub>R x + (\<Sum>a\<in>S - {x}. u a *\<^sub>R a)"
          by (meson Suc.prems(3) sum.remove \<open>x \<in> S\<close>)
        ultimately show "(\<Sum>x\<in>S. u x *\<^sub>R x) \<in> V"
          by (simp add: x)
      qed
    qed (use \<open>S\<noteq>{}\<close> \<open>finite S\<close> in auto)
  qed
  ultimately show ?thesis
    unfolding affine_def by meson
qed
(* Explicit characterisation of the affine hull: a point y lies in "affine hull p"
   iff it is a finite affine combination of points of p, i.e. a weighted sum over a
   finite nonempty S \<subseteq> p whose coefficients sum to 1. *)
lemma affine_hull_explicit:
"affine hull p = {y. \<exists>S u. finite S \<and> S \<noteq> {} \<and> S \<subseteq> p \<and> sum u S = 1 \<and> sum (\<lambda>v. u v *\<^sub>R v) S = y}"
(is "_ = ?rhs")
proof (rule hull_unique)
(* Each point of p is the trivial affine combination over the singleton {x}. *)
show "p \<subseteq> ?rhs"
proof (intro subsetI CollectI exI conjI)
show "\<And>x. sum (\<lambda>z. 1) {x} = 1"
by auto
qed auto
show "?rhs \<subseteq> T" if "p \<subseteq> T" "affine T" for T
using that unfolding affine by blast
(* The set of affine combinations is itself affine: an affine combination of two
   affine combinations is again one, over the union of the two index sets. *)
show "affine ?rhs"
unfolding affine_def
proof clarify
fix u v :: real and sx ux sy uy
assume uv: "u + v = 1"
and x: "finite sx" "sx \<noteq> {}" "sx \<subseteq> p" "sum ux sx = (1::real)"
and y: "finite sy" "sy \<noteq> {}" "sy \<subseteq> p" "sum uy sy = (1::real)"
have **: "(sx \<union> sy) \<inter> sx = sx" "(sx \<union> sy) \<inter> sy = sy"
by auto
(* Witness set: sx \<union> sy; witness weights: u-scaled ux on sx plus v-scaled uy on sy,
   extended by 0 outside each original index set. *)
show "\<exists>S w. finite S \<and> S \<noteq> {} \<and> S \<subseteq> p \<and>
sum w S = 1 \<and> (\<Sum>v\<in>S. w v *\<^sub>R v) = u *\<^sub>R (\<Sum>v\<in>sx. ux v *\<^sub>R v) + v *\<^sub>R (\<Sum>v\<in>sy. uy v *\<^sub>R v)"
proof (intro exI conjI)
show "finite (sx \<union> sy)"
using x y by auto
(* The combined weights sum to u*1 + v*1 = 1. *)
show "sum (\<lambda>i. (if i\<in>sx then u * ux i else 0) + (if i\<in>sy then v * uy i else 0)) (sx \<union> sy) = 1"
using x y uv
by (simp add: sum_Un sum.distrib sum.inter_restrict[symmetric] sum_distrib_left [symmetric] **)
have "(\<Sum>i\<in>sx \<union> sy. ((if i \<in> sx then u * ux i else 0) + (if i \<in> sy then v * uy i else 0)) *\<^sub>R i)
= (\<Sum>i\<in>sx. (u * ux i) *\<^sub>R i) + (\<Sum>i\<in>sy. (v * uy i) *\<^sub>R i)"
using x y
unfolding scaleR_left_distrib scaleR_zero_left if_smult
by (simp add: sum_Un sum.distrib sum.inter_restrict[symmetric] **)
also have "\<dots> = u *\<^sub>R (\<Sum>v\<in>sx. ux v *\<^sub>R v) + v *\<^sub>R (\<Sum>v\<in>sy. uy v *\<^sub>R v)"
unfolding scaleR_scaleR[symmetric] scaleR_right.sum [symmetric] by blast
finally show "(\<Sum>i\<in>sx \<union> sy. ((if i \<in> sx then u * ux i else 0) + (if i \<in> sy then v * uy i else 0)) *\<^sub>R i)
= u *\<^sub>R (\<Sum>v\<in>sx. ux v *\<^sub>R v) + v *\<^sub>R (\<Sum>v\<in>sy. uy v *\<^sub>R v)" .
qed (use x y in auto)
qed
qed
(* Specialisation of affine_hull_explicit to a finite S: the weight function can be
   taken over all of S (padding with 0 outside the witness subset F). *)
lemma affine_hull_finite:
assumes "finite S"
shows "affine hull S = {y. \<exists>u. sum u S = 1 \<and> sum (\<lambda>v. u v *\<^sub>R v) S = y}"
proof -
(* Any affine combination over F \<subseteq> S extends to one over S by zero-padding. *)
have *: "\<exists>h. sum h S = 1 \<and> (\<Sum>v\<in>S. h v *\<^sub>R v) = x"
if "F \<subseteq> S" "finite F" "F \<noteq> {}" and sum: "sum u F = 1" and x: "(\<Sum>v\<in>F. u v *\<^sub>R v) = x" for x F u
proof -
have "S \<inter> F = F"
using that by auto
show ?thesis
proof (intro exI conjI)
show "(\<Sum>x\<in>S. if x \<in> F then u x else 0) = 1"
by (metis (mono_tags, lifting) \<open>S \<inter> F = F\<close> assms sum.inter_restrict sum)
show "(\<Sum>v\<in>S. (if v \<in> F then u v else 0) *\<^sub>R v) = x"
by (simp add: if_smult cong: if_cong) (metis (no_types) \<open>S \<inter> F = F\<close> assms sum.inter_restrict x)
qed
qed
show ?thesis
unfolding affine_hull_explicit using assms
by (fastforce dest: *)
qed
subsubsection\<^marker>\<open>tag unimportant\<close> \<open>Stepping theorems and hence small special cases\<close>
(* The affine hull of the empty set is empty. *)
lemma affine_hull_empty[simp]: "affine hull {} = {}"
by simp
(* Stepping theorem: an affine combination over "insert a S" with total weight w
   exists iff there is a weight v for a and a combination over S with total w - v
   producing y - v *\<^sub>R a.  Used to peel off one point at a time (see affine_hull_2/3). *)
lemma affine_hull_finite_step:
fixes y :: "'a::real_vector"
shows "finite S \<Longrightarrow>
(\<exists>u. sum u (insert a S) = w \<and> sum (\<lambda>x. u x *\<^sub>R x) (insert a S) = y) \<longleftrightarrow>
(\<exists>v u. sum u S = w - v \<and> sum (\<lambda>x. u x *\<^sub>R x) S = y - v *\<^sub>R a)" (is "_ \<Longrightarrow> ?lhs = ?rhs")
proof -
assume fin: "finite S"
show "?lhs = ?rhs"
proof
(* Forward direction: extract a's coefficient (0 when a \<in> S is absorbed). *)
assume ?lhs
then obtain u where u: "sum u (insert a S) = w \<and> (\<Sum>x\<in>insert a S. u x *\<^sub>R x) = y"
by auto
show ?rhs
proof (cases "a \<in> S")
case True
then show ?thesis
using u by (simp add: insert_absorb) (metis diff_zero real_vector.scale_zero_left)
next
case False
show ?thesis
by (rule exI [where x="u a"]) (use u fin False in auto)
qed
next
(* Backward direction: reattach coefficient v to a, adding it onto u when a \<in> S. *)
assume ?rhs
then obtain v u where vu: "sum u S = w - v" "(\<Sum>x\<in>S. u x *\<^sub>R x) = y - v *\<^sub>R a"
by auto
have *: "\<And>x M. (if x = a then v else M) *\<^sub>R x = (if x = a then v *\<^sub>R x else M *\<^sub>R x)"
by auto
show ?lhs
proof (cases "a \<in> S")
case True
show ?thesis
by (rule exI [where x="\<lambda>x. (if x=a then v else 0) + u x"])
(simp add: True scaleR_left_distrib sum.distrib sum_clauses fin vu * cong: if_cong)
next
case False
then show ?thesis
apply (rule_tac x="\<lambda>x. if x=a then v else u x" in exI)
apply (simp add: vu sum_clauses(2)[OF fin] *)
by (simp add: sum_delta_notmem(3) vu)
qed
qed
qed
(* The affine hull of two points is the set of their affine combinations
   u *\<^sub>R a + v *\<^sub>R b with u + v = 1 (the line through a and b). *)
lemma affine_hull_2:
fixes a b :: "'a::real_vector"
shows "affine hull {a,b} = {u *\<^sub>R a + v *\<^sub>R b| u v. (u + v = 1)}"
(is "?lhs = ?rhs")
proof -
(* Rewriting rules turning subtraction constraints into sum-to-1 constraints. *)
have *:
"\<And>x y z. z = x - y \<longleftrightarrow> y + z = (x::real)"
"\<And>x y z. z = x - y \<longleftrightarrow> y + z = (x::'a)" by auto
have "?lhs = {y. \<exists>u. sum u {a, b} = 1 \<and> (\<Sum>v\<in>{a, b}. u v *\<^sub>R v) = y}"
using affine_hull_finite[of "{a,b}"] by auto
also have "\<dots> = {y. \<exists>v u. u b = 1 - v \<and> u b *\<^sub>R b = y - v *\<^sub>R a}"
by (simp add: affine_hull_finite_step[of "{b}" a])
also have "\<dots> = ?rhs" unfolding * by auto
finally show ?thesis by auto
qed
(* The affine hull of three points is the set of their affine combinations
   (the plane through a, b, c when they are affinely independent). *)
lemma affine_hull_3:
fixes a b c :: "'a::real_vector"
shows "affine hull {a,b,c} = { u *\<^sub>R a + v *\<^sub>R b + w *\<^sub>R c| u v w. u + v + w = 1}"
proof -
have *:
"\<And>x y z. z = x - y \<longleftrightarrow> y + z = (x::real)"
"\<And>x y z. z = x - y \<longleftrightarrow> y + z = (x::'a)" by auto
show ?thesis
apply (simp add: affine_hull_finite affine_hull_finite_step)
unfolding *
apply safe
apply (metis add.assoc)
apply (rule_tac x=u in exI, force)
done
qed
(* Membership lemmas: affine sets are closed under 2- and 3-point affine
   combinations, and hence under adding multiples of differences of members. *)
lemma mem_affine:
assumes "affine S" "x \<in> S" "y \<in> S" "u + v = 1"
shows "u *\<^sub>R x + v *\<^sub>R y \<in> S"
using assms affine_def[of S] by auto
lemma mem_affine_3:
assumes "affine S" "x \<in> S" "y \<in> S" "z \<in> S" "u + v + w = 1"
shows "u *\<^sub>R x + v *\<^sub>R y + w *\<^sub>R z \<in> S"
proof -
(* The combination lies in the affine hull of {x,y,z}, which sits inside
   affine hull S = S since S is affine. *)
have "u *\<^sub>R x + v *\<^sub>R y + w *\<^sub>R z \<in> affine hull {x, y, z}"
using affine_hull_3[of x y z] assms by auto
moreover
have "affine hull {x, y, z} \<subseteq> affine hull S"
using hull_mono[of "{x, y, z}" "S"] assms by auto
moreover
have "affine hull S = S"
using assms affine_hull_eq[of S] by auto
ultimately show ?thesis by auto
qed
(* Instance of mem_affine_3 with coefficients 1, v, -v. *)
lemma mem_affine_3_minus:
assumes "affine S" "x \<in> S" "y \<in> S" "z \<in> S"
shows "x + v *\<^sub>R (y-z) \<in> S"
using mem_affine_3[of S x y z 1 v "-v"] assms
by (simp add: algebra_simps)
corollary%unimportant mem_affine_3_minus2:
"\<lbrakk>affine S; x \<in> S; y \<in> S; z \<in> S\<rbrakk> \<Longrightarrow> x - v *\<^sub>R (y-z) \<in> S"
by (metis add_uminus_conv_diff mem_affine_3_minus real_vector.scale_minus_left)
subsubsection\<^marker>\<open>tag unimportant\<close> \<open>Some relations between affine hull and subspaces\<close>
(* One inclusion of affine_hull_insert_span: every point of the affine hull of
   insert a S is a plus a vector in the span of the differences {x - a | x \<in> S}. *)
lemma affine_hull_insert_subset_span:
"affine hull (insert a S) \<subseteq> {a + v| v . v \<in> span {x - a | x . x \<in> S}}"
proof -
(* Given an affine combination of insert a S summing to x, translate by -a:
   the result is a linear combination of differences, hence in the span. *)
have "\<exists>v T u. x = a + v \<and> (finite T \<and> T \<subseteq> {x - a |x. x \<in> S} \<and> (\<Sum>v\<in>T. u v *\<^sub>R v) = v)"
if "finite F" "F \<noteq> {}" "F \<subseteq> insert a S" "sum u F = 1" "(\<Sum>v\<in>F. u v *\<^sub>R v) = x"
for x F u
proof -
have *: "(\<lambda>x. x - a) ` (F - {a}) \<subseteq> {x - a |x. x \<in> S}"
using that by auto
show ?thesis
proof (intro exI conjI)
show "finite ((\<lambda>x. x - a) ` (F - {a}))"
by (simp add: that(1))
show "(\<Sum>v\<in>(\<lambda>x. x - a) ` (F - {a}). u(v+a) *\<^sub>R v) = x-a"
by (simp add: sum.reindex[unfolded inj_on_def] algebra_simps
sum_subtractf scaleR_left.sum[symmetric] sum_diff1 that)
qed (use \<open>F \<subseteq> insert a S\<close> in auto)
qed
then show ?thesis
unfolding affine_hull_explicit span_explicit by fast
qed
(* Full equality when a \<notin> S: the affine hull of insert a S is exactly a translated
   copy of the span of the differences. *)
lemma affine_hull_insert_span:
assumes "a \<notin> S"
shows "affine hull (insert a S) = {a + v | v . v \<in> span {x - a | x. x \<in> S}}"
proof -
(* Reverse inclusion: a span element v gives an affine combination of insert a S
   for y = a + v, with a carrying the residual weight 1 - (sum of the others). *)
have *: "\<exists>G u. finite G \<and> G \<noteq> {} \<and> G \<subseteq> insert a S \<and> sum u G = 1 \<and> (\<Sum>v\<in>G. u v *\<^sub>R v) = y"
if "v \<in> span {x - a |x. x \<in> S}" "y = a + v" for y v
proof -
from that
obtain T u where u: "finite T" "T \<subseteq> {x - a |x. x \<in> S}" "a + (\<Sum>v\<in>T. u v *\<^sub>R v) = y"
unfolding span_explicit by auto
define F where "F = (\<lambda>x. x + a) ` T"
have F: "finite F" "F \<subseteq> S" "(\<Sum>v\<in>F. u (v - a) *\<^sub>R (v - a)) = y - a"
unfolding F_def using u by (auto simp: sum.reindex[unfolded inj_on_def])
have *: "F \<inter> {a} = {}" "F \<inter> - {a} = F"
using F assms by auto
show "\<exists>G u. finite G \<and> G \<noteq> {} \<and> G \<subseteq> insert a S \<and> sum u G = 1 \<and> (\<Sum>v\<in>G. u v *\<^sub>R v) = y"
apply (rule_tac x = "insert a F" in exI)
apply (rule_tac x = "\<lambda>x. if x=a then 1 - sum (\<lambda>x. u (x - a)) F else u (x - a)" in exI)
using assms F
apply (auto simp: sum_clauses sum.If_cases if_smult sum_subtractf scaleR_left.sum algebra_simps *)
done
qed
show ?thesis
by (intro subset_antisym affine_hull_insert_subset_span) (auto simp: affine_hull_explicit dest!: *)
qed
(* Corollary for a \<in> S, via insert_Diff. *)
lemma affine_hull_span:
assumes "a \<in> S"
shows "affine hull S = {a + v | v. v \<in> span {x - a | x. x \<in> S - {a}}}"
using affine_hull_insert_span[of a "S - {a}", unfolded insert_Diff[OF assms]] by auto
subsubsection\<^marker>\<open>tag unimportant\<close> \<open>Parallel affine sets\<close>
(* Two sets are affine-parallel when one is a translate of the other. *)
definition affine_parallel :: "'a::real_vector set \<Rightarrow> 'a::real_vector set \<Rightarrow> bool"
where "affine_parallel S T \<longleftrightarrow> (\<exists>a. T = (\<lambda>x. a + x) ` S)"
(* Helper: a pointwise membership equivalence x \<in> S \<longleftrightarrow> a + x \<in> T forces T to be
   the a-translate of S. *)
lemma affine_parallel_expl_aux:
fixes S T :: "'a::real_vector set"
assumes "\<And>x. x \<in> S \<longleftrightarrow> a + x \<in> T"
shows "T = (\<lambda>x. a + x) ` S"
proof -
have "x \<in> ((\<lambda>x. a + x) ` S)" if "x \<in> T" for x
using that
by (simp add: image_iff) (metis add.commute diff_add_cancel assms)
moreover have "T \<ge> (\<lambda>x. a + x) ` S"
using assms by auto
ultimately show ?thesis by auto
qed
(* Pointwise characterisation of parallelism. *)
lemma affine_parallel_expl: "affine_parallel S T \<longleftrightarrow> (\<exists>a. \<forall>x. x \<in> S \<longleftrightarrow> a + x \<in> T)"
by (auto simp add: affine_parallel_def)
(use affine_parallel_expl_aux [of S _ T] in blast)
(* Parallelism is reflexive (translate by 0). *)
lemma affine_parallel_reflex: "affine_parallel S S"
unfolding affine_parallel_def
using image_add_0 by blast
(* Parallelism is symmetric (translate back by -a). *)
lemma affine_parallel_commut:
assumes "affine_parallel A B"
shows "affine_parallel B A"
proof -
from assms obtain a where B: "B = (\<lambda>x. a + x) ` A"
unfolding affine_parallel_def by auto
have [simp]: "(\<lambda>x. x - a) = plus (- a)" by (simp add: fun_eq_iff)
from B show ?thesis
using translation_galois [of B a A]
unfolding affine_parallel_def by blast
qed
(* Parallelism is transitive (compose the two translations). *)
lemma affine_parallel_assoc:
assumes "affine_parallel A B"
and "affine_parallel B C"
shows "affine_parallel A C"
proof -
from assms obtain ab where "B = (\<lambda>x. ab + x) ` A"
unfolding affine_parallel_def by auto
moreover
from assms obtain bc where "C = (\<lambda>x. bc + x) ` B"
unfolding affine_parallel_def by auto
ultimately show ?thesis
using translation_assoc[of bc ab A] unfolding affine_parallel_def by auto
qed
(* If a translate of S is affine, so is S: pull an affine combination through the
   translation using u + v = 1 to cancel the offset a. *)
lemma affine_translation_aux:
fixes a :: "'a::real_vector"
assumes "affine ((\<lambda>x. a + x) ` S)"
shows "affine S"
proof -
{
fix x y u v
assume xy: "x \<in> S" "y \<in> S" "(u :: real) + v = 1"
then have "(a + x) \<in> ((\<lambda>x. a + x) ` S)" "(a + y) \<in> ((\<lambda>x. a + x) ` S)"
by auto
then have h1: "u *\<^sub>R (a + x) + v *\<^sub>R (a + y) \<in> (\<lambda>x. a + x) ` S"
using xy assms unfolding affine_def by auto
(* Because u + v = 1, the a-components collapse to a single a. *)
have "u *\<^sub>R (a + x) + v *\<^sub>R (a + y) = (u + v) *\<^sub>R a + (u *\<^sub>R x + v *\<^sub>R y)"
by (simp add: algebra_simps)
also have "\<dots> = a + (u *\<^sub>R x + v *\<^sub>R y)"
using \<open>u + v = 1\<close> by auto
ultimately have "a + (u *\<^sub>R x + v *\<^sub>R y) \<in> (\<lambda>x. a + x) ` S"
using h1 by auto
then have "u *\<^sub>R x + v *\<^sub>R y \<in> S" by auto
}
then show ?thesis unfolding affine_def by auto
qed
(* Affineness is invariant under translation, in both directions. *)
lemma affine_translation:
"affine S \<longleftrightarrow> affine ((+) a ` S)" for a :: "'a::real_vector"
proof
show "affine ((+) a ` S)" if "affine S"
using that translation_assoc [of "- a" a S]
by (auto intro: affine_translation_aux [of "- a" "((+) a ` S)"])
show "affine S" if "affine ((+) a ` S)"
using that by (rule affine_translation_aux)
qed
(* A set parallel to an affine set is affine. *)
lemma parallel_is_affine:
fixes S T :: "'a::real_vector set"
assumes "affine S" "affine_parallel S T"
shows "affine T"
proof -
from assms obtain a where "T = (\<lambda>x. a + x) ` S"
unfolding affine_parallel_def by auto
then show ?thesis
using affine_translation assms by auto
qed
(* Every subspace is affine; hence the affine hull sits inside the span. *)
lemma subspace_imp_affine: "subspace s \<Longrightarrow> affine s"
unfolding subspace_def affine_def by auto
lemma affine_hull_subset_span: "(affine hull s) \<subseteq> (span s)"
by (metis hull_minimal span_superset subspace_imp_affine subspace_span)
subsubsection\<^marker>\<open>tag unimportant\<close> \<open>Subspace parallel to an affine set\<close>
(* A set is a subspace iff it is affine and contains 0.  The nontrivial direction
   derives scalar-closure and additive-closure from affineness plus 0 \<in> S. *)
lemma subspace_affine: "subspace S \<longleftrightarrow> affine S \<and> 0 \<in> S"
proof -
have h0: "subspace S \<Longrightarrow> affine S \<and> 0 \<in> S"
using subspace_imp_affine[of S] subspace_0 by auto
{
assume assm: "affine S \<and> 0 \<in> S"
(* Closure under scalar multiples: c *\<^sub>R x is the affine combination of 0 and x
   with weights (1-c) and c. *)
{
fix c :: real
fix x
assume x: "x \<in> S"
have "c *\<^sub>R x = (1-c) *\<^sub>R 0 + c *\<^sub>R x" by auto
moreover
have "(1 - c) *\<^sub>R 0 + c *\<^sub>R x \<in> S"
using affine_alt[of S] assm x by auto
ultimately have "c *\<^sub>R x \<in> S" by auto
}
then have h1: "\<forall>c. \<forall>x \<in> S. c *\<^sub>R x \<in> S" by auto
(* Closure under addition: the midpoint of x and y is in S by affineness, and
   scaling it by 2 (allowed by h1) gives x + y. *)
{
fix x y
assume xy: "x \<in> S" "y \<in> S"
define u where "u = (1 :: real)/2"
have "(1/2) *\<^sub>R (x+y) = (1/2) *\<^sub>R (x+y)"
by auto
moreover
have "(1/2) *\<^sub>R (x+y)=(1/2) *\<^sub>R x + (1-(1/2)) *\<^sub>R y"
by (simp add: algebra_simps)
moreover
have "(1 - u) *\<^sub>R x + u *\<^sub>R y \<in> S"
using affine_alt[of S] assm xy by auto
ultimately
have "(1/2) *\<^sub>R (x+y) \<in> S"
using u_def by auto
moreover
have "x + y = 2 *\<^sub>R ((1/2) *\<^sub>R (x+y))"
by auto
ultimately
have "x + y \<in> S"
using h1[rule_format, of "(1/2) *\<^sub>R (x+y)" "2"] by auto
}
then have "\<forall>x \<in> S. \<forall>y \<in> S. x + y \<in> S"
by auto
then have "subspace S"
using h1 assm unfolding subspace_def by auto
}
then show ?thesis using h0 by metis
qed
(* Translating an affine set by the negative of one of its points yields a
   subspace (it is affine and contains 0). *)
lemma affine_diffs_subspace:
assumes "affine S" "a \<in> S"
shows "subspace ((\<lambda>x. (-a)+x) ` S)"
proof -
have [simp]: "(\<lambda>x. x - a) = plus (- a)" by (simp add: fun_eq_iff)
have "affine ((\<lambda>x. (-a)+x) ` S)"
using affine_translation assms by blast
moreover have "0 \<in> ((\<lambda>x. (-a)+x) ` S)"
using assms exI[of "(\<lambda>x. x\<in>S \<and> -a+x = 0)" a] by auto
ultimately show ?thesis using subspace_affine by auto
qed
(* Same statement phrased via subtraction instead of negated addition. *)
lemma affine_diffs_subspace_subtract:
"subspace ((\<lambda>x. x - a) ` S)" if "affine S" "a \<in> S"
using that affine_diffs_subspace [of _ a] by simp
(* The translate L = S - a is a subspace and is parallel to S. *)
lemma parallel_subspace_explicit:
assumes "affine S"
and "a \<in> S"
assumes "L \<equiv> {y. \<exists>x \<in> S. (-a) + x = y}"
shows "subspace L \<and> affine_parallel S L"
proof -
from assms have "L = plus (- a) ` S" by auto
then have par: "affine_parallel S L"
unfolding affine_parallel_def ..
then have "affine L" using assms parallel_is_affine by auto
moreover have "0 \<in> L"
using assms by auto
ultimately show ?thesis
using subspace_affine par by auto
qed
(* Two parallel subspaces contain each other: since both contain 0, the
   translation offset a must lie in both, forcing B \<subseteq> A. *)
lemma parallel_subspace_aux:
assumes "subspace A"
and "subspace B"
and "affine_parallel A B"
shows "A \<supseteq> B"
proof -
from assms obtain a where a: "\<forall>x. x \<in> A \<longleftrightarrow> a + x \<in> B"
using affine_parallel_expl[of A B] by auto
then have "-a \<in> A"
using assms subspace_0[of B] by auto
then have "a \<in> A"
using assms subspace_neg[of A "-a"] by auto
then show ?thesis
using assms a unfolding subspace_def by auto
qed
(* Two parallel subspaces are equal (apply the aux lemma in both directions). *)
lemma parallel_subspace:
assumes "subspace A"
and "subspace B"
and "affine_parallel A B"
shows "A = B"
proof
show "A \<supseteq> B"
using assms parallel_subspace_aux by auto
show "A \<subseteq> B"
using assms parallel_subspace_aux[of B A] affine_parallel_commut by auto
qed
(* A nonempty affine set has a unique parallel subspace (its direction space). *)
lemma affine_parallel_subspace:
assumes "affine S" "S \<noteq> {}"
shows "\<exists>!L. subspace L \<and> affine_parallel S L"
proof -
have ex: "\<exists>L. subspace L \<and> affine_parallel S L"
using assms parallel_subspace_explicit by auto
{
fix L1 L2
assume ass: "subspace L1 \<and> affine_parallel S L1" "subspace L2 \<and> affine_parallel S L2"
then have "affine_parallel L1 L2"
using affine_parallel_commut[of S L1] affine_parallel_assoc[of L1 S L2] by auto
then have "L1 = L2"
using ass parallel_subspace by auto
}
then show ?thesis using ex by auto
qed
subsection \<open>Affine Dependence\<close>
text "Formalized by Lars Schewe."
(* A set is affinely dependent when some member lies in the affine hull of the
   remaining members. *)
definition\<^marker>\<open>tag important\<close> affine_dependent :: "'a::real_vector set \<Rightarrow> bool"
where "affine_dependent s \<longleftrightarrow> (\<exists>x\<in>s. x \<in> affine hull (s - {x}))"
(* Affine dependence implies linear dependence (affine hull \<subseteq> span). *)
lemma affine_dependent_imp_dependent: "affine_dependent s \<Longrightarrow> dependent s"
unfolding affine_dependent_def dependent_def
using affine_hull_subset_span by auto
(* Affine dependence is monotone under set inclusion. *)
lemma affine_dependent_subset:
"\<lbrakk>affine_dependent s; s \<subseteq> t\<rbrakk> \<Longrightarrow> affine_dependent t"
apply (simp add: affine_dependent_def Bex_def)
apply (blast dest: hull_mono [OF Diff_mono [OF _ subset_refl]])
done
(* Contrapositive forms: independence passes to subsets and to set differences. *)
lemma affine_independent_subset:
shows "\<lbrakk>\<not> affine_dependent t; s \<subseteq> t\<rbrakk> \<Longrightarrow> \<not> affine_dependent s"
by (metis affine_dependent_subset)
lemma affine_independent_Diff:
"\<not> affine_dependent s \<Longrightarrow> \<not> affine_dependent(s - t)"
by (meson Diff_subset affine_dependent_subset)
(* Explicit characterisation: p is affinely dependent iff some finite S \<subseteq> p admits
   a nontrivial weighting summing to 0 whose weighted point-sum is also 0. *)
proposition affine_dependent_explicit:
"affine_dependent p \<longleftrightarrow>
(\<exists>S u. finite S \<and> S \<subseteq> p \<and> sum u S = 0 \<and> (\<exists>v\<in>S. u v \<noteq> 0) \<and> sum (\<lambda>v. u v *\<^sub>R v) S = 0)"
proof -
(* Forward: from x \<in> affine hull (p - {x}) with weights u, give x weight -1;
   the extended weights sum to 0 and the weighted sum vanishes. *)
have "\<exists>S u. finite S \<and> S \<subseteq> p \<and> sum u S = 0 \<and> (\<exists>v\<in>S. u v \<noteq> 0) \<and> (\<Sum>w\<in>S. u w *\<^sub>R w) = 0"
if "(\<Sum>w\<in>S. u w *\<^sub>R w) = x" "x \<in> p" "finite S" "S \<noteq> {}" "S \<subseteq> p - {x}" "sum u S = 1" for x S u
proof (intro exI conjI)
have "x \<notin> S"
using that by auto
then show "(\<Sum>v \<in> insert x S. if v = x then - 1 else u v) = 0"
using that by (simp add: sum_delta_notmem)
show "(\<Sum>w \<in> insert x S. (if w = x then - 1 else u w) *\<^sub>R w) = 0"
using that \<open>x \<notin> S\<close> by (simp add: if_smult sum_delta_notmem cong: if_cong)
qed (use that in auto)
(* Backward: pick v with u v \<noteq> 0, divide the remaining weights by -u v to obtain
   an affine combination of S - {v} equal to v. *)
moreover have "\<exists>x\<in>p. \<exists>S u. finite S \<and> S \<noteq> {} \<and> S \<subseteq> p - {x} \<and> sum u S = 1 \<and> (\<Sum>v\<in>S. u v *\<^sub>R v) = x"
if "(\<Sum>v\<in>S. u v *\<^sub>R v) = 0" "finite S" "S \<subseteq> p" "sum u S = 0" "v \<in> S" "u v \<noteq> 0" for S u v
proof (intro bexI exI conjI)
have "S \<noteq> {v}"
using that by auto
then show "S - {v} \<noteq> {}"
using that by auto
show "(\<Sum>x \<in> S - {v}. - (1 / u v) * u x) = 1"
unfolding sum_distrib_left[symmetric] sum_diff1[OF \<open>finite S\<close>] by (simp add: that)
show "(\<Sum>x\<in>S - {v}. (- (1 / u v) * u x) *\<^sub>R x) = v"
unfolding sum_distrib_left [symmetric] scaleR_scaleR[symmetric]
scaleR_right.sum [symmetric] sum_diff1[OF \<open>finite S\<close>]
using that by auto
show "S - {v} \<subseteq> p - {v}"
using that by auto
qed (use that in auto)
ultimately show ?thesis
unfolding affine_dependent_def affine_hull_explicit by auto
qed
(* For finite S the witness weighting can be taken over all of S
   (zero-pad outside the finite witness subset t). *)
lemma affine_dependent_explicit_finite:
fixes S :: "'a::real_vector set"
assumes "finite S"
shows "affine_dependent S \<longleftrightarrow>
(\<exists>u. sum u S = 0 \<and> (\<exists>v\<in>S. u v \<noteq> 0) \<and> sum (\<lambda>v. u v *\<^sub>R v) S = 0)"
(is "?lhs = ?rhs")
proof
have *: "\<And>vt u v. (if vt then u v else 0) *\<^sub>R v = (if vt then (u v) *\<^sub>R v else 0::'a)"
by auto
assume ?lhs
then obtain t u v where
"finite t" "t \<subseteq> S" "sum u t = 0" "v\<in>t" "u v \<noteq> 0" "(\<Sum>v\<in>t. u v *\<^sub>R v) = 0"
unfolding affine_dependent_explicit by auto
then show ?rhs
apply (rule_tac x="\<lambda>x. if x\<in>t then u x else 0" in exI)
apply (auto simp: * sum.inter_restrict[OF assms, symmetric] Int_absorb1[OF \<open>t\<subseteq>S\<close>])
done
next
(* Converse is immediate: S itself is the finite witness set. *)
assume ?rhs
then obtain u v where "sum u S = 0" "v\<in>S" "u v \<noteq> 0" "(\<Sum>v\<in>S. u v *\<^sub>R v) = 0"
by auto
then show ?lhs unfolding affine_dependent_explicit
using assms by auto
qed
(* Linear dependence of the difference set {x - a | x \<in> s} (with a \<notin> s) gives
   affine dependence of insert a s: shift the witness combination back by a and
   assign a the negated total weight so all weights sum to 0. *)
lemma dependent_imp_affine_dependent:
assumes "dependent {x - a| x . x \<in> s}"
and "a \<notin> s"
shows "affine_dependent (insert a s)"
proof -
from assms(1)[unfolded dependent_explicit] obtain S u v
where obt: "finite S" "S \<subseteq> {x - a |x. x \<in> s}" "v\<in>S" "u v \<noteq> 0" "(\<Sum>v\<in>S. u v *\<^sub>R v) = 0"
by auto
(* t is S translated back into s; the translation is injective. *)
define t where "t = (\<lambda>x. x + a) ` S"
have inj: "inj_on (\<lambda>x. x + a) S"
unfolding inj_on_def by auto
have "0 \<notin> S"
using obt(2) assms(2) unfolding subset_eq by auto
have fin: "finite t" and "t \<subseteq> s"
unfolding t_def using obt(1,2) by auto
then have "finite (insert a t)" and "insert a t \<subseteq> insert a s"
by auto
(* Since a \<notin> t, conditional sums over t collapse to their else-branch. *)
moreover have *: "\<And>P Q. (\<Sum>x\<in>t. (if x = a then P x else Q x)) = (\<Sum>x\<in>t. Q x)"
apply (rule sum.cong)
using \<open>a\<notin>s\<close> \<open>t\<subseteq>s\<close>
apply auto
done
(* The new weights sum to 0: a cancels the total weight on t. *)
have "(\<Sum>x\<in>insert a t. if x = a then - (\<Sum>x\<in>t. u (x - a)) else u (x - a)) = 0"
unfolding sum_clauses(2)[OF fin] * using \<open>a\<notin>s\<close> \<open>t\<subseteq>s\<close> by auto
moreover have "\<exists>v\<in>insert a t. (if v = a then - (\<Sum>x\<in>t. u (x - a)) else u (v - a)) \<noteq> 0"
using obt(3,4) \<open>0\<notin>S\<close>
by (rule_tac x="v + a" in bexI) (auto simp: t_def)
moreover have *: "\<And>P Q. (\<Sum>x\<in>t. (if x = a then P x else Q x) *\<^sub>R x) = (\<Sum>x\<in>t. Q x *\<^sub>R x)"
using \<open>a\<notin>s\<close> \<open>t\<subseteq>s\<close> by (auto intro!: sum.cong)
(* The weighted point-sum over insert a t also vanishes. *)
have "(\<Sum>x\<in>t. u (x - a)) *\<^sub>R a = (\<Sum>v\<in>t. u (v - a) *\<^sub>R v)"
unfolding scaleR_left.sum
unfolding t_def and sum.reindex[OF inj] and o_def
using obt(5)
by (auto simp: sum.distrib scaleR_right_distrib)
then have "(\<Sum>v\<in>insert a t. (if v = a then - (\<Sum>x\<in>t. u (x - a)) else u (v - a)) *\<^sub>R v) = 0"
unfolding sum_clauses(2)[OF fin]
using \<open>a\<notin>s\<close> \<open>t\<subseteq>s\<close>
by (auto simp: *)
ultimately show ?thesis
unfolding affine_dependent_explicit
apply (rule_tac x="insert a t" in exI, auto)
done
qed
(* Cardinality bound: any finite set with more than DIM('a) + 1 points in a
   Euclidean space is affinely dependent (its difference set exceeds the
   dimension, so it is linearly dependent). *)
lemma affine_dependent_biggerset:
fixes s :: "'a::euclidean_space set"
assumes "finite s" "card s \<ge> DIM('a) + 2"
shows "affine_dependent s"
proof -
have "s \<noteq> {}" using assms by auto
then obtain a where "a\<in>s" by auto
have *: "{x - a |x. x \<in> s - {a}} = (\<lambda>x. x - a) ` (s - {a})"
by auto
(* Translation by -a is injective, so the difference set keeps the cardinality. *)
have "card {x - a |x. x \<in> s - {a}} = card (s - {a})"
unfolding * by (simp add: card_image inj_on_def)
also have "\<dots> > DIM('a)" using assms(2)
unfolding card_Diff_singleton[OF \<open>a\<in>s\<close>] by auto
finally show ?thesis
apply (subst insert_Diff[OF \<open>a\<in>s\<close>, symmetric])
apply (rule dependent_imp_affine_dependent)
apply (rule dependent_biggerset, auto)
done
qed
(* Variant bounding by dim S rather than the ambient dimension DIM('a). *)
lemma affine_dependent_biggerset_general:
assumes "finite (S :: 'a::euclidean_space set)"
and "card S \<ge> dim S + 2"
shows "affine_dependent S"
proof -
from assms(2) have "S \<noteq> {}" by auto
then obtain a where "a\<in>S" by auto
have *: "{x - a |x. x \<in> S - {a}} = (\<lambda>x. x - a) ` (S - {a})"
by auto
have **: "card {x - a |x. x \<in> S - {a}} = card (S - {a})"
by (metis (no_types, lifting) "*" card_image diff_add_cancel inj_on_def)
(* Differences of members of S stay inside span S, bounding their dimension. *)
have "dim {x - a |x. x \<in> S - {a}} \<le> dim S"
using \<open>a\<in>S\<close> by (auto simp: span_base span_diff intro: subset_le_dim)
also have "\<dots> < dim S + 1" by auto
also have "\<dots> \<le> card (S - {a})"
using assms card_Diff_singleton[OF \<open>a\<in>S\<close>] by auto
finally show ?thesis
apply (subst insert_Diff[OF \<open>a\<in>S\<close>, symmetric])
apply (rule dependent_imp_affine_dependent)
apply (rule dependent_biggerset_general)
unfolding **
apply auto
done
qed
subsection\<^marker>\<open>tag unimportant\<close> \<open>Some Properties of Affine Dependent Sets\<close>
(* Sets of at most two points are always affinely independent. *)
lemma affine_independent_0 [simp]: "\<not> affine_dependent {}"
by (simp add: affine_dependent_def)
lemma affine_independent_1 [simp]: "\<not> affine_dependent {a}"
by (simp add: affine_dependent_def)
lemma affine_independent_2 [simp]: "\<not> affine_dependent {a,b}"
by (simp add: affine_dependent_def insert_Diff_if hull_same)
(* The affine-hull operator commutes with translation: each inclusion follows
   from minimality of the hull, using the inverse translation for the reverse. *)
lemma affine_hull_translation: "affine hull ((\<lambda>x. a + x) ` S) = (\<lambda>x. a + x) ` (affine hull S)"
proof -
have "affine ((\<lambda>x. a + x) ` (affine hull S))"
using affine_translation affine_affine_hull by blast
moreover have "(\<lambda>x. a + x) ` S \<subseteq> (\<lambda>x. a + x) ` (affine hull S)"
using hull_subset[of S] by auto
ultimately have h1: "affine hull ((\<lambda>x. a + x) ` S) \<subseteq> (\<lambda>x. a + x) ` (affine hull S)"
by (metis hull_minimal)
have "affine((\<lambda>x. -a + x) ` (affine hull ((\<lambda>x. a + x) ` S)))"
using affine_translation affine_affine_hull by blast
moreover have "(\<lambda>x. -a + x) ` (\<lambda>x. a + x) ` S \<subseteq> (\<lambda>x. -a + x) ` (affine hull ((\<lambda>x. a + x) ` S))"
using hull_subset[of "(\<lambda>x. a + x) ` S"] by auto
moreover have "S = (\<lambda>x. -a + x) ` (\<lambda>x. a + x) ` S"
using translation_assoc[of "-a" a] by auto
ultimately have "(\<lambda>x. -a + x) ` (affine hull ((\<lambda>x. a + x) ` S)) >= (affine hull S)"
by (metis hull_minimal)
then have "affine hull ((\<lambda>x. a + x) ` S) >= (\<lambda>x. a + x) ` (affine hull S)"
by auto
then show ?thesis using h1 by auto
qed
(* Affine dependence is preserved by translation: translate the witness point
   and its hull membership via affine_hull_translation. *)
lemma affine_dependent_translation:
assumes "affine_dependent S"
shows "affine_dependent ((\<lambda>x. a + x) ` S)"
proof -
obtain x where x: "x \<in> S \<and> x \<in> affine hull (S - {x})"
using assms affine_dependent_def by auto
have "(+) a ` (S - {x}) = (+) a ` S - {a + x}"
by auto
then have "a + x \<in> affine hull ((\<lambda>x. a + x) ` S - {a + x})"
using affine_hull_translation[of a "S - {x}"] x by auto
moreover have "a + x \<in> (\<lambda>x. a + x) ` S"
using x by auto
ultimately show ?thesis
unfolding affine_dependent_def by auto
qed
(* Translation invariance of affine dependence, as an equivalence
   (apply the previous lemma with the inverse translation -a). *)
lemma affine_dependent_translation_eq:
"affine_dependent S \<longleftrightarrow> affine_dependent ((\<lambda>x. a + x) ` S)"
proof -
{
assume "affine_dependent ((\<lambda>x. a + x) ` S)"
then have "affine_dependent S"
using affine_dependent_translation[of "((\<lambda>x. a + x) ` S)" "-a"] translation_assoc[of "-a" a]
by auto
}
then show ?thesis
using affine_dependent_translation by auto
qed
(* If 0 is in the affine hull of S, then S is linearly dependent: an affine
   combination summing to 0 must have a nonzero coefficient. *)
lemma affine_hull_0_dependent:
assumes "0 \<in> affine hull S"
shows "dependent S"
proof -
obtain s u where s_u: "finite s \<and> s \<noteq> {} \<and> s \<subseteq> S \<and> sum u s = 1 \<and> (\<Sum>v\<in>s. u v *\<^sub>R v) = 0"
using assms affine_hull_explicit[of S] by auto
then have "\<exists>v\<in>s. u v \<noteq> 0" by auto
then have "finite s \<and> s \<subseteq> S \<and> (\<exists>v\<in>s. u v \<noteq> 0 \<and> (\<Sum>v\<in>s. u v *\<^sub>R v) = 0)"
using s_u by auto
then show ?thesis
unfolding dependent_explicit[of S] by auto
qed
(* Affine dependence of insert 0 S forces linear dependence of S: the witness
   point is either a nonzero x \<in> span (S - {x}), or 0 itself, in which case
   affine_hull_0_dependent applies. *)
lemma affine_dependent_imp_dependent2:
assumes "affine_dependent (insert 0 S)"
shows "dependent S"
proof -
obtain x where x: "x \<in> insert 0 S \<and> x \<in> affine hull (insert 0 S - {x})"
using affine_dependent_def[of "(insert 0 S)"] assms by blast
then have "x \<in> span (insert 0 S - {x})"
using affine_hull_subset_span by auto
moreover have "span (insert 0 S - {x}) = span (S - {x})"
using insert_Diff_if[of "0" S "{x}"] span_insert_0[of "S-{x}"] by auto
ultimately have "x \<in> span (S - {x})" by auto
then have "x \<noteq> 0 \<Longrightarrow> dependent S"
using x dependent_def by auto
moreover
{
assume "x = 0"
then have "0 \<in> affine hull S"
using x hull_mono[of "S - {0}" S] by auto
then have "dependent S"
using affine_hull_0_dependent by auto
}
ultimately show ?thesis by auto
qed
(* For a \<notin> S: affine dependence of insert a S is equivalent to linear dependence
   of the -a-translate of S (combine the translation lemmas above). *)
lemma affine_dependent_iff_dependent:
assumes "a \<notin> S"
shows "affine_dependent (insert a S) \<longleftrightarrow> dependent ((\<lambda>x. -a + x) ` S)"
proof -
have "((+) (- a) ` S) = {x - a| x . x \<in> S}" by auto
then show ?thesis
using affine_dependent_translation_eq[of "(insert a S)" "-a"]
affine_dependent_imp_dependent2 assms
dependent_imp_affine_dependent[of a S]
by (auto simp del: uminus_add_conv_diff)
qed
(* Variant for a \<in> S, removing a from S before translating. *)
lemma affine_dependent_iff_dependent2:
assumes "a \<in> S"
shows "affine_dependent S \<longleftrightarrow> dependent ((\<lambda>x. -a + x) ` (S-{a}))"
proof -
have "insert a (S - {a}) = S"
using assms by auto
then show ?thesis
using assms affine_dependent_iff_dependent[of a "S-{a}"] by auto
qed
(* Generalisation of affine_hull_insert_span dropping the a \<notin> s assumption:
   when a \<in> s, insert 0 into the translated set is a no-op on the span. *)
lemma affine_hull_insert_span_gen:
"affine hull (insert a s) = (\<lambda>x. a + x) ` span ((\<lambda>x. - a + x) ` s)"
proof -
have h1: "{x - a |x. x \<in> s} = ((\<lambda>x. -a+x) ` s)"
by auto
{
assume "a \<notin> s"
then have ?thesis
using affine_hull_insert_span[of a s] h1 by auto
}
moreover
{
assume a1: "a \<in> s"
(* a translates to 0, and span_insert_0 absorbs it. *)
have "\<exists>x. x \<in> s \<and> -a+x=0"
apply (rule exI[of _ a])
using a1
apply auto
done
then have "insert 0 ((\<lambda>x. -a+x) ` (s - {a})) = (\<lambda>x. -a+x) ` s"
by auto
then have "span ((\<lambda>x. -a+x) ` (s - {a}))=span ((\<lambda>x. -a+x) ` s)"
using span_insert_0[of "(+) (- a) ` (s - {a})"] by (auto simp del: uminus_add_conv_diff)
moreover have "{x - a |x. x \<in> (s - {a})} = ((\<lambda>x. -a+x) ` (s - {a}))"
by auto
moreover have "insert a (s - {a}) = insert a s"
by auto
ultimately have ?thesis
using affine_hull_insert_span[of "a" "s-{a}"] by auto
}
ultimately show ?thesis by auto
qed
(* Direct consequence for a \<in> s (insert a absorbs into s). *)
lemma affine_hull_span2:
assumes "a \<in> s"
shows "affine hull s = (\<lambda>x. a+x) ` span ((\<lambda>x. -a+x) ` (s-{a}))"
using affine_hull_insert_span_gen[of a "s - {a}", unfolded insert_Diff[OF assms]]
by auto
(* The base point may be taken anywhere in the affine hull, not just in s:
   inserting such a point into s leaves the hull unchanged. *)
lemma affine_hull_span_gen:
assumes "a \<in> affine hull s"
shows "affine hull s = (\<lambda>x. a+x) ` span ((\<lambda>x. -a+x) ` s)"
proof -
have "affine hull (insert a s) = affine hull s"
using hull_redundant[of a affine s] assms by auto
then show ?thesis
using affine_hull_insert_span_gen[of a "s"] by auto
qed
(* When 0 is in the affine hull, the affine hull coincides with the span. *)
lemma affine_hull_span_0:
assumes "0 \<in> affine hull S"
shows "affine hull S = span S"
using affine_hull_span_gen[of "0" S] assms by auto
(* Basis extension, nonempty case: an affinely independent S \<subseteq> V extends to an
   affinely independent T with S \<subseteq> T \<subseteq> V and affine hull T = affine hull V.
   Proof: translate by -a for some a \<in> S, extend the resulting linearly
   independent set to a basis of the translated V, then translate back. *)
lemma extend_to_affine_basis_nonempty:
fixes S V :: "'n::real_vector set"
assumes "\<not> affine_dependent S" "S \<subseteq> V" "S \<noteq> {}"
shows "\<exists>T. \<not> affine_dependent T \<and> S \<subseteq> T \<and> T \<subseteq> V \<and> affine hull T = affine hull V"
proof -
obtain a where a: "a \<in> S"
using assms by auto
then have h0: "independent ((\<lambda>x. -a + x) ` (S-{a}))"
using affine_dependent_iff_dependent2 assms by auto
(* Extend the translated S to a maximal independent B spanning the translated V. *)
obtain B where B:
"(\<lambda>x. -a+x) ` (S - {a}) \<subseteq> B \<and> B \<subseteq> (\<lambda>x. -a+x) ` V \<and> independent B \<and> (\<lambda>x. -a+x) ` V \<subseteq> span B"
using assms
by (blast intro: maximal_independent_subset_extend[OF _ h0, of "(\<lambda>x. -a + x) ` V"])
(* Translate back and re-adjoin a (as the image of 0). *)
define T where "T = (\<lambda>x. a+x) ` insert 0 B"
then have "T = insert a ((\<lambda>x. a+x) ` B)"
by auto
then have "affine hull T = (\<lambda>x. a+x) ` span B"
using affine_hull_insert_span_gen[of a "((\<lambda>x. a+x) ` B)"] translation_assoc[of "-a" a B]
by auto
then have "V \<subseteq> affine hull T"
using B assms translation_inverse_subset[of a V "span B"]
by auto
moreover have "T \<subseteq> V"
using T_def B a assms by auto
ultimately have "affine hull T = affine hull V"
by (metis Int_absorb1 Int_absorb2 hull_hull hull_mono)
moreover have "S \<subseteq> T"
using T_def B translation_inverse_subset[of a "S-{a}" B]
by auto
moreover have "\<not> affine_dependent T"
using T_def affine_dependent_translation_eq[of "insert 0 B"]
affine_dependent_imp_dependent2 B
by auto
ultimately show ?thesis using \<open>T \<subseteq> V\<close> by auto
qed
(* Every set V has an affinely independent subset with the same affine hull. *)
lemma affine_basis_exists:
fixes V :: "'n::real_vector set"
shows "\<exists>B. B \<subseteq> V \<and> \<not> affine_dependent B \<and> affine hull V = affine hull B"
proof (cases "V = {}")
case True
then show ?thesis
using affine_independent_0 by auto
next
case False
then obtain x where "x \<in> V" by auto
then show ?thesis
using affine_dependent_def[of "{x}"] extend_to_affine_basis_nonempty[of "{x}" V]
by auto
qed
(* Basis extension without the nonemptiness assumption on S. *)
proposition extend_to_affine_basis:
fixes S V :: "'n::real_vector set"
assumes "\<not> affine_dependent S" "S \<subseteq> V"
obtains T where "\<not> affine_dependent T" "S \<subseteq> T" "T \<subseteq> V" "affine hull T = affine hull V"
proof (cases "S = {}")
case True then show ?thesis
using affine_basis_exists by (metis empty_subsetI that)
next
case False
then show ?thesis by (metis assms extend_to_affine_basis_nonempty that)
qed
subsection \<open>Affine Dimension of a Set\<close>
(* The affine dimension of V: the integer d such that some affinely independent
   B with the same affine hull has card B = d + 1.  An int (not nat) so the
   empty set can have dimension -1. *)
definition\<^marker>\<open>tag important\<close> aff_dim :: "('a::euclidean_space) set \<Rightarrow> int"
where "aff_dim V =
(SOME d :: int.
\<exists>B. affine hull B = affine hull V \<and> \<not> affine_dependent B \<and> of_nat (card B) = d + 1)"
(* The SOME in aff_dim is well-defined: a witness basis B always exists. *)
lemma aff_dim_basis_exists:
fixes V :: "('n::euclidean_space) set"
shows "\<exists>B. affine hull B = affine hull V \<and> \<not> affine_dependent B \<and> of_nat (card B) = aff_dim V + 1"
proof -
obtain B where "\<not> affine_dependent B \<and> affine hull B = affine hull V"
using affine_basis_exists[of V] by auto
then show ?thesis
unfolding aff_dim_def
some_eq_ex[of "\<lambda>d. \<exists>B. affine hull B = affine hull V \<and> \<not> affine_dependent B \<and> of_nat (card B) = d + 1"]
apply auto
apply (rule exI[of _ "int (card B) - (1 :: int)"])
apply (rule exI[of _ "B"], auto)
done
qed
(* The affine hull is empty exactly when the set is. *)
lemma affine_hull_eq_empty [simp]: "affine hull S = {} \<longleftrightarrow> S = {}"
by (metis affine_empty subset_empty subset_hull)
lemma empty_eq_affine_hull[simp]: "{} = affine hull S \<longleftrightarrow> S = {}"
by (metis affine_hull_eq_empty)
(* Technical lemma: an affinely independent B with a \<in> B is finite, and
   card B - 1 is the dimension of the span of B - {a} translated to 0. *)
lemma aff_dim_parallel_subspace_aux:
  fixes B :: "'n::euclidean_space set"
  assumes "\<not> affine_dependent B" "a \<in> B"
  shows "finite B \<and> ((card B) - 1 = dim (span ((\<lambda>x. -a+x) ` (B-{a}))))"
proof -
  have "independent ((\<lambda>x. -a + x) ` (B-{a}))"
    using affine_dependent_iff_dependent2 assms by auto
  then have fin: "dim (span ((\<lambda>x. -a+x) ` (B-{a}))) = card ((\<lambda>x. -a + x) ` (B-{a}))"
    "finite ((\<lambda>x. -a + x) ` (B - {a}))"
    using indep_card_eq_dim_span[of "(\<lambda>x. -a+x) ` (B-{a})"] by auto
  show ?thesis
  proof (cases "(\<lambda>x. -a + x) ` (B - {a}) = {}")
    case True
    (* Translated difference set empty means B is the singleton {a}. *)
    have "B = insert a ((\<lambda>x. a + x) ` (\<lambda>x. -a + x) ` (B - {a}))"
      using translation_assoc[of "a" "-a" "(B - {a})"] assms by auto
    then have "B = {a}" using True by auto
    then show ?thesis using assms fin by auto
  next
    case False
    then have "card ((\<lambda>x. -a + x) ` (B - {a})) > 0"
      using fin by auto
    moreover have h1: "card ((\<lambda>x. -a + x) ` (B-{a})) = card (B-{a})"
      by (rule card_image) (use translate_inj_on in blast)
    ultimately have "card (B-{a}) > 0" by auto
    then have *: "finite (B - {a})"
      using card_gt_0_iff[of "(B - {a})"] by auto
    then have "card (B - {a}) = card B - 1"
      using card_Diff_singleton assms by auto
    with * show ?thesis using fin h1 by auto
  qed
qed

(* aff_dim of a nonempty set equals the dimension of any subspace that is
   parallel to its affine hull. *)
lemma aff_dim_parallel_subspace:
  fixes V L :: "'n::euclidean_space set"
  assumes "V \<noteq> {}"
    and "subspace L"
    and "affine_parallel (affine hull V) L"
  shows "aff_dim V = int (dim L)"
proof -
  obtain B where
    B: "affine hull B = affine hull V \<and> \<not> affine_dependent B \<and> int (card B) = aff_dim V + 1"
    using aff_dim_basis_exists by auto
  then have "B \<noteq> {}"
    using assms B
    by auto
  then obtain a where a: "a \<in> B" by auto
  define Lb where "Lb = span ((\<lambda>x. -a+x) ` (B-{a}))"
  moreover have "affine_parallel (affine hull B) Lb"
    using Lb_def B assms affine_hull_span2[of a B] a
      affine_parallel_commut[of "Lb" "(affine hull B)"]
    unfolding affine_parallel_def
    by auto
  moreover have "subspace Lb"
    using Lb_def subspace_span by auto
  moreover have "affine hull B \<noteq> {}"
    using assms B by auto
  ultimately have "L = Lb"
    using assms affine_parallel_subspace[of "affine hull B"] affine_affine_hull[of B] B
    by auto
  then have "dim L = dim Lb"
    by auto
  moreover have "card B - 1 = dim Lb" and "finite B"
    using Lb_def aff_dim_parallel_subspace_aux a B by auto
  ultimately show ?thesis
    using B \<open>B \<noteq> {}\<close> card_gt_0_iff[of B] by auto
qed
(* Affinely independent sets are finite. *)
lemma aff_independent_finite:
  fixes B :: "'n::euclidean_space set"
  assumes "\<not> affine_dependent B"
  shows "finite B"
proof -
  {
    assume "B \<noteq> {}"
    then obtain a where "a \<in> B" by auto
    then have ?thesis
      using aff_dim_parallel_subspace_aux assms by auto
  }
  then show ?thesis by auto
qed

(* The empty set is the unique set of affine dimension -1. *)
lemma aff_dim_empty:
  fixes S :: "'n::euclidean_space set"
  shows "S = {} \<longleftrightarrow> aff_dim S = -1"
proof -
  obtain B where *: "affine hull B = affine hull S"
    and "\<not> affine_dependent B"
    and "int (card B) = aff_dim S + 1"
    using aff_dim_basis_exists by auto
  moreover
  from * have "S = {} \<longleftrightarrow> B = {}"
    by auto
  ultimately show ?thesis
    using aff_independent_finite[of B] card_gt_0_iff[of B] by auto
qed

lemma aff_dim_empty_eq [simp]: "aff_dim ({}::'a::euclidean_space set) = -1"
  by (simp add: aff_dim_empty [symmetric])

(* Affine dimension depends only on the affine hull. *)
lemma aff_dim_affine_hull [simp]: "aff_dim (affine hull S) = aff_dim S"
  unfolding aff_dim_def using hull_hull[of _ S] by auto

lemma aff_dim_affine_hull2:
  assumes "affine hull S = affine hull T"
  shows "aff_dim S = aff_dim T"
  unfolding aff_dim_def using assms by auto

(* Any affine basis of V has exactly aff_dim V + 1 elements, so the SOME
   in the definition of aff_dim is uniquely determined. *)
lemma aff_dim_unique:
  fixes B V :: "'n::euclidean_space set"
  assumes "affine hull B = affine hull V \<and> \<not> affine_dependent B"
  shows "of_nat (card B) = aff_dim V + 1"
proof (cases "B = {}")
  case True
  then have "V = {}"
    using assms
    by auto
  then have "aff_dim V = (-1::int)"
    using aff_dim_empty by auto
  then show ?thesis
    using \<open>B = {}\<close> by auto
next
  case False
  then obtain a where a: "a \<in> B" by auto
  define Lb where "Lb = span ((\<lambda>x. -a+x) ` (B-{a}))"
  have "affine_parallel (affine hull B) Lb"
    using Lb_def affine_hull_span2[of a B] a
      affine_parallel_commut[of "Lb" "(affine hull B)"]
    unfolding affine_parallel_def by auto
  moreover have "subspace Lb"
    using Lb_def subspace_span by auto
  ultimately have "aff_dim B = int(dim Lb)"
    using aff_dim_parallel_subspace[of B Lb] \<open>B \<noteq> {}\<close> by auto
  moreover have "(card B) - 1 = dim Lb" "finite B"
    using Lb_def aff_dim_parallel_subspace_aux a assms by auto
  ultimately have "of_nat (card B) = aff_dim B + 1"
    using \<open>B \<noteq> {}\<close> card_gt_0_iff[of B] by auto
  then show ?thesis
    using aff_dim_affine_hull2 assms by auto
qed
(* For affinely independent B: card B = aff_dim B + 1. *)
lemma aff_dim_affine_independent:
  fixes B :: "'n::euclidean_space set"
  assumes "\<not> affine_dependent B"
  shows "of_nat (card B) = aff_dim B + 1"
  using aff_dim_unique[of B B] assms by auto

(* Affine independence characterised purely by cardinality. *)
lemma affine_independent_iff_card:
  fixes s :: "'a::euclidean_space set"
  shows "\<not> affine_dependent s \<longleftrightarrow> finite s \<and> aff_dim s = int(card s) - 1"
  apply (rule iffI)
  apply (simp add: aff_dim_affine_independent aff_independent_finite)
  by (metis affine_basis_exists [of s] aff_dim_unique card_subset_eq diff_add_cancel of_nat_eq_iff)

(* Singletons have affine dimension 0. *)
lemma aff_dim_sing [simp]:
  fixes a :: "'n::euclidean_space"
  shows "aff_dim {a} = 0"
  using aff_dim_affine_independent[of "{a}"] affine_independent_1 by auto

(* Doubletons have affine dimension 1 (0 if the points coincide). *)
lemma aff_dim_2 [simp]:
  fixes a :: "'n::euclidean_space"
  shows "aff_dim {a,b} = (if a = b then 0 else 1)"
proof (clarsimp)
  assume "a \<noteq> b"
  then have "aff_dim{a,b} = card{a,b} - 1"
    using affine_independent_2 [of a b] aff_dim_affine_independent by fastforce
  also have "\<dots> = 1"
    using \<open>a \<noteq> b\<close> by simp
  finally show "aff_dim {a, b} = 1" .
qed

(* An affine basis of V can be chosen inside V itself. *)
lemma aff_dim_inner_basis_exists:
  fixes V :: "('n::euclidean_space) set"
  shows "\<exists>B. B \<subseteq> V \<and> affine hull B = affine hull V \<and>
    \<not> affine_dependent B \<and> of_nat (card B) = aff_dim V + 1"
proof -
  obtain B where B: "\<not> affine_dependent B" "B \<subseteq> V" "affine hull B = affine hull V"
    using affine_basis_exists[of V] by auto
  then have "of_nat(card B) = aff_dim V+1" using aff_dim_unique by auto
  with B show ?thesis by auto
qed

(* Finite sets bound their affine dimension by card V - 1. *)
lemma aff_dim_le_card:
  fixes V :: "'n::euclidean_space set"
  assumes "finite V"
  shows "aff_dim V \<le> of_nat (card V) - 1"
proof -
  obtain B where B: "B \<subseteq> V" "of_nat (card B) = aff_dim V + 1"
    using aff_dim_inner_basis_exists[of V] by auto
  then have "card B \<le> card V"
    using assms card_mono by auto
  with B show ?thesis by auto
qed

(* Sets with parallel affine hulls have equal affine dimension. *)
lemma aff_dim_parallel_eq:
  fixes S T :: "'n::euclidean_space set"
  assumes "affine_parallel (affine hull S) (affine hull T)"
  shows "aff_dim S = aff_dim T"
proof -
  {
    assume "T \<noteq> {}" "S \<noteq> {}"
    then obtain L where L: "subspace L \<and> affine_parallel (affine hull T) L"
      using affine_parallel_subspace[of "affine hull T"]
        affine_affine_hull[of T]
      by auto
    then have "aff_dim T = int (dim L)"
      using aff_dim_parallel_subspace \<open>T \<noteq> {}\<close> by auto
    moreover have *: "subspace L \<and> affine_parallel (affine hull S) L"
      using L affine_parallel_assoc[of "affine hull S" "affine hull T" L] assms by auto
    moreover from * have "aff_dim S = int (dim L)"
      using aff_dim_parallel_subspace \<open>S \<noteq> {}\<close> by auto
    ultimately have ?thesis by auto
  }
  moreover
  {
    assume "S = {}"
    then have "S = {}" and "T = {}"
      using assms
      unfolding affine_parallel_def
      by auto
    then have ?thesis using aff_dim_empty by auto
  }
  moreover
  {
    assume "T = {}"
    then have "S = {}" and "T = {}"
      using assms
      unfolding affine_parallel_def
      by auto
    then have ?thesis
      using aff_dim_empty by auto
  }
  ultimately show ?thesis by blast
qed
(* Affine dimension is translation invariant. *)
lemma aff_dim_translation_eq:
  "aff_dim ((+) a ` S) = aff_dim S" for a :: "'n::euclidean_space"
proof -
  have "affine_parallel (affine hull S) (affine hull ((\<lambda>x. a + x) ` S))"
    unfolding affine_parallel_def
    apply (rule exI[of _ "a"])
    using affine_hull_translation[of a S]
    apply auto
    done
  then show ?thesis
    using aff_dim_parallel_eq[of S "(\<lambda>x. a + x) ` S"] by auto
qed

lemma aff_dim_translation_eq_subtract:
  "aff_dim ((\<lambda>x. x - a) ` S) = aff_dim S" for a :: "'n::euclidean_space"
  using aff_dim_translation_eq [of "- a"] by (simp cong: image_cong_simp)

(* For a nonempty affine set, aff_dim equals the dimension of any parallel
   subspace. *)
lemma aff_dim_affine:
  fixes S L :: "'n::euclidean_space set"
  assumes "S \<noteq> {}"
    and "affine S"
    and "subspace L"
    and "affine_parallel S L"
  shows "aff_dim S = int (dim L)"
proof -
  have *: "affine hull S = S"
    using assms affine_hull_eq[of S] by auto
  then have "affine_parallel (affine hull S) L"
    using assms by (simp add: *)
  then show ?thesis
    using assms aff_dim_parallel_subspace[of S L] by blast
qed

(* Taking the affine hull does not change the linear dimension. *)
lemma dim_affine_hull:
  fixes S :: "'n::euclidean_space set"
  shows "dim (affine hull S) = dim S"
proof -
  have "dim (affine hull S) \<ge> dim S"
    using dim_subset by auto
  moreover have "dim (span S) \<ge> dim (affine hull S)"
    using dim_subset affine_hull_subset_span by blast
  moreover have "dim (span S) = dim S"
    using dim_span by auto
  ultimately show ?thesis by auto
qed

(* For subspaces, affine dimension and linear dimension agree. *)
lemma aff_dim_subspace:
  fixes S :: "'n::euclidean_space set"
  assumes "subspace S"
  shows "aff_dim S = int (dim S)"
proof (cases "S={}")
  case True with assms show ?thesis
    by (simp add: subspace_affine)
next
  case False
  with aff_dim_affine[of S S] assms subspace_imp_affine[of S] affine_parallel_reflex[of S] subspace_affine
  show ?thesis by auto
qed

(* If the affine hull contains 0, affine and linear dimension agree. *)
lemma aff_dim_zero:
  fixes S :: "'n::euclidean_space set"
  assumes "0 \<in> affine hull S"
  shows "aff_dim S = int (dim S)"
proof -
  have "subspace (affine hull S)"
    using subspace_affine[of "affine hull S"] affine_affine_hull assms
    by auto
  then have "aff_dim (affine hull S) = int (dim (affine hull S))"
    using assms aff_dim_subspace[of "affine hull S"] by auto
  then show ?thesis
    using aff_dim_affine_hull[of S] dim_affine_hull[of S]
    by auto
qed

(* aff_dim computed by translating any point of the hull to the origin. *)
lemma aff_dim_eq_dim:
  "aff_dim S = int (dim ((+) (- a) ` S))" if "a \<in> affine hull S"
  for S :: "'n::euclidean_space set"
proof -
  have "0 \<in> affine hull (+) (- a) ` S"
    unfolding affine_hull_translation
    using that by (simp add: ac_simps)
  with aff_dim_zero show ?thesis
    by (metis aff_dim_translation_eq)
qed

lemma aff_dim_eq_dim_subtract:
  "aff_dim S = int (dim ((\<lambda>x. x - a) ` S))" if "a \<in> affine hull S"
  for S :: "'n::euclidean_space set"
  using aff_dim_eq_dim [of a] that by (simp cong: image_cong_simp)

lemma aff_dim_UNIV [simp]: "aff_dim (UNIV :: 'n::euclidean_space set) = int(DIM('n))"
  using aff_dim_subspace[of "(UNIV :: 'n::euclidean_space set)"]
    dim_UNIV[where 'a="'n::euclidean_space"]
  by auto

(* Affine dimension is bounded below by -1. *)
lemma aff_dim_geq:
  fixes V :: "'n::euclidean_space set"
  shows "aff_dim V \<ge> -1"
proof -
  obtain B where "affine hull B = affine hull V"
    and "\<not> affine_dependent B"
    and "int (card B) = aff_dim V + 1"
    using aff_dim_basis_exists by auto
  then show ?thesis by auto
qed

lemma aff_dim_negative_iff [simp]:
  fixes S :: "'n::euclidean_space set"
  shows "aff_dim S < 0 \<longleftrightarrow>S = {}"
  by (metis aff_dim_empty aff_dim_geq diff_0 eq_iff zle_diff1_eq)
(* A set of affine dimension below DIM('a) lies in some hyperplane. *)
lemma aff_lowdim_subset_hyperplane:
  fixes S :: "'a::euclidean_space set"
  assumes "aff_dim S < DIM('a)"
  obtains a b where "a \<noteq> 0" "S \<subseteq> {x. a \<bullet> x = b}"
proof (cases "S={}")
  case True
  moreover
  have "(SOME b. b \<in> Basis) \<noteq> 0"
    by (metis norm_some_Basis norm_zero zero_neq_one)
  ultimately show ?thesis
    using that by blast
next
  case False
  then obtain c S' where "c \<notin> S'" "S = insert c S'"
    by (meson equals0I mk_disjoint_insert)
  have "dim ((+) (-c) ` S) < DIM('a)"
    by (metis \<open>S = insert c S'\<close> aff_dim_eq_dim assms hull_inc insertI1 of_nat_less_imp_less)
  then obtain a where "a \<noteq> 0" "span ((+) (-c) ` S) \<subseteq> {x. a \<bullet> x = 0}"
    using lowdim_subset_hyperplane by blast
  moreover
  have "a \<bullet> w = a \<bullet> c" if "span ((+) (- c) ` S) \<subseteq> {x. a \<bullet> x = 0}" "w \<in> S" for w
  proof -
    have "w-c \<in> span ((+) (- c) ` S)"
      by (simp add: span_base \<open>w \<in> S\<close>)
    with that have "w-c \<in> {x. a \<bullet> x = 0}"
      by blast
    then show ?thesis
      by (auto simp: algebra_simps)
  qed
  ultimately have "S \<subseteq> {x. a \<bullet> x = a \<bullet> c}"
    by blast
  then show ?thesis
    by (rule that[OF \<open>a \<noteq> 0\<close>])
qed

(* Cardinality of an affinely independent set via its difference space. *)
lemma affine_independent_card_dim_diffs:
  fixes S :: "'a :: euclidean_space set"
  assumes "\<not> affine_dependent S" "a \<in> S"
  shows "card S = dim ((\<lambda>x. x - a) ` S) + 1"
proof -
  have non: "\<not> affine_dependent (insert a S)"
    by (simp add: assms insert_absorb)
  have "finite S"
    by (meson assms aff_independent_finite)
  with \<open>a \<in> S\<close> have "card S \<noteq> 0" by auto
  moreover have "dim ((\<lambda>x. x - a) ` S) = card S - 1"
    using aff_dim_eq_dim_subtract aff_dim_unique \<open>a \<in> S\<close> hull_inc insert_absorb non by fastforce
  ultimately show ?thesis
    by auto
qed

(* Affinely independent subsets of V have at most aff_dim V + 1 elements. *)
lemma independent_card_le_aff_dim:
  fixes B :: "'n::euclidean_space set"
  assumes "B \<subseteq> V"
  assumes "\<not> affine_dependent B"
  shows "int (card B) \<le> aff_dim V + 1"
proof -
  obtain T where T: "\<not> affine_dependent T \<and> B \<subseteq> T \<and> T \<subseteq> V \<and> affine hull T = affine hull V"
    by (metis assms extend_to_affine_basis[of B V])
  then have "of_nat (card T) = aff_dim V + 1"
    using aff_dim_unique by auto
  then show ?thesis
    using T card_mono[of T B] aff_independent_finite[of T] by auto
qed

(* Monotonicity of affine dimension under inclusion. *)
lemma aff_dim_subset:
  fixes S T :: "'n::euclidean_space set"
  assumes "S \<subseteq> T"
  shows "aff_dim S \<le> aff_dim T"
proof -
  obtain B where B: "\<not> affine_dependent B" "B \<subseteq> S" "affine hull B = affine hull S"
    "of_nat (card B) = aff_dim S + 1"
    using aff_dim_inner_basis_exists[of S] by auto
  then have "int (card B) \<le> aff_dim T + 1"
    using assms independent_card_le_aff_dim[of B T] by auto
  with B show ?thesis by auto
qed

(* Affine dimension never exceeds the dimension of the ambient space. *)
lemma aff_dim_le_DIM:
  fixes S :: "'n::euclidean_space set"
  shows "aff_dim S \<le> int (DIM('n))"
proof -
  have "aff_dim (UNIV :: 'n::euclidean_space set) = int(DIM('n))"
    using aff_dim_UNIV by auto
  then show "aff_dim (S:: 'n::euclidean_space set) \<le> int(DIM('n))"
    using aff_dim_subset[of S "(UNIV :: ('n::euclidean_space) set)"] subset_UNIV by auto
qed
(* Nested affine sets of equal affine dimension coincide. *)
lemma affine_dim_equal:
  fixes S :: "'n::euclidean_space set"
  assumes "affine S" "affine T" "S \<noteq> {}" "S \<subseteq> T" "aff_dim S = aff_dim T"
  shows "S = T"
proof -
  obtain a where "a \<in> S" using assms by auto
  then have "a \<in> T" using assms by auto
  define LS where "LS = {y. \<exists>x \<in> S. (-a) + x = y}"
  then have ls: "subspace LS" "affine_parallel S LS"
    using assms parallel_subspace_explicit[of S a LS] \<open>a \<in> S\<close> by auto
  then have h1: "int(dim LS) = aff_dim S"
    using assms aff_dim_affine[of S LS] by auto
  have "T \<noteq> {}" using assms by auto
  define LT where "LT = {y. \<exists>x \<in> T. (-a) + x = y}"
  then have lt: "subspace LT \<and> affine_parallel T LT"
    using assms parallel_subspace_explicit[of T a LT] \<open>a \<in> T\<close> by auto
  then have "int(dim LT) = aff_dim T"
    using assms aff_dim_affine[of T LT] \<open>T \<noteq> {}\<close> by auto
  then have "dim LS = dim LT"
    using h1 assms by auto
  moreover have "LS \<le> LT"
    using LS_def LT_def assms by auto
  ultimately have "LS = LT"
    using subspace_dim_equal[of LS LT] ls lt by auto
  moreover have "S = {x. \<exists>y \<in> LS. a+y=x}"
    using LS_def by auto
  moreover have "T = {x. \<exists>y \<in> LT. a+y=x}"
    using LT_def by auto
  ultimately show ?thesis by auto
qed

(* Affine dimension 0 characterises singletons. *)
lemma aff_dim_eq_0:
  fixes S :: "'a::euclidean_space set"
  shows "aff_dim S = 0 \<longleftrightarrow> (\<exists>a. S = {a})"
proof (cases "S = {}")
  case True
  then show ?thesis
    by auto
next
  case False
  then obtain a where "a \<in> S" by auto
  show ?thesis
  proof safe
    assume 0: "aff_dim S = 0"
    have "\<not> {a,b} \<subseteq> S" if "b \<noteq> a" for b
      by (metis "0" aff_dim_2 aff_dim_subset not_one_le_zero that)
    then show "\<exists>a. S = {a}"
      using \<open>a \<in> S\<close> by blast
  qed auto
qed

(* Full-dimensional sets have affine hull UNIV. *)
lemma affine_hull_UNIV:
  fixes S :: "'n::euclidean_space set"
  assumes "aff_dim S = int(DIM('n))"
  shows "affine hull S = (UNIV :: ('n::euclidean_space) set)"
proof -
  have "S \<noteq> {}"
    using assms aff_dim_empty[of S] by auto
  have h0: "S \<subseteq> affine hull S"
    using hull_subset[of S _] by auto
  have h1: "aff_dim (UNIV :: ('n::euclidean_space) set) = aff_dim S"
    using aff_dim_UNIV assms by auto
  then have h2: "aff_dim (affine hull S) \<le> aff_dim (UNIV :: ('n::euclidean_space) set)"
    using aff_dim_le_DIM[of "affine hull S"] assms h0 by auto
  have h3: "aff_dim S \<le> aff_dim (affine hull S)"
    using h0 aff_dim_subset[of S "affine hull S"] assms by auto
  then have h4: "aff_dim (affine hull S) = aff_dim (UNIV :: ('n::euclidean_space) set)"
    using h0 h1 h2 by auto
  then show ?thesis
    using affine_dim_equal[of "affine hull S" "(UNIV :: ('n::euclidean_space) set)"]
      affine_affine_hull[of S] affine_UNIV assms h4 h0 \<open>S \<noteq> {}\<close>
    by auto
qed

(* Disjoint subsets of an affinely independent set have disjoint affine
   hulls: a common point would yield a nontrivial affine dependence on s. *)
lemma disjoint_affine_hull:
  fixes s :: "'n::euclidean_space set"
  assumes "\<not> affine_dependent s" "t \<subseteq> s" "u \<subseteq> s" "t \<inter> u = {}"
  shows "(affine hull t) \<inter> (affine hull u) = {}"
proof -
  from assms(1) have "finite s"
    by (simp add: aff_independent_finite)
  with assms(2,3) have "finite t" "finite u"
    by (blast intro: finite_subset)+
  have False if "y \<in> affine hull t" and "y \<in> affine hull u" for y
  proof -
    from that obtain a b
      where a1 [simp]: "sum a t = 1"
        and [simp]: "sum (\<lambda>v. a v *\<^sub>R v) t = y"
        and [simp]: "sum b u = 1" "sum (\<lambda>v. b v *\<^sub>R v) u = y"
      by (auto simp: affine_hull_finite \<open>finite t\<close> \<open>finite u\<close>)
    define c where "c x = (if x \<in> t then a x else if x \<in> u then -(b x) else 0)" for x
    from assms(2,3,4) have [simp]: "s \<inter> t = t" "s \<inter> - t \<inter> u = u"
      by auto
    have "sum c s = 0"
      by (simp add: c_def comm_monoid_add_class.sum.If_cases \<open>finite s\<close> sum_negf)
    moreover have "\<not> (\<forall>v\<in>s. c v = 0)"
      by (metis (no_types) IntD1 \<open>s \<inter> t = t\<close> a1 c_def sum.neutral zero_neq_one)
    moreover have "(\<Sum>v\<in>s. c v *\<^sub>R v) = 0"
      by (simp add: c_def if_smult sum_negf comm_monoid_add_class.sum.If_cases \<open>finite s\<close>)
    ultimately show ?thesis
      using assms(1) \<open>finite s\<close> by (auto simp: affine_dependent_explicit)
  qed
  then show ?thesis by blast
qed
end
|
{"author": "seL4", "repo": "isabelle", "sha": "e1ab32a3bb41728cd19541063283e37919978a4c", "save_path": "github-repos/isabelle/seL4-isabelle", "path": "github-repos/isabelle/seL4-isabelle/isabelle-e1ab32a3bb41728cd19541063283e37919978a4c/src/HOL/Analysis/Affine.thy"}
|
"""
DCMotor
=======
"""
import time
import RPi.GPIO as GPIO
# Motor-driver pin assignments (BCM numbering — see GPIO.setmode(GPIO.BCM)
# in DCMotor.__init__). Channel A drives the right motor, channel B the left
# (per motor_right/motor_left below).
Motor_A_EN = 4      # channel A enable (PWM)
Motor_B_EN = 17     # channel B enable (PWM)
Motor_A_Pin1 = 26   # channel A direction pins
Motor_A_Pin2 = 21
Motor_B_Pin1 = 27   # channel B direction pins
Motor_B_Pin2 = 18
# Direction codes accepted by DCMotor.motor_left/motor_right.
forward = 0
backward = 1
# Module-level PWM controller handles; replaced by GPIO.PWM objects in
# DCMotor.__init__ (0 is just a pre-init placeholder).
pwm_A = 0
pwm_B = 0
class DCMotor:
    """Controls the two DC drive motors through the Raspberry Pi GPIO."""
    def __init__(self):
        """Connect and initialise the DC-motor GPIO pins.

        Also creates the two PWM controllers and stores them in the
        module-level globals ``pwm_A`` / ``pwm_B`` used by the motor_*
        methods.
        """
        global pwm_A, pwm_B
        GPIO.setwarnings(False)
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(Motor_A_EN, GPIO.OUT)
        GPIO.setup(Motor_B_EN, GPIO.OUT)
        GPIO.setup(Motor_A_Pin1, GPIO.OUT)
        GPIO.setup(Motor_A_Pin2, GPIO.OUT)
        GPIO.setup(Motor_B_Pin1, GPIO.OUT)
        GPIO.setup(Motor_B_Pin2, GPIO.OUT)
        self.motorStop()  # start from a known (stopped) state
        try:
            # 1000 Hz PWM on each motor-driver enable pin.
            pwm_A = GPIO.PWM(Motor_A_EN, 1000)
            pwm_B = GPIO.PWM(Motor_B_EN, 1000)
        except:
            # NOTE(review): bare except silently ignores PWM creation
            # failures (presumably when the channel already exists) —
            # confirm this best-effort behaviour is intended.
            pass
    def motor_right(self, status, direction, speed):
        """Drive the right motor (channel A).

        status: 0 stops the motor; any other value runs it.
        direction: module-level ``forward`` (0) or ``backward`` (1).
        speed: PWM duty cycle, 0-100.
        """
        if status == 0: # stop
            GPIO.output(Motor_A_Pin1, GPIO.LOW)
            GPIO.output(Motor_A_Pin2, GPIO.LOW)
            GPIO.output(Motor_A_EN, GPIO.LOW)
        else:
            if direction == forward:
                GPIO.output(Motor_A_Pin1, GPIO.LOW)
                GPIO.output(Motor_A_Pin2, GPIO.HIGH)
                pwm_A.start(0)
                pwm_A.ChangeDutyCycle(speed)
            elif direction == backward:
                GPIO.output(Motor_A_Pin1, GPIO.HIGH)
                GPIO.output(Motor_A_Pin2, GPIO.LOW)
                # NOTE(review): starts at duty 100 here but 0 in the forward
                # branch; the immediate ChangeDutyCycle masks it, but the
                # asymmetry looks unintentional — confirm.
                pwm_A.start(100)
                pwm_A.ChangeDutyCycle(speed)
    def motor_left(self, status, direction, speed):
        """Drive the left motor (channel B).

        Same contract as motor_right: status 0 stops, direction is
        ``forward``/``backward``, speed is the PWM duty cycle (0-100).
        """
        if status == 0: # stop
            GPIO.output(Motor_B_Pin1, GPIO.LOW)
            GPIO.output(Motor_B_Pin2, GPIO.LOW)
            GPIO.output(Motor_B_EN, GPIO.LOW)
        else:
            if direction == forward:
                GPIO.output(Motor_B_Pin1, GPIO.LOW)
                GPIO.output(Motor_B_Pin2, GPIO.HIGH)
                pwm_B.start(0)
                pwm_B.ChangeDutyCycle(speed)
            elif direction == backward:
                GPIO.output(Motor_B_Pin1, GPIO.HIGH)
                GPIO.output(Motor_B_Pin2, GPIO.LOW)
                # NOTE(review): same start(100)/start(0) asymmetry as in
                # motor_right — confirm intended.
                pwm_B.start(100)
                pwm_B.ChangeDutyCycle(speed)
    def move(self, speed, direction, turn, radius=0.8): # 0 < radius <= 1
        """Move the robot by driving both motors.

        speed: base duty cycle (0-100).
        direction: 'forward' or 'backward'; anything else is a no-op.
        turn: differential term; positive slows the left side and speeds
            the right, scaled by ``radius``.
        NOTE(review): speed +/- turn*radius is not clamped to 0-100 —
        confirm callers keep it in range.
        """
        if direction == 'forward':
            self.motor_left(1, forward, speed - (turn * radius))
            self.motor_right(1, forward, speed + (turn * radius))
        elif direction == 'backward':
            self.motor_left(1, backward, speed - (turn * radius))
            self.motor_right(1, backward, speed + (turn * radius))
        else:
            pass
    def motorStop(self):
        """Stop both motors by pulling all direction and enable pins low."""
        GPIO.output(Motor_A_Pin1, GPIO.LOW)
        GPIO.output(Motor_A_Pin2, GPIO.LOW)
        GPIO.output(Motor_B_Pin1, GPIO.LOW)
        GPIO.output(Motor_B_Pin2, GPIO.LOW)
        GPIO.output(Motor_A_EN, GPIO.LOW)
        GPIO.output(Motor_B_EN, GPIO.LOW)
"""
ServoMotor
==========
"""
import Adafruit_PCA9685
import threading
# PCA9685 16-channel servo driver, run at 50 Hz (module-level side effect:
# the board is initialised at import time).
pwm = Adafruit_PCA9685.PCA9685()
pwm.set_pwm_freq(50)
# Default neutral PWM tick values for servos 0-2 (refined per-servo by
# ServoMotor.initConfig).
init_pwm0 = 300
init_pwm1 = 300
init_pwm2 = 300
class ServoMotor:
    """
    Controls the PCA9685-driven servo motors.

    Reduced from the original 16-servo layout to 3 servos:
      0: front-wheel steering. 0 = straight ahead
      1: camera pan.  0 = centre
      2: camera tilt. 0 = centre
    """
    def __init__(self):
        # Per-channel direction multiplier; still sized for the old
        # 16-servo layout, only indices 0-2 are used here.
        self.sc_direction = [1,1,1,1, 1,1,1,1, 1,1,1,1, 1,1,1,1]
        # Neutral PWM value (PCA9685 ticks) per servo.
        self.initPos = [init_pwm0,init_pwm1,init_pwm2]
        # Last commanded PWM value per servo.
        self.nowPos = [300,300,300]
        # Per-servo allowed PWM range.
        self.maxPos = [560,560,560]
        self.minPos = [100,100,100]
        # Overall tick range mapped onto angleRange degrees by pwmGenOut().
        self.ctrlRangeMax = 560
        self.ctrlRangeMin = 100
        self.angleRange = 180
        # Trim the camera pan/tilt servos to their mechanical neutral
        # positions and move them there immediately (moveTo=1).
        self.initConfig(1, 297, 1)
        self.initConfig(2, 140, 1)
    def moveInit(self):
        """Move all three servos to their neutral (initPos) positions."""
        for i in range(0,3):
            pwm.set_pwm(i,0,self.initPos[i])
            self.nowPos[i] = self.initPos[i]
    def initConfig(self, ID, initInput, moveTo):
        """Set servo ID's neutral PWM value; move there when moveTo is truthy.

        Values outside the exclusive range (minPos[ID], maxPos[ID]) are
        rejected with a printed warning.
        """
        if initInput > self.minPos[ID] and initInput < self.maxPos[ID]:
            self.initPos[ID] = initInput
            if moveTo:
                pwm.set_pwm(ID,0,self.initPos[ID])
        else:
            print('initPos Value Error.')
    def moveServoInit(self, ID):
        """Move each servo whose channel number is listed in ID to neutral."""
        for i in range(0,len(ID)):
            pwm.set_pwm(ID[i], 0, self.initPos[ID[i]])
            self.nowPos[ID[i]] = self.initPos[ID[i]]
    def pwmGenOut(self, angleInput):
        """Convert an angle in degrees to a PWM tick delta."""
        return int(round(((self.ctrlRangeMax-self.ctrlRangeMin)/self.angleRange*angleInput),0))
    def moveAngle(self, ID, angleInput):
        """Move servo ID to an angle relative to neutral, clamped to its
        [minPos, maxPos] range."""
        self.nowPos[ID] = int(self.initPos[ID] + self.sc_direction[ID]*self.pwmGenOut(angleInput))
        if self.nowPos[ID] > self.maxPos[ID]:self.nowPos[ID] = self.maxPos[ID]
        elif self.nowPos[ID] < self.minPos[ID]:self.nowPos[ID] = self.minPos[ID]
        pwm.set_pwm(ID, 0, self.nowPos[ID])
    def setPWM(self, ID, PWM_input):
        """Drive servo ID with a raw PWM tick value (no range clamping)."""
        self.nowPos[ID] = PWM_input
        pwm.set_pwm(ID, 0, PWM_input)
"""
Tracking
========
"""
# Infrared line-tracking sensor input pins (BCM numbering).
line_pin_right = 19
line_pin_middle = 16
line_pin_left = 20
class Tracking:
    """Reads the 3-channel line-tracking module."""
    def __init__(self):
        """Configure the three sensor pins as inputs."""
        GPIO.setwarnings(False)
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(line_pin_right,GPIO.IN)
        GPIO.setup(line_pin_middle,GPIO.IN)
        GPIO.setup(line_pin_left,GPIO.IN)
        # Last sampled sensor values; None until get_signal() is called.
        self.status_right = None
        self.status_middle = None
        self.status_left = None
    def get_signal(self):
        """Sample all three sensors and return (left, middle, right)."""
        self.status_right = GPIO.input(line_pin_right)
        self.status_middle = GPIO.input(line_pin_middle)
        self.status_left = GPIO.input(line_pin_left)
        return (self.status_left, self.status_middle, self.status_right)
"""
Camera
======
"""
import cv2
import numpy as np
def get_line(lineIndex_Pos, thickness, offset):
    """Return the centre x of the detected line segment closest to x=320.

    Args:
        lineIndex_Pos: result of ``np.where`` on one image row — a tuple
            whose first element is the ascending x positions of
            line-coloured pixels.
        thickness: expected line width in pixels.
        offset: allowed deviation from ``thickness``.

    Returns:
        Centre x of the run of consecutive pixels whose width is within
        ``offset`` of ``thickness`` and which lies closest to the image
        centre (x=320).

    Raises:
        IndexError: when no run matches (callers rely on catching this).
    """
    lines = []
    left = 999   # start of the current run (sentinel before the first pixel)
    right = 0    # end of the current run
    for pixel in lineIndex_Pos[0]:
        if pixel != right + 1:
            # Gap found: close the previous run [left, right] and record
            # its centre if the width matches the expected thickness.
            if abs((right - left) - thickness) <= offset:
                lines.append((left + right) // 2)
            left = pixel
        right = pixel
    # BUG FIX: the original never evaluated the final run, so the
    # rightmost (or only) segment was dropped; check it here too.
    if abs((right - left) - thickness) <= offset:
        lines.append((left + right) // 2)
    # Prefer the candidate nearest the image centre.
    lines.sort(key=lambda x: abs(x - 320))
    return lines[0]
class Camera:
    """Finds the line to follow in frames from the on-board camera."""
    def __init__(self):
        # Image rows (y) scanned for the line.
        self.linePos_1 = 440
        self.linePos_2 = 380
        # Pixel value treated as "line" after thresholding (0 = dark).
        self.lineColorSet = 0
        # Line centres found on each scan row, and their average.
        self.center_Pos1 = None
        self.center_Pos2 = None
        self.center = None
        # Expected line width (pixels) on each scan row.
        self.thickness1 = 88
        self.thickness2 = 78
        # Allowed deviation from the expected width.
        self.offset = 20
        self.camera = cv2.VideoCapture(0)
    def set_thick(self, thickness, offset):
        """Set the expected line thickness and tolerance; the upper scan
        row (linePos_2) gets 90% of the thickness."""
        self.thickness1 = thickness
        self.thickness2 = int(thickness * 0.9)
        self.offset = offset
    def findline(self):
        """Grab a frame and return the detected line centre x.

        The grey frame is Otsu-thresholded and eroded, then get_line()
        looks for a run of line-coloured pixels of the expected width on
        the two scan rows; the result is the mean of both centres.
        Falls back to the image centre (320) when no line is found.
        """
        _, frame_image = self.camera.read()
        frame_findline = cv2.cvtColor(frame_image, cv2.COLOR_BGR2GRAY)
        retval, frame_findline = cv2.threshold(frame_findline, 0, 255, cv2.THRESH_OTSU)
        frame_findline = cv2.erode(frame_findline, None, iterations=6)
        colorPos_1 = frame_findline[self.linePos_1]
        colorPos_2 = frame_findline[self.linePos_2]
        try:
            lineIndex_Pos1 = np.where(colorPos_1 == self.lineColorSet)
            lineIndex_Pos2 = np.where(colorPos_2 == self.lineColorSet)
            self.center_Pos1 = get_line(lineIndex_Pos1, self.thickness1, self.offset)
            self.center_Pos2 = get_line(lineIndex_Pos2, self.thickness2, self.offset)
            self.center = (self.center_Pos1 + self.center_Pos2) // 2
        except:
            # No matching run on one of the rows: steer straight ahead.
            self.center = 320
        return self.center
|
{"hexsha": "40c0239583c820a3e4d71d9272d204e7cdb76d9f", "size": 7718, "ext": "py", "lang": "Python", "max_stars_repo_path": "openpibo/picar_lib.py", "max_stars_repo_name": "hong-jinpyo/x-openpibo", "max_stars_repo_head_hexsha": "f785ec2a539786c5ae87057d383fff711fff5566", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "openpibo/picar_lib.py", "max_issues_repo_name": "hong-jinpyo/x-openpibo", "max_issues_repo_head_hexsha": "f785ec2a539786c5ae87057d383fff711fff5566", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "openpibo/picar_lib.py", "max_forks_repo_name": "hong-jinpyo/x-openpibo", "max_forks_repo_head_hexsha": "f785ec2a539786c5ae87057d383fff711fff5566", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.2720848057, "max_line_length": 98, "alphanum_fraction": 0.5821456336, "include": true, "reason": "import numpy", "num_tokens": 2170}
|
[STATEMENT]
lemma eqvt_at_apply'':
assumes "eqvt_at f x"
shows "(p \<bullet> f) (p \<bullet> x) = f (p \<bullet> x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (p \<bullet> f) (p \<bullet> x) = f (p \<bullet> x)
[PROOF STEP]
by (metis (opaque_lifting, no_types) assms eqvt_at_def permute_fun_def permute_minus_cancel(1))
|
{"llama_tokens": 145, "file": "Launchbury_Nominal-Utils", "length": 1}
|
/**
* Copyright (c) 2015 Carnegie Mellon University, Daniel Maturana <dimatura@cmu.edu>
*
* For License information please see the LICENSE file in the root directory.
*
*/
#ifndef RAYCASTING_HPP_OLVFBMND
#define RAYCASTING_HPP_OLVFBMND
#include <Eigen/Core>
#include <pcl_util/point_types.hpp>
#include "scrollgrid/grid_types.hpp"
#include "scrollgrid/box.hpp"
#include "scrollgrid/ray.hpp"
#include "scrollgrid/scrollgrid3.hpp"
#include "scrollgrid/dense_array3.hpp"
namespace ca
{
/**
 * Axis-aligned bounding box intersection test.
 * Reference:
 * An Efficient and Robust Ray–Box Intersection Algorithm, Williams et al. 2004
 * tmin and tmax are updated
 */
template<typename Scalar>
bool aabb_ray_intersect(const ca::scrollgrid::Box<Scalar, 3>& box,
                        ca::scrollgrid::Ray3<Scalar> &r) {
  // Slab test on x: r.sign selects the near/far bound per axis so negative
  // direction components are handled; invdir is the ray's precomputed
  // reciprocal direction.
  Scalar tmin = (box.bound( boost::get<0>(r.sign) ).x() - r.origin.x()) * r.invdir.x();
  Scalar tmax = (box.bound( 1-boost::get<0>(r.sign) ).x() - r.origin.x()) * r.invdir.x();
  // Slab test on y.
  Scalar tymin = (box.bound( boost::get<1>(r.sign) ).y() - r.origin.y()) * r.invdir.y();
  Scalar tymax = (box.bound(1-boost::get<1>(r.sign) ).y() - r.origin.y()) * r.invdir.y();
  // x and y slab intervals do not overlap: the ray misses the box.
  if ((tmin > tymax) || (tymin > tmax)) { return false; }
  if (tymin > tmin) { tmin = tymin; }
  if (tymax < tmax) { tmax = tymax; }
  // Slab test on z against the accumulated [tmin, tmax].
  Scalar tzmin = (box.bound( boost::get<2>(r.sign)).z() - r.origin.z()) * r.invdir.z();
  Scalar tzmax = (box.bound(1-boost::get<2>(r.sign)).z() - r.origin.z()) * r.invdir.z();
  if ((tmin > tzmax) || (tzmin > tmax)) { return false; }
  if (tzmin > tmin) { tmin = tzmin; }
  if (tzmax < tmax) { tmax = tzmax; }
  // Tighten the ray's own parameter interval to the intersection.
  if (tmin > r.tmin) { r.tmin = tmin; }
  if (tmax < r.tmax) { r.tmax = tmax; }
  return true;
}
/**
 * Trace a straight line from start_pos to end_pos.
 * At each step fun(i, j, k) is called.
 *
 * NOTE start_pos and end_pos should be inside the grid
 *
 * Reference: graphics gems article
 * TODO consider DDA-type raytracing.
 */
template<class TraceFunctor>
void bresenham_trace(const Vec3Ix& start_pos,
                     const Vec3Ix& end_pos,
                     const TraceFunctor& fun) {
  // beware: vec3ix are int64_t
  // NOTE(review): coordinates are narrowed to int here — confirm grid
  // extents cannot overflow int.
  int x = start_pos[0],
      y = start_pos[1],
      z = start_pos[2];
  int dx = end_pos[0] - start_pos[0],
      dy = end_pos[1] - start_pos[1],
      dz = end_pos[2] - start_pos[2];
  // Per-axis step direction; deltas are made non-negative below.
  int sx, sy, sz;
  //X
  if ( dx>0 ) {
    sx = 1;
  } else if ( dx<0 ) {
    sx = -1;
    dx = -dx;
  } else {
    sx = 0;
  }
  //Y
  if ( dy>0 ) {
    sy = 1;
  } else if ( dy<0 ) {
    sy = -1;
    dy = -dy;
  } else {
    sy = 0;
  }
  //Z
  if ( dz>0 ) {
    sz = 1;
  } else if ( dz<0 ) {
    sz = -1;
    dz = -dz;
  } else {
    sz = 0;
  }
  // Doubled deltas drive the integer error terms (dec*) below.
  int ax = 2*dx,
      ay = 2*dy,
      az = 2*dz;
  if ( ( dy <= dx ) && ( dz <= dx ) ) {
    // x-dominant: step x each iteration; y/z step when their error term
    // becomes non-negative.
    for (int decy=ay-dx, decz=az-dx;
         ;
         x+=sx, decy+=ay, decz+=az) {
      //SetP ( grid,x,y,z,end_pos, atMax, count);
      // Visitor may abort the trace by returning false.
      if(!fun(x, y, z)) break;
      //Bresenham step
      if ( x==end_pos[0] ) break;
      if ( decy>=0 ) {
        decy-=ax;
        y+=sy;
      }
      if ( decz>=0 ) {
        decz-=ax;
        z+=sz;
      }
    }
  } else if ( ( dx <= dy ) && ( dz <= dy ) ) {
    //dy>=dx,dy
    // y-dominant.
    for (int decx=ax-dy,decz=az-dy;
         ;
         y+=sy,decx+=ax,decz+=az ) {
      // SetP ( grid,x,y,z,end_pos, atMax, count);
      if(!fun(x, y, z)) break;
      //Bresenham step
      if ( y==end_pos[1] ) break;
      if ( decx>=0 ) {
        decx-=ay;
        x+=sx;
      }
      if ( decz>=0 ) {
        decz-=ay;
        z+=sz;
      }
    }
  } else if ( ( dx <= dz ) && ( dy <= dz ) ) {
    //dy>=dx,dy
    // z-dominant.
    for (int decx=ax-dz,decy=ay-dz;
         ;
         z+=sz,decx+=ax,decy+=ay ) {
      //SetP ( grid,x,y,z,end_pos, atMax, count);
      if(!fun(x, y, z)) break;
      //Bresenham step
      if ( z==end_pos[2] ) break;
      if ( decx>=0 ) {
        decx-=az;
        x+=sx;
      } if ( decy>=0 ) {
        decy-=az;
        y+=sy;
      }
    }
  }
}
/**
 * Trace a straight line from start_pos to end_pos with Bresenham's
 * algorithm, incrementing a counter in array3 for every grid cell
 * visited (start and end cells included).
 *
 * NOTE start_pos and end_pos should be inside the grid.
 */
template<class GridScalar, class ArrayScalar>
void bresenham_trace_simple(const Vec3Ix& start_pos,
                            const Vec3Ix& end_pos,
                            const ca::ScrollGrid3<GridScalar>& grid3,
                            ca::DenseArray3<ArrayScalar>& array3
                            ) {
  // beware: vec3ix are int64_t
  int x = start_pos[0],
      y = start_pos[1],
      z = start_pos[2];
  int dx = end_pos[0] - start_pos[0],
      dy = end_pos[1] - start_pos[1],
      dz = end_pos[2] - start_pos[2];
  // Per-axis step direction; deltas are made non-negative below.
  int sx, sy, sz;
  //X
  if ( dx>0 ) {
    sx = 1;
  } else if ( dx<0 ) {
    sx = -1;
    dx = -dx;
  } else {
    sx = 0;
  }
  //Y
  if ( dy>0 ) {
    sy = 1;
  } else if ( dy<0 ) {
    sy = -1;
    dy = -dy;
  } else {
    sy = 0;
  }
  //Z
  if ( dz>0 ) {
    sz = 1;
  } else if ( dz<0 ) {
    sz = -1;
    dz = -dz;
  } else {
    sz = 0;
  }
  // Doubled deltas drive the integer error terms (dec*) below.
  int ax = 2*dx,
      ay = 2*dy,
      az = 2*dz;
  if ( ( dy <= dx ) && ( dz <= dx ) ) {
    // x-dominant.
    for (int decy=ay-dx, decz=az-dx;
         ;
         x+=sx, decy+=ay, decz+=az) {
      mem_ix_t mem_ix = grid3.grid_to_mem(x, y, z);
      array3[mem_ix] += 1;
      //Bresenham step
      if ( x==end_pos[0] ) break;
      if ( decy>=0 ) {
        decy-=ax;
        y+=sy;
      }
      if ( decz>=0 ) {
        decz-=ax;
        z+=sz;
      }
    }
  } else if ( ( dx <= dy ) && ( dz <= dy ) ) {
    // y-dominant.
    for (int decx=ax-dy,decz=az-dy;
         ;
         y+=sy,decx+=ax,decz+=az ) {
      mem_ix_t mem_ix = grid3.grid_to_mem(x, y, z);
      array3[mem_ix] += 1;
      //Bresenham step
      if ( y==end_pos[1] ) break;
      if ( decx>=0 ) {
        decx-=ay;
        x+=sx;
      }
      if ( decz>=0 ) {
        decz-=ay;
        z+=sz;
      }
    }
  } else if ( ( dx <= dz ) && ( dy <= dz ) ) {
    // z-dominant.
    for (int decx=ax-dz,decy=ay-dz;
         ;
         z+=sz,decx+=ax,decy+=ay ) {
      // Consistency fix: was declared grid_ix_t here while the other two
      // branches use mem_ix_t for the same grid_to_mem result.
      mem_ix_t mem_ix = grid3.grid_to_mem(x, y, z);
      array3[mem_ix] += 1;
      //Bresenham step
      if ( z==end_pos[2] ) break;
      if ( decx>=0 ) {
        decx-=az;
        x+=sx;
      } if ( decy>=0 ) {
        decy-=az;
        y+=sy;
      }
    }
  }
}
/**
 * 2D Bresenham trace from start_pos to end_pos, calling fun(x, y, end_cell)
 * at every visited cell. The trace stops early when fun returns false, or
 * normally once the end cell has been visited.
 */
template<class TraceFunctor>
void bresenham_trace(const Vec2Ix& start_pos,
                     const Vec2Ix& end_pos,
                     const TraceFunctor& fun) {
  int x = start_pos[0],
      y = start_pos[1];
  int dx = end_pos[0] - start_pos[0],
      dy = end_pos[1] - start_pos[1];
  // Per-axis step direction; deltas are made non-negative below.
  int sx, sy;
  //X
  if ( dx>0 ) {
    sx = 1;
  } else if ( dx<0 ) {
    sx = -1;
    dx = -dx;
  } else {
    sx = 0;
  }
  //Y
  if ( dy>0 ) {
    sy = 1;
  } else if ( dy<0 ) {
    sy = -1;
    dy = -dy;
  } else {
    sy = 0;
  }
  // Doubled deltas drive the integer error terms below.
  int ax = 2*dx,
      ay = 2*dy;
  if (dy <= dx){
    // x-dominant.
    for (int decy=ay-dx;;
         x+=sx, decy+=ay) {
      bool end_cell = false;
      //Bresenham step
      if(!fun(x,y,end_cell)){ ROS_INFO("Breaking due to function call"); break;}
      if ( x==end_pos[0] ) break;
      if ( decy>=0 ) {
        decy-=ax;
        y+=sy;
      }
    }
  } else if ( dx <= dy ){
    // y-dominant.
    for (int decx=ax-dy;;
         y+=sy,decx+=ax) {
      bool end_cell = false;
      //Bresenham step
      // BUG FIX: the log statement was dead code placed after an
      // unconditional break; log first, then break, matching the
      // x-dominant branch above.
      if(!fun(x,y,end_cell)){ ROS_INFO("Breaking due to function call"); break;}
      if ( y==end_pos[1] ) break;
      if ( decx>=0 ) {
        decx-=ay;
        x+=sx;
      }
    }
  }
}
}
#endif /* end of include guard: RAYCASTING_HPP_OLVFBMND */
|
{"hexsha": "c04c8227ac13f1676239e8322bf0eb6138e00d12", "size": 7774, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/scrollgrid/raycasting.hpp", "max_stars_repo_name": "castacks/scrollgrid", "max_stars_repo_head_hexsha": "710324173907a182eb688effcf1c9ec998ade1e0", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 9.0, "max_stars_repo_stars_event_min_datetime": "2017-07-20T23:04:49.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-12T08:03:10.000Z", "max_issues_repo_path": "include/scrollgrid/raycasting.hpp", "max_issues_repo_name": "castacks/scrollgrid", "max_issues_repo_head_hexsha": "710324173907a182eb688effcf1c9ec998ade1e0", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/scrollgrid/raycasting.hpp", "max_forks_repo_name": "castacks/scrollgrid", "max_forks_repo_head_hexsha": "710324173907a182eb688effcf1c9ec998ade1e0", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2018-04-06T16:41:58.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-12T01:39:22.000Z", "avg_line_length": 22.7976539589, "max_line_length": 91, "alphanum_fraction": 0.4753022897, "num_tokens": 2697}
|
#include <iostream>
#include <string>
#include <vector>
#include <boost/foreach.hpp>
#include <boost/property_tree/ptree.hpp>
#include <boost/property_tree/json_parser.hpp>
/**
 * Minimal serializable value object: a single int written to / read from a
 * Boost.PropertyTree under the key `parentNode + "Test.integer_"`.
 *
 * NOTE: the dynamic exception specifications (`throw()`) were removed —
 * ptree::put may allocate and ptree::get<int> throws ptree_bad_path when
 * the key is missing, so the old `throw()` on load() would have called
 * std::unexpected/terminate instead of propagating the error. Dynamic
 * exception specifications are also deprecated since C++11 and removed in
 * C++17.
 */
class Test {
public:
  Test()
    : integer_(40)
  {
  }
  /// Write this object's fields beneath `parentNode` in `tree`.
  void save(boost::property_tree::ptree& tree, std::string parentNode = "") const {
    tree.put(parentNode + "Test.integer_", this->integer_);
  }
  /// Read the fields back; throws boost::property_tree::ptree_bad_path
  /// (or ptree_bad_data) if the key is missing or not an int.
  void load(const boost::property_tree::ptree& tree, std::string parentNode = "") {
    this->integer_ = tree.get<int>(parentNode + "Test.integer_");
  }
  int integer_;
};
/**
 * Aggregate of Test objects plus an integer, (de)serializable through
 * Boost.PropertyTree. List entries live under "Composite.testList_" as
 * anonymous ("") children.
 *
 * Fixes relative to the previous revision:
 *  - save()/load() previously ignored `parentNode` for the testList_ node
 *    while honouring it for integer_; the prefix is now applied to every
 *    key.
 *  - load() previously indexed a fixed-size vector with .at(), throwing
 *    std::out_of_range for trees with more than 3 entries (and the
 *    `throw()` spec would then have terminated the program); the list is
 *    now rebuilt from the tree.
 *  - dynamic exception specifications removed (deprecated in C++11,
 *    removed in C++17; the bodies can throw).
 */
class Composite {
public:
  Composite() : integer_(2929) {
    this->testList_.resize(3);
  }
  /// Serialize all fields beneath `parentNode` in `tree`.
  void save(boost::property_tree::ptree& tree, std::string parentNode = "") const {
    boost::property_tree::ptree subTree;
    BOOST_FOREACH(const Test& test, this->testList_) {
      boost::property_tree::ptree info;
      test.save(info, "");
      subTree.push_back(std::make_pair("", info));
    }
    tree.add_child(parentNode + "Composite.testList_", subTree);
    tree.put(parentNode + "Composite.integer_", this->integer_);
  }
  /// Deserialize; throws ptree_bad_path if either key is missing.
  void load(const boost::property_tree::ptree& tree, std::string parentNode = "") {
    this->integer_ = tree.get<int>(parentNode + "Composite.integer_");
    // Rebuild the list from whatever the tree contains rather than
    // assuming the current vector is already the right size.
    this->testList_.clear();
    BOOST_FOREACH(const boost::property_tree::ptree::value_type& child,
                  tree.get_child(parentNode + "Composite.testList_")) {
      Test entry;
      entry.load(child.second);
      this->testList_.push_back(entry);
    }
  }
  std::vector<Test> testList_;
  int integer_;
};
int main() {
Composite composite;
//composite.test_.integer_ = 1000;
composite.integer_ = 10;
int count = 0;
BOOST_FOREACH(Test& test, composite.testList_) {
test.integer_ = count * 100;
++count;
}
boost::property_tree::ptree tree;
composite.save(tree);
write_json("D:/PersonalTool/xyzzy/laboratory/c++/boost/PropertyTree/data_out.json", tree);
Composite composite2;
composite2.load(tree);
std::cout << composite2.integer_ << std::endl;
//std::cout << composite2.test_.integer_ << std::endl;
BOOST_FOREACH(const Test& test, composite2.testList_) {
std::cout << test.integer_ << std::endl;
}
}
|
{"hexsha": "cb8bf97ef1cb403c7f233ee5a33e5563c7719d2f", "size": 2257, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "c++/boost/PropertyTree/Write.cpp", "max_stars_repo_name": "taku-xhift/labo", "max_stars_repo_head_hexsha": "89dc28fdb602c7992c6f31920714225f83a11218", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "c++/boost/PropertyTree/Write.cpp", "max_issues_repo_name": "taku-xhift/labo", "max_issues_repo_head_hexsha": "89dc28fdb602c7992c6f31920714225f83a11218", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "c++/boost/PropertyTree/Write.cpp", "max_forks_repo_name": "taku-xhift/labo", "max_forks_repo_head_hexsha": "89dc28fdb602c7992c6f31920714225f83a11218", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.5326086957, "max_line_length": 111, "alphanum_fraction": 0.6650420913, "num_tokens": 575}
|
!> @brief Test the parallel distribution of a mesh's points.
!
!> @description A mesh of size N should have its points distributed in parallel such that
!! \f$\sum_p n_p = N\f$.
!> Builds square meshes of size n = 1..100 and checks that the parallel
!! cell distribution is sane: no rank holds more cells than the global
!! total n**2, and the MPI_Allreduce'd sum of local counts equals n**2.
program test_mesh_point_distribution
  use testing_lib
  use mesh_utils, only : build_square_mesh
  implicit none
  type(ccs_mesh) :: mesh
  integer(ccs_int) :: n          ! side length of the square mesh under test
  integer(ccs_int) :: n_expected ! expected global cell count, n**2
  integer(ccs_int) :: n_global   ! global cell count reduced over all ranks
  ! init()/fin(), par_env, message, ierr etc. come from testing_lib.
  call init()
  do n = 1, 100
    mesh = build_square_mesh(par_env, n, 1.0_ccs_real)
    associate(nlocal => mesh%nlocal)
      ! Dead guard: the body of this branch is entirely commented out —
      ! a negative local count is suspicious but, per the note below,
      ! zero cells on a PE is not necessarily invalid.
      if (nlocal < 0) then
        ! XXX: Zero cells on a PE is not necessarily invalid...
        ! ? exit
        ! select type(par_env)
        ! type is(parallel_environment_mpi)
        ! call MPI_Allreduce(nlocal, n_global, 1, MPI_INT, MPI_SUM, par_env%comm, ierr)
        ! class default
        ! write (message,*) "ERROR: Unknown parallel environment!"
        ! call stop_test(message)
        ! end select
      end if
      n_expected = n**2
      ! A single rank may never exceed the requested global total.
      if (nlocal > n_expected) then
        write (message,*) "FAIL: Local number of cells ", nlocal, &
          " exceeds requested total!", n
        call stop_test(message)
      end if
      ! Sum the local counts across ranks into n_global.
      select type(par_env)
      type is(parallel_environment_mpi)
        call MPI_Allreduce(nlocal, n_global, 1, MPI_INT, MPI_SUM, par_env%comm, ierr)
      class default
        write (message,*) "ERROR: Unknown parallel environment!"
        call stop_test(message)
      end select
    end associate
    ! The distributed counts must partition the mesh exactly.
    if (n_global /= n_expected) then
      write (message,*) "FAIL: expected ", n_expected, " got ", n_global, &
        " (test_mesh:test_mesh_point_distribution/1)"
      call stop_test(message)
    end if
    ! The mesh's own bookkeeping must agree with the requested size.
    call assert_equal(n_expected, mesh%nglobal, &
      '("FAIL: expected ", i0, " got ", i0, " (test_mesh:test_mesh_point_distribution/2)")')
  end do
  call fin()
end program test_mesh_point_distribution
|
{"hexsha": "fbf2c85be739d62712bc185f54e108f51c9a8bd5", "size": 1948, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "tests/mesh/test_mesh_point_distribution.f90", "max_stars_repo_name": "asimovpp/asimov-ccs", "max_stars_repo_head_hexsha": "f8624d418fb847d104f8e753a21326e7026277fd", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2022-03-15T16:48:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T17:49:20.000Z", "max_issues_repo_path": "tests/mesh/test_mesh_point_distribution.f90", "max_issues_repo_name": "asimovpp/asimov-ccs", "max_issues_repo_head_hexsha": "f8624d418fb847d104f8e753a21326e7026277fd", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/mesh/test_mesh_point_distribution.f90", "max_forks_repo_name": "asimovpp/asimov-ccs", "max_forks_repo_head_hexsha": "f8624d418fb847d104f8e753a21326e7026277fd", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.6470588235, "max_line_length": 95, "alphanum_fraction": 0.6339835729, "num_tokens": 504}
|
import scipy as sp
import scipy.optimize as optimize
from scipy.optimize import Bounds, LinearConstraint
from math import sqrt, pi, sin, cos
import numpy as np
N = 400
def S(p):
    """Negated objective for the Project Euler 392 grid-area problem.

    Given increasing half-angles ``p = [a_1, ..., a_n]`` (with an implicit
    ``a_0 = 0``), computes

        val = (1 - cos(a_n)) - sum_k sin(a_k) * (cos(a_{k-1}) - cos(a_k))

    and returns ``-val`` so that a minimizer maximizes ``val``.

    Args:
        p: sequence of angles in radians.

    Returns:
        float: the negated objective value.
    """
    # FIX: a triple-quoted block containing an earlier implementation was
    # left in the body as a no-op expression statement; removed.
    n = len(p)
    a = [0.0] + list(p)  # prepend the implicit a_0 = 0
    val = -cos(a[n]) + 1.0
    for k in range(1, n + 1):
        val -= sin(a[k]) * (cos(a[k - 1]) - cos(a[k]))
    return -val
def G(p):
    """Analytic gradient of :func:`S`.

    With ``a_0 = 0`` and ``val`` as defined in ``S``, the partials are

        d(val)/d(a_k) = cos(2 a_k) - cos(a_k) cos(a_{k-1})
                        + sin(a_{k+1}) sin(a_k)          for k < n
        d(val)/d(a_n) = sin(a_n) + cos(2 a_n) - cos(a_n) cos(a_{n-1})

    and since ``S = -val`` every entry is negated before returning.

    Args:
        p: sequence of angles in radians (same layout as for ``S``).

    Returns:
        list[float]: gradient of ``S`` with respect to each entry of ``p``.
    """
    g = []
    a = [0.0] + list(p)
    for k in range(1, len(p)):
        # FIX: was `- sin(a[k+1]*sin(a[k]))` — a misplaced parenthesis and
        # a wrong sign; the cross-term of d(val)/d(a_k) is
        # +sin(a_{k+1})*sin(a_k), consistent with the last-element formula.
        g.append(cos(2*a[k]) - cos(a[k]) * cos(a[k-1]) + sin(a[k+1]) * sin(a[k]))
    # Last angle also picks up d(1 - cos(a_n))/d(a_n) = sin(a_n).
    g.append(sin(a[-1]) + cos(2*a[-1]) - cos(a[-1]) * cos(a[-2]))
    # Negate: S returns -val.
    for i in range(len(g)):
        g[i] = -g[i]
    return g
if __name__ == "__main__":
    # Optimize the N/2 free half-angles with trust-constr, starting from a
    # flat configuration of zeros.
    half = N // 2
    initial_x = [0 for _ in range(half)]
    # Box constraints: every angle in [0, pi/2].
    bounds = Bounds([0 for _ in range(half)], [pi / 2 for _ in range(half)])
    # Monotonicity constraints x[i] - x[i+1] <= 0 encoded as a banded
    # matrix; the last row is all zeros (trivially satisfied).
    M = np.zeros((half, half), dtype=np.float64)
    for row in range(half - 1):
        M[row][row] = 1.0
        M[row][row + 1] = -1.0
    linear_constraints = LinearConstraint(M,
                                          [-np.inf for _ in range(half)],
                                          [0 for _ in range(half)])
    tol = 1e-12
    # Gradient via finite differences ('3-point'); the analytic jacobian G
    # is intentionally not used here.
    result = optimize.minimize(S, initial_x,
                               jac='3-point',
                               bounds=bounds,
                               constraints = linear_constraints,
                               method = "trust-constr",
                               tol = tol,
                               options={'xtol': tol, 'gtol': tol, 'barrier_tol': tol, 'maxiter': 2000,'verbose':1})
    print(result)
    # By symmetry the full answer is 4 plus four times the quarter value.
    print("ans:", 4 + result.fun*4)
|
{"hexsha": "9481fac2314a3ea410b6b00a1d69e9def215ec21", "size": 2459, "ext": "py", "lang": "Python", "max_stars_repo_path": "working/problem392.py", "max_stars_repo_name": "takekoputa/project-euler", "max_stars_repo_head_hexsha": "6f434be429bd26f5d0f84f5ab0f5fa2bd677c790", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "working/problem392.py", "max_issues_repo_name": "takekoputa/project-euler", "max_issues_repo_head_hexsha": "6f434be429bd26f5d0f84f5ab0f5fa2bd677c790", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "working/problem392.py", "max_forks_repo_name": "takekoputa/project-euler", "max_forks_repo_head_hexsha": "6f434be429bd26f5d0f84f5ab0f5fa2bd677c790", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-02T12:08:46.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-02T12:08:46.000Z", "avg_line_length": 31.9350649351, "max_line_length": 115, "alphanum_fraction": 0.4648230988, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 713}
|
import random
import math
import numpy as np
from scipy.stats import unitary_group
from collections import Counter
from qlazy import QState, DensOp
def random_qstate(qnum):  # random pure state
    """Return a Haar-random pure state on `qnum` qubits.

    A unit basis vector e_0 is rotated by a Haar-random unitary drawn via
    scipy's unitary_group, then wrapped into a QState.
    """
    dim = 2**qnum
    basis0 = np.array([0.0] * dim)
    basis0[0] = 1.0
    haar_unitary = unitary_group.rvs(dim)
    state_vec = np.dot(haar_unitary, basis0)
    return QState(vector=state_vec)
def random_qstate_schmidt(qnum_A,qnum_B):  # random pure state (schmidt decomp.)
    """Random bipartite pure state written directly in Schmidt form.

    Draws min(dim_A, dim_B) random Schmidt coefficients, normalizes them,
    and places coefficient i at the product-basis index |i>_A |i>_B
    (flat index i + i * dim_B).
    """
    dim_A = 2**qnum_A
    dim_B = 2**qnum_B
    dim_min = min(dim_A, dim_B)
    weights = np.array([random.random() for _ in range(dim_min)])
    weights = weights / math.sqrt(np.dot(weights, weights))
    full_vec = np.array([0.0] * (dim_A * dim_B))
    for i in range(dim_min):
        full_vec[i + i * dim_B] = weights[i]
    return QState(vector=full_vec)
def entropy_from_qstate(qstate=None,id_A=[],id_B=[],shots=10):
    """Estimate the Shannon entropy of sampled amplitude vectors.

    Calls ``qstate.get_amp(id_A)`` `shots` times, stringifies each result
    as a hashable key, and computes -sum p log2 p over the empirical
    outcome frequencies.

    NOTE(review): `id_B` is accepted but never used — presumably kept for
    signature symmetry with the caller; confirm.
    """
    samples = [str(list(qstate.get_amp(id_A))) for _ in range(shots)]
    counts = Counter(samples)
    total = sum(counts.values())
    entropy = 0.0
    for c in counts.values():
        prob = c / total
        entropy -= prob * np.log2(prob)
    return entropy
if __name__ == '__main__':
    # Bipartition: qubits [0, qnum_A) form system A, the rest form B.
    qnum_A = 2
    qnum_B = 3
    id_A = list(range(qnum_A))
    id_B = list(range(qnum_A, qnum_A + qnum_B))
    # A simple random pure state could be used instead:
    #qs = random_qstate(qnum_A+qnum_B)
    # random pure state represented by schmidt decomposition
    qs = random_qstate_schmidt(qnum_A,qnum_B)
    # Density operator of the pure state (single member, probability 1).
    de = DensOp(qstate=[qs], prob=[1.0])
    ent = de.entropy()
    ent_A = de.entropy(id_A)
    ent_B = de.entropy(id_B)
    # For a pure state of A+B the subsystem entropies coincide: S(A)=S(B).
    print("- entropy (system A+B): S(A,B) = {:.4f}".format(ent))
    print("- entanglement entropy (system A): S(A) = {:.4f}".format(ent_A))
    print("- entanglement entropy (system B): S(B) = {:.4f}".format(ent_B))
    # Sampling-based estimate; matches S(A)/S(B) when the state is written
    # in Schmidt form.
    ent_Q = entropy_from_qstate(qs,id_A,id_B,1000)
    print("- entanglement entropy (predict of A): S(A)' = {:.4f}".format(ent_Q))
    ent_cond = de.cond_entropy(id_A,id_B)
    mut_info = de.mutual_info(id_A,id_B)
    print("- conditional entropy: S(A|B) = {:.4f}".format(ent_cond))
    print("- mutual information: I(A:B) = {:.4f}".format(mut_info))
    # Relative entropy needs equal-dimension marginals.
    if qnum_A == qnum_B:
        de_A = de.partial(id_A)
        de_B = de.partial(id_B)
        rel_ent = de_A.relative_entropy(de_B)
        print("- relative entropy: S(A||B) = {:.4f}".format(rel_ent))
|
{"hexsha": "e9944eb4f492fdaff96488a0584f01190f1be107", "size": 2696, "ext": "py", "lang": "Python", "max_stars_repo_path": "example/py/Entropy/entropy2.py", "max_stars_repo_name": "samn33/qlazy", "max_stars_repo_head_hexsha": "b215febfec0a3b8192e57a20ec85f14576745a89", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2019-04-09T13:02:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-13T12:57:08.000Z", "max_issues_repo_path": "example/py/Entropy/entropy2.py", "max_issues_repo_name": "samn33/qlazy", "max_issues_repo_head_hexsha": "b215febfec0a3b8192e57a20ec85f14576745a89", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-02-26T16:21:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T00:46:53.000Z", "max_forks_repo_path": "example/py/Entropy/entropy2.py", "max_forks_repo_name": "samn33/qlazy", "max_forks_repo_head_hexsha": "b215febfec0a3b8192e57a20ec85f14576745a89", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-01-28T05:38:55.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-30T12:19:19.000Z", "avg_line_length": 28.6808510638, "max_line_length": 88, "alphanum_fraction": 0.6038575668, "include": true, "reason": "import numpy,from scipy", "num_tokens": 819}
|
#!/usr/bin/env python
import numpy as np
from roboticstoolbox.robot.ERobot import ERobot
from spatialmath import SE3
class FrankieOmni(ERobot):
    """
    Class that imports an Omnidirectional Frankie URDF model

    ``FrankieOmni()`` is a class which imports a FrankieOmni robot definition
    from a URDF file. The model describes its kinematic and graphical
    characteristics.

    .. runblock:: pycon

        >>> import roboticstoolbox as rtb
        >>> robot = rtb.models.URDF.FrankieOmni()
        >>> print(robot)

    Defined joint configurations are:

    - qz, zero joint angle configuration
    - qr, vertical 'READY' configuration

    .. codeauthor:: Jesse Haviland
    .. sectionauthor:: Peter Corke
    """

    def __init__(self):
        # Parse the xacro/URDF description shipped with the toolbox.
        links, name, urdf_string, urdf_filepath = self.URDF_read(
            "franka_description/robots/frankieOmni_arm_hand.urdf.xacro"
        )

        # links[12] is taken as the gripper attachment link — presumably
        # fixed by the URDF link ordering; TODO confirm against the xacro.
        super().__init__(
            links,
            name=name,
            manufacturer="Franka Emika",
            gripper_links=links[12],
            urdf_string=urdf_string,
            urdf_filepath=urdf_filepath,
        )

        # Tool transform: 0.1034 m along the gripper z-axis — presumably
        # the Franka hand flange-to-TCP offset; TODO confirm.
        self.grippers[0].tool = SE3(0, 0, 0.1034)

        # Joint velocity limits, one entry per actuated joint (12 values —
        # presumably 3 base + 7 arm + 2 finger; verify against the URDF).
        self.qdlim = np.array(
            [
                4.0,
                4.0,
                4.0,
                2.1750,
                2.1750,
                2.1750,
                2.1750,
                2.6100,
                2.6100,
                2.6100,
                3.0,
                3.0,
            ]
        )

        # Named configurations (10 values each — base + arm, no fingers).
        self.addconfiguration("qz", np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))
        self.addconfiguration(
            "qr", np.array([0, 0, 0, 0, -0.3, 0, -2.2, 0, 2.0, np.pi / 4])
        )
if __name__ == "__main__":  # pragma nocover
    # Smoke test: instantiate the model and dump link metadata for both
    # the robot chain and its gripper.
    robot = FrankieOmni()
    print(robot)

    for lnk in robot.links:
        for field in (lnk.name, lnk.isjoint, len(lnk.collision)):
            print(field)
        print()

    for lnk in robot.grippers[0].links:
        for field in (lnk.name, lnk.isjoint, len(lnk.collision)):
            print(field)
|
{"hexsha": "ed75cfbccdcef3b9f303f489e383e5cceb795874", "size": 2215, "ext": "py", "lang": "Python", "max_stars_repo_path": "roboticstoolbox/models/URDF/FrankieOmni.py", "max_stars_repo_name": "tassos/robotics-toolbox-python", "max_stars_repo_head_hexsha": "51aa8bbb3663a7c815f9880d538d61e7c85bc470", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 749, "max_stars_repo_stars_event_min_datetime": "2015-04-28T03:02:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T06:55:12.000Z", "max_issues_repo_path": "roboticstoolbox/models/URDF/FrankieOmni.py", "max_issues_repo_name": "tassos/robotics-toolbox-python", "max_issues_repo_head_hexsha": "51aa8bbb3663a7c815f9880d538d61e7c85bc470", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 226, "max_issues_repo_issues_event_min_datetime": "2015-04-16T22:22:55.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-24T16:42:28.000Z", "max_forks_repo_path": "roboticstoolbox/models/URDF/FrankieOmni.py", "max_forks_repo_name": "tassos/robotics-toolbox-python", "max_forks_repo_head_hexsha": "51aa8bbb3663a7c815f9880d538d61e7c85bc470", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 251, "max_forks_repo_forks_event_min_datetime": "2015-04-30T23:52:35.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-27T13:32:16.000Z", "avg_line_length": 24.6111111111, "max_line_length": 77, "alphanum_fraction": 0.5471783296, "include": true, "reason": "import numpy", "num_tokens": 603}
|
From Hammer Require Import Hammer.
Require Export Decidable.
Require Export NAxioms.
Require Import NZProperties.
(* Basic consequences of the natural-number axioms (NAxiomsMiniSig'):
   zero/successor facts, induction and case-analysis principles, and
   predecessor lemmas. Proof scripts are instrumented with hammer_hook. *)
Module NBaseProp (Import N : NAxiomsMiniSig').
Include NZProp N.

(* A successor is never zero. *)
Theorem neq_succ_0 : forall n, S n ~= 0.
Proof. hammer_hook "NBase" "NBase.NBaseProp.neq_succ_0".
intros n EQ.
assert (EQ' := pred_succ n).
rewrite EQ, pred_0 in EQ'.
rewrite <- EQ' in EQ.
now apply (neq_succ_diag_l 0).
Qed.

(* Symmetric form of neq_succ_0. *)
Theorem neq_0_succ : forall n, 0 ~= S n.
Proof. hammer_hook "NBase" "NBase.NBaseProp.neq_0_succ".
intro n; apply neq_sym; apply neq_succ_0.
Qed.

(* Zero is a lower bound for every natural number. *)
Theorem le_0_l : forall n, 0 <= n.
Proof. hammer_hook "NBase" "NBase.NBaseProp.le_0_l".
nzinduct n.
now apply eq_le_incl.
intro n; split.
apply le_le_succ_r.
intro H; apply le_succ_r in H; destruct H as [H | H].
assumption.
symmetry in H; false_hyp H neq_succ_0.
Qed.

(* Standard induction principle, derived from right_induction at base 0. *)
Theorem induction :
forall A : N.t -> Prop, Proper (N.eq==>iff) A ->
A 0 -> (forall n, A n -> A (S n)) -> forall n, A n.
Proof. hammer_hook "NBase" "NBase.NBaseProp.induction".
intros A A_wd A0 AS n; apply right_induction with 0; try assumption.
intros; auto; apply le_0_l. apply le_0_l.
Qed.

(* Tactic wrapper around the induction theorem. *)
Ltac induct n := induction_maker n ltac:(apply induction).

(* Case analysis: it suffices to handle 0 and successors. *)
Theorem case_analysis :
forall A : N.t -> Prop, Proper (N.eq==>iff) A ->
A 0 -> (forall n, A (S n)) -> forall n, A n.
Proof. hammer_hook "NBase" "NBase.NBaseProp.case_analysis".
intros; apply induction; auto.
Qed.

(* Tactic wrapper around case_analysis. *)
Ltac cases n := induction_maker n ltac:(apply case_analysis).

(* Not every natural number is zero. *)
Theorem neq_0 : ~ forall n, n == 0.
Proof. hammer_hook "NBase" "NBase.NBaseProp.neq_0".
intro H; apply (neq_succ_0 0). apply H.
Qed.

(* A number is nonzero iff it is some successor. *)
Theorem neq_0_r : forall n, n ~= 0 <-> exists m, n == S m.
Proof. hammer_hook "NBase" "NBase.NBaseProp.neq_0_r".
cases n. split; intro H;
[now elim H | destruct H as [m H]; symmetry in H; false_hyp H neq_succ_0].
intro n; split; intro H; [now exists n | apply neq_succ_0].
Qed.

(* Every number is zero or a successor. *)
Theorem zero_or_succ : forall n, n == 0 \/ exists m, n == S m.
Proof. hammer_hook "NBase" "NBase.NBaseProp.zero_or_succ".
cases n.
now left.
intro n; right; now exists n.
Qed.

(* The predecessor is zero exactly for 0 and 1. *)
Theorem eq_pred_0 : forall n, P n == 0 <-> n == 0 \/ n == 1.
Proof. hammer_hook "NBase" "NBase.NBaseProp.eq_pred_0".
cases n.
rewrite pred_0. now split; [left|].
intro n. rewrite pred_succ.
split. intros H; right. now rewrite H, one_succ.
intros [H|H]. elim (neq_succ_0 _ H).
apply succ_inj_wd. now rewrite <- one_succ.
Qed.

(* Successor undoes predecessor on nonzero numbers. *)
Theorem succ_pred : forall n, n ~= 0 -> S (P n) == n.
Proof. hammer_hook "NBase" "NBase.NBaseProp.succ_pred".
cases n.
intro H; exfalso; now apply H.
intros; now rewrite pred_succ.
Qed.

(* Predecessor is injective away from zero. *)
Theorem pred_inj : forall n m, n ~= 0 -> m ~= 0 -> P n == P m -> n == m.
Proof. hammer_hook "NBase" "NBase.NBaseProp.pred_inj".
intros n m; cases n.
intros H; exfalso; now apply H.
intros n _; cases m.
intros H; exfalso; now apply H.
intros m H2 H3. do 2 rewrite pred_succ in H3. now rewrite H3.
Qed.

(* Two-step (pair) induction: base cases 0 and 1, step from n and S n. *)
Section PairInduction.

Variable A : N.t -> Prop.
Hypothesis A_wd : Proper (N.eq==>iff) A.

Theorem pair_induction :
A 0 -> A 1 ->
(forall n, A n -> A (S n) -> A (S (S n))) -> forall n, A n.
Proof. hammer_hook "NBase" "NBase.NBaseProp.pair_induction".
rewrite one_succ.
intros until 3.
(* Strengthen to "A n /\ A (S n)" so ordinary induction applies. *)
assert (D : forall n, A n /\ A (S n)); [ |intro n; exact (proj1 (D n))].
induct n; [ | intros n [IH1 IH2]]; auto.
Qed.

End PairInduction.

(* Induction over pairs: step in the second argument, restart at (S n, 0)
   once the whole row has been established. *)
Section TwoDimensionalInduction.

Variable R : N.t -> N.t -> Prop.
Hypothesis R_wd : Proper (N.eq==>N.eq==>iff) R.

Theorem two_dim_induction :
R 0 0 ->
(forall n m, R n m -> R n (S m)) ->
(forall n, (forall m, R n m) -> R (S n) 0) -> forall n m, R n m.
Proof. hammer_hook "NBase" "NBase.NBaseProp.two_dim_induction".
intros H1 H2 H3. induct n.
induct m.
exact H1. exact (H2 0).
intros n IH. induct m.
now apply H3. exact (H2 (S n)).
Qed.

End TwoDimensionalInduction.

(* Simultaneous induction on both arguments. *)
Section DoubleInduction.

Variable R : N.t -> N.t -> Prop.
Hypothesis R_wd : Proper (N.eq==>N.eq==>iff) R.

Theorem double_induction :
(forall m, R 0 m) ->
(forall n, R (S n) 0) ->
(forall n m, R n m -> R (S n) (S m)) -> forall n m, R n m.
Proof. hammer_hook "NBase" "NBase.NBaseProp.double_induction".
intros H1 H2 H3; induct n; auto.
intros n H; cases m; auto.
Qed.

End DoubleInduction.

(* Tactic wrapper around double_induction. *)
Ltac double_induct n m :=
try intros until n;
try intros until m;
pattern n, m; apply double_induction; clear n m;
[solve_proper | | | ].

End NBaseProp.
|
{"author": "lukaszcz", "repo": "coqhammer-eval", "sha": "e7a30119c1470623125728006fd1299641192a60", "save_path": "github-repos/coq/lukaszcz-coqhammer-eval", "path": "github-repos/coq/lukaszcz-coqhammer-eval/coqhammer-eval-e7a30119c1470623125728006fd1299641192a60/stdlib/Numbers/Natural/Abstract/NBase.v"}
|
import os
import sys
import cv2
import time
import h5py
import numpy as np
import scipy.io as sio
from fastkde import fastKDE
from matplotlib.path import Path
from matplotlib.figure import Figure
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QIntValidator, QFont
from PyQt5.QtWidgets import QPushButton, QLabel, QRadioButton, QLineEdit, QCheckBox, QComboBox, QMessageBox, QButtonGroup
from PyQt5.QtWidgets import QGridLayout, QFileDialog, QApplication, QMainWindow, QWidget, QVBoxLayout, QFrame
class PlotWindow(QMainWindow):
    """Matplotlib canvas embedded in a Qt window with an interactive
    polygon editor on axis 1.

    Left click adds/drags vertices (or inserts on an edge), middle click
    clears the polygon, right click removes a nearby vertex or places an
    anchor marker. Once a polygon with more than 3 points exists, the
    owning widget's ``filter_exclude`` is called to recompute the mask.
    """

    def __init__(self, parent=None):
        super(PlotWindow, self).__init__(parent)
        self.setWindowTitle("Visualization window")
        # Pixel radius for vertex hit-testing; set later by the owner once
        # the image dimensions are known.
        self.interactive_dist = None
        self._polygon_points = None   # list of (x, y) vertices
        self._anchor_object = None    # (x, y) of the right-click anchor
        # NOTE(review): this shadows QMainWindow.parent(); the attribute is
        # used below to call back into the owning Application widget.
        self.parent = parent
        # (1) set up the canvas
        self.fig = Figure(figsize=(8, 8), dpi=100)
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setParent(self)
        # add axes for plotting; ax2/ax3 share ax1's view limits
        self.ax1 = self.fig.add_subplot(221)
        self.ax2 = self.fig.add_subplot(222, sharex=self.ax1, sharey=self.ax1)
        self.ax3 = self.fig.add_subplot(223, sharex=self.ax1, sharey=self.ax1)
        self.ax4 = self.fig.add_subplot(224)
        self.ax4.axis('off')
        #
        self.ax1.set_title("Interactive area")
        self.ax2.set_title("Processing")
        self.ax3.set_title("Result output")
        self.ax4.set_title("Debug info")
        # build a colorbar at right of axis 3 (hidden until used)
        self.cax = make_axes_locatable(self.ax3).append_axes("right", size="5%", pad="2%")
        self.cax.axis('off')
        #
        self.init_interactive()
        # (2) Create the navigation toolbar, tied to the canvas
        self.mpl_toolbar = NavigationToolbar(self.canvas, self)
        layout = QVBoxLayout()
        layout.addWidget(self.canvas)
        layout.addWidget(self.mpl_toolbar)
        widget = QWidget()
        widget.setLayout(layout)
        self.setCentralWidget(widget)
        self.resize(900, 900)
        self.fig.tight_layout()
        self.show()

    def init_interactive(self):
        """Reset polygon state and hook up the mouse event callbacks."""
        self._polygon_points = []
        self._line, self._dragging_point, self._anc = None, None, None
        self.canvas.mpl_connect('button_press_event', self.on_click)
        self.canvas.mpl_connect('button_release_event', self.on_release)
        self.canvas.mpl_connect('motion_notify_event', self.on_motion)

    def update_plot(self):
        """Redraw the polygon line and anchor marker from current state."""
        if not self._polygon_points:
            # No vertices: blank the line artist if one was ever created.
            if self._line:
                self._line.set_data([], [])
        else:
            x, y = zip(*self._polygon_points)
            # Add new plot
            if not self._line:
                (self._line, ) = self.ax1.plot(x, y, "r", marker="o", markersize=8, zorder=10)
            # Update current plot
            else:
                x, y = list(x), list(y)
                # Append the first vertex again so the polygon is closed.
                self._line.set_data(x + [x[0]], y + [y[0]])
        if self._anchor_object:
            if not self._anc:
                (self._anc, ) = self.ax1.plot(*self._anchor_object, color='r', ms=20, marker='x', mew=3)
            else:
                self._anc.set_data(*self._anchor_object)
        self.fig.tight_layout()
        self.canvas.draw()

    def add_point(self, event):
        """Append the event's data coordinates as a new polygon vertex."""
        self._polygon_points.append((event.xdata, event.ydata))

    def remove_point(self, point):
        """Remove `point` from the polygon if it is a current vertex."""
        if point in self._polygon_points:
            self._polygon_points.remove(point)

    def find_neighbor_point(self, event):
        """
        Find point around mouse position

        Args:
            event: mouse event object

        Returns:
            tuple: (x, y) if there are any point around mouse else None
        """
        if self._polygon_points:
            # Nearest vertex by Euclidean distance in data coordinates.
            nx, ny = min(self._polygon_points, key=lambda p: np.hypot(event.xdata - p[0], event.ydata - p[1]))
            if np.hypot(event.xdata - nx, event.ydata - ny) < self.interactive_dist:
                return nx, ny
        return None

    @staticmethod
    def isBetween(pA, pB, p0):
        """Test whether p0 projects onto the segment pA-pB.

        Returns (None, None) if the projection falls outside the segment;
        otherwise returns the sum of distances from p0 to both endpoints
        and the endpoint nearer to p0.
        NOTE(review): on_click currently uses only the returned distance
        (it inserts before its own p2, not the returned point) — confirm
        that is intended.
        """
        p = pB
        p0A = np.hypot(p0[0] - pA[0], p0[1] - pA[1])
        p0B = np.hypot(p0[0] - pB[0], p0[1] - pB[1])
        if p0A < p0B:
            p = pA
        # Reject if p0 projects before pA along pA->pB.
        dotproduct = (p0[0] - pA[0]) * (pB[0] - pA[0]) + (p0[1] - pA[1]) * (pB[1] - pA[1])
        if dotproduct < 0:
            return None, None
        # Reject if p0 projects past pB.
        squaredlengthba = (pB[0] - pA[0]) * (pB[0] - pA[0]) + (pB[1] - pA[1]) * (pB[1] - pA[1])
        if dotproduct > squaredlengthba:
            return None, None
        return p0B + p0A, p

    def on_click(self, event):
        """
        Callback method for mouse click event

        Left button: drag an existing vertex, insert on the nearest edge,
        or append a new vertex. Middle button: clear the polygon. Right
        button: delete a nearby vertex, else drop the anchor marker.

        Args:
            event: mouse click event
        """
        # left click
        if event.inaxes in [self.ax1] and event.button == 1:
            point = self.find_neighbor_point(event)
            p_next = None
            p0 = (event.xdata, event.ydata)
            mind = np.inf
            # Find the polygon edge (p1, p2) closest to the click so the
            # new vertex can be inserted between them.
            if len(self._polygon_points) >= 3:
                a = self._polygon_points + [self._polygon_points[0]]
                for p1, p2 in zip(a, a[1:]):
                    d, p = self.isBetween(p1, p2, p0)
                    # NOTE(review): `if d` is falsy for d == 0.0, so a
                    # click exactly on a degenerate edge is skipped.
                    if d and d < mind:
                        mind = d
                        p_next = p2
            if point:
                self._dragging_point = point
            elif p_next:
                self._polygon_points.insert(self._polygon_points.index(p_next), p0)
            else:
                self.add_point(event)
            self.update_plot()
        # mid click
        elif event.inaxes in [self.ax1] and event.button == 2:
            self._polygon_points = []
            self.update_plot()
        elif event.inaxes in [self.ax1] and event.button == 3:
            point = self.find_neighbor_point(event)
            if point:
                self.remove_point(point)
                self.update_plot()
                self.parent.filter_exclude()
            else:
                self._anchor_object = (event.xdata, event.ydata)
                self.update_plot()
        # Recompute the exclusion mask once a proper polygon exists.
        if len(self._polygon_points) > 3:
            self.parent.filter_exclude()

    def on_release(self, event):
        """
        Callback method for mouse release event

        Ends a vertex drag and triggers a mask recomputation.

        Args:
            event: mouse event
        """
        if event.inaxes in [self.ax1] and event.button == 1 and self._dragging_point:
            self._dragging_point = None
            self.update_plot()
            self.parent.filter_exclude()

    def on_motion(self, event):
        """
        Callback method for mouse motion event

        Moves the currently dragged vertex to the cursor position.

        Args:
            event: mouse event
        """
        if not self._dragging_point:
            return
        if event.xdata is None or event.ydata is None:
            return
        # get index of the previous dragged point
        index = self._polygon_points.index(self._dragging_point)
        # set new point
        self._dragging_point = (event.xdata, event.ydata)
        # update previous point
        self._polygon_points[index] = self._dragging_point
        self.update_plot()
class Application(QMainWindow):
def __init__(self, parent=None):
QMainWindow.__init__(self, parent)
self.setWindowTitle('Computer Vision Analyzer v3.1 (NcN lab product 2020)')
self.create_main_frame()
self.create_status_bar()
self.data = None
self.mask_inside = None
self.current_frame = 0
def open_file(self, path):
"""
Try to read data from files with different types
Args:
path (str): file path
Returns:
tuple : shape of the data
"""
self.variables = {}
try:
for varname, vardata in sio.loadmat(path).items():
# get only 4-dim data
if len(np.shape(vardata)) == 4:
self.variables[varname] = vardata[:]
except NotImplementedError:
with h5py.File(path, 'r') as file:
for varname, vardata in file.items():
# get only 4-dim data
if len(np.shape(vardata)) == 4:
self.variables[varname] = vardata[:]
except Exception:
QMessageBox.about(self, 'Error', "Could not read the file...")
def file_dialog(self):
"""
Invoke PyQT file dialog with unblocking buttons
"""
fname = QFileDialog.getOpenFileName(self, "Open file", '', "MAT file (*.mat)")
# if exists
if fname[0]:
self.box_variable.clear()
# delete old data if exists
if self.data is not None:
del self.data
self.data = None
# prepare the data
self.status_text.setText("Unpack .mat file... Please wait")
QApplication.processEvents()
QApplication.processEvents()
#
self.filepath = fname[0]
self.open_file(self.filepath)
self.status_text.setText(f".mat file is unpacked ({self.filepath})")
# based on data set the possible variables
self.box_variable.setEnabled(True)
for varname in self.variables.keys():
self.box_variable.addItem(str(varname))
def reshape_data(self):
"""
Transpose the multi-dimensional matrix if need
"""
# get new shape as tuple
new_order = tuple(map(int, self.in_data_reshape.text().split()))
# reshape data to the new shape
self.data = np.transpose(self.data, new_order)
self.im_height, self.im_width, self.total_frames, methods_num = self.data.shape
self.im_shape = (self.im_height, self.im_width)
# disable buttons of reshaping
self.in_data_reshape.setEnabled(False)
self.btn_reshape_data.setEnabled(False)
# init frames in GUI form
self.in_start_frame.setValidator(QIntValidator(0, self.total_frames - 1))
self.in_end_frame.setValidator(QIntValidator(0, self.total_frames - 1))
self.in_end_frame.setText(str(self.total_frames - 1))
# update status
self.status_text.setText(f"Data was reshaped to {self.data.shape}")
#
self.box_method.clear()
for method_index in range(methods_num):
self.box_method.addItem(str(method_index))
#
self.plot_frame.interactive_dist = max(self.im_width, self.im_height) * 0.1
def choose_variable(self):
""" Invoked if text in QComboBox is changed """
# get the user's choose
var = self.box_variable.currentText()
if var != '':
# get the data by name
self.data = self.variables[var]
# meta info
data_shape = self.data.shape
str_format = len(data_shape) * '{:<5}'
self.label_fileinfo.setText(f"Shape: {str_format.format(*data_shape)}\n"
f"Index: {str_format.format(*list(range(4)))}")
self.label_fileinfo.setFont(QFont("Courier New"))
self.box_method.clear()
# unblock buttons
for obj in [self.btn_save_results, self.btn_loop_draw, self.btn_frame_right,
self.btn_frame_left, self.btn_reshape_data, self.in_data_reshape, self.box_method]:
obj.setEnabled(True)
self.status_text.setText(f"{var} is chosen {data_shape}")
def filter_exclude(self):
    """
    Build a boolean mask of image pixels lying inside the user-drawn polygon
    and cache it (plus its pixel coordinates) on the instance.
    """
    rows, cols = np.indices(self.im_shape)
    # one (x, y) pair per pixel of the image grid
    grid_points = np.column_stack((cols.ravel(), rows.ravel()))
    polygon = Path(self.plot_frame._polygon_points)  # make a polygon
    self.mask_inside = polygon.contains_points(grid_points).reshape(self.im_shape)
    self.true_y, self.true_x = np.where(self.mask_inside)
def method_onchange(self):
    """
    Redraw the time-averaged background image on axis 1 when the method
    selection changes, then refresh the canvas.
    """
    choice = self.box_method.currentText()
    if choice == '':
        return
    methodic = int(choice)
    mean_image = np.mean(self.data[:, :, :, methodic], axis=2)
    self.plot_frame.ax1.imshow(mean_image, zorder=-10, cmap='gray')
    if self.plot_frame._line:
        self.plot_frame.update_plot()
    else:
        self.plot_frame.fig.tight_layout()
    self.plot_frame.canvas.draw()
@staticmethod
def polygon_area(coords):
x, y = coords[:, 0], coords[:, 1]
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
@staticmethod
def align_coord(coords, border):
"""
Args:
coords:
border:
Returns:
"""
coords[coords >= border - 0.5] = border - 1
coords[coords <= 0.5] = 0
return coords
def update_draws(self, frame, methodic, static_thresh=None, dynamic_thresh=None,
                 fullness_need=None, separation=None, anchor=None, reversal=None, save_result=False):
    """
    Calculate contours and draw them on the axis.

    Args:
        frame (int): index of the frame
        methodic (int): index of the method
        static_thresh (float): constant threshold value (None - not using static threshold)
        dynamic_thresh (float): maximal number of fragmentation (None - without fragmentation checking)
        fullness_need (float): level of fullness inside the IOS (None - without fullness checking)
        separation (tuple): lower/upper values of border (None - without separation)
        anchor (tuple): x,y coordinates of anchor (None - without using an anchor)
        reversal (bool): is reversing of color needed
        save_result (bool): flag for skipping drawing if we want just save results
    Returns:
        tuple: x and y coords of the contour if 'save_result' is true
    """
    cv_cntrs = None
    fullness = None
    debug_ios = None
    max_contour = None
    # get an original data
    mask_in = self.mask_inside
    # nothing to do until a polygon mask exists (set by filter_exclude)
    if mask_in is None:
        return
    original = self.data[:, :, frame, methodic]
    # work on a copy so `original` keeps the raw values for the debug panel
    image = np.array(original, copy=True)
    # if normalization:
    # 	a_to, b_to = normalization
    # 	i_min, i_max = image.min(), image.max()
    # 	image = (b_to - a_to) * (image - i_min) / (i_max - i_min) + a_to
    # reverse colors if epilepsy radio button checked
    if reversal:
        image = -image
        original = -original
    # restrict the mask to the user-given intensity band; pixels outside the
    # band are flattened to the in-band minimum so they never pass a threshold
    if separation:
        mask_in = mask_in & (separation[0] <= image) & (image <= separation[1])
        image[~mask_in] = np.min(image[mask_in])
    # blur the image to smooth very light pixels
    # image = cv2.medianBlur(image, 3)
    # set the dynamic thresh value
    in_mask_image = image[mask_in]
    # first, rude iteration
    morph_kernel = np.ones((3, 3), np.uint8)
    mask = np.zeros(shape=image.shape, dtype='uint8')
    #
    if static_thresh:
        # fixed percentile threshold supplied by the user
        threshold_percent = static_thresh
        thresh_value = np.percentile(in_mask_image, threshold_percent)
        # get coordinates of points which are greater than thresh value
        y, x = np.where(image >= thresh_value)
    else:
        threshold_percent = 99
        # 1st raw loop: lower the percentile in steps of 2 until the binary
        # mask fragments into more than `dynamic_thresh` contours (or we hit 50%)
        while True:
            thresh_value = np.percentile(in_mask_image, threshold_percent)
            # get coordinates of points which are greater than thresh value
            y, x = np.where(image >= thresh_value)
            # calc raw CV contours to decide -- search contour or not
            tmpmask = np.array(mask, copy=True)
            tmpmask[y, x] = 255
            #
            _, thresh_mask = cv2.threshold(tmpmask, 200, 255, cv2.THRESH_BINARY)
            # transform morphology of the mask
            thresh_mask = cv2.morphologyEx(thresh_mask, cv2.MORPH_CLOSE, morph_kernel)
            thresh_mask = cv2.morphologyEx(thresh_mask, cv2.MORPH_OPEN, morph_kernel)
            # get the contour of the mask
            *im2, cv_cntrs, hierarchy = cv2.findContours(thresh_mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            # only if number of CV not so big (more fragmentation -- more confidence that there are no epilepsy contour)
            if len(cv_cntrs) > dynamic_thresh:
                break
            threshold_percent -= 2
            if threshold_percent < 50:
                threshold_percent += 2
                break
        # second, more precise iteration: raise the percentile in steps of 0.2
        # until fragmentation drops back to `dynamic_thresh` or below (cap 99.8%)
        while True:
            threshold_percent += 0.2
            if threshold_percent > 99.8:
                threshold_percent -= 0.2
                break
            thresh_value = np.percentile(in_mask_image, threshold_percent)
            # get coordinates of points which are greater than thresh value
            y, x = np.where(image >= thresh_value)
            # calc raw CV contours to decide -- search contour or not
            tmpmask = np.array(mask, copy=True)
            tmpmask[y, x] = 255
            #
            _, thresh_mask = cv2.threshold(tmpmask, 200, 255, cv2.THRESH_BINARY)
            # transform morphology of the mask
            thresh_mask = cv2.morphologyEx(thresh_mask, cv2.MORPH_CLOSE, morph_kernel)
            thresh_mask = cv2.morphologyEx(thresh_mask, cv2.MORPH_OPEN, morph_kernel)
            # get the contour of the mask
            *im2, cv_cntrs, hierarchy = cv2.findContours(thresh_mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
            # only if number of CV not so big (more fragmentation -- more confidence that there are no epilepsy contour)
            if len(cv_cntrs) <= dynamic_thresh:
                break
    # need enough above-threshold points for a meaningful density estimate
    if len(x) > 10:
        # get a KDE function values based on found points above XY meshgrid
        PDF, (vx, vy) = fastKDE.pdf(x, y)
        # find the contour by maximal area
        max_cont = max(self.plot_frame.ax3.contour(vx, vy, PDF, levels=1, alpha=0).allsegs[1], key=self.polygon_area)
        # limit coordinates within the border
        x_cont = self.align_coord(max_cont[:, 0], self.im_width)
        y_cont = self.align_coord(max_cont[:, 1], self.im_height)
        max_contour = (x_cont, y_cont)
        # get fullness: fraction of above-threshold points that fall inside the contour
        p_poly = Path(np.vstack((x_cont, y_cont)).T)
        inside_points = np.vstack((x, y)).T
        inside_count = np.where(p_poly.contains_points(inside_points))[0].size
        # the area of polygon the same as the count of points inside (shoelace formula)
        all_count = 0.5 * np.abs(np.dot(x_cont, np.roll(y_cont, 1)) - np.dot(y_cont, np.roll(x_cont, 1)))
        fullness = inside_count / all_count * 100
        # a contour that fails the fullness or anchor check is demoted to a
        # dashed "debug" outline and not reported as a result
        if fullness_need and fullness < fullness_need:
            debug_ios = max_contour
            max_contour = None
        if max_contour and anchor and not p_poly.contains_points((anchor,)):
            debug_ios = max_contour
            max_contour = None
    #
    if save_result:
        return max_contour
    else:
        self.current_frame = frame
        self.plot_frame.fig.suptitle(f"Frame {frame}")
        self.plot_frame.ax2.clear()
        self.plot_frame.ax3.clear()
        self.plot_frame.ax4.clear()
        self.plot_frame.ax4.axis('off')
        self.plot_frame.cax.clear()
        self.plot_frame.ax1.set_title("Interactive area")
        self.plot_frame.ax2.set_title("Processing")
        self.plot_frame.ax3.set_title("Result output")
        self.plot_frame.ax4.set_title("Debug info")
        # blank out pixels excluded by separation and by the polygon mask
        # (None becomes NaN in a float image, which imshow leaves transparent)
        if separation:
            m = (image < separation[0]) | (separation[1] < image)
            image[m] = None
        image[~mask_in] = None
        # Axis 2
        self.plot_frame.ax2.imshow(image)
        self.plot_frame.ax2.plot(x, y, '.', color='r', ms=1)
        if max_contour:
            self.plot_frame.ax2.plot(max_contour[0], max_contour[1], color='r', lw=3)
        if debug_ios:
            self.plot_frame.ax2.plot(debug_ios[0], debug_ios[1], color='r', lw=3, ls='--')
        # Axis 3
        im = self.plot_frame.ax3.imshow(image, cmap='jet')
        self.plot_frame.fig.colorbar(im, cax=self.plot_frame.cax)
        if max_contour:
            self.plot_frame.ax3.plot(max_contour[0], max_contour[1], color='r', lw=3)
        if anchor:
            self.plot_frame.ax2.plot(*anchor, color='w', ms=20, marker='x', mew=3)
            self.plot_frame.ax3.plot(*anchor, color='w', ms=20, marker='x', mew=3)
        # Axis 4: textual debug summary
        log = f"Area:\n" \
              f"  min= {np.min(image[mask_in]):.2f}\n" \
              f"  max= {np.max(image[mask_in]):.2f}\n" \
              f"Image (original):\n" \
              f"  min= {np.min(original):.2f}\n" \
              f"  max= {np.max(original):.2f}\n" \
              f"Threshold: {threshold_percent:.1f} ({'Static' if static_thresh else 'Dynamic'})\n" \
              f"Fragments: {len(cv_cntrs) if cv_cntrs else 0} (Max: {dynamic_thresh if dynamic_thresh else 'not setted'})\n" \
              f"Fullness: {fullness:.2f}% (Min: {round(fullness_need, 2) if fullness_need else 'not setted'})\n"
        if self.chkbox_anchor.isChecked():
            anc = self.plot_frame._anchor_object
            log += f"Anchor: {np.floor(anc) if anc and self.chkbox_anchor.isChecked() else 'Not used'}"
        self.plot_frame.ax4.text(0, 0.5, log, ha='left', va='center', transform=self.plot_frame.ax4.transAxes)
        # save axis plot
        # extent = self.ax3.get_window_extent().transformed(self.fig.dpi_scale_trans.inverted())
        # self.fig.savefig(f'/home/alex/example/{frame}.jpg', format='jpg')
        self.plot_frame.fig.tight_layout()
        self.plot_frame.canvas.draw()
        # waiting to see changes
        time.sleep(0.01)
        # flush the changes to the screen
        self.plot_frame.canvas.flush_events()
def check_input(self, input_value, borders=(-np.inf, np.inf)):
    """
    Validate a QLineEdit's text as a float within `borders`.

    Returns:
        float : the converted value, or None when the text is not a number
                or lies outside the allowed range (a message box is shown)
    """
    lo, hi = borders
    try:
        value = float(input_value.text())
    except Exception:
        # text could not be converted to a number at all
        QMessageBox.about(self, f"Error value '{input_value.text()}'", f"Value must be a number from {lo} to {hi}")
        return
    if lo <= value <= hi:
        return value
    # numeric but out of range
    QMessageBox.about(self, f"Error value '{value}'", f"Value must be a number from {lo} to {hi}")
    return
def check_fields(self):
    """
    Collect and validate every processing parameter from the control panel.

    Returns:
        dict: parameter name -> value; a failed validation is recorded as
        None (or False for the polygon check) so callers can abort.
    """
    fields = {}
    # a usable polygon needs at least 4 points
    if len(self.plot_frame._polygon_points) < 4:
        QMessageBox.about(self, "Error", f"Polygon is not setted")
        fields['polygon_existing'] = False
    # threshold: exactly one of static / dynamic, per the radio buttons
    if self.radio_static.isChecked():
        fields['static_thresh'] = self.check_input(self.in_static_thresh, borders=[0.1, 99.9])
    else:
        dyn = self.check_input(self.in_dynamic_thresh, borders=[3, 100])
        fields['dynamic_thresh'] = int(dyn) if dyn else None
    if self.chkbox_fullness.isChecked():
        fields['fullness_need'] = self.check_input(self.in_fullness, borders=[0.0, 100.0])
    if self.chkbox_separation.isChecked():
        lower = self.check_input(self.in_lower_separation)
        upper = self.check_input(self.in_upper_separation)
        # both bounds must parse and be strictly ordered
        if lower is None or upper is None or upper <= lower:
            fields['separation'] = None
        else:
            fields['separation'] = (lower, upper)
    if self.chkbox_anchor.isChecked() and self.plot_frame._anchor_object:
        fields['anchor'] = self.plot_frame._anchor_object
    fields['reversal'] = self.chkbox_reverse.isChecked()
    return fields
def save_contour(self):
    """
    Compute contours for the selected frame range and save them, together
    with the processing parameters, into a .mat file next to the source file.
    """
    # check_input returns None (after showing a message box) on bad text;
    # guard before int() to avoid a TypeError crash
    start = self.check_input(self.in_start_frame)
    end = self.check_input(self.in_end_frame)
    step = self.check_input(self.in_frame_stepsize)
    if start is None or end is None or step is None:
        return
    start, end, step = int(start), int(end), int(step)
    methodic = int(self.box_method.currentText())
    #
    if start < 0 or end <= start or end >= self.total_frames or step <= 0:
        QMessageBox.about(self, "Error", f"Invalid start/end/step values")
        return
    #
    fields = self.check_fields()
    # BUGFIX: iterate over the dict VALUES — iterating the dict itself yields
    # keys (strings), so the validation check never triggered
    if any(v in [None, False] for v in fields.values()):
        return
    static_thresh = fields.get('static_thresh')
    dynamic_thresh = fields.get('dynamic_thresh')
    fullness_need = fields.get('fullness_need')
    separation = fields.get('separation')
    anchor = fields.get('anchor')
    reversal = fields['reversal']
    self.status_text.setText("Saving results.... please wait")
    # one cell (contour array) per frame
    # BUGFIX: np.object alias was removed in NumPy 1.24 — use builtin object
    matframes = np.zeros((self.total_frames, ), dtype=object)
    # init by void arrays so unprocessed frames stay empty
    for frame in range(self.total_frames):
        matframes[frame] = np.array([], dtype=np.int32)
    # get data per frame and fill the 'matframes'
    total_steps = len(range(start, end, step))
    for index, frame in enumerate(range(start, end, step)):
        contour = self.update_draws(frame, methodic,
                                    static_thresh=static_thresh, dynamic_thresh=dynamic_thresh,
                                    fullness_need=fullness_need, separation=separation,
                                    anchor=anchor, reversal=reversal, save_result=True)
        if contour is not None:
            matframes[frame] = np.array(contour, dtype=np.int32)
        # keep the GUI responsive during the long loop
        QApplication.processEvents()
        QApplication.processEvents()
        self.status_text.setText(f"Processed {index / total_steps * 100:.2f} %")
    # save data into mat format
    filepath = os.path.dirname(self.filepath)
    filename = os.path.basename(self.filepath)[:-4]
    # BUGFIX: use the computed source filename (previously computed but unused,
    # while the path contained a literal placeholder)
    newpath = f"{filepath}/{filename}_{self.box_variable.currentText()}_{methodic}.mat"
    fields['frames'] = matframes
    sio.savemat(newpath, fields)
    # you are beautiful :3
    self.status_text.setText(f"Successfully saved into {newpath}")
def on_loop_draw(self):
    """
    Automatic drawing of frames in a loop, driven by the user panel settings.
    Stops when the range ends or stop_loop() sets the stop flag.
    """
    # check_input returns None (after a message box) on bad text; guard
    # before int() to avoid a TypeError crash
    start = self.check_input(self.in_start_frame)
    end = self.check_input(self.in_end_frame)
    step = self.check_input(self.in_frame_stepsize)
    if start is None or end is None or step is None:
        return
    start, end, step = int(start), int(end), int(step)
    methodic = int(self.box_method.currentText())
    #
    if start < 0 or end <= start or end >= self.total_frames or step <= 0:
        QMessageBox.about(self, "Error", f"Invalid start/end/step values")
        return
    #
    fields = self.check_fields()
    # BUGFIX: iterate over the dict VALUES — iterating the dict itself yields
    # keys (strings), so the validation check never triggered
    if any(v in [None, False] for v in fields.values()):
        return
    static_thresh = fields.get('static_thresh')
    dynamic_thresh = fields.get('dynamic_thresh')
    fullness_need = fields.get('fullness_need')
    separation = fields.get('separation')
    anchor = fields.get('anchor')
    reversal = fields['reversal']
    #
    self.flag_loop_draw_stop = False
    self.btn_loop_draw_stop.setEnabled(True)
    for frame in range(start, end, step):
        if self.flag_loop_draw_stop:
            break
        self.current_frame = frame
        self.in_start_frame.setText(str(frame))
        self.update_draws(frame, methodic,
                          static_thresh=static_thresh, dynamic_thresh=dynamic_thresh, fullness_need=fullness_need,
                          separation=separation, anchor=anchor, reversal=reversal)
def stop_loop(self):
    # Signal the loop in on_loop_draw() to stop after the current frame.
    self.flag_loop_draw_stop = True
def on_hand_draw(self, step, sign=1):
    """
    Manual frame stepping via the <</>> buttons.

    Args:
        step (int): stepsize of left/right moving
        sign (int): -1 or 1 shows the moving direction (-1 is left, 1 is right)
    """
    self.current_frame += sign * step
    methodic = int(self.box_method.currentText())
    #
    fields = self.check_fields()
    # BUGFIX: iterate over the dict VALUES — iterating the dict itself yields
    # keys (strings), so the validation check never triggered
    if any(v in [None, False] for v in fields.values()):
        return
    static_thresh = fields.get('static_thresh')
    dynamic_thresh = fields.get('dynamic_thresh')
    fullness_need = fields.get('fullness_need')
    separation = fields.get('separation')
    anchor = fields.get('anchor')
    reversal = fields['reversal']
    # clamp the frame index to the valid range
    if self.current_frame < 0:
        self.current_frame = 0
    if self.current_frame >= self.total_frames:
        self.current_frame = self.total_frames - 1
    self.in_start_frame.setText(str(self.current_frame))
    self.update_draws(self.current_frame, methodic,
                      static_thresh=static_thresh, dynamic_thresh=dynamic_thresh, fullness_need=fullness_need,
                      separation=separation, anchor=anchor, reversal=reversal)
def create_main_frame(self):
    """
    Build the central widget: the plot window plus the grid of controls
    (file/variable selection, reshape, thresholds, separation, fullness,
    manual stepping, and the automatic loop/save panel).
    """
    # create the main plot
    self.main_frame = QWidget()
    self.plot_frame = PlotWindow(self)
    # (3) Layout with panel
    btn_panel_grid = QGridLayout()
    btn_panel_grid.setContentsMargins(0, 0, 0, 0)
    current_line = 1
    ''' PREPARE BLOCK '''
    # FILE
    self.btn_file = QPushButton("Open file")
    self.btn_file.clicked.connect(self.file_dialog)
    btn_panel_grid.addWidget(self.btn_file, current_line, 0, 1, 1)
    label_variable = QLabel("Variable:")
    label_variable.setAlignment(Qt.AlignCenter | Qt.AlignVCenter)
    btn_panel_grid.addWidget(label_variable, current_line, 1, 1, 1)
    # VARIABLE combo box, disabled until a file is opened
    self.box_variable = QComboBox(self)
    btn_panel_grid.addWidget(self.box_variable, current_line, 2, 1, 1)
    self.box_variable.currentTextChanged.connect(lambda x: self.choose_variable())
    self.box_variable.setEnabled(False)
    current_line += 1
    self.label_fileinfo = QLabel("File info")
    self.label_fileinfo.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)
    btn_panel_grid.addWidget(self.label_fileinfo, current_line, 0, 1, 3)
    current_line += 1
    # RESHAPE: axis-order input plus apply button (enabled by choose_variable)
    lbl_reshape_meta = QLabel("Height Width Frame Method")
    lbl_reshape_meta.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)
    btn_panel_grid.addWidget(lbl_reshape_meta, current_line, 0, 1, 3)
    current_line += 1
    lbl_reshape = QLabel("Reshape data")
    lbl_reshape.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
    btn_panel_grid.addWidget(lbl_reshape, current_line, 0, 1, 1)
    self.in_data_reshape = QLineEdit()
    self.in_data_reshape.setText("0 1 2 3")
    btn_panel_grid.addWidget(self.in_data_reshape, current_line, 1, 1, 1)
    self.in_data_reshape.setEnabled(False)
    self.btn_reshape_data = QPushButton("Reshape")
    self.btn_reshape_data.clicked.connect(lambda x: self.reshape_data())
    btn_panel_grid.addWidget(self.btn_reshape_data, current_line, 2, 1, 1)
    self.btn_reshape_data.setEnabled(False)
    current_line += 1
    # METHOD selector, repopulated when the variable changes
    label_method = QLabel("Method")
    label_method.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
    btn_panel_grid.addWidget(label_method, current_line, 0, 1, 1)
    self.box_method = QComboBox(self)
    btn_panel_grid.addWidget(self.box_method, current_line, 1, 1, 1)
    self.box_method.setEnabled(False)
    self.box_method.currentTextChanged.connect(self.method_onchange)  # changed!
    current_line += 1
    # Threshold: radio buttons switch between static and dynamic mode
    def state0():
        # enable only the line-edit matching the selected threshold mode
        if self.radio_static.isChecked():
            self.in_static_thresh.setEnabled(True)
            self.in_dynamic_thresh.setEnabled(False)
        if self.radio_dynamic.isChecked():
            self.in_static_thresh.setEnabled(False)
            self.in_dynamic_thresh.setEnabled(True)
    self.label_object = QLabel("Threshold")
    self.label_object.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
    btn_panel_grid.addWidget(self.label_object, current_line, 0, 1, 1)
    radio_thresh_group = QButtonGroup(self.main_frame)
    self.radio_static = QRadioButton("Static")
    self.radio_dynamic = QRadioButton("Dynamic")
    self.radio_static.clicked.connect(state0)
    self.radio_dynamic.clicked.connect(state0)
    radio_thresh_group.addButton(self.radio_static)
    radio_thresh_group.addButton(self.radio_dynamic)
    self.radio_static.setChecked(True)
    btn_panel_grid.addWidget(self.radio_static, current_line, 2, 1, 1)
    btn_panel_grid.addWidget(self.radio_dynamic, current_line + 1, 2, 1, 1)
    self.in_static_thresh = QLineEdit()
    self.in_static_thresh.setPlaceholderText("0 - 100%")
    btn_panel_grid.addWidget(self.in_static_thresh, current_line, 1, 1, 1)
    self.in_dynamic_thresh = QLineEdit()
    self.in_dynamic_thresh.setPlaceholderText("3 - 50 frags")
    self.in_dynamic_thresh.setEnabled(False)
    btn_panel_grid.addWidget(self.in_dynamic_thresh, current_line + 1, 1, 1, 1)
    current_line += 2
    def state1():
        # separation bounds are editable only when the checkbox is on
        if self.chkbox_separation.isChecked():
            self.in_lower_separation.setEnabled(True)
            self.in_upper_separation.setEnabled(True)
        else:
            self.in_lower_separation.setEnabled(False)
            self.in_upper_separation.setEnabled(False)
    # normalization
    self.chkbox_separation = QCheckBox("Separation")
    self.chkbox_separation.setChecked(False)
    btn_panel_grid.addWidget(self.chkbox_separation, current_line, 0, 1, 1)
    self.chkbox_separation.stateChanged.connect(state1)
    # min
    self.in_lower_separation = QLineEdit()
    self.in_lower_separation.setPlaceholderText("lower")
    self.in_lower_separation.setEnabled(False)
    btn_panel_grid.addWidget(self.in_lower_separation, current_line, 1, 1, 1)
    # max
    self.in_upper_separation = QLineEdit()
    self.in_upper_separation.setPlaceholderText("upper")
    self.in_upper_separation.setEnabled(False)
    btn_panel_grid.addWidget(self.in_upper_separation, current_line, 2, 1, 1)
    current_line += 1
    def state2():
        # fullness input is editable only when the checkbox is on
        if self.chkbox_fullness.isChecked():
            self.in_fullness.setEnabled(True)
        else:
            self.in_fullness.setEnabled(False)
    self.chkbox_fullness = QCheckBox("Use fullness")
    self.chkbox_fullness.setChecked(False)
    btn_panel_grid.addWidget(self.chkbox_fullness, current_line, 0, 1, 1)
    self.chkbox_fullness.stateChanged.connect(state2)
    self.in_fullness = QLineEdit()
    self.in_fullness.setPlaceholderText("0 - 100%")
    self.in_fullness.setEnabled(False)
    btn_panel_grid.addWidget(self.in_fullness, current_line, 1, 1, 1)
    current_line += 1
    self.chkbox_reverse = QCheckBox(f"Reverse colors")
    self.chkbox_reverse.setChecked(False)
    btn_panel_grid.addWidget(self.chkbox_reverse, current_line, 0, 1, 1)
    current_line += 1
    self.chkbox_anchor = QCheckBox(f"Use anchor")
    self.chkbox_anchor.setChecked(False)
    btn_panel_grid.addWidget(self.chkbox_anchor, current_line, 0, 1, 1)
    # vertical separator between the prepare block and the view blocks
    self.line = QFrame()
    self.line.setFrameShape(QFrame.VLine)
    self.line.setFrameShadow(QFrame.Sunken)
    btn_panel_grid.addWidget(self.line, 0, 3, current_line + 2, 1)
    ''' END PREPARE BLOCK '''
    current_line = 1
    ''' MANUAL BLOCK '''
    self.lbl_manual = QLabel("Manual view")
    self.lbl_manual.setAlignment(Qt.AlignCenter)
    btn_panel_grid.addWidget(self.lbl_manual, current_line, 4, 1, 3)
    current_line += 1
    in_frame_step = QLineEdit("1")
    in_frame_step.setAlignment(Qt.AlignCenter)
    in_frame_step.setValidator(QIntValidator(1, 100))
    btn_panel_grid.addWidget(in_frame_step, current_line, 5, 1, 1)
    # << / >> step by the chosen amount, left or right
    left_step = lambda x: self.on_hand_draw(int(self.check_input(in_frame_step)), sign=-1)
    right_step = lambda x: self.on_hand_draw(int(self.check_input(in_frame_step)))
    self.btn_frame_left = QPushButton("<<")
    self.btn_frame_left.clicked.connect(left_step)
    self.btn_frame_left.setEnabled(False)
    btn_panel_grid.addWidget(self.btn_frame_left, current_line, 4, 1, 1)
    self.btn_frame_right = QPushButton(">>")
    self.btn_frame_right.clicked.connect(right_step)
    self.btn_frame_right.setEnabled(False)
    btn_panel_grid.addWidget(self.btn_frame_right, current_line, 6, 1, 1)
    current_line += 1
    self.lbl_framestep = QLabel("Frame step")
    self.lbl_framestep.setAlignment(Qt.AlignCenter)
    btn_panel_grid.addWidget(self.lbl_framestep, current_line, 4, 1, 3)
    ''' END MANUAL BLOCK '''
    current_line += 1
    # horizontal separator between the manual and automatic blocks
    self.line = QFrame()
    self.line.setFrameShape(QFrame.HLine)
    self.line.setFrameShadow(QFrame.Sunken)
    btn_panel_grid.addWidget(self.line, current_line, 4, 1, 3)
    current_line += 1
    ''' BEGIN AUTO BLOCK '''
    self.label_automatic = QLabel("Automatic view")
    self.label_automatic.setAlignment(Qt.AlignCenter)
    btn_panel_grid.addWidget(self.label_automatic, current_line, 4, 1, 3)
    current_line += 1
    #
    self.label_start_frame = QLabel("Start frame")
    self.label_start_frame.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)
    btn_panel_grid.addWidget(self.label_start_frame, current_line, 4, 1, 1)
    self.in_start_frame = QLineEdit("0")
    btn_panel_grid.addWidget(self.in_start_frame, current_line, 6, 1, 1)
    current_line += 1
    self.label_end_frame = QLabel("End frame")
    self.label_end_frame.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)
    btn_panel_grid.addWidget(self.label_end_frame, current_line, 4, 1, 1)
    self.in_end_frame = QLineEdit("0")
    btn_panel_grid.addWidget(self.in_end_frame, current_line, 6, 1, 1)
    current_line += 1
    self.label_stepsize_frame = QLabel("Step size frame")
    self.label_stepsize_frame.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)
    btn_panel_grid.addWidget(self.label_stepsize_frame, current_line, 4, 1, 1)
    self.in_frame_stepsize = QLineEdit("1")
    self.in_frame_stepsize.setValidator(QIntValidator(0, 100))
    btn_panel_grid.addWidget(self.in_frame_stepsize, current_line, 6, 1, 1)
    current_line += 1
    self.btn_loop_draw = QPushButton("Start loop draw")
    self.btn_loop_draw.clicked.connect(lambda x: self.on_loop_draw())
    self.btn_loop_draw.setEnabled(False)
    btn_panel_grid.addWidget(self.btn_loop_draw, current_line, 4, 1, 3)
    current_line += 1
    self.btn_loop_draw_stop = QPushButton("Stop loop draw")
    self.btn_loop_draw_stop.clicked.connect(lambda x: self.stop_loop())
    self.btn_loop_draw_stop.setEnabled(False)
    btn_panel_grid.addWidget(self.btn_loop_draw_stop, current_line, 4, 1, 3)
    current_line += 2
    self.btn_save_results = QPushButton("Save results")
    self.btn_save_results.clicked.connect(lambda x: self.save_contour())
    self.btn_save_results.setEnabled(False)
    btn_panel_grid.addWidget(self.btn_save_results, current_line, 4, 1, 3)
    """ END AUTO BLOCK """
    # (4) combine all in the structure
    vbox = QVBoxLayout()
    vbox.addLayout(btn_panel_grid)
    self.main_frame.setLayout(vbox)
    self.setCentralWidget(self.main_frame)
def create_status_bar(self):
    # Status line shown at the bottom of the main window; updated by most actions.
    self.status_text = QLabel("Waiting a file...")
    self.statusBar().addWidget(self.status_text, stretch=1)
def main():
    """Application entry point: create the Qt application and run its event loop."""
    app = QApplication(sys.argv)
    window = Application()
    window.resize(700, 350)
    window.show()
    app.exec_()


if __name__ == "__main__":
    main()
|
{"hexsha": "3f2efc0abb5d530cff3d28c7de5ed10307ed5f0b", "size": 35383, "ext": "py", "lang": "Python", "max_stars_repo_path": "analysis/signal_recognition.py", "max_stars_repo_name": "research-team/memrisitve-spinal-cord", "max_stars_repo_head_hexsha": "20c3c26757b7ff8fdffb846728093d386c564797", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "analysis/signal_recognition.py", "max_issues_repo_name": "research-team/memrisitve-spinal-cord", "max_issues_repo_head_hexsha": "20c3c26757b7ff8fdffb846728093d386c564797", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "analysis/signal_recognition.py", "max_forks_repo_name": "research-team/memrisitve-spinal-cord", "max_forks_repo_head_hexsha": "20c3c26757b7ff8fdffb846728093d386c564797", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8853955375, "max_line_length": 126, "alphanum_fraction": 0.7155696238, "include": true, "reason": "import numpy,import scipy", "num_tokens": 9739}
|
import numpy as np
from numpy.random import uniform
def next_batch():
    """
    Modify this function to ingest your data and returns it.
    :return: (inputs, targets). Could be a python generator.
    """
    # 32 random samples as a column vector (32, 1)
    inputs = uniform(size=32)[:, None]
    # target is the mean of the inputs as a (1, 1) array; should be close to 0.5
    target = np.mean(inputs).reshape(1, 1)
    return inputs, target
|
{"hexsha": "ca9880cc8433e3289e77d103b7459cf9ca519bf6", "size": 368, "ext": "py", "lang": "Python", "max_stars_repo_path": "data_reader.py", "max_stars_repo_name": "philipperemy/wavenet", "max_stars_repo_head_hexsha": "a10bb3cdf924df5b77a56c56e70c853c1e614327", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 21, "max_stars_repo_stars_event_min_datetime": "2016-11-18T03:06:10.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-21T14:02:01.000Z", "max_issues_repo_path": "data_reader.py", "max_issues_repo_name": "afcarl/wavenet-philipperemy", "max_issues_repo_head_hexsha": "a10bb3cdf924df5b77a56c56e70c853c1e614327", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data_reader.py", "max_forks_repo_name": "afcarl/wavenet-philipperemy", "max_forks_repo_head_hexsha": "a10bb3cdf924df5b77a56c56e70c853c1e614327", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2016-11-24T02:44:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-15T11:52:19.000Z", "avg_line_length": 28.3076923077, "max_line_length": 92, "alphanum_fraction": 0.6657608696, "include": true, "reason": "import numpy,from numpy", "num_tokens": 100}
|
import unittest
from sotodlib.core import metadata
import os
import time
import numpy as np
# Global Announcement: I know, but I hate slow tests.
example = None
class TestDetDb(unittest.TestCase):
    """Exercises metadata.DetDb: queries, ResultSet behavior, and file I/O."""

    def setUp(self):
        # Build the (slow) example database only once per process.
        global example
        if example is None:
            print('Creating example database...')
            example = metadata.get_example('DetDb')

    def test_smoke(self):
        """Basic functionality."""
        db = example.copy()
        print('Test 1: how many dets have array_code=HF1?')
        X = db.dets(props={'base.array_code': 'HF1'})
        print('  Answer: %i' % len(X))
        print('  The first few are:')
        for x in X[:5]:
            print('    ' + str(x))
        print()
        print('Test 2: Get (array, wafer) for a bunch of dets.')
        u2 = db.props(X, props=['base.array_code', 'wafer_code'])
        pairs = list(u2.distinct())
        print('  Distinct pairs:')
        for p in pairs:
            print('    ' + str(p))
        print()
        assert(len(pairs) == 3)

    def test_resultset(self):
        """Test that ResultSet objects have required behaviors.
        """
        db0 = example.copy()
        dets = db0.dets()
        props = db0.props(props=['base.array_code', 'base.wafer_code'])
        combos = props.distinct()
        assert isinstance(dets, metadata.ResultSet)
        assert isinstance(props, metadata.ResultSet)
        assert isinstance(combos, metadata.ResultSet)
        assert isinstance(combos[0], dict)
        assert isinstance(list(combos)[0], dict)
        # Test distinct coverage: the union of per-combo queries equals one query.
        n0 = len(db0.dets(props=combos))
        n1 = 0
        for c in combos:
            subd = db0.dets(props=c)
            n1 += len(subd)
        assert(n0 == n1)
        # Check operators...
        assert isinstance(combos[:2] + combos[2:], metadata.ResultSet)
        with self.assertRaises(TypeError):
            combos + [1, 2, 3]
        with self.assertRaises(ValueError):
            combos + dets
        # Check indexing
        # ... with int
        self.assertIsInstance(dets[1], dict)
        # BUGFIX: np.int was removed in NumPy 1.24; np.int64 keeps the
        # intent of testing indexing with a numpy integer scalar
        self.assertIsInstance(dets[np.int64(1)], dict)
        self.assertIsInstance(dets[np.arange(3)[1]], dict)
        # ... with slice
        dets_subset = dets[1:3]
        self.assertIsInstance(dets_subset, metadata.ResultSet)
        self.assertEqual(len(dets_subset), 2)
        # ... Check that string index returns field
        names = dets['name']
        self.assertIsInstance(names, np.ndarray)
        self.assertEqual(len(names), len(dets))

    def test_io(self):
        """Check to_file and from_file round-trips in all supported formats."""
        db0 = example.copy()
        dump_list = [('test.sqlite', None),
                     ('test.txt', 'dump'),
                     ('test.gz', None)]
        # Save, read back, and clean up each format.
        for fn, fmt in dump_list:
            print(f'Writing {fn}')
            db0.to_file(fn, fmt=fmt)
            print(' -- output has size {}'.format(os.path.getsize(fn)))
            t0 = time.time()
            db1 = metadata.DetDb.from_file(fn, fmt=fmt)
            dt = time.time() - t0
            print(' -- read-back {} rows in {} seconds.'.format(
                len(db1.dets()), dt))
            print(' -- removing.')
            os.remove(fn)
# Allow running this test module directly with `python test_detdb.py`.
if __name__ == '__main__':
    unittest.main()
|
{"hexsha": "1638100d6f81cc0dbcbdc96232fa85b6cd624fc2", "size": 3319, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_detdb.py", "max_stars_repo_name": "tskisner/sotodlib", "max_stars_repo_head_hexsha": "9b80171129ea312bc7a61ce5c37d6abfbb3d5be9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_detdb.py", "max_issues_repo_name": "tskisner/sotodlib", "max_issues_repo_head_hexsha": "9b80171129ea312bc7a61ce5c37d6abfbb3d5be9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_detdb.py", "max_forks_repo_name": "tskisner/sotodlib", "max_forks_repo_head_hexsha": "9b80171129ea312bc7a61ce5c37d6abfbb3d5be9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.4495412844, "max_line_length": 72, "alphanum_fraction": 0.5486592347, "include": true, "reason": "import numpy", "num_tokens": 776}
|
module Data.Strong.Array.Fusion
import Data.Strong.Array
import Language.Reflection
-- import public Language.Reflection.Pretty
-- import public Language.Reflection.Syntax
-- import public Language.Reflection.Types
--
-- import public Text.PrettyPrint.Prettyprinter
--
-- -- import Generics.Derive
%language ElabReflection
-- I can get elab to inline and fuse for me but not quite as
-- nicely/automatically as I prefer so far, work continues.
-- %hide Prelude.Basics.(.)
-- This is probably a lost cause currently, inlining occurs _after_ %transforms
-- so we can't use them to fuse things like blar. Perhaps elaborator reflection
-- can do something, but I'm not sure how far we can get with Check/Quote
||| One transition of a stream automaton: either yield an element along
||| with a successor state, skip to a successor state producing nothing,
||| or signal that the stream is exhausted.
data Step : Type -> Type -> Type where
  Yield : a -> s -> Step s a
  Skip : s -> Step s a
  Done : Step s a
||| A length-indexed stream: a step function over some (existential)
||| state type, an initial state, and the static element count `n`.
data Stream : Nat -> Type -> Type where
  MkStream : (s -> Step s a) -> s -> (n : Nat) -> Stream n a
||| Convert an array to a stream.
||| NOTE(review): the implementation is still commented out below, so this
||| declaration currently has no clauses — confirm that is intentional.
stream : Array n a -> Stream n a
-- stream arr = case arr of
--   MkArray s _ _ => MkStream step 0 s
--   where
--     step : Int -> Step Int a
--     step s = if s >= intSize arr
--                then Done
--                else Yield (unsafeReadArray arr (cast s)) (s+1)
||| Convert a stream back to an array (declaration only; no clauses yet).
unstream : Stream n a -> Array n a
-- %transform "what" Prelude.(.) = Fusion.gop
||| Map a function over the elements of a stream lazily: only the step
||| function is wrapped, so no elements are forced here.
mapS : (a -> b) -> Stream n a -> Stream n b
mapS f (MkStream next start count) =
  MkStream (\st => case next st of
                     Done => Done
                     Skip st' => Skip st'
                     Yield el st' => Yield (f el) st') start count
-- braf : Elab ((b -> c) -> (a -> b) -> (a -> c))
-- I just want this to wrap in stream/unstream
-- export
-- streamWrap : (Stream a -> Stream b) -> Array s a -> Elab (Array s a -> Array s b)
-- streamWrap f arr = pure (newUnintializedArray 2)
-- %macro
-- streamWrap : {s,n,a,b:_} -> (Stream n a -> Stream n b) -> Elab (Array s a -> Array s b)
-- streamWrap f = do
-- r <- quote f
-- check `(unstream . ~r . stream)
-- %macro
-- map' : {s,a,b:_} -> (a -> b) -> Elab (Array s a -> Array s b)
-- map' f = do
-- r <- quote f
-- check `(unstream . mapS ~r . stream)
-- `(stream (unstream ~x))
||| Bottom-up rewriter implementing the stream-fusion rule: delete any
||| `stream (unstream x)` round-trip, and descend structurally into every
||| other TTImp constructor so nested redexes are also found.
spider : TTImp -> TTImp
spider `(stream (unstream ~x)) = spider x
spider (IPi x y z w argTy retTy) = IPi x y z w (spider argTy) (spider retTy)
spider (ILam x y z w argTy lamTy) = (ILam x y z w (spider argTy) (spider lamTy))
spider (ILet x lhsFC y z nTy nVal scope) = (ILet x lhsFC y z (spider nTy) (spider nVal) (spider scope))
spider (ICase x y ty xs) = (ICase x y (spider ty) xs) -- do clauses?
spider (ILocal x xs y) = (ILocal x xs (spider y)) -- do decls?
spider (IUpdate x xs y) = (IUpdate x xs (spider y))
-- NOTE(review): this fully-qualified clause overlaps the quoted pattern in
-- the first clause; it only fires if the quoted form elaborates differently.
spider `(Data.Strong.Array.Fusion.stream {a = _} {n = _} (Data.Strong.Array.Fusion.unstream {a = _} {n = _} ~x)) = x
spider (INamedApp x y z w) = (INamedApp x (spider y) z (spider w))
spider (IApp x y z) = (IApp x (spider y) (spider z))
spider (IAutoApp x y z) = (IAutoApp x (spider y) (spider z))
spider (IWithApp x y z) = (IWithApp x (spider y) (spider z))
spider (IAlternative x y xs) = (IAlternative x y (map spider xs))
spider (IRewrite x y z) = (IRewrite x (spider y) (spider z))
spider (IBindHere x y z) = (IBindHere x y (spider z))
spider (IAs x nameFC y z w) = (IAs x nameFC y z (spider w))
spider (IMustUnify x y z) = (IMustUnify x y (spider z))
spider (IDelayed x y z) = (IDelayed x y (spider z))
spider (IDelay x y) = (IDelay x (spider y))
spider (IForce x y) = (IForce x (spider y))
spider (IQuote x y) = (IQuote x (spider y))
spider (IUnquote x y) = (IUnquote x (spider y))
spider (IWithUnambigNames x xs y) = (IWithUnambigNames x xs (spider y))
-- Leaves (IVar, literals, holes, ...) are returned unchanged.
spider x = x
infixr 9 `dot`
-- fuse : x -> x
-- fuse x = %runElab do
-- r <- quote x
-- logTerm "" 1 "faf" r
-- check (spider r)
infixr 9 `dott`

||| Macro-level function composition.  The commented experiments below
||| tried to detect and cancel `stream`/`unstream` round-trips at the
||| point of composition; as written it simply composes `f` after `g`.
%macro
dott : {c:_} -> (b -> c) -> (a -> b) -> Elab (a -> c)
dott f g = lambda _ $ \x => do
  -- let r = g x
  -- let r' = f r
  -- d <- quote r
  -- `(Data.Strong.Array.Fusion.stream {a = _} {n = _} (Data.Strong.Array.Fusion.unstream {a = _} {n = _} ~y)) <- quote r
  --   | _ => pure r'
  -- _ <- check d
  pure (f (g x))
-- ?dfdf
-- %transform "stream fusion1" Fusion.stream . Fusion.unstream = Prelude.id
-- %macro
-- dot : {c:_} -> (b -> c) -> (a -> b) -> a -> Elab c
-- dot f g x = do
-- r <- quote (f (g x))
-- check (spider r)
||| Macro `map` for arrays: stream the array, map over the stream, then
||| unstream, so adjacent maps can in principle fuse at the round-trip.
%macro
map' : (a -> b) -> Elab (Array s a -> Array s b)
map' f = lambda _ $ \x => pure $ unstream (mapS f (stream x))
-- %macro
-- streamWrap : {s,a,b:_} -> (Stream s a -> Stream s b) -> Elab (Array s a -> Array s b)
-- streamWrap f = lambda _ $ \x => pure $ unstream (f (stream x))
-- map' : (a -> b) -> Array s a -> Array s b
-- map' f = streamWrap (mapS f)
-- %inline
-- map' : (a -> b) -> Array s a -> Array s b
-- map' f = unstream `dot` mapS f `dot` stream
||| Example pipeline: add 2.0 then 1.0 to every element, composed with
||| `dott` so the intermediate stream/unstream pair could be fused away.
export
blar : Array s Double -> Array s Double
blar = map' {s} (Prelude.(+) 1.0) `dott` map' (Prelude.(+) 2.0) `dott` Prelude.id
-- export
-- blar : Array s Double -> Array s Double
-- blar arr =
-- let f = map' {s} (Prelude.(+) 1.0) `dott` map' (Prelude.(+) 2.0) `dott` id
-- in %runElab do
-- z <- quote (f arr)
-- let r = spider z
-- logTerm "" 1 "wah" r
-- check r
-- pure z
-- farf : Int -> Int
-- farf = dot (+1) (+1)
-- I need map' to inline before our fusion rule fires so I get the below
-- which the rule should match:
-- ^ unstream . mapS (+1) . stream . unstream . mapS (+2) . stream
-- ^ unstream . (mapS (+1) . (stream . (unstream . (mapS (+2) . stream))))
-- So, how do we unroll map' via elab?
-- (IApp EmptyFC (INamedApp EmptyFC (INamedApp EmptyFC (IVar EmptyFC (NS (MkNS ["Fusion","Array","Strong","Data"]) (UN "stream"))) (UN "a") (IPrimVal EmptyFC DoubleType)) (UN "n") (IHole (MkFC (Virtual Interactive) (0,15) (0, 20)) "s")) (IApp EmptyFC (INamedApp EmptyFC (INamedApp EmptyFC (IVar EmptyFC (NS (MkNS ["Fusion","Array","Strong","Data"]) (UN "unstream"))) (UN "a") (IPrimVal EmptyFC DoubleType)) (UN "n") (IHole (MkFC (Virtual Interactive) (0,15) (0,20)) "s")) (UN "x")
-- (IApp EmptyFC
-- (INamedApp EmptyFC
-- (INamedApp EmptyFC
-- (IVar EmptyFC (NS (MkNS ["Fusion","Array","Strong","Data"]) (UN "stream")))
-- (UN "a")
-- (IPrimVal EmptyFC DoubleType))
-- (UN "n")
-- (IHole "s"))
-- (IApp EmptyFC
-- (INamedApp EmptyFC
-- (INamedApp EmptyFC
-- (IVar EmptyFC (NS (MkNS ["Fusion","Array","Strong","Data"]) (UN "unstream")))
-- (UN "a")
-- (IPrimVal EmptyFC DoubleType))
-- (UN "n")
-- (IHole "s"))
-- (IApp _ (INamedApp _ (INamedApp _ (IVar _ (NS (MkNS ["Fusion","Array","Strong","Data"]) (UN "stream"))) _ _) _ _) (IApp _ (INamedApp _ (INamedApp _ (IVar EmptyFC (NS (MkNS ["Fusion","Array","Strong","Data"]) (UN "unstream"))) _ _) _ _) w))
-- foo : TTImp
-- foo = `( stream . unstream)
--
-- spider : TTImp -> TTImp
-- spider `((~f . ~g) ~x) = `(~(spider f) (~(spider g) x))
-- spider r = r
--
-- fuse : TTImp -> TTImp
-- fuse `(stream (unstream ~x)) = x
-- fuse r = r
-- `(unstream . map1 . stream . unstream . map 2 . stream)
-- LOG declare.type:1: Processing Data.Strong.Array.Fusion.14044:1647:it
-- IApp (IApp (IVar (UN ".")) (IVar (UN "unstream"))) (IApp (IApp (IVar (UN ".")) (IVar (UN "map1"))) (IApp (IApp (IVar (UN ".")) (IVar (UN "stream"))) (IApp (IApp (MkFC (Virtual Interactive) (0, 29) (0,
-- 54)) (IVar (MkFC (Virtual Interactive) (0, 38) (0,
-- 39)) (UN ".")) (IVar (MkFC (Virtual Interactive) (0, 29) (0,
-- 37)) (UN "unstream"))) (IApp (MkFC (Virtual Interactive) (0, 40) (0,
-- 54)) (IApp (MkFC (Virtual Interactive) (0, 40) (0,
-- 54)) (IVar (MkFC (Virtual Interactive) (0, 46) (0,
-- 47)) (UN ".")) (IApp (MkFC (Virtual Interactive) (0, 40) (0,
-- 45)) (IVar (MkFC (Virtual Interactive) (0, 40) (0,
-- 43)) (UN "map")) (IApp (MkFC (Virtual Interactive) (0, 44) (0,
-- 45)) (IVar (MkFC (Virtual Interactive) (0, 44) (0,
-- 45)) (UN "fromInteger")) (IPrimVal (MkFC (Virtual Interactive) (0, 44) (0,
-- 45)) (BI 2))))) (IVar (MkFC (Virtual Interactive) (0, 48) (0,
-- 54)) (UN "stream"))))))
-- Data.Strong.Array.Fusion>
--
-- dot : (b -> c) -> (a -> b) -> (a -> c)
-- dot f g x = %runElab do
-- z <- quote $ blar {s=1}
-- q <- quote $ Fusion.map' (+1) {a=Int} {b=Int} {s=1}
-- logTerm "" 1 "blar" z
-- logTerm "" 1 "map'" q
-- logTerm "" 1 "spider" $ spider `(unstream . map1 . stream . unstream . map 2 . stream)
-- pure (f (g x))
-- --?sdffd
--
-- foo2 : Elab ()
-- foo2 = do
-- ?asSDfsfd
-- %transform "vectStreamFusion3" Fusion.map' f . Fusion.map' g = Fusion.map' (f . g)
||| Smoke test: run the fused pipeline on a small array and print it.
main : IO ()
-- main = pure ()
main = do
  -- let xvar = nak $ MkStream {a=Int} (\_ => Done) 'a' 1
  printLn (blar $ fromList [1.0,2.0,3.0])
|
{"hexsha": "e31322111a3a04a03fc25e7381913870f40636ce", "size": 8609, "ext": "idr", "lang": "Idris", "max_stars_repo_path": "src/Data/Strong/Array/Fusion.idr", "max_stars_repo_name": "MarcelineVQ/sarray", "max_stars_repo_head_hexsha": "e7728a9b89396ef6241318be9f82c45577969c8c", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2021-09-11T09:00:28.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-08T17:25:53.000Z", "max_issues_repo_path": "src/Data/Strong/Array/Fusion.idr", "max_issues_repo_name": "MarcelineVQ/idris2-sarray", "max_issues_repo_head_hexsha": "e7728a9b89396ef6241318be9f82c45577969c8c", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Data/Strong/Array/Fusion.idr", "max_forks_repo_name": "MarcelineVQ/idris2-sarray", "max_forks_repo_head_hexsha": "e7728a9b89396ef6241318be9f82c45577969c8c", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2988047809, "max_line_length": 480, "alphanum_fraction": 0.5809037054, "num_tokens": 3200}
|
import sys, getopt, io
import requests
import json, csv
import urllib.request
import plotext as ptt
import uniplot as uni
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
import matplotlib.dates as dts
from matplotlib.dates import DateFormatter
class My_DcTracker_Class():
    """NAME
        My_DcTracker_Class - A Class to display a current figure of dc prices by using a requests function.
    """

    def __init__(self, argu):
        """Store the request/portfolio configuration.

        :param argu: dict with keys 'url', 'currency', 'comission',
            'coinvalue' and 'coinamount'.  ('comission' keeps the original
            misspelling because callers pass that key.)
        """
        self.argu = argu
        self.url = argu['url']
        self.currency = argu['currency']
        self.comission = argu['comission']
        self.coinvalue = argu['coinvalue']
        self.coinamount = argu['coinamount']

    def __str__(self):
        # Bug fix: the original returned None, which makes str(obj) raise
        # TypeError.  Return a short human-readable summary instead.
        return f'My_DcTracker_Class(url={self.url!r}, currency={self.currency!r})'

    def __eq__(self, other):
        # Bug fix: the original returned None for every comparison.  Two
        # trackers are equal when built from equal argument dicts; other
        # types defer to the default comparison via NotImplemented.
        if isinstance(other, My_DcTracker_Class):
            return self.argu == other.argu
        return NotImplemented

    def set_request(self):
        """ Requests the data and exports it as a json file. """
        req_data = requests.get(self.argu['url'])
        return req_data.json()

    def set_prices_float(self, data):
        """Extract the price series from the response and coerce to float.

        :param data: parsed JSON; data[0]['prices'] is a list of numeric
            strings (mutated in place).
        :returns: the same list with float entries.
        """
        print(data)
        print(data[0]['currency'])
        prices = data[0]['prices']
        for i in range(0, len(prices)):
            prices[i] = float(prices[i])
        print(prices)
        return prices

    def set_times(self, data):
        """Return the timestamp series from the response (unconverted)."""
        time = data[0]['timestamps']
        # The original iterated over `time` doing nothing; that no-op loop
        # was removed.  Timestamps are returned exactly as received.
        print(time)
        return time

    def set_content(self, data):
        """Convert each price into net earnings for the holding, in place.

        earnings = coinamount * price * (1 - comission) - coinvalue
        """
        for i in range(0, len(data)):
            EBKV = self.coinamount * data[i]
            EBV = EBKV * (1 - self.comission)
            data[i] = EBV - self.coinvalue
        print(" ")
        print(" ")
        print(" ")
        print(data)
        return data

    def set_plot(self, earnings, times):
        """Terminal plot (plotext) of earnings with a zero baseline."""
        y = earnings
        x = times
        listofzeros = [0] * len(earnings)
        ptt.plot(y, line_color='red')
        ptt.plot(listofzeros, line_color='green')
        ptt.grid(True)
        ptt.canvas_color("black")
        ptt.axes_color("black")
        ptt.ticks_color("cloud")
        ptt.show()
        print(str(y[-1]))
        print(str(x[-1]))

    def set_uni(self, earnings, times):
        """Minimal terminal plot (uniplot) of the earnings series."""
        y = earnings
        x = times
        uni.plot(ys=y, lines=1)
        print("Last Entry: ")
        print(str(y[-1]))
        print(str(x[-1]))

    def set_matplot(self, data, times):
        """Matplotlib date plot of the earnings series.

        NOTE(review): the title/ylabel text looks copied from a tutorial
        example — confirm the intended labels.
        """
        plt.plot_date(x=times, y=data, fmt="r-")
        plt.title("Page impressions on example.com")
        plt.ylabel("Page impressions")
        plt.grid(True)
        plt.xticks(rotation=90)
        # Bug fix: the formatter was created but never attached to the axis.
        date_form = DateFormatter("%m-%d")
        plt.gca().xaxis.set_major_formatter(date_form)
        plt.show()
|
{"hexsha": "9ac1fd32c22e0573efbe95eb7361da9c63ca7c1f", "size": 2739, "ext": "py", "lang": "Python", "max_stars_repo_path": "distata/class_tracker.py", "max_stars_repo_name": "onshoremanover/dist", "max_stars_repo_head_hexsha": "96a52b23e6e651d6d6b73614c73a5aa0d0c4bd14", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "distata/class_tracker.py", "max_issues_repo_name": "onshoremanover/dist", "max_issues_repo_head_hexsha": "96a52b23e6e651d6d6b73614c73a5aa0d0c4bd14", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "distata/class_tracker.py", "max_forks_repo_name": "onshoremanover/dist", "max_forks_repo_head_hexsha": "96a52b23e6e651d6d6b73614c73a5aa0d0c4bd14", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.3611111111, "max_line_length": 107, "alphanum_fraction": 0.5604235122, "include": true, "reason": "import numpy", "num_tokens": 677}
|
import numpy as np
from helpers import remove_callback_dir, make_callback_dir, save_array
from core import ToxicComments, ToxicModel, prepare
def run():
    """Train the toxic-comment model batch by batch, then write test predictions."""
    batch_size = 10000
    n_features = 100

    # Data source and model.
    train_comments = ToxicComments('tcc/data/train.csv', batch_size=batch_size)
    model = ToxicModel(n_features, 6)

    # Start each run with clean callback directories.
    remove_callback_dir('events')
    remove_callback_dir('ckpts')

    # One training call per batch, with per-iteration callback dirs.
    for iteration, (_, batch_comments, batch_labels) in enumerate(train_comments, start=1):
        features = np.array(prepare(batch_comments, n_features))
        targets = np.array(batch_labels)
        callback_dirs = [make_callback_dir('events', iteration),
                         make_callback_dir('ckpts', iteration)]
        model.train(features, targets, callback_dirs=callback_dirs)

    # Predict on the held-out test set and persist the submission.
    test_comments = ToxicComments('tcc/data/test.csv')
    ids, raw_comments, _ = next(test_comments)
    prepared = prepare(raw_comments, n_features=n_features)
    save_array(ids, model.predict(prepared), 'tcc/data/test_submission5.csv')
# Entry point when executed as a script.
if __name__ == '__main__':
    run()
|
{"hexsha": "65ef8d2b56d6604114426e5ba2776ccad05c0aea", "size": 1098, "ext": "py", "lang": "Python", "max_stars_repo_path": "tcc/main.py", "max_stars_repo_name": "mctrjalloh/jigsai-tcc-challenge", "max_stars_repo_head_hexsha": "4ea9e7bef9586a789df4b6dbe76b7c11084cf719", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tcc/main.py", "max_issues_repo_name": "mctrjalloh/jigsai-tcc-challenge", "max_issues_repo_head_hexsha": "4ea9e7bef9586a789df4b6dbe76b7c11084cf719", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tcc/main.py", "max_forks_repo_name": "mctrjalloh/jigsai-tcc-challenge", "max_forks_repo_head_hexsha": "4ea9e7bef9586a789df4b6dbe76b7c11084cf719", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.7804878049, "max_line_length": 76, "alphanum_fraction": 0.693989071, "include": true, "reason": "import numpy", "num_tokens": 264}
|
[STATEMENT]
lemma wcode_erase2_via_move [simp]: "wcode_right_move ires rs (b, Bk # list) \<Longrightarrow> wcode_erase2 ires rs (Bk # b, list)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. wcode_right_move ires rs (b, Bk # list) \<Longrightarrow> wcode_erase2 ires rs (Bk # b, list)
[PROOF STEP]
by (auto simp:wcode_fourtimes_invs ) auto
|
{"llama_tokens": 143, "file": "Universal_Turing_Machine_UTM", "length": 1}
|
export ## Types
CompositeQSystem,
Control,
Dissipation,
Field,
Interaction,
ParametricInteraction,
QSystem,
## Methods
dim,
label,
lowering,
number,
raising,
strength,
X
# Base abstract type for quantum systems (pre-0.6 Julia syntax; kept for
# consistency with the rest of this file).
abstract QSystem

# Accessors shared by all QSystem subtypes; each subtype is expected to
# carry `label` and `dim` fields.
label(q::QSystem) = q.label
dim(q::QSystem) = q.dim

# Ladder operators in the number basis: `raising` puts sqrt weights on the
# first subdiagonal (k = -1), `lowering` on the first superdiagonal (k = 1).
raising(q::QSystem) = diagm(sqrt(1:(dim(q)-1)), -1)
lowering(q::QSystem) = diagm(sqrt(1:(dim(q)-1)), 1)

number(q::QSystem) = raising(q) * lowering(q)   # occupation-number operator
X(q::QSystem) = raising(q) + lowering(q)        # quadrature operator a' + a

# Time-independent systems fall back to the time-free Hamiltonian.
hamiltonian(q::QSystem, t) = hamiltonian(q)
# A control knob/field applied to a system; subtypes carry a `label` field.
abstract Control
label(c::Control) = c.label

# Coupling between subsystems; `strength` is its coefficient and
# time-independent interactions fall back to the time-free Hamiltonian.
abstract Interaction
strength(i::Interaction) = i.strength
hamiltonian(i::Interaction, t) = hamiltonian(i)

# An interaction whose parameters vary in time.
abstract ParametricInteraction <: Interaction

# A dissipative (non-unitary) process.
abstract Dissipation
# [todo] - Should COmpositeQSystem <: QSystem ?
# Container tying together subsystems, their couplings, and dissipators.
type CompositeQSystem
    # [feature] - Use something like OrderedDict for component enumeration
    subSystems::Vector{QSystem}
    interactions::Vector{Interaction}
    # NOTE(review): field name is misspelled ("parameteric"); renaming it
    # would break existing accessors, so it is kept as-is.
    parametericInteractions::Vector{ParametricInteraction}
    # Index expansions for embedding subsystem/interaction operators into
    # the full tensor-product space (one entry per subsystem/interaction).
    subSystemExpansions::Vector{Vector{Vector{Int}}}
    interactionExpansions::Vector{Vector{Vector{Int}}}
    dissipators::Vector{Dissipation}
end
|
{"hexsha": "08dd2316236f5de49c185c08c8059ad17910eb36", "size": 1231, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/base-types.jl", "max_stars_repo_name": "silky/QSimulator.jl", "max_stars_repo_head_hexsha": "ceb112b062acacfda7c2375309f572e1d844f44d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-06-27T11:33:27.000Z", "max_stars_repo_stars_event_max_datetime": "2019-06-27T11:33:27.000Z", "max_issues_repo_path": "src/base-types.jl", "max_issues_repo_name": "silky/QSimulator.jl", "max_issues_repo_head_hexsha": "ceb112b062acacfda7c2375309f572e1d844f44d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/base-types.jl", "max_forks_repo_name": "silky/QSimulator.jl", "max_forks_repo_head_hexsha": "ceb112b062acacfda7c2375309f572e1d844f44d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.137254902, "max_line_length": 74, "alphanum_fraction": 0.6799350122, "num_tokens": 324}
|
"""
Author Hugues
Evaluate the effectiveness of the features obtained by different feature
reduction algorithms.
This script is essentially a series of for loops, and it is long (a few hours)
"""
# Make the parent package importable when this file is run directly.
if __name__ == '__main__':  # this is used to launch the file from anywhere
    import sys
    sys.path.append("..")
import torch
from copy import deepcopy
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from param import device
from models.store_model_SHL import Diagnostic_CNN
from models.store_model_CIFAR import Diagnostic_ResNet # the classes are needed to load the pickle
from models.load_model import load_data, file_location
from models.load_model import sensors as sensors_per_dataset
from similarity.feature_reduction import FeatureReduction
# Feature-reduction methods under comparison.
methods_list = ['CCA_highest', 'CCA_random', 'CCA_lowest', 'PCA', 'max_activation', 'random_proj', 'random_keep']
# We repeat these methods more often because of their randomness.
random_methods_list = ['random_proj', 'random_keep', 'CCA_random']

n_repeat_random = 3   # repeat each random feature reduction 3 times per couple
                      # of networks
n_repeat_FCLayer = 1  # we only repeat each training of the FC layer 1 time
                      # because we already have 3 networks per sensor.

# Numbers of components kept after reduction (dense 1..15, then powers of 2).
n_features_list = list(range(1,16)) + [16, 32, 64, 128]
n_trials = 3

# plot options: one fixed RGB color per method.
colors = {'random_proj': [0.2, 0.23, 1.],
          'random_keep': [0.2, 0.8, 1.],
          'max_activation': [0.8, 0.2, 1.],
          'PCA': [0.05, 0.9, 0.05],
          'CCA_highest': [1, 0., 0.],
          'CCA_lowest': [0.9, 0.9, 0.],
          'CCA_random': [1., 0.5, 0.]}
legend_types = [Line2D([0], [0], linestyle='-', color=colors[method]) for method in methods_list]
score_names = {"SHL_2018": "F1-score", "CIFAR_10": "accuracy"}
#%%
if __name__ == "__main__":
    """ reminder:
    data: dict
        keys = sensor (ex "Acc_norm" or "CIFAR_10")
        values = dict
            keys = split ('train' or 'val')
            values = list of numpy arrays (n_samples, ...)
                one array per initialization (3*2 = 6 by default)
    """
    for dataset in ["CIFAR_10", "SHL_2018"]:
        sensors_list = sensors_per_dataset[dataset]
        # Nested result store: sensor -> method -> trial -> n_features -> scores.
        results_retrained_dict = {sensor:
                                      {method:
                                           {i_trial:
                                                {n_features:
                                                     []  # list of F1 scores
                                                 for n_features in n_features_list}
                                            for i_trial in range(n_trials)}
                                       for method in methods_list}
                                  for sensor in sensors_list}
        results_old_dict = deepcopy(results_retrained_dict)
        # the architecture is the same, but not the content.
        for i_sensor, sensor in enumerate(sensors_list):
            data, models, GT = load_data(file_location, dataset)
            for method in methods_list:
                for i_trial in range(n_trials):
                    X_train = data[sensor]['train'][i_trial]
                    X_val = data[sensor]['val'][i_trial]
                    Y_train = GT['train'].reshape(-1)
                    Y_val = GT['val'].reshape(-1)
                    n_train_samples = X_train.shape[0]
                    n_val_samples = X_val.shape[0]
                    old_shape = X_train.shape[1:]  # get rid of the number of samples
                    # Flatten features so reduction works on 2-D matrices.
                    X_train = X_train.reshape(n_train_samples, -1)
                    X_val = X_val.reshape(n_val_samples, -1)
                    # For the CCA: use an other instance of the same sensor
                    next_i_trial = i_trial + n_trials
                    X_train_next = data[sensor]['train'][next_i_trial]
                    X_train_next = X_train_next.reshape(X_train_next.shape[0], -1)
                    this_n_repeat = n_repeat_random if method in random_methods_list else n_repeat_FCLayer
                    # we will also repeat the classification using the old model N times,
                    # but it is not long compared to the rest
                    for i_repeat in range(this_n_repeat):
                        if method in random_methods_list or i_repeat == 0:
                            # compute the feature reduction the first time, or
                            # recompute it each repeat if the method is random
                            feature_reduction = FeatureReduction(method=method)
                            feature_reduction.fit(X_train, X_train_next)
                        for n_features_to_keep in n_features_list:
                            if n_features_to_keep <= X_train.shape[1]:
                                print(sensor, method, i_trial, i_repeat, n_features_to_keep)
                                X_train_projected = feature_reduction.transform(X_train, n_features_to_keep, same_dataset=True)
                                X_val_projected = feature_reduction.transform(X_val, n_features_to_keep, same_dataset=False)
                                X_train_projected = X_train_projected.reshape(n_train_samples, *old_shape)
                                X_val_projected = X_val_projected.reshape(n_val_samples, *old_shape)
                                old_model = models[sensor][i_trial]
                                model = deepcopy(old_model)
                                model.to(device)
                                model.eval()
                                # Bypass the conv stack: feed reduced features to the FC head.
                                # NOTE(review): the lambda closes over the loop variable
                                # `model`; harmless here because each model is used before
                                # the next iteration rebinds it — confirm if this changes.
                                model.forward = lambda X: model.forward_from_FC(X)
                                # using the old FC layer
                                val_dataset = torch.utils.data.TensorDataset(torch.tensor(X_val_projected, dtype=torch.float32, device=device),
                                                                             torch.tensor(Y_val, dtype=torch.long, device=device))
                                val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=64, num_workers=0)
                                _, old_score = model.validate(val_dataloader)
                                results_old_dict[sensor][method][i_trial][n_features_to_keep].append(old_score)
                                # retrain the model
                                train_dataset = torch.utils.data.TensorDataset(torch.tensor(X_train_projected, dtype=torch.float32, device=device),
                                                                               torch.tensor(Y_train, dtype=torch.long, device=device))
                                train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=64, num_workers=0, shuffle=True)
                                val_dataset = torch.utils.data.TensorDataset(torch.tensor(X_val_projected, dtype=torch.float32, device=device),
                                                                             torch.tensor(Y_val, dtype=torch.long, device=device))
                                val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=64, num_workers=0)
                                model.to(device)
                                model.train_process(train_dataloader, val_dataloader)
                                _, retrained_score = model.validate(val_dataloader)
                                results_retrained_dict[sensor][method][i_trial][n_features_to_keep].append(retrained_score)
                            else:  # more components requested than this sensor provides
                                # Drop the impossible entries so the plots skip them.
                                if n_features_to_keep in results_retrained_dict[sensor][method][i_trial]:
                                    del results_retrained_dict[sensor][method][i_trial][n_features_to_keep]
                                    del results_old_dict[sensor][method][i_trial][n_features_to_keep]
        #%%
        # =============================================================================
        # Plot the results: one column per sensor; top row uses the stored
        # FC layer, bottom row the retrained FC layer.
        # =============================================================================
        # we are still in the first for loop (for dataset in ["CIFAR_10", "SHL_2018"])
        plt.figure(figsize=(len(sensors_list)*4, 8))
        matplotlib.rcParams['font.size'] = 10
        n_classes = np.max(GT["val"])+1
        for i_sensor, sensor in enumerate(sensors_list):
            for method in methods_list:
                mean_results_retrained = {}  # results of the retrained FC layer
                mean_results_old_layer = {}  # results of the stored FC layer
                std_results_retrained = {}
                std_results_old_layer = {}
                this_n_features_list = list(results_old_dict[sensor][method][0].keys())
                # the list is assumed to be the same for all initializations of the net.
                for n_features in list(this_n_features_list):
                    # Pool scores across all trials/repeats for this n_features.
                    results_old, results_retrained = [], []
                    for i_trial in range(n_trials):
                        results_old += results_old_dict[sensor][method][i_trial][n_features]
                        results_retrained += results_retrained_dict[sensor][method][i_trial][n_features]
                    mean_results_retrained[n_features] = np.mean(results_retrained)
                    mean_results_old_layer[n_features] = np.mean(results_old)
                    std_results_retrained[n_features] = np.std(results_retrained)
                    std_results_old_layer[n_features] = np.std(results_old)
                mean_retrained = np.array(list(mean_results_retrained.values()))
                std_retrained = np.array(list(std_results_retrained.values()))
                mean_old_model = np.array(list(mean_results_old_layer.values()))
                std_old_model = np.array(list(std_results_old_layer.values()))
                # Top row: previous (stored) FC layer.
                plt.subplot(2, len(sensors_list), i_sensor+1)
                plt.title(sensor)
                plt.xscale('log')
                plt.grid('on')
                if i_sensor == 0: plt.ylabel("previous layer\n"+score_names[dataset])
                plt.plot(this_n_features_list, mean_old_model, color=colors[method])
                plt.fill_between(this_n_features_list, mean_old_model-std_old_model, mean_old_model+std_old_model, color=colors[method]+[0.1], linestyle='-')
                # Bottom row: retrained FC layer.
                plt.subplot(2, len(sensors_list), i_sensor+1 +len(sensors_list))
                plt.xscale('log')
                plt.grid('on')
                if i_sensor == 0: plt.ylabel("retrained layer\n"+score_names[dataset])
                plt.plot(this_n_features_list, mean_retrained, color=colors[method])
                plt.fill_between(this_n_features_list, mean_retrained-std_retrained, mean_retrained+std_retrained, color=colors[method]+[0.1], linestyle='-')
                plt.xlabel("# components kept")
                if i_sensor == len(sensors_list)-1: plt.legend(legend_types, methods_list, fontsize=10, loc='lower right')
                if method == "CCA_highest":
                    # Dotted guides at n_features == n_classes (the natural
                    # upper bound on useful CCA components).
                    plt.subplot(2, len(sensors_list), i_sensor+1)
                    plt.plot([n_classes, n_classes],
                             [0, mean_old_model.max()],
                             linestyle='dotted', zorder=-10, color=colors["CCA_highest"])
                    plt.plot([min(this_n_features_list), max(this_n_features_list)],
                             [mean_results_old_layer[n_classes], mean_results_old_layer[n_classes]],
                             linestyle='dotted', zorder=-10, color=colors["CCA_highest"])
                    plt.subplot(2, len(sensors_list), i_sensor+1 +len(sensors_list))
                    plt.plot([n_classes, n_classes],
                             [0, mean_retrained.max()],
                             linestyle='dotted', zorder=-10, color=colors["CCA_highest"])
                    plt.plot([min(this_n_features_list), max(this_n_features_list)],
                             [mean_results_retrained[n_classes], mean_results_retrained[n_classes]],
                             linestyle='dotted', zorder=-10, color=colors["CCA_highest"])
        # NOTE(review): savefig after show() can write an empty figure with
        # some interactive backends — confirm the intended call order.
        plt.show()
        plt.savefig(f"{dataset}_fig.PNG")
|
{"hexsha": "b08f3a3dac116c8bb6cc3a1568e7839868f6221f", "size": 12324, "ext": "py", "lang": "Python", "max_stars_repo_path": "similarity/projection_same_sensors.py", "max_stars_repo_name": "HuguesMoreau/Sensors_similariy", "max_stars_repo_head_hexsha": "4b8592049c83b03a11f5c57fab247290ee29b8f5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "similarity/projection_same_sensors.py", "max_issues_repo_name": "HuguesMoreau/Sensors_similariy", "max_issues_repo_head_hexsha": "4b8592049c83b03a11f5c57fab247290ee29b8f5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "similarity/projection_same_sensors.py", "max_forks_repo_name": "HuguesMoreau/Sensors_similariy", "max_forks_repo_head_hexsha": "4b8592049c83b03a11f5c57fab247290ee29b8f5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 52.0, "max_line_length": 157, "alphanum_fraction": 0.5552580331, "include": true, "reason": "import numpy", "num_tokens": 2473}
|
'''
Notices:
Copyright 2018 United States Government as represented by the Administrator of
the National Aeronautics and Space Administration. No copyright is claimed in
the United States under Title 17, U.S. Code. All Other Rights Reserved.
Disclaimers
No Warranty: THE SUBJECT SOFTWARE IS PROVIDED "AS IS" WITHOUT ANY WARRANTY OF
ANY KIND, EITHER EXPRESSED, IMPLIED, OR STATUTORY, INCLUDING, BUT NOT LIMITED
TO, ANY WARRANTY THAT THE SUBJECT SOFTWARE WILL CONFORM TO SPECIFICATIONS, ANY
IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
FREEDOM FROM INFRINGEMENT, ANY WARRANTY THAT THE SUBJECT SOFTWARE WILL BE ERROR
FREE, OR ANY WARRANTY THAT DOCUMENTATION, IF PROVIDED, WILL CONFORM TO THE
SUBJECT SOFTWARE. THIS AGREEMENT DOES NOT, IN ANY MANNER, CONSTITUTE AN
ENDORSEMENT BY GOVERNMENT AGENCY OR ANY PRIOR RECIPIENT OF ANY RESULTS,
RESULTING DESIGNS, HARDWARE, SOFTWARE PRODUCTS OR ANY OTHER APPLICATIONS
RESULTING FROM USE OF THE SUBJECT SOFTWARE. FURTHER, GOVERNMENT AGENCY
DISCLAIMS ALL WARRANTIES AND LIABILITIES REGARDING THIRD-PARTY SOFTWARE, IF
PRESENT IN THE ORIGINAL SOFTWARE, AND DISTRIBUTES IT "AS IS."
Waiver and Indemnity: RECIPIENT AGREES TO WAIVE ANY AND ALL CLAIMS AGAINST THE
UNITED STATES GOVERNMENT, ITS CONTRACTORS AND SUBCONTRACTORS, AS WELL AS ANY
PRIOR RECIPIENT. IF RECIPIENT'S USE OF THE SUBJECT SOFTWARE RESULTS IN ANY
LIABILITIES, DEMANDS, DAMAGES, EXPENSES OR LOSSES ARISING FROM SUCH USE,
INCLUDING ANY DAMAGES FROM PRODUCTS BASED ON, OR RESULTING FROM, RECIPIENT'S
USE OF THE SUBJECT SOFTWARE, RECIPIENT SHALL INDEMNIFY AND HOLD HARMLESS THE
UNITED STATES GOVERNMENT, ITS CONTRACTORS AND SUBCONTRACTORS, AS WELL AS ANY
PRIOR RECIPIENT, TO THE EXTENT PERMITTED BY LAW. RECIPIENT'S SOLE REMEDY FOR
ANY SUCH MATTER SHALL BE THE IMMEDIATE, UNILATERAL TERMINATION OF THIS
AGREEMENT.
'''
import numpy as np
import os
import warnings
from copy import copy
from ..mcmc.mcmc_sampler import MCMCSampler
from mpi4py import MPI
from pymc import Normal
from ..particles.particle import Particle
from ..particles.particle_chain import ParticleChain
from ..hdf5.hdf5_storage import HDF5Storage
class SMCSampler(object):
'''
Class for performing parallel Sequential Monte Carlo sampling
'''
    def __init__(self, data, model, param_priors):
        """Set up the MPI communicator and the underlying MCMC sampler.

        :param data: observed data the model is calibrated against
        :param model: forward model mapping parameters to predictions
        :param param_priors: prior distributions of the model parameters
        """
        # comm/size/rank identify this process within the MPI pool.
        self._comm, self._size, self._rank = self._setup_communicator()
        self._mcmc = self._setup_mcmc_sampler(data, model, param_priors)
@staticmethod
def _setup_communicator():
comm = MPI.COMM_WORLD.Clone()
size = comm.Get_size()
my_rank = comm.Get_rank()
return comm, size, my_rank
@staticmethod
def _setup_mcmc_sampler(data, model, param_priors):
mcmc = MCMCSampler(data=data, model=model, params=param_priors,
storage_backend='ram')
return mcmc
def sample(self, num_particles, num_time_steps, num_mcmc_steps,
measurement_std_dev, ESS_threshold=None, proposal_center=None,
proposal_scales=None, restart_time_step=0, hdf5_to_load=None,
autosave_file=None):
'''
:param num_particles: number of particles to use during sampling
:type num_particles: int
:param num_time_steps: number of time steps in temperature schedule that
is used to transition between prior and posterior distributions.
:type num_time_steps: int
:param num_mcmc_steps: number of mcmc steps to take during mutation
:param num_mcmc_steps: int
:param measurement_std_dev: standard deviation of the measurement error
:type measurement_std_dev: float
:param ESS_threshold: threshold equivalent sample size; triggers
resampling when ESS > ESS_threshold
:type ESS_threshold: float or int
:param proposal_center: initial parameter dictionary, which is used to
define the initial proposal distribution when generating particles;
default is None, and initial proposal distribution = prior.
:type proposal_center: dict
:param proposal_scales: defines the scale of the initial proposal
distribution, which is centered at proposal_center, the initial
parameters; i.e. prop ~ MultivarN(q1, (I*proposal_center*scales)^2).
Proposal scales should be passed as a dictionary with keys and
values corresponding to parameter names and their associated scales,
respectively. The default is None, which sets initial proposal
distribution = prior.
:type proposal_scales: dict
:param restart_time_step: time step at which to restart sampling;
default is zero, meaning the sampling process starts at the prior
distribution; note that restart_time_step < num_time_steps. The
step at restart_time is retained, and the sampling begins at the
next step (t=restart_time_step+1).
:type restart_time_step: int
:param hdf5_to_load: file path of a particle chain saved using the
ParticleChain.save() method.
:type hdf5_to_load: string
:Returns: A ParticleChain class instance that stores all particles and
their past generations at every time step.
'''
self._set_num_particles(num_particles)
self._set_temperature_schedule(num_time_steps)
self._set_num_mcmc_steps(num_mcmc_steps)
self._set_ESS_threshold(ESS_threshold)
self._set_autosave_behavior(autosave_file)
if restart_time_step == 0:
self._set_proposal_distribution(proposal_center, proposal_scales)
self._set_start_time_based_on_proposal()
particles = self._initialize_particles(measurement_std_dev)
particle_chain = self._initialize_particle_chain(particles)
elif 0 < restart_time_step <= num_time_steps:
self._set_start_time_equal_to_restart_time_step(restart_time_step)
particle_chain = self.load_particle_chain(hdf5_to_load)
particle_chain = self._trim_particle_chain(particle_chain,
restart_time_step)
else:
raise ValueError('restart time outside range [0, num_time_steps]')
self._set_particle_chain(particle_chain)
self._autosave_particle_chain()
for t in range(num_time_steps)[self._start_time_step+1:]:
temperature_step = self.temp_schedule[t] - self.temp_schedule[t-1]
new_particles = self._create_new_particles(temperature_step)
covariance = self._compute_current_step_covariance()
mutated_particles = self._mutate_new_particles(new_particles,
covariance,
measurement_std_dev,
temperature_step)
self._update_particle_chain_with_new_particles(mutated_particles)
self._autosave_particle_step()
self._close_autosaver()
return self.particle_chain
def _set_num_particles(self, num_particles):
self.num_particles = num_particles
return None
def _set_temperature_schedule(self, num_cooling_steps):
self.temp_schedule = np.linspace(0, 1, num_cooling_steps)
return None
def _set_num_mcmc_steps(self, num_mcmc_steps):
self.num_mcmc_steps = num_mcmc_steps
return None
def _set_ESS_threshold(self, ESS_threshold):
if ESS_threshold is None:
ESS_threshold = 0.5*self.num_particles
self.ESS_threshold = ESS_threshold
return None
def _set_autosave_behavior(self, autosave_file):
self._autosave_file = autosave_file
if self._autosave_file is not None and self._rank == 0:
self._autosaver = HDF5Storage(autosave_file, mode='w')
return None
def _set_proposal_distribution(self, proposal_center, proposal_scales):
if proposal_center is not None and proposal_scales is None:
msg = 'No scales given; setting scales to identity matrix.'
warnings.warn(msg)
proposal_scales = {k: 1. for k in self._mcmc.params.keys()}
elif proposal_center is None and proposal_scales is not None:
raise ValueError('Proposal scales given but center == None.')
self._proposal_center = proposal_center
self._proposal_scales = proposal_scales
return None
def _set_start_time_based_on_proposal(self,):
'''
If proposal distribution is equal to prior distribution, can start
Sequential Monte Carlo sampling at time = 1, since prior can be
sampled directly. If using a different proposal, must first start by
estimating the prior (i.e., time = 0). This is a result of the way
the temperature schedule is defined.
'''
if self._proposal_center is None:
self._start_time_step = 1
else:
self._start_time_step = 0
return None
def _initialize_particles(self, measurement_std_dev):
m_std = measurement_std_dev
self._mcmc.generate_pymc_model(fix_var=True, std_dev0=m_std)
num_particles_per_partition = self._get_num_particles_per_partition()
particles = []
prior_variables = self._create_prior_random_variables()
if self._proposal_center is not None:
proposal_variables = self._create_proposal_random_variables()
else:
proposal_variables = None
for _ in range(num_particles_per_partition):
part = self._create_particle(prior_variables, proposal_variables)
particles.append(part)
return particles
def _get_num_particles_per_partition(self,):
num_particles_per_partition = self.num_particles/self._size
remainder = self.num_particles % self._size
overtime_ranks = range(remainder)
if self._rank in overtime_ranks:
num_particles_per_partition += 1
return num_particles_per_partition
def _create_prior_random_variables(self,):
mcmc = copy(self._mcmc)
random_variables = dict()
for key in mcmc.params.keys():
index = mcmc.pymc_mod_order.index(key)
random_variables[key] = mcmc.pymc_mod[index]
return random_variables
def _create_proposal_random_variables(self,):
centers = self._proposal_center
scales = self._proposal_scales
random_variables = dict()
for key in self._mcmc.params.keys():
variance = (centers[key] * scales[key])**2
random_variables[key] = Normal(key, centers[key], 1/variance)
return random_variables
def _create_particle(self, prior_variables, prop_variables=None):
param_vals, prior_logp = self._draw_random_variables(prior_variables)
if prop_variables is None:
prop_logp = prior_logp
else:
param_vals, prop_logp = self._draw_random_variables(prop_variables)
log_like = self._evaluate_likelihood(param_vals)
temp_step = self.temp_schedule[self._start_time_step]
log_weight = log_like*temp_step + prior_logp - prop_logp
return Particle(param_vals, np.exp(log_weight), log_like)
def _draw_random_variables(self, random_variables):
param_keys = self._mcmc.params.keys()
param_vals = {key: random_variables[key].random() for key in param_keys}
param_log_prob = np.sum([rv.logp for rv in random_variables.values()])
return param_vals, param_log_prob
def _evaluate_likelihood(self, param_vals):
'''
Note: this method performs 1 model evaluation per call.
'''
mcmc = copy(self._mcmc)
for key, value in param_vals.iteritems():
index = mcmc.pymc_mod_order.index(key)
mcmc.pymc_mod[index].value = value
results_index = mcmc.pymc_mod_order.index('results')
log_like = mcmc.pymc_mod[results_index].logp
return log_like
def _initialize_particle_chain(self, particles):
particles = self._comm.gather(particles, root=0)
if self._rank == 0:
particle_chain = ParticleChain()
if self._start_time_step == 1:
particle_chain.add_step([]) # empty 0th step
particle_chain.add_step(np.concatenate(particles))
particle_chain.normalize_step_weights()
else:
particle_chain = None
return particle_chain
def _set_particle_chain(self, particle_chain):
self.particle_chain = particle_chain
return None
def _set_start_time_equal_to_restart_time_step(self, restart_time_step):
self._start_time_step = restart_time_step
return None
def _trim_particle_chain(self, particle_chain, restart_time_step):
if self._rank == 0:
to_keep = range(0, restart_time_step + 1)
trimmed_steps = [particle_chain.get_particles(i) for i in to_keep]
particle_chain._steps = trimmed_steps
return particle_chain
@staticmethod
def _file_exists(hdf5_file):
return os.path.exists(hdf5_file)
def _compute_current_step_covariance(self):
if self._rank == 0:
covariance = self.particle_chain.calculate_step_covariance(step=-1)
if not self._is_positive_definite(covariance):
msg = 'current step cov not pos def, setting to identity matrix'
warnings.warn(msg)
covariance = np.eye(covariance.shape[0])
else:
covariance = None
covariance = self._comm.scatter([covariance]*self._size, root=0)
return covariance
@staticmethod
def _is_positive_definite(covariance):
try:
np.linalg.cholesky(covariance)
return True
except np.linalg.linalg.LinAlgError:
return False
def _create_new_particles(self, temperature_step):
if self._rank == 0:
self._initialize_new_particles()
self._compute_new_particle_weights(temperature_step)
self._normalize_new_particle_weights()
self._resample_if_needed()
new_particles = self._partition_new_particles()
else:
new_particles = [None]
new_particles = self._comm.scatter(new_particles, root=0)
return new_particles
def _initialize_new_particles(self):
new_particles = self.particle_chain.copy_step(step=-1)
self.particle_chain.add_step(new_particles)
return None
def _compute_new_particle_weights(self, temperature_step):
for p in self.particle_chain.get_particles(-1):
p.weight = np.exp(np.log(p.weight)+p.log_like*temperature_step)
return None
def _normalize_new_particle_weights(self):
self.particle_chain.normalize_step_weights()
return None
def _resample_if_needed(self):
'''
Checks if ESS below threshold; if yes, resample with replacement.
'''
ESS = self.particle_chain.compute_ESS()
if ESS < self.ESS_threshold:
print 'ESS = %s' % ESS
print 'resampling...'
self.particle_chain.resample(overwrite=True)
else:
print 'ESS = %s' % ESS
print 'no resampling required.'
return None
def _partition_new_particles(self):
partitions = np.array_split(self.particle_chain.get_particles(-1),
self._size)
return partitions
def _mutate_new_particles(self, particles, covariance, measurement_std_dev,
temperature_step):
'''
Predicts next distribution along the temperature schedule path using
the MCMC kernel.
'''
mcmc = copy(self._mcmc)
step_method = 'smc_metropolis'
new_particles = []
for particle in particles:
mcmc.generate_pymc_model(fix_var=True, std_dev0=measurement_std_dev,
q0=particle.params)
mcmc.sample(self.num_mcmc_steps, burnin=0, step_method=step_method,
cov=covariance, verbose=-1, phi=temperature_step)
stochastics = mcmc.MCMC.db.getstate()['stochastics']
params = {key: stochastics[key] for key in particle.params.keys()}
particle.params = params
particle.log_like = mcmc.MCMC.logp
new_particles.append(particle)
new_particles = self._comm.gather(new_particles, root=0)
return new_particles
def _update_particle_chain_with_new_particles(self, new_particles):
if self._rank == 0:
particles = np.concatenate(new_particles)
self.particle_chain.overwrite_step(step=-1, particle_list=particles)
return None
def _autosave_particle_chain(self):
if self._rank == 0 and self._autosave_file is not None:
self._autosaver.write_chain(self.particle_chain)
return None
def _autosave_particle_step(self):
if self._rank == 0 and self._autosave_file is not None:
step_index = self.particle_chain.get_num_steps() - 1
step = self.particle_chain.get_particles(step_index)
self._autosaver.write_step(step, step_index)
return None
def _close_autosaver(self):
if self._rank == 0 and self._autosave_file is not None:
self._autosaver.close()
return None
def save_particle_chain(self, h5_file):
'''
Saves self.particle_chain to an hdf5 file using the HDF5Storage class.
:param hdf5_to_load: file path at which to save particle chain
:type hdf5_to_load: string
'''
if self._rank == 0:
hdf5 = HDF5Storage(h5_file, mode='w')
hdf5.write_chain(self.particle_chain)
hdf5.close()
return None
def load_particle_chain(self, h5_file):
'''
Loads and returns a particle chain object stored using the HDF5Storage
class.
:param hdf5_to_load: file path of a particle chain saved using the
ParticleChain.save() or self.save_particle_chain() methods.
:type hdf5_to_load: string
'''
if self._rank == 0:
hdf5 = HDF5Storage(h5_file, mode='r')
particle_chain = hdf5.read_chain()
hdf5.close()
print 'Particle chain loaded from %s.' % h5_file
else:
particle_chain = None
return particle_chain
|
{"hexsha": "d409bface5f763133bc45e1bdc02e225a38ae916", "size": 18765, "ext": "py", "lang": "Python", "max_stars_repo_path": "smcpy/smc/smc_sampler.py", "max_stars_repo_name": "omunroe-com/seqmontecarlosampwpython", "max_stars_repo_head_hexsha": "d64cb1683a2d9954cb05544427af9c85f14f1c6e", "max_stars_repo_licenses": ["NASA-1.3"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "smcpy/smc/smc_sampler.py", "max_issues_repo_name": "omunroe-com/seqmontecarlosampwpython", "max_issues_repo_head_hexsha": "d64cb1683a2d9954cb05544427af9c85f14f1c6e", "max_issues_repo_licenses": ["NASA-1.3"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "smcpy/smc/smc_sampler.py", "max_forks_repo_name": "omunroe-com/seqmontecarlosampwpython", "max_forks_repo_head_hexsha": "d64cb1683a2d9954cb05544427af9c85f14f1c6e", "max_forks_repo_licenses": ["NASA-1.3"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.8407643312, "max_line_length": 80, "alphanum_fraction": 0.6707167599, "include": true, "reason": "import numpy", "num_tokens": 4059}
|
-- ---------------------------------------------------------------------
-- Exercise. Prove that in rings
--    a * 0 = 0
-- ----------------------------------------------------------------------
import algebra.ring
namespace my_ring
variables {R : Type*} [ring R]
variable (a : R)
-- First proof (explicit lemma applications)
-- =========================================
example : a * 0 = 0 :=
begin
-- Show a*0 + a*0 = a*0 + 0, then cancel a*0 on the left.
have h : a * 0 + a * 0 = a * 0 + 0,
calc a * 0 + a * 0
= a * (0 + 0) : (mul_add a 0 0).symm
... = a * 0 : congr_arg (λ x, a * x) (add_zero 0)
... = a * 0 + 0 : (add_zero (a * 0)).symm,
rw add_left_cancel h
end
-- Second proof (rewriting with rw)
-- ================================
example : a * 0 = 0 :=
begin
have h : a * 0 + a * 0 = a * 0 + 0,
calc a * 0 + a * 0
= a * (0 + 0) : by rw ← mul_add
... = a * 0 : by rw add_zero
... = a * 0 + 0 : by rw add_zero,
rw add_left_cancel h
end
-- Third proof (single rw chain, no calc)
-- ======================================
example : a * 0 = 0 :=
begin
have h : a * 0 + a * 0 = a * 0 + 0,
{ rw [←mul_add, add_zero, add_zero] },
rw add_left_cancel h
end
-- Fourth proof (automation via simp)
-- ==================================
example : a * 0 = 0 :=
begin
have h : a * 0 + a * 0 = a * 0 + 0,
calc a * 0 + a * 0
= a * (0 + 0) : by simp
... = a * 0 : by simp
... = a * 0 + 0 : by simp,
simp,
end
end my_ring
|
{"author": "jaalonso", "repo": "Matematicas_en_Lean", "sha": "c44e23d87665cb4aa00c813c6bfb3c41ebc83aa8", "save_path": "github-repos/lean/jaalonso-Matematicas_en_Lean", "path": "github-repos/lean/jaalonso-Matematicas_en_Lean/Matematicas_en_Lean-c44e23d87665cb4aa00c813c6bfb3c41ebc83aa8/src/Basicos/mul_zero.lean"}
|
import numpy as np
import matplotlib.pyplot as plt
import pdb  # no longer used after removing the debug breakpoint; kept in case other code in this file needs it

# Draw the project logo: an equilateral triangle with a translucent colored
# disc at each vertex, two arrows along the base, and a "?" near the apex.
plt.ion()
plt.axis('off')
plt.axis('equal')

# Vertices of an equilateral triangle with unit side length.
x1, x2, x3 = 0.0, 1.0, 0.5
y1, y2, y3 = 0.0, 0.0, np.sqrt(3)/2

# Triangle edges (zorder=1 keeps them under the discs and arrows).
plt.plot([x1, x2], [y1, y2], lw=4, zorder=1)
plt.plot([x2, x3], [y2, y3], lw=4, zorder=1)
plt.plot([x3, x1], [y3, y1], lw=4, zorder=1)

# Vertex colors as RGB triples in [0, 1].
c1 = (89/255, 90/255, 15/255)
c2 = (11/12, 5/12, 12/12)
c3 = (12/(30+2/3), 4/(30+2/3), 27/(30+2/3))
plt.scatter(x1, y1, s=10000, color=c1, alpha=0.5)
plt.scatter(x2, y2, s=10000, color=c2, alpha=0.5)
plt.scatter(x3, y3, s=10000, color=c3, alpha=0.5)

# Two opposing arrows along the base, offset by r at angle theta.
r, theta = 0.4, np.pi/9*3.5
dx = r*np.cos(theta)
dy = r*np.sin(theta)
plt.arrow(-dx, -dy, 2*dx, 2*dy, width=0.05, color='black', length_includes_head=True, zorder=3)
plt.arrow(1+dx, dy, -2*dx, -2*dy, width=0.05, color='black', length_includes_head=True, zorder=3)
plt.text(0.5, np.sqrt(3)/2-r*0.15, "?", fontsize=65, ha='center', va='center', zorder=3)

plt.xlim(-1.0, 2.2)
plt.ylim(-1.5, 2.2)
# Removed leftover `pdb.set_trace()` debugger breakpoint: it halted the
# script before the figure could be saved when run non-interactively.
plt.savefig("log.pdf")
plt.close()
|
{"hexsha": "307e642eab5311708be92632f9c192c3b1001b20", "size": 1004, "ext": "py", "lang": "Python", "max_stars_repo_path": "docs/logo.py", "max_stars_repo_name": "Quantum-Many-Body/QuantumLattices.jl", "max_stars_repo_head_hexsha": "203034b5281887811028484e24ce5a5ea9556185", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 65, "max_stars_repo_stars_event_min_datetime": "2019-04-08T08:30:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-17T14:16:28.000Z", "max_issues_repo_path": "docs/logo.py", "max_issues_repo_name": "NJU-Physicists/Hamiltonian.jl", "max_issues_repo_head_hexsha": "c795d1e3236a9d11108203e5c255afd7c7d000e2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2019-04-29T05:40:41.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-04T09:13:46.000Z", "max_forks_repo_path": "docs/logo.py", "max_forks_repo_name": "NJU-Physicists/Hamiltonian.jl", "max_forks_repo_head_hexsha": "c795d1e3236a9d11108203e5c255afd7c7d000e2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2019-09-29T14:33:44.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-01T06:04:51.000Z", "avg_line_length": 28.6857142857, "max_line_length": 97, "alphanum_fraction": 0.6205179283, "include": true, "reason": "import numpy", "num_tokens": 472}
|
import numpy as np
from vcnn import vCNN
import os, random
import tensorflow as tf
import argparse
class BatchLoader(object):
    """Load arrays from ``data.npy``/``labels.npy`` and serve batches.

    The samples are split 80/10/10 into train/validation/test partitions.
    Training batches are served sequentially; validation and test requests
    return the entire (shuffled) partition.
    """

    def __init__(self, data_dir, batch_size):
        self._data = np.load(os.path.join(data_dir, 'data.npy'))
        self._labels = np.load(os.path.join(data_dir, 'labels.npy'))
        self.batch_size = batch_size
        self._batch_iter = 0  # index of the next training batch to serve
        # Split data into training, validation and test splits (80/10/10)
        n_samples = self._data.shape[0]
        tr_idx = int(n_samples * 0.8)
        val_idx = int(n_samples * 0.9)
        self._train_split = list(zip(self._data[:tr_idx], self._labels[:tr_idx]))
        self._val_split = list(zip(self._data[tr_idx:val_idx], self._labels[tr_idx:val_idx]))
        self._test_split = list(zip(self._data[val_idx:], self._labels[val_idx:]))
        # Free the monolithic arrays; the splits hold the data now.
        del self._data, self._labels
        self.n_batches = len(self._train_split) // self.batch_size

    def get_batch(self, split):
        """Return (data, labels) stacked arrays for the requested split.

        :param split: one of 'train', 'val' or 'test'
        :raises ValueError: if ``split`` is not a recognized partition name
            (previously this silently returned None).
        """
        if split == 'train':
            if self._batch_iter == self.n_batches:
                # Epoch exhausted: reshuffle and serve the first batch again.
                self._batch_iter = 1
                random.shuffle(self._train_split)
                return map(np.stack, zip(*self._train_split[:self.batch_size]))
            else:
                start = self._batch_iter * self.batch_size
                stop = (self._batch_iter + 1) * self.batch_size
                x, y = map(np.stack, zip(*self._train_split[start:stop]))
                self._batch_iter += 1
                return x, y
        elif split == 'test':
            random.shuffle(self._test_split)
            return map(np.stack, zip(*self._test_split))
        elif split == 'val':
            random.shuffle(self._val_split)
            return map(np.stack, zip(*self._val_split))
        raise ValueError("split must be 'train', 'val' or 'test', got %r" % (split,))
# Command-line interface: checkpoint restore/save locations plus the core
# training hyperparameters (batch size, learning rate, epoch count).
parser = argparse.ArgumentParser()
parser.add_argument('--restore', action='store_true', help='Whether to restore partially trained model')
parser.add_argument('--save_dir', type=str, default='./save', help='Directory where model is saved during/after training')
parser.add_argument('--load_dir', type=str, default='./save', help='Directory where pretrained model is present')
parser.add_argument('--batch_size', type=int, default=50, help='Batch size for training')
parser.add_argument('--lr', type=float, default=1e-3, help='Initial learning rate')
parser.add_argument('--n_epochs', type=int, default=5, help='Number of epochs of training')
parser.add_argument('--data_dir', type=str, default='data', help='Directory where training data is present')
parser.add_argument('--log_dir', type=str, default='logs', help='Directory where training information is logged')
def main():
    # Entry point: parse CLI args, build the data pipeline and model, then
    # train (optionally resuming from a checkpoint) and report test accuracy.
    args = parser.parse_args()
    # Create data loader
    loader = BatchLoader(data_dir=args.data_dir, batch_size=args.batch_size)
    # Create model
    model = vCNN()
    # Create session and restore graph, variables (TF1-style graph/session API)
    with tf.Session(graph=model.graph) as sess:
        if args.restore:
            saver = tf.train.Saver(max_to_keep=3)
            saver.restore(sess, tf.train.latest_checkpoint(args.load_dir))
        else:
            sess.run(tf.global_variables_initializer())
        model.train(sess, batch_loader=loader, save_dir=os.path.join(args.save_dir, 'model'), learning_rate=args.lr, n_epochs=args.n_epochs, log_dir=args.log_dir)
        print("Model trained.")
        # Evaluate on the full (shuffled) test partition.
        x_test, y_test = loader.get_batch(split='test')
        # NOTE(review): `model.batch_loader` is presumably set by
        # model.train(...) above; if vCNN does not store the loader, this
        # should be `loader.batch_size` -- confirm against vcnn.py.
        print("Test accuracy %.4f" % model.test(sess, x_test, y_test, batch_size=model.batch_loader.batch_size))
# Run training only when executed as a script, not on import.
if __name__ == '__main__':
    main()
|
{"hexsha": "920510aa48471a840b5fda3275f7591836f84a21", "size": 3449, "ext": "py", "lang": "Python", "max_stars_repo_path": "shape_recognition/train.py", "max_stars_repo_name": "ys1998/tactile-shape-recognition", "max_stars_repo_head_hexsha": "b5ab6f1cdf04ff23e14b467a590533e7ee740b52", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "shape_recognition/train.py", "max_issues_repo_name": "ys1998/tactile-shape-recognition", "max_issues_repo_head_hexsha": "b5ab6f1cdf04ff23e14b467a590533e7ee740b52", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "shape_recognition/train.py", "max_forks_repo_name": "ys1998/tactile-shape-recognition", "max_forks_repo_head_hexsha": "b5ab6f1cdf04ff23e14b467a590533e7ee740b52", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.9027777778, "max_line_length": 162, "alphanum_fraction": 0.6688895332, "include": true, "reason": "import numpy", "num_tokens": 797}
|
# -*- coding:utf-8 -*-
import numpy as np
import pandas as pd
from scipy import stats
from scipy.signal import periodogram
from statsmodels.api import add_constant, OLS
from statsmodels.tsa.seasonal import STL
from statsmodels.tsa.stattools import acf, pacf
from statsmodels.tsa.holtwinters import ExponentialSmoothing
from sklearn.decomposition import PCA
def scale(x):
    """Standardize a series to zero mean and unit variance (Z-score).

    NaN entries are ignored when estimating the mean and the sample
    (ddof=1) standard deviation.

    Parameters
    ----------
    x: np.array or pd.DataFrame, the time series.
    """
    values = x if isinstance(x, np.ndarray) else np.array(x)
    center = np.nanmean(values)
    spread = np.nanstd(values, ddof=1)
    return (values - center) / spread
def fft_infer_period(x):
    """Infer the dominant period of a series from its FFT power spectrum.

    Finds local maxima of the magnitude spectrum, takes the strongest one,
    and returns the reciprocal of its frequency. Falls back to a period of
    2 when the series is too short/degenerate for the peak search.

    Parameters
    ----------
    x: np.array or pd.DataFrame, the time series.

    Returns
    -------
    dict with key 'period' (int).

    References
    ----------
    1. https://github.com/xuawai/AutoPeriod/blob/master/auto_period.ipynb
    """
    try:
        if isinstance(x, pd.DataFrame):
            x = x.values.reshape(-1, )
        ft = np.fft.rfft(x)
        freqs = np.fft.rfftfreq(len(x), 1)
        mags = abs(ft)
        # Local maxima: sign of the first difference flips from + to -.
        inflection = np.diff(np.sign(np.diff(mags)))
        peaks = (inflection < 0).nonzero()[0] + 1
        peak = peaks[mags[peaks].argmax()]
        signal_freq = freqs[peak]
        period = int(1 / signal_freq)
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
        # are no longer swallowed. Any numerical failure -> default period.
        period = 2
    return {'period': period}
def freq_to_numerical(x, timestamp, freq_mapping_dict=None):
    """Calculates frequency.

    Infers the pandas frequency string of the timestamp column and maps it
    to a numeric seasonal length (e.g. 'D' -> 7, 'H' -> 24).

    Parameters
    ----------
    x: pd.DataFrame, the timestamp.
    timestamp: str, timestamp name of x.
    freq_mapping_dict, dict, default {'H': 24, 'D': 7, 'W': 54, 'M': 12,
        'Q': 4, 'Y': 1, 'A': 1, 'S': 60, 'T': 60}.

    Returns
    -------
    ({'nfreq': numeric frequency or NaN}, inferred frequency string)

    Raises
    ------
    ValueError: if no frequency can be inferred from the timestamps
        (previously this crashed with an opaque TypeError).
    """
    x[timestamp] = pd.to_datetime(x[timestamp])
    x = x.sort_values([timestamp])
    dateindex = pd.DatetimeIndex(x[timestamp])
    sfreq = pd.infer_freq(dateindex)
    if sfreq is None:
        # infer_freq needs at least 3 regularly spaced stamps; slide a
        # 3-wide window until one succeeds.
        for i in range(len(x)):
            sfreq = pd.infer_freq(dateindex[i:i + 3])
            if sfreq is not None:
                break
    if sfreq is None:
        raise ValueError('Unable to infer a frequency from column %r.'
                         % timestamp)
    if freq_mapping_dict is None:
        freq_mapping_dict = {
            'H': 24,
            'D': 7,
            'W': 54,
            'M': 12,
            'Q': 4,
            'Y': 1,
            'A': 1,
            'T': 60,
            'S': 60}
    # Only the leading letter matters ('W-SUN' -> 'W'); unknown codes -> NaN.
    nfreq = freq_mapping_dict.get(sfreq[0], np.nan)
    return {'nfreq': nfreq}, sfreq
def statistics(x, period: int = 1):
    """
    Calculates statistics features, including length, count,
    mean, var, min, max, median, range, hmean, iqr.

    Each per-series statistic is summarized across series by its mean and
    sample (ddof=1) standard deviation.

    Parameters
    ----------
    x: np.array or pd.DataFrame, the time series.
    period: int, the seasonal of the time series.
    """
    values = x if isinstance(x, np.ndarray) else np.array(x)
    n_obs, n_series = values.shape[0], values.shape[1]

    # Column-wise (per-series) statistics, NaN-aware where supported.
    per_series = {
        'mean': np.nanmean(values, axis=0),
        'var': np.nanvar(values, ddof=1, axis=0),
        'min': np.nanmin(values, axis=0),
        'max': np.nanmax(values, axis=0),
        'median': np.nanmedian(values, axis=0),
        'range': np.ptp(values, axis=0),
        'iqr': stats.iqr(values, axis=0),
    }

    features = {'length_series': n_obs, 'count_series': n_series}
    for name, stat in per_series.items():
        features['mean.%s_series' % name] = np.nanmean(stat)
        features['std.%s_series' % name] = np.nanstd(stat, ddof=1)
    return features
def acf_features(x, period: int = 1):
    """
    Calculates autocorrelation function features.

    Returns the lag-1 ACF and the sum of squared ACFs over lags 1-10 for
    the raw series and its first and second differences, plus the ACF at
    the seasonal lag. Features that need more data than available are NaN.

    Parameters
    ----------
    x: np.array or pd.DataFrame, the time series.
    period: int, the seasonal of the time series.

    References
    ----------
    1. T.S. Talagala, et al., Meta-learning how to forecast time series, 2018.
    2. https://github.com/robjhyndman/tsfeatures (R code)
    """
    if not isinstance(x, np.ndarray):
        x = np.array(x)
    n = len(x)

    acf_raw = acf(x, nlags=max(period, 10), fft=False)
    acf_d1 = acf(np.diff(x, n=1), nlags=10, fft=False) if n > 10 else [np.nan] * 2
    acf_d2 = acf(np.diff(x, n=2), nlags=10, fft=False) if n > 11 else [np.nan] * 2

    return {
        'y_acf1': acf_raw[1],
        'y_acf10': np.nansum((acf_raw[1:11]) ** 2) if n > 10 else np.nan,
        'diff1y_acf1': acf_d1[1],
        'diff1y_acf10': np.nansum((acf_d1[1:11]) ** 2) if n > 10 else np.nan,
        'diff2y_acf1': acf_d2[1],
        'diff2y_acf10': np.nansum((acf_d2[1:11]) ** 2) if n > 11 else np.nan,
        'seas_acf1': acf_raw[period] if len(acf_raw) > period else np.nan,
    }
def pacf_features(x, period: int = 1):
    """
    Calculates partial autocorrelation function features.

    Returns sums of squared PACFs over lags 1-5 for the raw series and its
    first and second differences, plus the PACF at the seasonal lag. Any
    computation that fails (series too short, singular estimates) yields
    NaN for that feature.

    Parameters
    ----------
    x: np.array or pd.DataFrame, the time series.
    period: int, the seasonal of the time series.

    References
    ----------
    1. T.S. Talagala, et al., Meta-learning how to forecast time series, 2018.
    2. https://github.com/robjhyndman/tsfeatures (R code)
    """
    if not isinstance(x, np.ndarray):
        x = np.array(x)
    x_len = len(x)
    # Bare `except:` clauses narrowed to `except Exception:` throughout so
    # KeyboardInterrupt/SystemExit are no longer swallowed.
    try:
        y_pacf_list = pacf(x, nlags=max(period, 5), method='ldb')
    except Exception:
        y_pacf_list = np.nan
    if x_len > 5:
        try:
            y_pacf5 = np.nansum(y_pacf_list[1:6] ** 2)
        except Exception:
            y_pacf5 = np.nan
    else:
        y_pacf5 = np.nan
    if x_len > 6:
        try:
            diff1y_pacf_list = pacf(np.diff(x, n=1), nlags=5, method='ldb')
            diff1y_pacf5 = np.nansum(diff1y_pacf_list[1:6] ** 2)
        except Exception:
            diff1y_pacf5 = np.nan
    else:
        diff1y_pacf5 = np.nan
    if x_len > 7:
        try:
            diff2_pacf_list = pacf(np.diff(x, n=2), nlags=5, method='ldb')
            diff2y_pacf5 = np.nansum(diff2_pacf_list[1:6] ** 2)
        except Exception:
            diff2y_pacf5 = np.nan
    else:
        diff2y_pacf5 = np.nan
    try:
        seas_pacf1 = y_pacf_list[period]
    except Exception:
        seas_pacf1 = np.nan
    pacf_features = {
        'y_pacf5': y_pacf5,
        'diff1y_pacf5': diff1y_pacf5,
        'diff2y_pacf5': diff2y_pacf5,
        'seas_pacf1': seas_pacf1
    }
    return pacf_features
def crossing_points(x, period: int = 1):
    """
    Crossing points.

    Counts how many times the series crosses its median, i.e. how often
    consecutive observations lie on opposite sides of the median line.

    Parameters
    ----------
    x: np.array or pd.DataFrame, the time series.
    period: int, the seasonal of the time series.

    References
    ----------
    1. T.S. Talagala, et al., Meta-learning how to forecast time series, 2018.
    2. https://github.com/robjhyndman/tsfeatures (R code)
    """
    series = x if isinstance(x, np.ndarray) else np.array(x)
    below = series <= np.median(series)
    # XOR of consecutive indicators: True exactly at a crossing.
    changed = below[:-1] != below[1:]
    return {'crossing_points': changed.sum()}
def stability(x, period: int = 10):
    """
    Calculate the stability of time series.

    Splits the series into tiles of roughly ``period`` observations and
    returns the sample variance of the tile means. Falls back to NaN when
    the computation fails (e.g. empty input).

    Parameters
    ----------
    x: np.array or pd.DataFrame, the time series.
    period: int, the seasonal of the time series.

    References
    ----------
    1. T.S. Talagala, et al., Meta-learning how to forecast time series, 2018.
    2. https://github.com/robjhyndman/tsfeatures (R code)
    """
    if not isinstance(x, np.ndarray):
        x = np.array(x)
    width = period if period > 1 else 10
    try:
        meanx = [np.nanmean(x_w) for x_w in np.array_split(x, len(x) // width + 1)]
        stability = np.nanvar(meanx, ddof=1)
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        stability = np.nan
    return {'stability': stability}
def lumpiness(x, period: int = 10):
    """
    Calculating the lumpiness of time series.

    Splits the series into tiles of roughly ``period`` observations and
    returns the mean of the tile sample variances. Falls back to NaN when
    the computation fails (e.g. empty input).

    Parameters
    ----------
    x: np.array or pd.DataFrame, the time series.
    period: int, the seasonal of the time series.

    References
    ----------
    1. T.S. Talagala, et al., Meta-learning how to forecast time series, 2018.
    2. https://github.com/robjhyndman/tsfeatures (R code)
    """
    if not isinstance(x, np.ndarray):
        x = np.array(x)
    width = period if period > 1 else 10
    try:
        varx = [np.nanvar(x_w, ddof=1) for x_w in np.array_split(x, len(x) // width + 1)]
        lumpiness = np.nanmean(varx)
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        lumpiness = np.nan
    return {'lumpiness': lumpiness}
def entropy(x, period: int = 1):
    """
    Calculates spectral entropy.

    The periodogram is normalized into a discrete distribution whose
    Shannon entropy (base 2), scaled by the maximum possible entropy,
    is returned. Values near 1 indicate noise-like series.

    Parameters
    ----------
    x: np.array or pd.DataFrame, the time series.
    period: int, the seasonal of the time series.

    References
    ----------
    1. T.S. Talagala, et al., Meta-learning how to forecast time series, 2018.
    2. https://github.com/robjhyndman/tsfeatures (R code)
    """
    series = x if isinstance(x, np.ndarray) else np.array(x)
    _, power = periodogram(series, period)
    density = power / np.nansum(power)
    raw_entropy = np.nansum(density * np.log2(density))
    return {'spectral_entropy': -(raw_entropy / np.log2(len(density)))}
def hurst(x, period: int = 30):
    """
    Calculates hurst exponet.

    Estimated as the slope of log(std of lagged differences) versus
    log(lag) over lags 2..min(period, len(x)-1). Falls back to NaN when
    the fit fails (e.g. constant-increment series yields log(0)).

    Parameters
    ----------
    x: np.array or pd.DataFrame, the time series.
    period: int, the seasonal of the time series.

    References
    ----------
    1. T.S. Talagala, et al., Meta-learning how to forecast time series, 2018.
    2. https://github.com/robjhyndman/tsfeatures (R code)
    """
    if not isinstance(x, np.ndarray):
        x = np.array(x)
    x_len = len(x)
    try:
        lags = range(2, min(period, x_len - 1))
        tau = [np.nanstd(x[lag:] - x[:-lag]) for lag in lags]
        poly = np.polyfit(np.log(lags), np.log(tau), 1)
        hurst = poly[0] if not np.isnan(poly[0]) else np.nan
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        hurst = np.nan
    return {'hurst_exponet': hurst}
def stl_features(x, period: int = 1):
    """
    Calculates the strength of trend and seasonality, spikiness, linearity,
    curvature, peak, trough, e_acf1.

    Parameters
    ----------
    x: np.array or pd.DataFrame, the time series.
    period: int, the seasonal of the time series.

    References
    ----------
    1. T.S. Talagala, et al., Meta-learning how to forecast time series, 2018.
    2. https://github.com/robjhyndman/tsfeatures (R code)
    3. https://github.com/Nixtla/tsfeatures/blob/master/tsfeatures (Python code)
    """
    if not isinstance(x, np.ndarray):
        x = np.array(x)
    x_len = len(x)
    try:
        # Season-trend decomposition via LOESS; any failure (e.g. a series
        # too short for the given period) yields the all-NaN dict below.
        stl = STL(x, period=period).fit()
        trend_ = stl.trend
        seasonal_ = stl.seasonal
        remainder_ = stl.resid
        re_var = np.nanvar(remainder_, ddof=1)
        re_mean = np.nanmean(remainder_)
        # Strength measures: 1 - Var(resid) / Var(component + resid).
        trend_strength = 1 - re_var / np.nanvar(trend_ + remainder_, ddof=1)
        seasonal_strength = 1 - re_var / np.nanvar(seasonal_ + remainder_, ddof=1)
        # Spikiness: variance of the leave-one-out variances of the residual.
        d = (remainder_ - re_mean) ** 2
        varloo = (re_var * (x_len - 1) - d) / (x_len - 2)
        spikiness = np.nanvar(varloo, ddof=1)
        # Regress the trend on an orthogonalized quadratic time basis (QR of
        # [1, t, t^2]); the two fitted slopes give linearity and curvature.
        time = np.arange(x_len) + 1
        poly_m = np.transpose(np.vstack(list((time ** k for k in range(3)))))
        poly_m = np.linalg.qr(poly_m)[0][:, 1:]
        time_x = add_constant(poly_m)
        coefs = OLS(trend_, time_x).fit().params
        linearity = coefs[1]
        curvature = -coefs[2]
        # 1-based position (within one period) of the seasonal high and low;
        # a remainder of 0 wraps to `period`.
        peak = (np.argmax(seasonal_) + 1) % period
        peak = period if peak == 0 else peak
        trough = (np.argmin(seasonal_) + 1) % period
        trough = period if trough == 0 else trough
        # Lag-1 autocorrelation of the residual component.
        acfremainder = acf_features(remainder_, period)
        e_acf1 = acfremainder['y_acf1']
    except:
        return {
            'trend_strength': np.nan,
            'seasonal_strength': np.nan,
            'spikiness': np.nan,
            'linearity': np.nan,
            'curvature': np.nan,
            'peak': np.nan,
            'trough': np.nan,
            'e_acf1': np.nan
        }
    return {
        'trend_strength': trend_strength,
        'seasonal_strength': seasonal_strength,
        'spikiness': spikiness,
        'linearity': linearity,
        'curvature': curvature,
        'peak': peak,
        'trough': trough,
        'e_acf1': e_acf1
    }
def holt_parameters(x, period: int = 1):
    """
    Calculates the smoothing parameters (level and trend) of a Holt model.

    Parameters
    ----------
    x: np.array or pd.DataFrame, the time series.
    period: int, the seasonal of the time series (unused by the Holt fit;
        kept for a uniform extractor signature).

    Returns
    -------
    dict with keys 'alpha' (smoothing_level) and 'beta' (smoothing_trend);
    both NaN if the model fails to fit.

    References
    ----------
    1. T.S. Talagala, et al., Meta-learning how to forecast time series, 2018.
    2. https://github.com/robjhyndman/tsfeatures (R code)
    """
    if not isinstance(x, np.ndarray):
        x = np.array(x)
    try:
        holt = ExponentialSmoothing(x, trend='add').fit()
        alpha = holt.params['smoothing_level']
        beta = holt.params['smoothing_trend']
    except Exception:
        # `except Exception` (not bare `except:`) so interrupts propagate.
        return {'alpha': np.nan,
                'beta': np.nan
                }
    return {'alpha': alpha,
            'beta': beta
            }
def hw_parameters(x: np.ndarray, period: int = 1):
    """
    Calculates the smoothing parameters of a Holt-Winters model
    (additive trend and additive seasonality).

    Parameters
    ----------
    x: np.array or pd.DataFrame, the time series.
    period: int, the seasonal of the time series (passed as
        seasonal_periods to the model).

    Returns
    -------
    dict with keys 'hw_alpha' (level), 'hw_beta' (trend) and
    'hw_gamma' (seasonal); all NaN if the model fails to fit.

    References
    ----------
    1. T.S. Talagala, et al., Meta-learning how to forecast time series, 2018.
    2. https://github.com/robjhyndman/tsfeatures (R code)
    """
    if not isinstance(x, np.ndarray):
        x = np.array(x)
    try:
        hw = ExponentialSmoothing(
            endog=x,
            seasonal_periods=period,
            seasonal='add',
            trend='add').fit()
        hw_alpha = hw.params['smoothing_level']
        hw_beta = hw.params['smoothing_trend']
        hw_gamma = hw.params['smoothing_seasonal']
    except Exception:
        # `except Exception` (not bare `except:`) so interrupts propagate.
        return {'hw_alpha': np.nan,
                'hw_beta': np.nan,
                'hw_gamma': np.nan
                }
    return {'hw_alpha': hw_alpha,
            'hw_beta': hw_beta,
            'hw_gamma': hw_gamma
            }
# Registry of per-series meta-feature extractors.  Each callable takes
# (x, period) and returns a dict of named features; the registry is consulted
# by metafeatures_from_timeseries when resolving the user's features_list.
ts_metafeatures_list = {
    'acf_features': acf_features,
    'pacf_features': pacf_features,
    'crossing_points': crossing_points,
    'stability': stability,
    'lumpiness': lumpiness,
    'entropy': entropy,
    'hurst': hurst,
    'stl_features': stl_features,
    'holt_parameters': holt_parameters,
    'hw_parameters': hw_parameters
}
def metafeatures_from_timeseries(
        x,
        timestamp,
        period=None,
        scale_ts=True,
        freq_mapping_dict=None,
        features_list=None):
    """
    Extracting the meta-features of time series.

    Parameters
    ----------
    x: pd.DataFrame, the time series.
    timestamp: str, timestamp name of x.
    period: int or None, the seasonal of the time series, default None
        (inferred from the data via FFT when None).
    scale_ts: bool, whether scale original time series.
    freq_mapping_dict, dict, default {'H': 24, 'D': 7, 'W': 54, 'M': 12,
                                      'Q': 4, 'Y': 1, 'A': 1, 'S': 60, 'T': 60}.
    features_list, List[str], default ['simple', 'all'].  The caller's list
        is never mutated.

    Returns
    -------
    pd.DataFrame with a single row of meta-features (NaNs filled with 0).

    Raises
    ------
    ValueError if x is not a pandas DataFrame.
    """
    metafeatures_dict = {}
    if not isinstance(x, pd.DataFrame):
        raise ValueError('x should be a DataFrame')
    if features_list is None:
        features_list = ['simple', 'all']
    else:
        # Work on a copy: the `.remove('simple')` below must not mutate the
        # list object passed in by the caller.
        features_list = list(features_list)
    nfreq, sfreq = freq_to_numerical(x, timestamp, freq_mapping_dict)
    metafeatures_dict.update(nfreq)
    x = x.drop(columns=[timestamp])
    if period is None:
        # Infer a candidate period per value column via FFT, then keep the
        # most frequent one across columns.
        val_cols = x.columns.to_list()
        periods = [fft_infer_period(x[col])['period'] for col in val_cols]
        period = int(np.argmax(np.bincount(periods)))
    metafeatures_dict['period'] = period
    if period > 365:
        # An implausibly long period: fall back to the frequency-derived value.
        if isinstance(nfreq['nfreq'], int):
            period = nfreq['nfreq']
        elif isinstance(sfreq[0], int):
            period = sfreq[0]
        else:
            period = 365
    x = np.array(x)
    if scale_ts:
        x = scale(x)
    if 'simple' in features_list:
        features_list.remove('simple')
        stat_mfs = statistics(x)
        metafeatures_dict.update(stat_mfs)
    if 'all' in features_list:
        metafeatures_list = ts_metafeatures_list.keys()
    else:
        metafeatures_list = features_list
    if len(x.shape) == 2 and x.shape[1] != 1:
        # Reduce a multivariate series to a single principal component before
        # the univariate extractors run.
        x = PCA(n_components=1).fit_transform(x)
    if len(x.shape) != 1:
        x = x.reshape(-1, )
    for mf in metafeatures_list:
        # Unknown names in features_list are silently skipped.
        if ts_metafeatures_list.get(mf) is not None:
            feature = ts_metafeatures_list[mf](x, period)
            metafeatures_dict.update(feature)
    metafeatures = pd.DataFrame(metafeatures_dict, index=[0])
    metafeatures.fillna(0, inplace=True)
    return metafeatures
|
{"hexsha": "a333afcefecb52113ca1a1be48f891f35f50c63f", "size": 17227, "ext": "py", "lang": "Python", "max_stars_repo_path": "hyperts/framework/meta_learning/tsfeatures.py", "max_stars_repo_name": "DataCanvasIO/HyperTS", "max_stars_repo_head_hexsha": "b2560daf8f08748691e7bd9efd14cc5f881186a3", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2021-11-15T08:07:54.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-27T18:21:20.000Z", "max_issues_repo_path": "hyperts/framework/meta_learning/tsfeatures.py", "max_issues_repo_name": "DataCanvasIO/HyperTS", "max_issues_repo_head_hexsha": "b2560daf8f08748691e7bd9efd14cc5f881186a3", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 34, "max_issues_repo_issues_event_min_datetime": "2021-11-26T01:34:49.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T06:28:39.000Z", "max_forks_repo_path": "hyperts/framework/meta_learning/tsfeatures.py", "max_forks_repo_name": "DataCanvasIO/HyperTS", "max_forks_repo_head_hexsha": "b2560daf8f08748691e7bd9efd14cc5f881186a3", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-11-15T08:07:57.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T07:35:49.000Z", "avg_line_length": 27.0864779874, "max_line_length": 89, "alphanum_fraction": 0.5821675277, "include": true, "reason": "import numpy,from scipy,from statsmodels", "num_tokens": 4953}
|
# --- Do not remove these libs ---
from freqtrade.strategy.interface import IStrategy
from pandas import DataFrame, Series
import talib.abstract as ta
import freqtrade.vendor.qtpylib.indicators as qtpylib
import numpy as np
from freqtrade.strategy import DecimalParameter, IntParameter
from datetime import datetime, timedelta
from functools import reduce
# --------------------------------
def EWO(dataframe, ema_length=5, ema2_length=35):
    """Elliott Wave Oscillator: spread between a fast and a slow EMA,
    expressed as a percentage of the close price."""
    frame = dataframe.copy()
    fast_ema = ta.EMA(frame, timeperiod=ema_length)
    slow_ema = ta.EMA(frame, timeperiod=ema2_length)
    return (fast_ema - slow_ema) / frame['close'] * 100
# Volume Weighted Moving Average
def vwma(dataframe: DataFrame, length: int = 10):
    """Indicator: Volume Weighted Moving Average (VWMA)"""
    # Ratio of SMA(price*volume) to SMA(volume) over the same window.
    price_volume = dataframe['close'] * dataframe['volume']
    return Series(
        ta.SMA(price_volume, timeperiod=length)
        / ta.SMA(dataframe['volume'], timeperiod=length)
    )
# Modified Elder Ray Index
def moderi(dataframe: DataFrame, len_slow_ma: int = 32) -> Series:
    """Modified Elder Ray Index: True where the EMA of the VWMA is
    non-decreasing (up-trend), False otherwise."""
    smoothed = Series(ta.EMA(vwma(dataframe, length=len_slow_ma), timeperiod=len_slow_ma))
    # Only the boolean trend direction is needed, not the raw values.
    return smoothed >= smoothed.shift(1)
class BBRSITV(IStrategy):
    """
    RSI + Bollinger-band (EMA basis) + dispersion strategy, transcribed from
    the Pine script " RSI + BB (EMA) + Dispersion (2.0)".

    Buys when RSI drops below (EMA(RSI) - sigma * stddev(RSI)) while the
    Elliott Wave Oscillator is elevated; sells when RSI is very high or
    rises above (EMA(RSI) + sigma * stddev(RSI)).  Band length and sigma
    are hyperopt-tunable for buy and sell sides independently.
    """
    INTERFACE_VERSION = 2
    # Buy hyperspace params:
    buy_params = {
        "ewo_high": 4.86,
        "for_ma_length": 22,
        "for_sigma": 1.74,
    }
    # Sell hyperspace params:
    sell_params = {
        "for_ma_length_sell": 65,
        "for_sigma_sell": 1.895,
        "rsi_high": 72,
    }
    # ROI table: # value loaded from strategy
    minimal_roi = {
        "0": 0.1
    }
    # Stoploss:
    stoploss = -0.25 # value loaded from strategy
    # Trailing stop:
    trailing_stop = False # value loaded from strategy
    trailing_stop_positive = 0.005 # value loaded from strategy
    trailing_stop_positive_offset = 0.025 # value loaded from strategy
    trailing_only_offset_is_reached = True # value loaded from strategy
    # Sell signal
    use_sell_signal = True
    sell_profit_only = False
    sell_profit_offset = 0.01
    ignore_roi_if_buy_signal = False
    process_only_new_candles = True
    startup_candle_count = 30
    protections = [
        # {
        #     "method": "StoplossGuard",
        #     "lookback_period_candles": 12,
        #     "trade_limit": 1,
        #     "stop_duration_candles": 6,
        #     "only_per_pair": True
        # },
        # {
        #     "method": "StoplossGuard",
        #     "lookback_period_candles": 12,
        #     "trade_limit": 2,
        #     "stop_duration_candles": 6,
        #     "only_per_pair": False
        # },
        {
            "method": "LowProfitPairs",
            "lookback_period_candles": 60,
            "trade_limit": 1,
            "stop_duration": 60,
            "required_profit": -0.05
        },
        {
            "method": "MaxDrawdown",
            "lookback_period_candles": 24,
            "trade_limit": 1,
            "stop_duration_candles": 12,
            "max_allowed_drawdown": 0.2
        },
    ]
    # Hyperopt-tunable parameters (defaults come from the dicts above).
    ewo_high = DecimalParameter(0, 7.0, default=buy_params['ewo_high'], space='buy', optimize=True)
    for_sigma = DecimalParameter(0, 10.0, default=buy_params['for_sigma'], space='buy', optimize=True)
    for_sigma_sell = DecimalParameter(0, 10.0, default=sell_params['for_sigma_sell'], space='sell', optimize=True)
    rsi_high = IntParameter(60, 100, default=sell_params['rsi_high'], space='sell', optimize=True)
    for_ma_length = IntParameter(5, 80, default=buy_params['for_ma_length'], space='buy', optimize=True)
    for_ma_length_sell = IntParameter(5, 80, default=sell_params['for_ma_length_sell'], space='sell', optimize=True)
    # Optimal timeframe for the strategy
    timeframe = '5m'
    # Protection
    fast_ewo = 50
    slow_ewo = 200
    def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        """
        Compute RSI, RSI(4), the EMA basis and stddev of RSI (the BB on RSI),
        and the EWO oscillator.  In hyperopt mode the basis/dev columns are
        pre-computed for every candidate MA length (5..80) so the optimizer
        can select any of them; otherwise only the two configured lengths
        (buy and sell side) are computed.
        """
        # //@version=3
        # study(" RSI + BB (EMA) + Dispersion (2.0)", overlay=false)
        #
        # // Parameter initialisation
        # src = input(title="Source", type=source, defval=close) // price source used for the calculations
        src = 'close'
        # for_rsi = input(title="RSI_period", type=integer, defval=14) // RSI period
        for_rsi = 14
        # for_ma = input(title="Basis_BB", type=integer, defval=20) // MA period inside the BB
        # for_ma = 20
        # for_mult = input(title="Stdev", type=integer, defval=2, minval=1, maxval=5) // number of standard deviations for the BB
        for_mult = 2
        # for_sigma = input(title="Dispersion", type=float, defval=0.1, minval=0.01, maxval=1) // dispersion around the MA
        for_sigma = 0.1
        #
        # // Script working conditions
        # current_rsi = rsi(src, for_rsi) // current RSI reading
        dataframe['rsi'] = ta.RSI(dataframe[src], for_rsi)
        dataframe['rsi_4'] = ta.RSI(dataframe[src], 4)
        if self.config['runmode'].value == 'hyperopt':
            for for_ma in range(5, 81):
                # basis = ema(current_rsi, for_ma)
                dataframe[f'basis_{for_ma}'] = ta.EMA(dataframe['rsi'], for_ma)
                # dev = for_mult * stdev(current_rsi, for_ma)
                dataframe[f'dev_{for_ma}'] = ta.STDDEV(dataframe['rsi'], for_ma)
                # upper = basis + dev
                #dataframe[f'upper_{for_ma}'] = (dataframe[f'basis_{for_ma}'] + (dataframe[f'dev_{for_ma}'] * for_mult))
                # lower = basis - dev
                #dataframe[f'lower_{for_ma}'] = dataframe[f'basis_{for_ma}'] - (dataframe[f'dev_{for_ma}'] * for_mult)
                # disp_up = basis + ((upper - lower) * for_sigma) // minimum threshold around the MA that RSI must cross (from above)
                # dataframe[f'disp_up_{for_ma}'] = dataframe[f'basis_{for_ma}'] + ((dataframe[f'upper_{for_ma}'] - dataframe[f'lower_{for_ma}']) * for_sigma)
                # disp_down = basis - ((upper - lower) * for_sigma) // minimum threshold around the MA that RSI must cross (from below)
                # dataframe[f'disp_down_{for_ma}'] = dataframe[f'basis_{for_ma}'] - ((dataframe[f'upper_{for_ma}'] - dataframe[f'lower_{for_ma}']) * for_sigma)
                # color_rsi = current_rsi >= disp_up ? lime : current_rsi <= disp_down ? red : #ffea00 // current RSI colour depending on its position inside the BB
        else:
            dataframe[f'basis_{self.for_ma_length.value}'] = ta.EMA(dataframe['rsi'], self.for_ma_length.value)
            dataframe[f'basis_{self.for_ma_length_sell.value}'] = ta.EMA(dataframe['rsi'], self.for_ma_length_sell.value)
            # dev = for_mult * stdev(current_rsi, for_ma)
            dataframe[f'dev_{self.for_ma_length.value}'] = ta.STDDEV(dataframe['rsi'], self.for_ma_length.value)
            dataframe[f'dev_{self.for_ma_length_sell.value}'] = ta.STDDEV(dataframe['rsi'], self.for_ma_length_sell.value)
        #
        # // Extra lines and area fill for the RSI panel
        # h1 = hline(70, color=#d4d4d4, linestyle=dotted, linewidth=1)
        h1 = 70
        # h2 = hline(30, color=#d4d4d4, linestyle=dotted, linewidth=1)
        h2 = 30
        # fill (h1, h2, transp=95)
        #
        # // Alerts and trigger conditions
        # rsi_Green = crossover(current_rsi, disp_up)
        # rsi_Red = crossunder(current_rsi, disp_down)
        # alertcondition(condition=rsi_Green,
        #               title="RSI cross Above Dispersion Area",
        #               message="The RSI line closing crossed above the Dispersion area.")
        #
        # alertcondition(condition=rsi_Red,
        #               title="RSI cross Under Dispersion Area",
        #               message="The RSI line closing crossed below the Dispersion area")
        #
        # // Results and colouring
        # plot(basis, color=black)
        # plot(upper, color=#00fff0, linewidth=2)
        # plot(lower, color=#00fff0, linewidth=2)
        # s1 = plot(disp_up, color=white)
        # s2 = plot(disp_down, color=white)
        # fill(s1, s2, color=white, transp=80)
        # plot(current_rsi, color=color_rsi, linewidth=2)
        dataframe['EWO'] = EWO(dataframe, self.fast_ewo, self.slow_ewo)
        return dataframe
    def populate_buy_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        """Buy when RSI closes below the lower dispersion band and EWO is
        above its threshold (with non-zero volume)."""
        dataframe.loc[
            (
                # upper = basis + dev
                # lower = basis - dev
                # disp_up = basis + ((upper - lower) * for_sigma) // minimum threshold around the MA that RSI must cross (from above)
                # disp_up = basis + ((basis + dev * for_mult) - (basis - dev * for_mult)) * for_sigma) // minimum threshold around the MA that RSI must cross (from above)
                # disp_up = basis + (basis + dev * for_mult - basis + dev * for_mult)) * for_sigma) // minimum threshold around the MA that RSI must cross (from above)
                # disp_up = basis + (2 * dev * for_sigma * for_mult) // minimum threshold around the MA that RSI must cross (from above)
                (dataframe['rsi'] < (dataframe[f'basis_{self.for_ma_length.value}'] - (dataframe[f'dev_{self.for_ma_length.value}'] * self.for_sigma.value))) &
                (dataframe['EWO'] > self.ewo_high.value) &
                (dataframe['volume'] > 0)
            ),
            'buy'] = 1
        return dataframe
    def populate_sell_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        """Sell when RSI is above rsi_high, or closes above the upper
        dispersion band (with non-zero volume)."""
        dataframe.loc[
            (
                (
                    (dataframe['rsi'] > self.rsi_high.value) |
                    # upper = basis + dev
                    # lower = basis - dev
                    # disp_down = basis - ((upper - lower) * for_sigma) // minimum threshold around the MA that RSI must cross (from below)
                    # disp_down = basis - ((2* dev * for_sigma) // minimum threshold around the MA that RSI must cross (from below)
                    (dataframe['rsi'] > dataframe[f'basis_{self.for_ma_length_sell.value}'] + ((dataframe[f'dev_{self.for_ma_length_sell.value}'] * self.for_sigma_sell.value)))
                ) &
                (dataframe['volume'] > 0)
            ),
            'sell'] = 1
        return dataframe
class BBRSITV4(BBRSITV):
    """
    Variant of BBRSITV with a lower ROI target, a wider stoploss, a much
    longer warm-up window, and a stricter buy signal that splits the EWO
    condition into a moderate band (threshold..10) and an extreme band
    (>= 10, requiring RSI < 40), plus a fast-RSI oversold filter.
    """
    minimal_roi = {
        "0": 0.07
    }
    ignore_roi_if_buy_signal = True
    startup_candle_count = 400
    stoploss = -0.3  # value loaded from strategy

    def populate_buy_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
        """Set 'buy' = 1 where all entry conditions align."""
        ma_len = self.for_ma_length.value
        # RSI closed below the lower dispersion band.
        below_band = dataframe['rsi'] < (
            dataframe[f'basis_{ma_len}'] - dataframe[f'dev_{ma_len}'] * self.for_sigma.value
        )
        # Moderate EWO: above the tuned threshold but below 10.
        ewo_moderate = (dataframe['EWO'] > self.ewo_high.value) & (dataframe['EWO'] < 10)
        # Extreme EWO: at or above 10, only taken when RSI is already low.
        ewo_extreme = (dataframe['EWO'] >= 10) & (dataframe['rsi'] < 40)
        fast_oversold = dataframe['rsi_4'] < 25
        has_volume = dataframe['volume'] > 0
        dataframe.loc[
            below_band & (ewo_moderate | ewo_extreme) & fast_oversold & has_volume,
            'buy'] = 1
        return dataframe
class BBRSITV1(BBRSITV):
    """
    Hyperopt parameter set #1 for BBRSITV (logic unchanged; only the tuned
    buy/sell parameters differ from the base class).  Backtest record:

    2021-07-01 00:00:00 -> 2021-09-28 00:00:00 | Max open trades : 4
    ============================================================================= STRATEGY SUMMARY =============================================================================
    |              Strategy |   Buys |   Avg Profit % |   Cum Profit % |   Tot Profit USDT |   Tot Profit % |   Avg Duration |   Win  Draw  Loss  Win% |              Drawdown |
    |-----------------------+--------+----------------+----------------+-------------------+----------------+----------------+-------------------------+-----------------------|
    |         Elliotv8_08SL |    906 |           0.92 |         832.19 |         19770.304 |         659.01 |        0:38:00 |   717     0   189  79.1 |  2020.917 USDT 79.84% |
    | SMAOffsetProtectOptV1 |    417 |           1.33 |         555.91 |          8423.809 |         280.79 |        1:44:00 |   300     0   117  71.9 |  1056.072 USDT 61.08% |
    |               BBRSITV |    309 |           1.10 |         340.17 |          3869.800 |         128.99 |        2:53:00 |   223     0    86  72.2 |   261.984 USDT 25.84% |
    ============================================================================================================================================================================
    """
    INTERFACE_VERSION = 2
    # Buy hyperspace params:
    buy_params = {
        "ewo_high": 4.964,
        "for_ma_length": 12,
        "for_sigma": 2.313,
    }
    # Sell hyperspace params:
    sell_params = {
        "for_ma_length_sell": 78,
        "for_sigma_sell": 1.67,
        "rsi_high": 60,
    }
    # ROI table: # value loaded from strategy
    minimal_roi = {
        "0": 0.1
    }
    # Stoploss:
    stoploss = -0.25 # value loaded from strategy
    # Trailing stop:
    trailing_stop = False # value loaded from strategy
    trailing_stop_positive = 0.005 # value loaded from strategy
    trailing_stop_positive_offset = 0.025 # value loaded from strategy
    trailing_only_offset_is_reached = True # value loaded from strategy
class BBRSITV2(BBRSITV):
    """
    Hyperopt parameter set #2 for BBRSITV (logic unchanged; only the tuned
    buy/sell parameters differ from the base class).  Backtest record:

    2021-07-01 00:00:00 -> 2021-09-28 00:00:00 | Max open trades : 4
    ============================================================================= STRATEGY SUMMARY =============================================================================
    |              Strategy |   Buys |   Avg Profit % |   Cum Profit % |   Tot Profit USDT |   Tot Profit % |   Avg Duration |   Win  Draw  Loss  Win% |              Drawdown |
    |-----------------------+--------+----------------+----------------+-------------------+----------------+----------------+-------------------------+-----------------------|
    |         Elliotv8_08SL |    906 |           0.92 |         832.19 |         19770.304 |         659.01 |        0:38:00 |   717     0   189  79.1 |  2020.917 USDT 79.84% |
    | SMAOffsetProtectOptV1 |    417 |           1.33 |         555.91 |          8423.809 |         280.79 |        1:44:00 |   300     0   117  71.9 |  1056.072 USDT 61.08% |
    |               BBRSITV |    486 |           1.11 |         537.58 |          7689.862 |         256.33 |        5:01:00 |   287     0   199  59.1 |  1279.461 USDT 75.45% |
    ============================================================================================================================================================================
    """
    # Buy hyperspace params:
    buy_params = {
        "ewo_high": 4.85,
        "for_ma_length": 11,
        "for_sigma": 2.066,
    }
    # Sell hyperspace params:
    sell_params = {
        "for_ma_length_sell": 61,
        "for_sigma_sell": 1.612,
        "rsi_high": 87,
    }
    # ROI table: # value loaded from strategy
    minimal_roi = {
        "0": 0.1
    }
    # Stoploss:
    stoploss = -0.25 # value loaded from strategy
    # Trailing stop:
    trailing_stop = False # value loaded from strategy
    trailing_stop_positive = 0.005 # value loaded from strategy
    trailing_stop_positive_offset = 0.025 # value loaded from strategy
    trailing_only_offset_is_reached = True # value loaded from strategy
class BBRSITV3(BBRSITV):
    """
    Hyperopt parameter set #3 for BBRSITV: same buy/sell parameters as the
    base class but with trailing-stop enabled.  Backtest record:

    2021-07-01 00:00:00 -> 2021-09-28 00:00:00 | Max open trades : 4
    ============================================================================== STRATEGY SUMMARY =============================================================================
    |              Strategy |   Buys |   Avg Profit % |   Cum Profit % |   Tot Profit USDT |   Tot Profit % |   Avg Duration |   Win  Draw  Loss  Win% |               Drawdown |
    |-----------------------+--------+----------------+----------------+-------------------+----------------+----------------+-------------------------+------------------------|
    |         Elliotv8_08SL |    906 |           0.92 |         832.19 |         19770.304 |         659.01 |        0:38:00 |   717     0   189  79.1 |   2020.917 USDT 79.84% |
    | SMAOffsetProtectOptV1 |    417 |           1.33 |         555.91 |          8423.809 |         280.79 |        1:44:00 |   300     0   117  71.9 |   1056.072 USDT 61.08% |
    |               BBRSITV |    627 |           1.14 |         715.85 |         12998.605 |         433.29 |        5:35:00 |   374     0   253  59.6 |  2294.408 USDT 100.60% |
    ============================================================================================================================================================================="""
    INTERFACE_VERSION = 2
    # Buy hyperspace params:
    buy_params = {
        "ewo_high": 4.86,
        "for_ma_length": 22,
        "for_sigma": 1.74,
    }
    # Sell hyperspace params:
    sell_params = {
        "for_ma_length_sell": 65,
        "for_sigma_sell": 1.895,
        "rsi_high": 72,
    }
    # ROI table: # value loaded from strategy
    minimal_roi = {
        "0": 0.1
    }
    # Stoploss:
    stoploss = -0.25 # value loaded from strategy
    # Trailing stop: enabled here (unlike the base class).
    trailing_stop = True
    trailing_stop_positive = 0.078
    trailing_stop_positive_offset = 0.095
    trailing_only_offset_is_reached = False
|
{"hexsha": "f28db6dfe82ba3fd116bdfb90fa9b83aaf09c580", "size": 17715, "ext": "py", "lang": "Python", "max_stars_repo_path": "BBRSITV.py", "max_stars_repo_name": "ketzah/freqtrade-stuff", "max_stars_repo_head_hexsha": "786f1e7cdea930dc7bfc4fe926e8a9629514e27d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 60, "max_stars_repo_stars_event_min_datetime": "2021-11-06T01:10:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T03:14:35.000Z", "max_issues_repo_path": "BBRSITV.py", "max_issues_repo_name": "ketzah/freqtrade-stuff", "max_issues_repo_head_hexsha": "786f1e7cdea930dc7bfc4fe926e8a9629514e27d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-11-18T10:43:24.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-19T19:58:31.000Z", "max_forks_repo_path": "BBRSITV.py", "max_forks_repo_name": "ketzah/freqtrade-stuff", "max_forks_repo_head_hexsha": "786f1e7cdea930dc7bfc4fe926e8a9629514e27d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 16, "max_forks_repo_forks_event_min_datetime": "2021-10-10T18:37:07.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-09T15:25:59.000Z", "avg_line_length": 46.1328125, "max_line_length": 191, "alphanum_fraction": 0.5186565058, "include": true, "reason": "import numpy", "num_tokens": 4979}
|
# %matplotlib notebook
import pandas as pd
import numpy as np
import seaborn as sns
sns.set(color_codes=True)
from sklearn import preprocessing
# from imblearn.over_sampling import SMOTE
from imblearn.over_sampling import SMOTE,RandomOverSampler
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score,confusion_matrix
import matplotlib.pyplot as plt
import scipy
from sklearn.decomposition import PCA
from scipy.stats import pearsonr
import os
# import boto3
import time
from scipy import stats
from bokeh.events import Tap
from bokeh.models.widgets import PreText, Select,TextInput
from sklearn.preprocessing import MinMaxScaler
# import utils.visualization
# Features excluded from profiling analysis: every ordered channel pair of the
# Manders / RWC correlation measures, plus the three largest granularity
# scales (14-16) for each channel.  Generated instead of hand-listed; the
# ordering matches the original literal list exactly.
_CHANNELS = ('AGP', 'DNA', 'ER', 'Mito', 'RNA')
blackListFeatures = [
    f'Nuclei_Correlation_{measure}_{ch_a}_{ch_b}'
    for measure in ('Manders', 'RWC')
    for ch_a in _CHANNELS
    for ch_b in _CHANNELS
    if ch_b != ch_a
] + [
    f'Nuclei_Granularity_{scale}_{ch}'
    for scale in (14, 15, 16)
    for ch in _CHANNELS
]
#############################################
# Top-level configuration and data loading for the interactive (bokeh) app.
rootDir='/home/ubuntu/bucket/projects/2017_09_27_RareDiseases_Taipale/workspace'
ndd='5';
############################# load annotations
# sheet_name=None loads every sheet of the workbook into a dict of DataFrames.
AnnotSet2 = pd.read_excel(rootDir+'/metadata/Set2Annots20190801.xlsx', sheet_name=None)
# print(AnnotSet2.keys())
# Tag each annotation sheet with the imaging batch it belongs to.
df_1=AnnotSet2['Replicate_Plates']
df_1['batch']='Maxproj_Replicates_Original_Screen'
df_2=AnnotSet2['Kinase_Mutants']
# df_2=df_2.drop([159])
df_2['batch']='Maxproj_Kinase_Plates'
df_3=AnnotSet2['Common_Variants']
df_3['batch']='Maxproj_Common_Variants'
df_4=AnnotSet2['Cancer_Mutants']
df_4['batch']='Maxproj_Cancer_Mutations_Screen'
# Combined annotation table across all four batches.
annot_df_2 = pd.concat([df_1,df_2,df_3,df_4],axis=0,sort=False,ignore_index=True)
metaDataPlates=annot_df_2['Metadata_Plate'].unique()
# listOfPlates0=os.listdir(rootDir+'/backend/wellsSingleCells/')[1:]
# listOfPlates0=os.listdir(rootDir+'/backend/'+profType+'PerWells'+ndd+'/')
# Strip trailing whitespace so sample-name joins match reliably.
annot_df_2['Metadata_Sample']=annot_df_2['Metadata_Sample'].str.rstrip()
# Global intensity bounds used for normalisation — TODO confirm provenance.
minInAllCells,maxInAllCells=0.0003, 0.968
############################# load rank list
# dfRank4bokeh1 = pd.read_excel(rootDir+'/metadata/dtFimpact4bokeh.xlsx', sheet_name=None)
# dfRank4bokeh1 = pd.read_excel(rootDir+'/metadata/dtFimpact4bokeh.xlsx', sheet_name=None)
dfRank = pd.read_excel(rootDir+'/metadata/impactList_20191126.xlsx', sheet_name='Sheet_1')
# Keep only rows with a CC-All score, sorted ascending by that score.
dfRank4bokeh=dfRank[~dfRank['CC-All'].isnull()].reset_index(drop=True)
indices = np.argsort(dfRank4bokeh['CC-All'])
dfRank4bokeh1=dfRank4bokeh.loc[indices,:].reset_index(drop=True)
# Build the dropdown labels: "<pair>_rw:<WT replicate>_rm:<MT replicate>".
dfRank4bokeh1['ticker1']=dfRank4bokeh1['UNIQUE'].astype(str)+'_rw:'+dfRank4bokeh1['WT-rep'].astype(int).astype(str)+'_rm:'+dfRank4bokeh1['MT-rep'].astype(int).astype(str)
DEFAULT_TICKERS=sorted(dfRank4bokeh1['ticker1'].tolist())
# Bokeh select widget listing all WT-MT pairs.
ticker1 = Select(title="WT-MT pairs", value=DEFAULT_TICKERS[0], options=DEFAULT_TICKERS)
############################ load single cell data
def load_data():
tickVal = ticker1.value
pairWM=tickVal.split('_rw:')[0]
wt=tickVal.split(' ')[0]
wtRep=tickVal.split('_rw:')[1][0]
mtRep=tickVal.split('_rm:')[1][0]
selDF_W=dfRank[(dfRank['UNIQUE']==wt)&(dfRank['rep']==int(wtRep))]
selDF_M=dfRank4bokeh1[(dfRank4bokeh1['UNIQUE']==pairWM)&(dfRank4bokeh1['WT-rep']==int(wtRep))&(dfRank4bokeh1['MT-rep']==int(mtRep))]
plateW=selDF_W.Metadata_Plate.tolist()[0]
wellW=selDF_W.Metadata_Well.tolist()[0]
plateM=selDF_M.Metadata_Plate.tolist()[0]
wellM=selDF_M.Metadata_Well.tolist()[0]
# plateW,wellW='Kinase_Mutants_1_Replicate','D11'
# plateM,wellM='Kinase_Mutants_1','E10'
# plateW,wellW='Replicate_24','F09'
# plateM,wellM='Replicate_24','G03'
annotForWellW=annot_df_2.loc[(annot_df_2['Metadata_Plate']==plateW) & (annot_df_2['Metadata_Well']==wellW),:].reset_index(drop=True)
fileNameW=rootDir+'/backend/wellsSingleCells'+ndd+'/df_'+plateW+'_'+wellW;
fileNameUW=rootDir+'/backend/wellsSingleCells'+ndd+'/df_control_'+plateW+'_'+wellW;
annotForWellM=annot_df_2.loc[(annot_df_2['Metadata_Plate']==plateM) & (annot_df_2['Metadata_Well']==wellM),:].reset_index(drop=True)
fileNameM=rootDir+'/backend/wellsSingleCells'+ndd+'/df_'+plateM+'_'+wellM;
fileNameUM=rootDir+'/backend/wellsSingleCells'+ndd+'/df_control_'+plateM+'_'+wellM;
perWellDataFilteredW=pd.read_pickle(fileNameW).reset_index(drop=True);
perWellDataUntransW=pd.read_pickle(fileNameUW).reset_index(drop=True);
perWellDataFilteredM=pd.read_pickle(fileNameM).reset_index(drop=True);
perWellDataUntransM=pd.read_pickle(fileNameUM).reset_index(drop=True);
# def fun(dff):
# wellMin,wellMax=dff['Cells_Intensity_MinIntensity_Protein'].min(),dff['Cells_Intensity_MaxIntensity_Protein'].max()
# dff['Cells_Intensity_MinIntensity_Protein_corrected']=dff['Cells_Intensity_MinIntensity_Protein']-wellMin+minInAllCells
# dff['Cells_Intensity_MaxIntensity_Protein_corrected']=dff['Cells_Intensity_MaxIntensity_Protein']-wellMax+maxInAllCells
# wellMinMaxDiff=wellMax-wellMin;
# dff['Cells_Intensity_MeanIntensity_Protein_corrected']=dff['Cells_Intensity_MeanIntensity_Protein']-(wellMinMaxDiff/2);
# return dff
# perWellDataFilteredW,perWellDataUntransW,perWellDataFilteredM,perWellDataUntransM = \
# (df.apply(fun) for df in [perWellDataFilteredW,perWellDataUntransW,perWellDataFilteredM,perWellDataUntransM])
def normalizeIntFeatures(dff,dffU):
bothDf=pd.concat([dff,dffU],sort=False,ignore_index=True)
scaler = MinMaxScaler()
bothDf['Cells_Intensity_MaxIntensity_Protein_corrected']=scaler.fit_transform(bothDf['Cells_Intensity_MaxIntensity_Protein'].values.reshape(-1, 1))
bothDf['Cells_Intensity_MinIntensity_Protein_corrected']=scaler.fit_transform(bothDf['Cells_Intensity_MinIntensity_Protein'].values.reshape(-1, 1))
wellMin,wellMax=bothDf['Cells_Intensity_MinIntensity_Protein'].min(),bothDf['Cells_Intensity_MaxIntensity_Protein'].max()
wellMinMaxDiff=wellMax-wellMin;
# print(wellMinMaxDiff)
bothDf['Cells_Intensity_MeanIntensity_Protein_corrected']=bothDf['Cells_Intensity_MeanIntensity_Protein']-(wellMinMaxDiff/2);
dff2=bothDf.iloc[0:dff.shape[0]]
dffU2=bothDf.iloc[dff.shape[0]:]
return dff2,dffU2
perWellDataFilteredW,perWellDataUntransW=normalizeIntFeatures(perWellDataFilteredW,perWellDataUntransW)
perWellDataFilteredM,perWellDataUntransM=normalizeIntFeatures(perWellDataFilteredM,perWellDataUntransM)
# print(perWellDataFilteredW.shape,perWellDataUntransW.shape,perWellDataFilteredM.shape,perWellDataUntransM.shape)
cpFeaturesW=perWellDataUntransW.columns[~(perWellDataUntransW.columns.str.contains("_Protein")) & perWellDataUntransW.columns.str.contains("Cells_|Cytoplasm_|Nuclei_")]# & (perWellDataUntransW.columns.str.contains("Intensity"))]
cpFeaturesM=perWellDataUntransM.columns[~(perWellDataUntransM.columns.str.contains("_Protein")) & perWellDataUntransM.columns.str.contains("Cells_|Cytoplasm_|Nuclei_")]# & (perWellDataUntransM.columns.str.contains("Intensity"))]
# cpFeaturesW=perWellDataUntransW.columns[perWellDataUntransW.columns.str.contains("Cells_|Cytoplasm_|Nuclei_")]
# cpFeaturesM=perWellDataUntransM.columns[perWellDataUntransM.columns.str.contains("Cells_|Cytoplasm_|Nuclei_")]
# cpFeatures=list(set(cpFeaturesW).intersection(cpFeaturesM).intersection(columns4scalerA))
cpFeatures=list(set(cpFeaturesW).intersection(cpFeaturesM))
# locFeature2beremoved=df_meanP.columns[df_meanP.columns.str.contains("_Location_Center_X|_Location_Center_Y")]
locFeature2beremoved=list(filter(lambda x: "_Location_Center_X" in x or "_Location_Center_Y" in x , cpFeatures))
corFeature2beremoved=list(filter(lambda x: "Correlation" in x , cpFeatures))
allFeatures=list(set(cpFeatures)-set(blackListFeatures)-set(locFeature2beremoved)-set(corFeature2beremoved))
nomalizedCellsW=perWellDataFilteredW.copy()
nomalizedCellsM=perWellDataFilteredM.copy()
nomalizedCellsW['Label']='nomalizedCellsW'
nomalizedCellsM['Label']='nomalizedCellsM'
scaledCellsW=perWellDataFilteredW.copy()
scaledCellsM=perWellDataFilteredM.copy()
scaledCellsW['Label']='scaledCellsW'
scaledCellsM['Label']='scaledCellsM'
scaledUntransCellsW=perWellDataUntransW.copy()
scaledUntransCellsM=perWellDataUntransM.copy()
scaledUntransCellsW['Label']='scaledUntransCellsW'
scaledUntransCellsM['Label']='scaledUntransCellsM'
perWellDataFilteredW['Label']='scaledCellsW'
perWellDataFilteredM['Label']='scaledCellsM'
perWellDataUntransW['Label']='scaledUntransCellsW'
perWellDataUntransM['Label']='scaledUntransCellsM'
scalerW = preprocessing.StandardScaler()
scalerM = preprocessing.StandardScaler()
scaler = preprocessing.StandardScaler()
scalerW.fit(perWellDataUntransW.loc[:,allFeatures].astype('float64'))
scalerM.fit(perWellDataUntransM.loc[:,allFeatures].astype('float64'))
nomalizedCellsW.loc[:,allFeatures]=scalerW.transform(perWellDataFilteredW.loc[:,allFeatures].astype('float64'))
nomalizedCellsM.loc[:,allFeatures]=scalerM.transform(perWellDataFilteredM.loc[:,allFeatures].astype('float64'))
# scaledCellsW.loc[:,allFeatures]=scaler.fit_transform(perWellDataFilteredW.loc[:,allFeatures].astype('float64'))
# scaledCellsM.loc[:,allFeatures]=scaler.fit_transform(perWellDataFilteredM.loc[:,allFeatures].astype('float64'))
# scaledUntransCellsW.loc[:,allFeatures]=scaler.fit_transform(perWellDataUntransW.loc[:,allFeatures].astype('float64'))
# scaledUntransCellsM.loc[:,allFeatures]=scaler.fit_transform(perWellDataUntransM.loc[:,allFeatures].astype('float64'))
WellW=pd.concat([perWellDataFilteredW,perWellDataUntransW], ignore_index=True,sort=False)
WellM=pd.concat([perWellDataFilteredM,perWellDataUntransM], ignore_index=True,sort=False)
scaledWellW=WellW.copy()
scaledWellM=WellM.copy()
scaledWellW.loc[:,allFeatures]=scaler.fit_transform(WellW.loc[:,allFeatures].astype('float64'))
scaledWellM.loc[:,allFeatures]=scaler.fit_transform(WellM.loc[:,allFeatures].astype('float64'))
scaledCellsMeanW=scaledWellW.loc[scaledWellW['Label']=='scaledCellsW',allFeatures].mean().to_frame().T
scaledUnCellsMeanW=scaledWellW.loc[scaledWellW['Label']=='scaledUntransCellsW',allFeatures].mean().to_frame().T
nomalizedCellsMeanW=nomalizedCellsW.loc[:,allFeatures].mean().to_frame().T
avgProfileW=pd.concat([nomalizedCellsMeanW,annotForWellW],sort=False, axis=1)
MeanW=perWellDataFilteredW.loc[:,allFeatures].astype('float64').mean().to_frame().T
scaledCellsMeanM=scaledWellM.loc[scaledWellM['Label']=='scaledCellsM',allFeatures].mean().to_frame().T
scaledUnCellsMeanM=scaledWellM.loc[scaledWellM['Label']=='scaledCellsM',allFeatures].mean().to_frame().T
nomalizedCellsMeanM=nomalizedCellsM.loc[:,allFeatures].mean().to_frame().T
avgProfileM=pd.concat([nomalizedCellsMeanM,annotForWellM],sort=False, axis=1)
MeanM=perWellDataFilteredM.loc[:,allFeatures].astype('float64').mean().to_frame().T
avgPcc=scipy.stats.pearsonr(MeanW.T, MeanM.T)[0][0]
avgUnPcc=scipy.stats.pearsonr(perWellDataUntransW.loc[:,allFeatures].astype('float64').mean().to_frame(),\
perWellDataUntransM.loc[:,allFeatures].astype('float64').mean().to_frame())[0][0]
scalPcc=scipy.stats.pearsonr(scaledCellsMeanW.T, scaledCellsMeanM.T)[0][0]
scalUnPcc=scipy.stats.pearsonr(scaledUnCellsMeanW.T, scaledUnCellsMeanM.T)[0][0]
normPcc=scipy.stats.pearsonr(nomalizedCellsMeanW.T, nomalizedCellsMeanM.T)[0][0]
print('Avg profiles CC: ',avgPcc)
print('Avg untransfected profiles CC: ',avgUnPcc)
print('Normalized profiles CC: ',normPcc)
print('Scaled profiles CC: ',scalPcc)
print('Scaled untransfected profiles CC: ',scalUnPcc)
import umap
from sklearn.manifold import TSNE
umapT=umap.UMAP()
tsneT = TSNE(perplexity=10)
# data4umap=pd.concat([perWellDataFilteredW, perWellDataFilteredM,perWellDataUntransW, perWellDataUntransM], ignore_index=True,sort=False)
data4umap=pd.concat([scaledWellW,scaledWellM], ignore_index=True,sort=False)
# data4umap=pd.concat([scaledCellsW,scaledCellsM,scaledUntransCellsW, scaledUntransCellsM], ignore_index=True,sort=False)
# data4umap['diffMinMaxNucCyto']=abs(data4umap['Cells_Intensity_MaxIntensity_Protein_corrected'])
data4umap['meanInt']=data4umap['Cells_Intensity_MeanIntensity_Protein']
# data4umap['Cells_Intensity_MaxIntensity_Protein'].max()
data4umap['diffMinMaxNucCyto']=data4umap['Cells_Intensity_UpperQuartileIntensity_Protein']-data4umap['Cells_Intensity_MinIntensity_Protein']
data4umap['UpperQ']=data4umap['Cells_Intensity_UpperQuartileIntensity_Protein']
data4umap['MaxInt']=data4umap['Cells_Intensity_MaxIntensity_Protein']
data4umap['StdInt']=data4umap['Cells_Intensity_StdIntensity_Protein']
# data4umap['meanInt']=data4umap['Cells_Intensity_MeanIntensity_Protein']
# data4umap['diffMinMaxNucCyto']=abs(data4umap['Cells_Intensity_MaxIntensity_Protein_corrected']
# data4umap['meanInt']=data4umap['Cells_Intensity_MeanIntensity_Protein_corrected']
# data4umap=pd.concat([scaledCellsW,scaledCellsM,scaledUntransCellsW, scaledUntransCellsM], ignore_index=True,sort=False)
pcaEnabled=0;
if pcaEnabled:
n_pc=60
pcaT = PCA(n_components = n_pc)
preprocData=pcaT.fit_transform(data4umap.loc[:,allFeatures])
else:
preprocData=data4umap.loc[:,allFeatures]
from sklearn.cluster import AgglomerativeClustering,SpectralClustering,KMeans
# clustering = AgglomerativeClustering(n_clusters=10).fit(preprocData)
# clustering = SpectralClustering(n_clusters=10,affinity='nearest_neighbors',assign_labels="discretize").fit(preprocData)
clustering = KMeans(n_clusters=10).fit(preprocData)
clusterLabels=clustering.labels_#.reshape(1,preprocData.shape[0])
# print(clusterLabels.shape,preprocData.shape)
# Y = umapT.fit_transform(preprocData)
Y = tsneT.fit_transform(preprocData)
tsneResDF=pd.DataFrame(index=range(Y.shape[0]),columns=['one','two','Label','clsLabel','Metadata_Plate','Metadata_Well',\
'Metadata_FieldID','ObjectNumber','diffMinMaxNucCyto','meanInt','UpperQ','MaxInt','StdInt']);
tsneResDF.loc[:,['one','two']]=Y
tsneResDF.loc[:,'clsLabel']=clusterLabels
tsneResDF['clsLabel'] = tsneResDF['clsLabel'].astype(str)
tsneResDF.loc[:,['Label','Metadata_Plate','Metadata_Well','Metadata_FieldID','ObjectNumber','diffMinMaxNucCyto','meanInt','UpperQ','MaxInt','StdInt']]=\
data4umap[['Label','Metadata_Plate','Metadata_Well','Metadata_FieldID','ObjectNumber','diffMinMaxNucCyto','meanInt','UpperQ','MaxInt','StdInt']]
# g=sns.scatterplot(x="one", y="two", hue="Label", data=tsneResDF)
# print(tsneResDF.shape, data4umap.shape)
# print(tsneResDF.dtypes)
tsneResDF['meanInt'] = tsneResDF['meanInt'].astype(float)
tsneResDF['StdInt'] = tsneResDF['StdInt'].astype(float)
# print(tsneResDF[['clsLabel','meanInt']].groupby(['clsLabel']).describe())
x=tsneResDF[['clsLabel','meanInt']].groupby(['clsLabel']).describe().reset_index()#.sort_values(by=['mean'])
x.columns = x.columns.droplevel(0)
# print(x.sort_values(by=['mean']))
x=tsneResDF[['clsLabel','StdInt']].groupby(['clsLabel']).describe().reset_index()#.sort_values(by=['mean'])
x.columns = x.columns.droplevel(0)
# print(x.sort_values(by=['mean']))
return tsneResDF, data4umap
# plate0,well0=plateM,wellM
# plate0,well0=plateW,wellW
# field=21; objectN=10;
# field=5; objectN=16;
# field=3; objectN=55;
# field=15; objectN=38;
# field=11; objectN=40;
from bokeh.plotting import figure, output_file, show
from bokeh.models import ColumnDataSource,LinearColorMapper
from bokeh.layouts import row, column, layout, gridplot
from bokeh.models.tools import HoverTool
from bokeh.io import output_notebook
import skimage.io
from bokeh.io import curdoc
import bokeh
from skimage.color import rgb2gray
from bokeh.palettes import d3
# output_notebook()
# output_file('columndatasource_example.html')
# output_file("color_scatter.html", title="color_scatter.py example", mode="cdn")
# df = pd.read_csv('thor_wwii.csv')
# sample = tsneResDF.sample(100)
# --- Build the data sources backing the two scatter plots. ---
# load_data() (defined earlier in this module) runs feature scaling, k-means
# clustering and t-SNE, returning the embedded point table and the full
# per-cell feature table (used later by get_images() for image lookups).
tsneResDF,data4umap=load_data();
sample = tsneResDF.copy()
# Fixed color per transfection label: WT/Mutant x transfected/untransfected.
colormap = {'scaledUntransCellsW': 'blue', 'scaledUntransCellsM': 'green', 'scaledCellsW': 'red','scaledCellsM':'orchid'}
colors = [colormap[x] for x in sample['Label']]
# source = ColumnDataSource(sample)
# source = ColumnDataSource(sample)
# Columns for the label-colored scatter: t-SNE coordinates ('one'/'two'),
# per-point color, and metadata surfaced in hover tooltips / used on tap.
d=dict(one=sample['one'],
       two=sample['two'],
       color=colors,
       Label=sample['Label'],
       clsLabel=sample['clsLabel'],
       Metadata_Plate=sample['Metadata_Plate'],
       Metadata_Well=sample['Metadata_Well'],
       Metadata_FieldID=sample['Metadata_FieldID'],
#        actual_image_number=sample['actual_image_number'],
       ObjectNumber=sample['ObjectNumber'],
       diff=sample['diffMinMaxNucCyto'],
       meanInt=sample['meanInt'],
       UpperQ=sample['UpperQ'],
       MaxInt=sample['MaxInt'],
       StdInt=sample['StdInt']
      )
# dCls=d.copy()
from bokeh.palettes import viridis
# One viridis color per k-means cluster id; cluster ids are stringified ints,
# hence the int(x) when indexing the palette.
palette2 = viridis(len(sample['clsLabel'].unique()))
# print(sample['clsLabel'].unique())
# color_map2 = bokeh.models.CategoricalColorMapper(factors=sample['clsLabel'].unique().tolist(),palette=palette2)
colors2 = [palette2[int(x)] for x in sample['clsLabel']]
# dCls['color']=color_map
source = ColumnDataSource(data=d)
# Full per-cell table; get_images() filters it by plate/well/field/object.
source2 = ColumnDataSource(data4umap)
# print(len(d),len(dCls))
# Same points as `d`, but colored by cluster id instead of transfection label.
dCls=dict(one=sample['one'],
          two=sample['two'],
          color=colors2,
          Label=sample['Label'],
          clsLabel=sample['clsLabel'],
          Metadata_Plate=sample['Metadata_Plate'],
          Metadata_Well=sample['Metadata_Well'],
          Metadata_FieldID=sample['Metadata_FieldID'],
#           actual_image_number=sample['actual_image_number'],
          ObjectNumber=sample['ObjectNumber'],
          diff=sample['diffMinMaxNucCyto'],
          meanInt=sample['meanInt'],
          UpperQ=sample['UpperQ'],
          MaxInt=sample['MaxInt'],
          StdInt=sample['StdInt']
         )
source3 = ColumnDataSource(data=dCls)
# Histogram of per-cell mean protein intensity for the distribution panel.
measured1=sample['meanInt'].values;
hist1, edges1 = np.histogram(measured1, density=True, bins=50)
# kde1 = stats.gaussian_kde(measured1.astype(float))
dDist=dict(hist_1=hist1,
           edges_1_right=edges1[1:],
           edges_1_left=edges1[:-1])
#            kde_1=kde1)
sourceDis=ColumnDataSource(data=dDist)
# Shared hover tooltips: all '@field' names refer to columns of the
# ColumnDataSource each plot is bound to.
hover = HoverTool()
hover.tooltips=[
    ('Label', '@Label'),
    ('clsLabel', '@clsLabel'),
    ('Metadata_Plate', '@Metadata_Plate'),
    ('Metadata_Well', '@Metadata_Well'),
    ('Metadata_FieldID', '@Metadata_FieldID'),
    ('ObjectNumber', '@ObjectNumber'),
    ('diff', '@diff'),
    ('meanInt', '@meanInt'),
    ('UpperQ', '@UpperQ'),
    ('MaxInt', '@MaxInt'),
    ('StdInt', '@StdInt')
]
# p1: histogram of meanInt, fed by sourceDis (refreshed in ticker1_change).
p1 = figure(title='dis - meanInt', tools='', background_fill_color="#fafafa")
p1.quad(top='hist_1', bottom=0, left='edges_1_left', right='edges_1_right',
        fill_color="navy", line_color="white", alpha=0.5,source=sourceDis)
# p1.line(x, pdf1, line_color="#ff8888", line_width=4, alpha=0.7, legend="PDF")
#p1.y_range.start = 0;p1.legend.location = "top_left";p1.legend.background_fill_color = "#fefefe"
p1.xaxis.axis_label = 'x';p1.yaxis.axis_label = 'Pr(x)';p1.grid.grid_line_color="white"
# p: embedding colored by transfection label; 'tap' drives the image strip.
# NOTE(review): 'untrasfected' typo in the displayed title text.
p = figure(tools="tap,reset",tooltips=hover.tooltips)
p.circle(x='one', y='two',
         source=source,
         size=2, color='color', legend='Label')
p.title.text = 'UMAP - Applied on WT and Mutant transfected and untrasfected single cells'
p.xaxis.axis_label = 'one'
p.yaxis.axis_label = 'two'
# p2: the same embedding colored by k-means cluster id.
p2 = figure(tools="tap,reset",tooltips=hover.tooltips)
p2.circle(x='one', y='two',
          source=source3,
          size=2, color='color', legend='clsLabel')
def get_images(imInf):
    """Load and crop the five channel images for one cell.

    imInf is [plate, well, field, objectNumber].  The cell is looked up in
    the module-level `source2` table; the Protein/Mito/ER/DNA channel files
    plus the cell-outline overlay are read, cropped to a (2*boxSize)^2
    window centered on the nucleus, max-normalized per channel, and
    concatenated side by side into a single 2-D array.  When the crop would
    fall off the top/left edge or any channel file is missing, all-ones
    dummy tiles are returned instead.

    Assumes `os`, `pd`, `np`, `skimage.io` and `rgb2gray` are imported at
    module level (they are, earlier in this file).
    """
    plate0,well0,field,objectN=imInf[0],imInf[1],imInf[2],imInf[3]
#     print(source2.shape)
    # Rehydrate the Bokeh data source back into a DataFrame so it can be
    # filtered with ordinary boolean masks.
    data4umap3=pd.DataFrame(source2.data, columns=source2.column_names)
#     data4umap3=source2.data;
#     print(data4umap3.shape)
    dfWithWTlabels=data4umap3[(data4umap3['Metadata_Plate']==plate0) & (data4umap3['Metadata_Well']==well0) &\
                              (data4umap3['Metadata_FieldID']==field) & (data4umap3['ObjectNumber']==objectN)]
    # Per-channel image file names recorded by the CellProfiler pipeline.
    ch_p=dfWithWTlabels['FileName_OrigProtein'].values[0];
    ch_M=dfWithWTlabels['FileName_OrigMito'].values[0];
    ch_E=dfWithWTlabels['FileName_OrigER'].values[0];
    ch_D=dfWithWTlabels['FileName_OrigDNA'].values[0];
    projectPath='/home/ubuntu/bucket/projects/2017_09_27_RareDiseases_Taipale/'
    plateName=dfWithWTlabels['Metadata_Plate'].values[0]
#     wellName=dfWithWTlabels.loc[index,'Metadata_Well']
# #     print(index,wellName)
#     fieldName=dfWithWTlabels.loc[index,'Field']
#     objectNum=dfWithWTlabels.loc[index,'ObjectNumber']
#     protLoc=dfWithWTlabels.loc[index,'manual_Annot']
#     protLoc=dfWithWTlabels.loc[index,'Metadata_Location']
    batch=dfWithWTlabels['batch'].values[0]
#     ch_seg=projectPath+'/workspace/analysis/'+batch+'/'+plateName+'/analysis/'+plateName+'-'+well0+'-'+str(field)+\
#     '/binarymask/'+well0+'_s'+str(field)+'_binarymask.png'
    # Cell-outline overlay produced by the analysis pipeline for this field.
    ch_seg=projectPath+'/workspace/analysis/'+batch+'/'+plateName+'/analysis/'+plateName+'-'+well0+'-'+str(field)+\
    '/Cell_outlines/'+well0+'_s'+str(field)+'_cell_outlines.png'
#     dataFileName=projectPath+batch+'/images/'+plateName+'/' # for dataset1
    dataFileName=projectPath+batch+'/images/'+plateName+'/Images/' # for dataset2
    boxSize=100;
    xCenter=int(dfWithWTlabels['Nuclei_Location_Center_X'].values[0])
    yCenter=int(dfWithWTlabels['Nuclei_Location_Center_Y'].values[0])
    print(dataFileName+ch_M,xCenter,yCenter)
    # Only crop when the window fits and all four channel files exist.
    # NOTE(review): only the top/left bounds are checked; a crop near the
    # right/bottom edge silently yields a smaller array — confirm intended.
    if (xCenter>boxSize) & (yCenter>boxSize) & (os.path.exists(dataFileName+ch_p)) & (os.path.exists(dataFileName+ch_M)) &\
       (os.path.exists(dataFileName+ch_E)) & (os.path.exists(dataFileName+ch_D)):
#         print(xCenter,yCenter,boxSize)
        imP=np.squeeze(skimage.io.imread(dataFileName+ch_p))[yCenter-boxSize:yCenter+boxSize,xCenter-boxSize:xCenter+boxSize]
        imM=np.squeeze(skimage.io.imread(dataFileName+ch_M))[yCenter-boxSize:yCenter+boxSize,xCenter-boxSize:xCenter+boxSize]
        imE=np.squeeze(skimage.io.imread(dataFileName+ch_E))[yCenter-boxSize:yCenter+boxSize,xCenter-boxSize:xCenter+boxSize]
        imD=np.squeeze(skimage.io.imread(dataFileName+ch_D))[yCenter-boxSize:yCenter+boxSize,xCenter-boxSize:xCenter+boxSize]
        imSeg=np.squeeze(skimage.io.imread(ch_seg))[yCenter-boxSize:yCenter+boxSize,xCenter-boxSize:xCenter+boxSize]
        # Outline overlay is RGB; collapse to gray so all panels are 2-D.
        imSeg = rgb2gray(imSeg)
    else:
        print('Here')
        imP=np.ones((boxSize,boxSize));imM=np.ones((boxSize,boxSize));
        imE=np.ones((boxSize,boxSize));imD=np.ones((boxSize,boxSize));
        imSeg=np.ones((boxSize,boxSize));
    print(imP.shape,imM.shape,imE.shape,imD.shape,imSeg.shape)
    # Max-normalize each panel independently so display brightness is
    # comparable, then tile horizontally: Protein|Mito|ER|DNA|Segmentation.
    return np.concatenate([imP/(imP.max()),imM/(imM.max()),imE/(imE.max()),imD/(imD.max()),imSeg/(imSeg.max())],axis=1)
#     return np.concatenate([imP,imM,imE,imD],axis=1)
# return np.concatenate([imP,imM,imE,imD],axis=1)
# print(hover.tooltips)
# def read_rgb(imagePath):
# boxSize=200;
# xCenter,yCenter=200,200
# img = np.squeeze(skimage.io.imread(imagePath))[yCenter-boxSize:yCenter+boxSize,xCenter-boxSize:xCenter+boxSize]
# # img = skimage.io.imread(imagePath)
# print(img.shape)
# # img=np.flip(img,0)
# # rgb_img = img.astype(np.uint8)
# return img
def create_figure(imageInfoList):
    """Build a borderless Bokeh figure showing one cell's five channel crops
    (Protein | Mito | ER | DNA | Segmentation) side by side.

    imageInfoList is [plate, well, field, objectNumber], forwarded to
    get_images(); the returned 2-D array is rendered as a gray-scale image
    glyph spanning the whole plot area.
    """
    new_data1 = dict();
#     adss='/home/ubuntu/bucket/projects/2017_09_27_RareDiseases_Taipale/Maxproj_Kinase_Plates/images/Kinase_Mutants_1/Images/r05c10f11p02-ch2sk1fk1fl1.tiff'
#     adss='/home/ubuntu/2017_09_27_RareDiseases_Taipale/cluster10_examplar.png'
    images = get_images(imageInfoList)
    # Plot size matches the concatenated crop: five ~160x160 panels -> 160x800.
    N, M=160, 800
    kw = dict()
    kw['x_range'] = (0,M)
    kw['y_range'] = (0,N)
    kw['plot_width'] = M
    kw['plot_height'] = N
    # The spacing inside the title string roughly aligns each caption over
    # its image panel.
    kw['title']='          Protein                             Mito \
                            ER                             DNA                         Segmentation      '
    ts1 = figure(tools='pan,wheel_zoom,xbox_select,reset', **kw)
    ts1.axis.visible = False
    ts1.xgrid.visible = False
    ts1.ygrid.visible = False
#     print(images[l].shape,Ms[l],Ns[l])
    color = LinearColorMapper(bokeh.palettes.gray(256))
    # Create an empty image glyph first, then push the pixels through its
    # data source, so the renderer can be reused for later updates.
    r1=ts1.image(image=[], x=0, y=0, dw=M, dh=N, color_mapper=color)
    ds1 = r1.data_source;
    new_data1['image'] = [images]
    ds1.data = new_data1
#     ts.append(ts1);
    return ts1
def ticker1_change(attrname, old, new):
    """Select-widget callback: reload the data and refresh every data source.

    Re-runs load_data() (scaling, clustering, t-SNE), then pushes the fresh
    results into the module-level sources: `source` (label-colored scatter),
    `source3` (cluster-colored scatter), `source2` (full per-cell table used
    by get_images) and `sourceDis` (meanInt histogram), and finally rebuilds
    the document layout via update().

    Parameters follow the Bokeh on_change signature: the changed property
    name plus its old and new values (only logged here).
    """
    print(old, new)
    tsneResDF2, data4umap2 = load_data()
    # Full per-cell feature table consumed by get_images() on tap.
    source2.data = ColumnDataSource(data4umap2).data
    # Label-colored scatter data: same columns as the initial `d` dict.
    colors = [colormap[x] for x in tsneResDF2['Label']]
    d2 = dict(one=tsneResDF2['one'],
              two=tsneResDF2['two'],
              color=colors,
              Label=tsneResDF2['Label'],
              clsLabel=tsneResDF2['clsLabel'],
              Metadata_Plate=tsneResDF2['Metadata_Plate'],
              Metadata_Well=tsneResDF2['Metadata_Well'],
              Metadata_FieldID=tsneResDF2['Metadata_FieldID'],
              ObjectNumber=tsneResDF2['ObjectNumber'],
              diff=tsneResDF2['diffMinMaxNucCyto'],
              meanInt=tsneResDF2['meanInt'],
              UpperQ=tsneResDF2['UpperQ'],
              MaxInt=tsneResDF2['MaxInt'],
              StdInt=tsneResDF2['StdInt'])
    source.data = d2
    # Cluster-colored variant: identical columns, color keyed by k-means id.
    palette2 = viridis(len(tsneResDF2['clsLabel'].unique()))
    colors2 = [palette2[int(x)] for x in tsneResDF2['clsLabel']]
    dCls2 = dict(d2, color=colors2)
    source3.data = dCls2
    # Histogram of per-cell mean protein intensity for the p1 panel.
    measured1 = tsneResDF2['meanInt'].values
    hist1, edges1 = np.histogram(measured1, density=True, bins=50)
    # NOTE(review): the original also computed `stats.gaussian_kde(measured1)`
    # here, but `stats` is never imported in this module (only `scipy.stats`
    # via the `scipy` name) and the result was unused, so that call — a
    # NameError waiting to happen — has been removed.
    dDist = dict(hist_1=hist1,
                 edges_1_right=edges1[1:],
                 edges_1_left=edges1[:-1])
    sourceDis.data = dDist
    update()
# def update_notes(attrname, old, new):
# t1, t2 = ticker1.value, ticker2.value.split(' _')[0]
# notesTxtFile=DATA_DIR+'/WT-MUT-'+t1+'_'+t2+'/observerNotes.txt'
# fileW = open(notesTxtFile,"w")
# # print(new,inputText.value)
# # inputText.value=str(new)
# fileW.write(new)
# fileW.close()
# def update(selected=None):
# print(hover.dataspecs())
# print('hello')
# fS=create_figure()
# pp=make_plot()
# t1, t2 = ticker1.value, ticker2.value.split(' _')[0]
# # print(t1,t2)
# stats2.text = str(load_ticker_info(t1,t2))
# inputText.value=str(load_observerNotes(t1,t2))
# series.children=[column(widgets, pp)]+[column(row(stats2,inputText),fS)]
# inputText.on_change('value', update_notes)
# hover.on_change('tooltips', ticker0_change)
def update():
    """Tap callback for the label-colored scatter `p`.

    Rebuilds the five-channel image strip for the first currently selected
    point in `source` and swaps it into the document layout.  Reads the
    module-level `source`, `ticker1`, `p`, `p2`, `p1` and `series` objects.
    """
    indexActive = source.selected.indices
    # A tap on empty canvas clears the selection; bail out instead of
    # raising IndexError on indexActive[0] and keep the current layout.
    if not indexActive:
        return
    idx = indexActive[0]
    imageInfoList = [source.data['Metadata_Plate'][idx],
                     source.data['Metadata_Well'][idx],
                     source.data['Metadata_FieldID'][idx],
                     source.data['ObjectNumber'][idx]]
    fS = create_figure(imageInfoList)
    series.children = [ticker1] + [row(p, p2, p1)] + [fS]
def update2():
    """Tap callback for the cluster-colored scatter `p2`.

    Same as update() but reads the selection from `source3` (the
    cluster-colored data source).
    """
    indexActive = source3.selected.indices
    # Empty selection (tap on blank canvas): do nothing rather than raise
    # IndexError on indexActive[0].
    if not indexActive:
        return
    idx = indexActive[0]
    imageInfoList = [source3.data['Metadata_Plate'][idx],
                     source3.data['Metadata_Well'][idx],
                     source3.data['Metadata_FieldID'][idx],
                     source3.data['ObjectNumber'][idx]]
    fS = create_figure(imageInfoList)
    series.children = [ticker1] + [row(p, p2, p1)] + [fS]
# --- Wire up callbacks and assemble the Bokeh document. ---
# NOTE(review): `Tap` (bokeh.events) and the `ticker1` select widget are not
# defined in this portion of the file; presumably created earlier — confirm.
p.on_event(Tap, update)
p2.on_event(Tap, update2)
ticker1.on_change('value', ticker1_change)
# indexActive=source.selected.indices[0]
# Seed the image strip with row 0 so the layout is complete on first load.
indd=0
imageInfoList0=[d['Metadata_Plate'][indd],d['Metadata_Well'][indd],d['Metadata_FieldID'][indd],d['ObjectNumber'][indd]]
fS=create_figure(imageInfoList0)
p.add_tools(hover)
p2.add_tools(hover)
# Document layout: widget, then the three linked plots, then the image strip.
plotss = row(p,p2,p1)
series = column(children=[ticker1]+[plotss]+[fS])
# series2 = row(children=[widgets]+[fS])
# show(series)
# update()
curdoc().add_root(series)
|
{"hexsha": "ea6d99e5e3938af4d57e23888c7da98f350d333e", "size": 31423, "ext": "py", "lang": "Python", "max_stars_repo_path": "Web-Interfaces/singleCellVisBokeh2.py", "max_stars_repo_name": "broadinstitute/SingleCell_Morphological_Analysis", "max_stars_repo_head_hexsha": "b3685af5ba4ba12e8c767e03bb99c860bfba545c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Web-Interfaces/singleCellVisBokeh2.py", "max_issues_repo_name": "broadinstitute/SingleCell_Morphological_Analysis", "max_issues_repo_head_hexsha": "b3685af5ba4ba12e8c767e03bb99c860bfba545c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Web-Interfaces/singleCellVisBokeh2.py", "max_forks_repo_name": "broadinstitute/SingleCell_Morphological_Analysis", "max_forks_repo_head_hexsha": "b3685af5ba4ba12e8c767e03bb99c860bfba545c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.4089595376, "max_line_length": 232, "alphanum_fraction": 0.7265697101, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 9221}
|
import numpy as np
def strong_selection_weak_mutation(fitness1, fitness2):
    """Fixation probability under the strong-selection, weak-mutation model.

    The relative selection coefficient is s = (fitness2 - fitness1) / fitness1.
    Deleterious and neutral moves (s <= 0) are clamped to s = 0 and therefore
    never fix (probability 0); beneficial moves fix with probability
    1 - exp(-s).
    """
    selection_coeff = max((fitness2 - fitness1) / fitness1, 0)
    return 1 - np.exp(-selection_coeff)
|
{"hexsha": "897706af8f12b49a056bf450bd1d251a5d3d1dd9", "size": 228, "ext": "py", "lang": "Python", "max_stars_repo_path": "gpgraph/models.py", "max_stars_repo_name": "lgoldbach/gpgraph", "max_stars_repo_head_hexsha": "f57d98b3cd02083fb4f0f0330ff3222e7e6e584c", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "gpgraph/models.py", "max_issues_repo_name": "lgoldbach/gpgraph", "max_issues_repo_head_hexsha": "f57d98b3cd02083fb4f0f0330ff3222e7e6e584c", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "gpgraph/models.py", "max_forks_repo_name": "lgoldbach/gpgraph", "max_forks_repo_head_hexsha": "f57d98b3cd02083fb4f0f0330ff3222e7e6e584c", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.3333333333, "max_line_length": 55, "alphanum_fraction": 0.649122807, "include": true, "reason": "import numpy", "num_tokens": 68}
|
One-on-One Tutoring and Educational Mapping provides one-on-one tutoring in a supervised environment for all grade levels and all subjects. They offer assistance with homework, school projects, and test preparation. They also offer college planning services, SAT prep, curriculum planning, major/career selection, college selection, and help with the admissions/scholarship essays.
Classes offered
SAT Class. The next class is scheduled for January 4th – March 1st (Saturdays). Sign up now. Class space is limited!!! Classes will be held from 1:00pm to 4:00pm. Textbook is included.
Be ready for the March 8th SAT test.
20071102 00:36:58 nbsp I am currently going here for some extra help in Chem 2A and Math 16A and have so far found their services to be great! The tutors are extremely knowledgeable and personable and after 2 sessions, I know that I will stick with them for a while. Prices are $30/hour for a college student tutor, $35/hour for a tutor with a BA or above. If you are looking for a tutor, I recommend One on One. :) Users/kthrnngo
20090602 21:41:57 nbsp I highly recommend One on One. My daughter has been tutored there once a week for 2 school years. I can't say enough about the wonderful Math tutor who helps her with her homework and test preparation. The tutor is reliable (never missed a week!) and very accommodating. She has moved the weekly appointment to better fit my daughter's school schedule or occasional need for a different day. She reinforces the classroom learning and provides individualized support for specific concepts. The owners are professional, personable and very client-focused. They strive to match tutor to student based on skills and personalities. In addition, they will do their best to accommodate short-notice requests for special help for new or established students. I have nothing but positive feedback for One on One. Users/JGQ
|
{"hexsha": "290fb3134939d327ef9ed596c7bab387dc592bff", "size": 1896, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/One-on-One_Tutoring_and_Educational_Mapping.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/One-on-One_Tutoring_and_Educational_Mapping.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/One-on-One_Tutoring_and_Educational_Mapping.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 90.2857142857, "max_line_length": 831, "alphanum_fraction": 0.7979957806, "num_tokens": 423}
|
[STATEMENT]
lemma reachable_disconnect_netmap [elim]:
assumes "s \<in> reachable (pnet np n) TT"
and "(s, disconnect(i, i'), s') \<in> trans (pnet np n)"
shows "netmap s' = netmap s"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. netmap s' = netmap s
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
s \<in> reachable (pnet np n) TT
(s, disconnect(i, i'), s') \<in> automaton.trans (pnet np n)
goal (1 subgoal):
1. netmap s' = netmap s
[PROOF STEP]
proof (induction n arbitrary: s s')
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>ia R s s'. \<lbrakk>s \<in> reachable (pnet np \<langle>ia; R\<rangle>) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np \<langle>ia; R\<rangle>)\<rbrakk> \<Longrightarrow> netmap s' = netmap s
2. \<And>n1 n2 s s'. \<lbrakk>\<And>s s'. \<lbrakk>s \<in> reachable (pnet np n1) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np n1)\<rbrakk> \<Longrightarrow> netmap s' = netmap s; \<And>s s'. \<lbrakk>s \<in> reachable (pnet np n2) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np n2)\<rbrakk> \<Longrightarrow> netmap s' = netmap s; s \<in> reachable (pnet np (n1 \<parallel> n2)) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np (n1 \<parallel> n2))\<rbrakk> \<Longrightarrow> netmap s' = netmap s
[PROOF STEP]
fix ii R\<^sub>i s s'
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>ia R s s'. \<lbrakk>s \<in> reachable (pnet np \<langle>ia; R\<rangle>) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np \<langle>ia; R\<rangle>)\<rbrakk> \<Longrightarrow> netmap s' = netmap s
2. \<And>n1 n2 s s'. \<lbrakk>\<And>s s'. \<lbrakk>s \<in> reachable (pnet np n1) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np n1)\<rbrakk> \<Longrightarrow> netmap s' = netmap s; \<And>s s'. \<lbrakk>s \<in> reachable (pnet np n2) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np n2)\<rbrakk> \<Longrightarrow> netmap s' = netmap s; s \<in> reachable (pnet np (n1 \<parallel> n2)) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np (n1 \<parallel> n2))\<rbrakk> \<Longrightarrow> netmap s' = netmap s
[PROOF STEP]
assume sr: "s \<in> reachable (pnet np \<langle>ii; R\<^sub>i\<rangle>) TT"
and "(s, disconnect(i, i'), s') \<in> trans (pnet np \<langle>ii; R\<^sub>i\<rangle>)"
[PROOF STATE]
proof (state)
this:
s \<in> reachable (pnet np \<langle>ii; R\<^sub>i\<rangle>) TT
(s, disconnect(i, i'), s') \<in> automaton.trans (pnet np \<langle>ii; R\<^sub>i\<rangle>)
goal (2 subgoals):
1. \<And>ia R s s'. \<lbrakk>s \<in> reachable (pnet np \<langle>ia; R\<rangle>) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np \<langle>ia; R\<rangle>)\<rbrakk> \<Longrightarrow> netmap s' = netmap s
2. \<And>n1 n2 s s'. \<lbrakk>\<And>s s'. \<lbrakk>s \<in> reachable (pnet np n1) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np n1)\<rbrakk> \<Longrightarrow> netmap s' = netmap s; \<And>s s'. \<lbrakk>s \<in> reachable (pnet np n2) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np n2)\<rbrakk> \<Longrightarrow> netmap s' = netmap s; s \<in> reachable (pnet np (n1 \<parallel> n2)) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np (n1 \<parallel> n2))\<rbrakk> \<Longrightarrow> netmap s' = netmap s
[PROOF STEP]
from this(2)
[PROOF STATE]
proof (chain)
picking this:
(s, disconnect(i, i'), s') \<in> automaton.trans (pnet np \<langle>ii; R\<^sub>i\<rangle>)
[PROOF STEP]
have tr: "(s, disconnect(i, i'), s') \<in> node_sos (trans (np ii))"
[PROOF STATE]
proof (prove)
using this:
(s, disconnect(i, i'), s') \<in> automaton.trans (pnet np \<langle>ii; R\<^sub>i\<rangle>)
goal (1 subgoal):
1. (s, disconnect(i, i'), s') \<in> node_sos (automaton.trans (np ii))
[PROOF STEP]
..
[PROOF STATE]
proof (state)
this:
(s, disconnect(i, i'), s') \<in> node_sos (automaton.trans (np ii))
goal (2 subgoals):
1. \<And>ia R s s'. \<lbrakk>s \<in> reachable (pnet np \<langle>ia; R\<rangle>) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np \<langle>ia; R\<rangle>)\<rbrakk> \<Longrightarrow> netmap s' = netmap s
2. \<And>n1 n2 s s'. \<lbrakk>\<And>s s'. \<lbrakk>s \<in> reachable (pnet np n1) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np n1)\<rbrakk> \<Longrightarrow> netmap s' = netmap s; \<And>s s'. \<lbrakk>s \<in> reachable (pnet np n2) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np n2)\<rbrakk> \<Longrightarrow> netmap s' = netmap s; s \<in> reachable (pnet np (n1 \<parallel> n2)) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np (n1 \<parallel> n2))\<rbrakk> \<Longrightarrow> netmap s' = netmap s
[PROOF STEP]
from sr
[PROOF STATE]
proof (chain)
picking this:
s \<in> reachable (pnet np \<langle>ii; R\<^sub>i\<rangle>) TT
[PROOF STEP]
obtain p R where "s = NodeS ii p R"
[PROOF STATE]
proof (prove)
using this:
s \<in> reachable (pnet np \<langle>ii; R\<^sub>i\<rangle>) TT
goal (1 subgoal):
1. (\<And>p R. s = NodeS ii p R \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (metis net_node_reachable_is_node)
[PROOF STATE]
proof (state)
this:
s = NodeS ii p R
goal (2 subgoals):
1. \<And>ia R s s'. \<lbrakk>s \<in> reachable (pnet np \<langle>ia; R\<rangle>) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np \<langle>ia; R\<rangle>)\<rbrakk> \<Longrightarrow> netmap s' = netmap s
2. \<And>n1 n2 s s'. \<lbrakk>\<And>s s'. \<lbrakk>s \<in> reachable (pnet np n1) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np n1)\<rbrakk> \<Longrightarrow> netmap s' = netmap s; \<And>s s'. \<lbrakk>s \<in> reachable (pnet np n2) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np n2)\<rbrakk> \<Longrightarrow> netmap s' = netmap s; s \<in> reachable (pnet np (n1 \<parallel> n2)) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np (n1 \<parallel> n2))\<rbrakk> \<Longrightarrow> netmap s' = netmap s
[PROOF STEP]
with tr
[PROOF STATE]
proof (chain)
picking this:
(s, disconnect(i, i'), s') \<in> node_sos (automaton.trans (np ii))
s = NodeS ii p R
[PROOF STEP]
show "netmap s' = netmap s"
[PROOF STATE]
proof (prove)
using this:
(s, disconnect(i, i'), s') \<in> node_sos (automaton.trans (np ii))
s = NodeS ii p R
goal (1 subgoal):
1. netmap s' = netmap s
[PROOF STEP]
by (auto elim!: node_sos.cases)
[PROOF STATE]
proof (state)
this:
netmap s' = netmap s
goal (1 subgoal):
1. \<And>n1 n2 s s'. \<lbrakk>\<And>s s'. \<lbrakk>s \<in> reachable (pnet np n1) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np n1)\<rbrakk> \<Longrightarrow> netmap s' = netmap s; \<And>s s'. \<lbrakk>s \<in> reachable (pnet np n2) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np n2)\<rbrakk> \<Longrightarrow> netmap s' = netmap s; s \<in> reachable (pnet np (n1 \<parallel> n2)) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np (n1 \<parallel> n2))\<rbrakk> \<Longrightarrow> netmap s' = netmap s
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>n1 n2 s s'. \<lbrakk>\<And>s s'. \<lbrakk>s \<in> reachable (pnet np n1) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np n1)\<rbrakk> \<Longrightarrow> netmap s' = netmap s; \<And>s s'. \<lbrakk>s \<in> reachable (pnet np n2) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np n2)\<rbrakk> \<Longrightarrow> netmap s' = netmap s; s \<in> reachable (pnet np (n1 \<parallel> n2)) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np (n1 \<parallel> n2))\<rbrakk> \<Longrightarrow> netmap s' = netmap s
[PROOF STEP]
fix p1 p2 s s'
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>n1 n2 s s'. \<lbrakk>\<And>s s'. \<lbrakk>s \<in> reachable (pnet np n1) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np n1)\<rbrakk> \<Longrightarrow> netmap s' = netmap s; \<And>s s'. \<lbrakk>s \<in> reachable (pnet np n2) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np n2)\<rbrakk> \<Longrightarrow> netmap s' = netmap s; s \<in> reachable (pnet np (n1 \<parallel> n2)) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np (n1 \<parallel> n2))\<rbrakk> \<Longrightarrow> netmap s' = netmap s
[PROOF STEP]
assume IH1: "\<And>s s'. \<lbrakk> s \<in> reachable (pnet np p1) TT;
(s, disconnect(i, i'), s') \<in> trans (pnet np p1) \<rbrakk> \<Longrightarrow> netmap s' = netmap s"
and IH2: "\<And>s s'. \<lbrakk> s \<in> reachable (pnet np p2) TT;
(s, disconnect(i, i'), s') \<in> trans (pnet np p2) \<rbrakk> \<Longrightarrow> netmap s' = netmap s"
and sr: "s \<in> reachable (pnet np (p1 \<parallel> p2)) TT"
and tr: "(s, disconnect(i, i'), s') \<in> trans (pnet np (p1 \<parallel> p2))"
[PROOF STATE]
proof (state)
this:
\<lbrakk>?s \<in> reachable (pnet np p1) TT; (?s, disconnect(i, i'), ?s') \<in> automaton.trans (pnet np p1)\<rbrakk> \<Longrightarrow> netmap ?s' = netmap ?s
\<lbrakk>?s \<in> reachable (pnet np p2) TT; (?s, disconnect(i, i'), ?s') \<in> automaton.trans (pnet np p2)\<rbrakk> \<Longrightarrow> netmap ?s' = netmap ?s
s \<in> reachable (pnet np (p1 \<parallel> p2)) TT
(s, disconnect(i, i'), s') \<in> automaton.trans (pnet np (p1 \<parallel> p2))
goal (1 subgoal):
1. \<And>n1 n2 s s'. \<lbrakk>\<And>s s'. \<lbrakk>s \<in> reachable (pnet np n1) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np n1)\<rbrakk> \<Longrightarrow> netmap s' = netmap s; \<And>s s'. \<lbrakk>s \<in> reachable (pnet np n2) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np n2)\<rbrakk> \<Longrightarrow> netmap s' = netmap s; s \<in> reachable (pnet np (n1 \<parallel> n2)) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np (n1 \<parallel> n2))\<rbrakk> \<Longrightarrow> netmap s' = netmap s
[PROOF STEP]
from tr
[PROOF STATE]
proof (chain)
picking this:
(s, disconnect(i, i'), s') \<in> automaton.trans (pnet np (p1 \<parallel> p2))
[PROOF STEP]
have "(s, disconnect(i, i'), s') \<in> pnet_sos (trans (pnet np p1)) (trans (pnet np p2))"
[PROOF STATE]
proof (prove)
using this:
(s, disconnect(i, i'), s') \<in> automaton.trans (pnet np (p1 \<parallel> p2))
goal (1 subgoal):
1. (s, disconnect(i, i'), s') \<in> pnet_sos (automaton.trans (pnet np p1)) (automaton.trans (pnet np p2))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
(s, disconnect(i, i'), s') \<in> pnet_sos (automaton.trans (pnet np p1)) (automaton.trans (pnet np p2))
goal (1 subgoal):
1. \<And>n1 n2 s s'. \<lbrakk>\<And>s s'. \<lbrakk>s \<in> reachable (pnet np n1) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np n1)\<rbrakk> \<Longrightarrow> netmap s' = netmap s; \<And>s s'. \<lbrakk>s \<in> reachable (pnet np n2) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np n2)\<rbrakk> \<Longrightarrow> netmap s' = netmap s; s \<in> reachable (pnet np (n1 \<parallel> n2)) TT; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np (n1 \<parallel> n2))\<rbrakk> \<Longrightarrow> netmap s' = netmap s
[PROOF STEP]
thus "netmap s' = netmap s"
[PROOF STATE]
proof (prove)
using this:
(s, disconnect(i, i'), s') \<in> pnet_sos (automaton.trans (pnet np p1)) (automaton.trans (pnet np p2))
goal (1 subgoal):
1. netmap s' = netmap s
[PROOF STEP]
proof cases
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<And>s s' t. \<lbrakk>s = SubnetS s t; disconnect(i, i') = \<tau>; s' = SubnetS s' t; (s, \<tau>, s') \<in> automaton.trans (pnet np p1)\<rbrakk> \<Longrightarrow> netmap s' = netmap s
2. \<And>t t' s. \<lbrakk>s = SubnetS s t; disconnect(i, i') = \<tau>; s' = SubnetS s t'; (t, \<tau>, t') \<in> automaton.trans (pnet np p2)\<rbrakk> \<Longrightarrow> netmap s' = netmap s
3. \<And>s s' t t'. \<lbrakk>s = SubnetS s t; s' = SubnetS s' t'; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np p1); (t, disconnect(i, i'), t') \<in> automaton.trans (pnet np p2)\<rbrakk> \<Longrightarrow> netmap s' = netmap s
[PROOF STEP]
fix s1 s1' s2 s2'
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. \<And>s s' t. \<lbrakk>s = SubnetS s t; disconnect(i, i') = \<tau>; s' = SubnetS s' t; (s, \<tau>, s') \<in> automaton.trans (pnet np p1)\<rbrakk> \<Longrightarrow> netmap s' = netmap s
2. \<And>t t' s. \<lbrakk>s = SubnetS s t; disconnect(i, i') = \<tau>; s' = SubnetS s t'; (t, \<tau>, t') \<in> automaton.trans (pnet np p2)\<rbrakk> \<Longrightarrow> netmap s' = netmap s
3. \<And>s s' t t'. \<lbrakk>s = SubnetS s t; s' = SubnetS s' t'; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np p1); (t, disconnect(i, i'), t') \<in> automaton.trans (pnet np p2)\<rbrakk> \<Longrightarrow> netmap s' = netmap s
[PROOF STEP]
assume "s = SubnetS s1 s2"
and "s' = SubnetS s1' s2'"
and tr1: "(s1, disconnect(i, i'), s1') \<in> trans (pnet np p1)"
and tr2: "(s2, disconnect(i, i'), s2') \<in> trans (pnet np p2)"
[PROOF STATE]
proof (state)
this:
s = SubnetS s1 s2
s' = SubnetS s1' s2'
(s1, disconnect(i, i'), s1') \<in> automaton.trans (pnet np p1)
(s2, disconnect(i, i'), s2') \<in> automaton.trans (pnet np p2)
goal (3 subgoals):
1. \<And>s s' t. \<lbrakk>s = SubnetS s t; disconnect(i, i') = \<tau>; s' = SubnetS s' t; (s, \<tau>, s') \<in> automaton.trans (pnet np p1)\<rbrakk> \<Longrightarrow> netmap s' = netmap s
2. \<And>t t' s. \<lbrakk>s = SubnetS s t; disconnect(i, i') = \<tau>; s' = SubnetS s t'; (t, \<tau>, t') \<in> automaton.trans (pnet np p2)\<rbrakk> \<Longrightarrow> netmap s' = netmap s
3. \<And>s s' t t'. \<lbrakk>s = SubnetS s t; s' = SubnetS s' t'; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np p1); (t, disconnect(i, i'), t') \<in> automaton.trans (pnet np p2)\<rbrakk> \<Longrightarrow> netmap s' = netmap s
[PROOF STEP]
from this(1) and sr
[PROOF STATE]
proof (chain)
picking this:
s = SubnetS s1 s2
s \<in> reachable (pnet np (p1 \<parallel> p2)) TT
[PROOF STEP]
have "SubnetS s1 s2 \<in> reachable (pnet np (p1 \<parallel> p2)) TT"
[PROOF STATE]
proof (prove)
using this:
s = SubnetS s1 s2
s \<in> reachable (pnet np (p1 \<parallel> p2)) TT
goal (1 subgoal):
1. SubnetS s1 s2 \<in> reachable (pnet np (p1 \<parallel> p2)) TT
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
SubnetS s1 s2 \<in> reachable (pnet np (p1 \<parallel> p2)) TT
goal (3 subgoals):
1. \<And>s s' t. \<lbrakk>s = SubnetS s t; disconnect(i, i') = \<tau>; s' = SubnetS s' t; (s, \<tau>, s') \<in> automaton.trans (pnet np p1)\<rbrakk> \<Longrightarrow> netmap s' = netmap s
2. \<And>t t' s. \<lbrakk>s = SubnetS s t; disconnect(i, i') = \<tau>; s' = SubnetS s t'; (t, \<tau>, t') \<in> automaton.trans (pnet np p2)\<rbrakk> \<Longrightarrow> netmap s' = netmap s
3. \<And>s s' t t'. \<lbrakk>s = SubnetS s t; s' = SubnetS s' t'; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np p1); (t, disconnect(i, i'), t') \<in> automaton.trans (pnet np p2)\<rbrakk> \<Longrightarrow> netmap s' = netmap s
[PROOF STEP]
hence sr1: "s1 \<in> reachable (pnet np p1) TT"
and sr2: "s2 \<in> reachable (pnet np p2) TT"
[PROOF STATE]
proof (prove)
using this:
SubnetS s1 s2 \<in> reachable (pnet np (p1 \<parallel> p2)) TT
goal (1 subgoal):
1. s1 \<in> reachable (pnet np p1) TT &&& s2 \<in> reachable (pnet np p2) TT
[PROOF STEP]
by (auto intro: subnet_reachable)
[PROOF STATE]
proof (state)
this:
s1 \<in> reachable (pnet np p1) TT
s2 \<in> reachable (pnet np p2) TT
goal (3 subgoals):
1. \<And>s s' t. \<lbrakk>s = SubnetS s t; disconnect(i, i') = \<tau>; s' = SubnetS s' t; (s, \<tau>, s') \<in> automaton.trans (pnet np p1)\<rbrakk> \<Longrightarrow> netmap s' = netmap s
2. \<And>t t' s. \<lbrakk>s = SubnetS s t; disconnect(i, i') = \<tau>; s' = SubnetS s t'; (t, \<tau>, t') \<in> automaton.trans (pnet np p2)\<rbrakk> \<Longrightarrow> netmap s' = netmap s
3. \<And>s s' t t'. \<lbrakk>s = SubnetS s t; s' = SubnetS s' t'; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np p1); (t, disconnect(i, i'), t') \<in> automaton.trans (pnet np p2)\<rbrakk> \<Longrightarrow> netmap s' = netmap s
[PROOF STEP]
from sr1 tr1
[PROOF STATE]
proof (chain)
picking this:
s1 \<in> reachable (pnet np p1) TT
(s1, disconnect(i, i'), s1') \<in> automaton.trans (pnet np p1)
[PROOF STEP]
have "netmap s1' = netmap s1"
[PROOF STATE]
proof (prove)
using this:
s1 \<in> reachable (pnet np p1) TT
(s1, disconnect(i, i'), s1') \<in> automaton.trans (pnet np p1)
goal (1 subgoal):
1. netmap s1' = netmap s1
[PROOF STEP]
by (rule IH1)
[PROOF STATE]
proof (state)
this:
netmap s1' = netmap s1
goal (3 subgoals):
1. \<And>s s' t. \<lbrakk>s = SubnetS s t; disconnect(i, i') = \<tau>; s' = SubnetS s' t; (s, \<tau>, s') \<in> automaton.trans (pnet np p1)\<rbrakk> \<Longrightarrow> netmap s' = netmap s
2. \<And>t t' s. \<lbrakk>s = SubnetS s t; disconnect(i, i') = \<tau>; s' = SubnetS s t'; (t, \<tau>, t') \<in> automaton.trans (pnet np p2)\<rbrakk> \<Longrightarrow> netmap s' = netmap s
3. \<And>s s' t t'. \<lbrakk>s = SubnetS s t; s' = SubnetS s' t'; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np p1); (t, disconnect(i, i'), t') \<in> automaton.trans (pnet np p2)\<rbrakk> \<Longrightarrow> netmap s' = netmap s
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
netmap s1' = netmap s1
goal (3 subgoals):
1. \<And>s s' t. \<lbrakk>s = SubnetS s t; disconnect(i, i') = \<tau>; s' = SubnetS s' t; (s, \<tau>, s') \<in> automaton.trans (pnet np p1)\<rbrakk> \<Longrightarrow> netmap s' = netmap s
2. \<And>t t' s. \<lbrakk>s = SubnetS s t; disconnect(i, i') = \<tau>; s' = SubnetS s t'; (t, \<tau>, t') \<in> automaton.trans (pnet np p2)\<rbrakk> \<Longrightarrow> netmap s' = netmap s
3. \<And>s s' t t'. \<lbrakk>s = SubnetS s t; s' = SubnetS s' t'; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np p1); (t, disconnect(i, i'), t') \<in> automaton.trans (pnet np p2)\<rbrakk> \<Longrightarrow> netmap s' = netmap s
[PROOF STEP]
from sr2 tr2
[PROOF STATE]
proof (chain)
picking this:
s2 \<in> reachable (pnet np p2) TT
(s2, disconnect(i, i'), s2') \<in> automaton.trans (pnet np p2)
[PROOF STEP]
have "netmap s2' = netmap s2"
[PROOF STATE]
proof (prove)
using this:
s2 \<in> reachable (pnet np p2) TT
(s2, disconnect(i, i'), s2') \<in> automaton.trans (pnet np p2)
goal (1 subgoal):
1. netmap s2' = netmap s2
[PROOF STEP]
by (rule IH2)
[PROOF STATE]
proof (state)
this:
netmap s2' = netmap s2
goal (3 subgoals):
1. \<And>s s' t. \<lbrakk>s = SubnetS s t; disconnect(i, i') = \<tau>; s' = SubnetS s' t; (s, \<tau>, s') \<in> automaton.trans (pnet np p1)\<rbrakk> \<Longrightarrow> netmap s' = netmap s
2. \<And>t t' s. \<lbrakk>s = SubnetS s t; disconnect(i, i') = \<tau>; s' = SubnetS s t'; (t, \<tau>, t') \<in> automaton.trans (pnet np p2)\<rbrakk> \<Longrightarrow> netmap s' = netmap s
3. \<And>s s' t t'. \<lbrakk>s = SubnetS s t; s' = SubnetS s' t'; (s, disconnect(i, i'), s') \<in> automaton.trans (pnet np p1); (t, disconnect(i, i'), t') \<in> automaton.trans (pnet np p2)\<rbrakk> \<Longrightarrow> netmap s' = netmap s
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
netmap s1' = netmap s1
netmap s2' = netmap s2
[PROOF STEP]
show "netmap s' = netmap s"
[PROOF STATE]
proof (prove)
using this:
netmap s1' = netmap s1
netmap s2' = netmap s2
goal (1 subgoal):
1. netmap s' = netmap s
[PROOF STEP]
using \<open>s = SubnetS s1 s2\<close> and \<open>s' = SubnetS s1' s2'\<close>
[PROOF STATE]
proof (prove)
using this:
netmap s1' = netmap s1
netmap s2' = netmap s2
s = SubnetS s1 s2
s' = SubnetS s1' s2'
goal (1 subgoal):
1. netmap s' = netmap s
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
netmap s' = netmap s
goal (2 subgoals):
1. \<And>s s' t. \<lbrakk>s = SubnetS s t; disconnect(i, i') = \<tau>; s' = SubnetS s' t; (s, \<tau>, s') \<in> automaton.trans (pnet np p1)\<rbrakk> \<Longrightarrow> netmap s' = netmap s
2. \<And>t t' s. \<lbrakk>s = SubnetS s t; disconnect(i, i') = \<tau>; s' = SubnetS s t'; (t, \<tau>, t') \<in> automaton.trans (pnet np p2)\<rbrakk> \<Longrightarrow> netmap s' = netmap s
[PROOF STEP]
qed simp_all
[PROOF STATE]
proof (state)
this:
netmap s' = netmap s
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 8716, "file": "AWN_Pnet", "length": 41}
|
from sklearn.svm import SVC
import numpy as np
class SVMNoneLinear:
    """One-vs-rest multi-class classifier built from binary sklearn SVCs.

    For every distinct label in the training data a separate binary SVC is
    fitted to separate that label from all the others (relabelled "other").
    """

    def __init__(self, c, kernel, degree=3, gamma='auto', decision_function_shape='ovr'):
        '''
        :param c: SVC regularisation parameter C
        :param kernel: kernel name forwarded to sklearn.svm.SVC
        :param degree: polynomial degree (used only by the 'poly' kernel)
        :param gamma: kernel coefficient forwarded to SVC
        :param decision_function_shape: forwarded to SVC ('ovr' or 'ovo')
        '''
        self.c = c
        self.kernel = kernel
        self.degree = degree
        self.gamma = gamma
        self.decision_function_shape = decision_function_shape
        # class label -> fitted binary SVC (that label vs. "other")
        self.classifiers = {}

    def fit(self, features, y):
        '''
        Train one binary SVC per distinct label in ``y``.

        :param features: 2-D array-like of training samples
        :param y: iterable of class labels, one per sample
        :return: None
        '''
        for class_name in set(y):
            # Bug fix: decision_function_shape was stored but never passed
            # to SVC; forward it so the constructor argument takes effect.
            svm = SVC(kernel=self.kernel, C=self.c, degree=self.degree,
                      gamma=self.gamma,
                      decision_function_shape=self.decision_function_shape)
            y_class = [label if label == class_name else "other" for label in y]
            self.classifiers[class_name] = svm.fit(features, y_class)

    def predict(self, features):
        '''
        Predict a class label for each sample.

        :param features: 2-D array-like of samples
        :return: numpy array of predicted labels
        '''
        predictions = []
        for sample in features:
            i_values = []
            classes = []
            for class_name, cls in self.classifiers.items():
                classes.append(class_name)
                i_values.append(cls.decision_function(np.array(sample).reshape(1, -1)))
            # NOTE(review): argmin relies on the per-class SVC sign convention
            # (class label sorting before "other") — confirm for your labels.
            predictions.append(classes[np.argmin(i_values)])
        return np.asarray(predictions)

    def score(self, features, y):
        '''
        Mean accuracy of ``predict`` on the given data.

        :param features: 2-D array-like of samples
        :param y: true labels, one per sample
        :return: fraction of correctly predicted samples
        '''
        predictions = self.predict(features)
        correct = sum([1 if prediction == y[i] else 0 for i, prediction in enumerate(predictions)])
        score = correct / predictions.shape[0]
        return score

    def decision_function(self, features):
        '''
        Per-class decision values for each sample.

        :param features: 2-D array-like of samples
        :return: array with one row per sample, one column per class
        '''
        predictions = []
        for sample in features:
            i_values = []
            classes = []
            for class_name, cls in self.classifiers.items():
                classes.append(class_name)
                i_values.append(cls.decision_function(np.array(sample).reshape(1, -1)))
            # Bug fix: the original appended `classes` (the label names),
            # discarding the computed decision values entirely.
            predictions.append(i_values)
        return np.asarray(predictions)
|
{"hexsha": "98a27fa95d94bf48720910e7f7c4c8c265af349f", "size": 2288, "ext": "py", "lang": "Python", "max_stars_repo_path": "CVPipelines/SVMNoneLinear.py", "max_stars_repo_name": "houhashv/ComputerVisionTask1", "max_stars_repo_head_hexsha": "72647c1d3f3e720ecfc08d5506a9563909e0b5cb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "CVPipelines/SVMNoneLinear.py", "max_issues_repo_name": "houhashv/ComputerVisionTask1", "max_issues_repo_head_hexsha": "72647c1d3f3e720ecfc08d5506a9563909e0b5cb", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "CVPipelines/SVMNoneLinear.py", "max_forks_repo_name": "houhashv/ComputerVisionTask1", "max_forks_repo_head_hexsha": "72647c1d3f3e720ecfc08d5506a9563909e0b5cb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.3404255319, "max_line_length": 99, "alphanum_fraction": 0.5511363636, "include": true, "reason": "import numpy", "num_tokens": 466}
|
import cv2
import numpy as np
from datetime import datetime, timedelta
def get_black_background(height=500, width=500):
    """Return an all-black (zero-valued) grayscale canvas.

    Generalized from the original hard-coded 500x500: the defaults preserve
    the old behavior exactly.

    :param height: canvas height in pixels (default 500)
    :param width: canvas width in pixels (default 500)
    :return: (height, width) numpy array of float zeros
    """
    return np.zeros((height, width))
# The date itself is arbitrary; you can choose any date.
start_time = datetime.strptime("2019-01-01", "%Y-%m-%d")
# One full day: the loop below steps minute by minute from start to end.
end_time = start_time + timedelta(days=1)
def convert_time_to_string(dt):
    """Format the time of day of ``dt`` as zero-padded ``HH:MM``."""
    return f"{dt.hour:02}:{dt.minute:02}"
def generate_image_with_text(text):
    """Draw the caption "TIME" and ``text`` onto a fresh black canvas."""
    canvas = get_black_background()
    rows, cols = canvas.shape[0], canvas.shape[1]
    font = cv2.FONT_HERSHEY_SIMPLEX
    # Layout constants (fractions of the canvas size) copied from the
    # original positioning.
    cv2.putText(canvas, "TIME", (int(rows * 0.225), int(cols * 0.4)),
                font, 4, (255, 255, 0), 2, cv2.LINE_AA)
    cv2.putText(canvas, text, (int(rows * 0.15), int(cols * 0.7)),
                font, 4, (255, 255, 0), 2, cv2.LINE_AA)
    return canvas
# Render one frame per minute of the day.
# Bug fix: the original wrote every frame to the same constant path
# "time_images/1.jpg"; the dead `.replace(':', '')` on that constant strongly
# suggests the file name was meant to contain the HH:MM text. Each frame now
# gets its own file (colon stripped so the name is filesystem-safe).
while start_time < end_time:
    text = convert_time_to_string(start_time)
    image = generate_image_with_text(text)
    cv2.imwrite(f"time_images/{text}.jpg".replace(':', ''), image)
    start_time += timedelta(minutes=1)
|
{"hexsha": "f1f49058309f97f9490b89409eaa07c100d49dd8", "size": 975, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/generators/generate_time_images.py", "max_stars_repo_name": "Sehin/teleg-upd-photo", "max_stars_repo_head_hexsha": "1e5c817cadd5652697ed97dc2d2aa70d84b3f17b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-07-24T04:54:53.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-25T13:04:42.000Z", "max_issues_repo_path": "src/generators/generate_time_images.py", "max_issues_repo_name": "Sehin/teleg-upd-photo", "max_issues_repo_head_hexsha": "1e5c817cadd5652697ed97dc2d2aa70d84b3f17b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2021-03-19T01:43:47.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-11T23:52:02.000Z", "max_forks_repo_path": "src/generators/generate_time_images.py", "max_forks_repo_name": "Sehin/teleg-upd-photo", "max_forks_repo_head_hexsha": "1e5c817cadd5652697ed97dc2d2aa70d84b3f17b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-07-09T14:44:41.000Z", "max_forks_repo_forks_event_max_datetime": "2019-07-09T14:44:41.000Z", "avg_line_length": 36.1111111111, "max_line_length": 124, "alphanum_fraction": 0.6923076923, "include": true, "reason": "import numpy", "num_tokens": 298}
|
import numpy as np
import skimage
import skimage.io
import skimage.morphology
import numba
def to_uint8(img):
    """Clamp values to the displayable [0, 255] range and cast to uint8."""
    clamped = np.clip(img, 0, 255)
    return clamped.astype(np.uint8)
def laplacian(img):
    """Discrete 5-point Laplacian with periodic (wrap-around) boundaries."""
    neighbour_sum = np.zeros_like(img)
    for axis in (0, 1):
        for shift in (-1, 1):
            neighbour_sum = neighbour_sum + np.roll(img, shift, axis)
    return neighbour_sum - 4 * img
def laplacian_absmax(img1, img2):
    """Mixed-gradient Laplacian: per neighbour direction, keep whichever of
    the two images has the larger-magnitude finite difference (``img2`` on
    ties) and accumulate the winners like a plain Laplacian."""
    result = np.zeros_like(img1)
    for axis in (0, 1):
        for shift in (-1, 1):
            d1 = np.roll(img1, shift, axis) - img1
            d2 = np.roll(img2, shift, axis) - img2
            result = result + np.where(np.abs(d1) > np.abs(d2), d1, d2)
    return result
@numba.jit
def poisson1(mask, sol, rhs):
    """One in-place Gauss-Seidel sweep of the discrete Poisson equation.

    For every interior pixel where ``mask`` is true, each of the 3 channels
    of ``sol`` is replaced by the average of its 4 neighbours minus the
    right-hand side.  The sweep deliberately reads values that earlier
    iterations of the same sweep have already updated (Gauss-Seidel), so the
    statement/iteration order must not be changed.

    mask: 2-D boolean array selecting the pixels to relax.
    sol:  solution image, updated in place; last axis must have >= 3 channels.
    rhs:  right-hand side, same spatial shape as ``sol``.
    Border pixels (first/last row and column) are never touched.
    """
    assert sol.shape[:2] == mask.shape[:2] == rhs.shape[:2], 'Dimensions should be equal'
    for i in range(1, sol.shape[0] - 1):
        for j in range(1, sol.shape[1] - 1):
            if mask[i, j]:
                # Jacobi-style stencil evaluated in place (Gauss-Seidel).
                for c in range(3):
                    sol[i, j, c] = (sol[i - 1, j, c] + sol[i + 1, j, c] +
                                    sol[i, j - 1, c] + sol[i, j + 1, c] -
                                    rhs[i, j, c]) / 4
def poisson(n, mask, sol, rhs):
    """Run ``n`` Gauss-Seidel sweeps (poisson1) over ``sol`` in place and
    return the relaxed solution."""
    for _ in range(n):
        poisson1(mask, sol, rhs)
    return sol
# Load background and foreground as float images.
back = skimage.io.imread('back.png').astype(float)
fore = skimage.io.imread('fore.png').astype(float)
# Blend region: pixels where any foreground channel is non-zero, eroded by
# one pixel so the solver never reads outside the foreground.
mask = skimage.morphology.binary_erosion((fore != 0).any(axis=2),
                                         np.ones((3, 3)))
# Naive copy-paste composite, for comparison.
clone = back.copy()
clone[mask] = fore[mask]
skimage.io.imsave('clone.png', to_uint8(clone))
# Membrane interpolation: Laplace equation (zero right-hand side).
laplace = poisson(100, mask, back.copy(), np.zeros_like(back))
skimage.io.imsave('laplace.png', to_uint8(laplace))
# Classic gradient import: Poisson with the foreground's Laplacian.
imported = poisson(100, mask, back.copy(), laplacian(fore))
skimage.io.imsave('import.png', to_uint8(imported))
# Mixed gradients: per-direction absolute-max of fore/back differences.
mixed = poisson(100, mask, back.copy(), laplacian_absmax(fore, back))
skimage.io.imsave('mixed.png', to_uint8(mixed))
|
{"hexsha": "eefa7176b6115fe0faadce0a936f22e444d10c70", "size": 1949, "ext": "py", "lang": "Python", "max_stars_repo_path": "basic_example.py", "max_stars_repo_name": "Apollo-18/poie", "max_stars_repo_head_hexsha": "242c64cf4bfbb8667aa858a3b298529299fa36bc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "basic_example.py", "max_issues_repo_name": "Apollo-18/poie", "max_issues_repo_head_hexsha": "242c64cf4bfbb8667aa858a3b298529299fa36bc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "basic_example.py", "max_forks_repo_name": "Apollo-18/poie", "max_forks_repo_head_hexsha": "242c64cf4bfbb8667aa858a3b298529299fa36bc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.9846153846, "max_line_length": 89, "alphanum_fraction": 0.5464340688, "include": true, "reason": "import numpy,import numba", "num_tokens": 588}
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import pandas as pd
import numpy as np
import seaborn as sns
from lightfm.evaluation import precision_at_k, recall_at_k
def model_perf_plots(df):
    """Plot train/test traces of model performance metrics.

    Args:
        df (pandas.DataFrame): Dataframe in tidy format, with
            ['epoch', 'value', 'stage', 'metric'] columns

    Returns:
        object: seaborn FacetGrid holding the matplotlib axes
    """
    g = sns.FacetGrid(df, col="metric", hue="stage", col_wrap=2, sharey=False)
    g = g.map(sns.scatterplot, "epoch", "value").add_legend()
    # Bug fix: the docstring always promised the plot object but the
    # function returned None; return the grid so callers can customise it.
    return g
def compare_metric(df_list, metric="prec", stage="test"):
    """Combine several performance traces into one tidy dataframe.

    Args:
        df_list (list): List of dataframes
        metric (str): name of metric to be extracted, optional
        stage (str): name of model fitting stage to be extracted, optional

    Returns:
        pandas.DataFrame: tidy frame with ['epoch', 'data', 'value'] columns
    """
    colnames = ["model{}".format(idx) for idx in range(1, len(df_list) + 1)]
    # Pull the selected (stage, metric) value series out of each trace.
    extracted = []
    for df in df_list:
        selected = df[(df["stage"] == stage) & (df["metric"] == metric)]
        extracted.append(selected["value"].reset_index(drop=True).values)
    # One column per model, then stack into long/tidy format.
    output = pd.DataFrame(zip(*extracted), columns=colnames).stack().reset_index()
    output.columns = ["epoch", "data", "value"]
    return output
def track_model_metrics(
    model,
    train_interactions,
    test_interactions,
    k=10,
    no_epochs=100,
    no_threads=8,
    show_plot=True,
    **kwargs
):
    """Record a model's performance at each epoch, format the performance into
    tidy format, optionally plot it, and return the performance data.

    Args:
        model (LightFM instance): fitted LightFM model
        train_interactions (scipy sparse COO matrix): train interactions set
        test_interactions (scipy sparse COO matrix): test interaction set
        k (int): number of recommendations, optional
        no_epochs (int): Number of epochs to run, optional
        no_threads (int): Number of parallel threads to use, optional
        show_plot (bool): whether to draw the performance plot, optional
        **kwargs: other keyword arguments to be passed down

    Returns:
        pandas.DataFrame, LightFM model:
            - Performance traces of the fitted model
            - Fitted model
    """
    # initialising temp data storage
    model_prec_train = [0] * no_epochs
    model_prec_test = [0] * no_epochs
    model_rec_train = [0] * no_epochs
    model_rec_test = [0] * no_epochs

    # fit model and store train/test metrics at each epoch
    for epoch in range(no_epochs):
        model.fit_partial(
            interactions=train_interactions, epochs=1, num_threads=no_threads, **kwargs
        )
        model_prec_train[epoch] = precision_at_k(
            model, train_interactions, k=k, **kwargs
        ).mean()
        model_prec_test[epoch] = precision_at_k(
            model, test_interactions, k=k, **kwargs
        ).mean()
        model_rec_train[epoch] = recall_at_k(
            model, train_interactions, k=k, **kwargs
        ).mean()
        model_rec_test[epoch] = recall_at_k(
            model, test_interactions, k=k, **kwargs
        ).mean()

    # collect the performance metrics into a dataframe
    fitting_metrics = pd.DataFrame(
        zip(model_prec_train, model_prec_test, model_rec_train, model_rec_test),
        columns=[
            "model_prec_train",
            "model_prec_test",
            "model_rec_train",
            "model_rec_test",
        ],
    )
    # convert into tidy format
    fitting_metrics = fitting_metrics.stack().reset_index()
    fitting_metrics.columns = ["epoch", "level", "value"]
    # extract the stage/metric labels encoded in the column names
    fitting_metrics["stage"] = fitting_metrics.level.str.split("_").str[-1]
    fitting_metrics["metric"] = fitting_metrics.level.str.split("_").str[1]
    fitting_metrics.drop(["level"], axis=1, inplace=True)
    # replace the metric keys to improve visualisation.
    # Bug fix: `df.metric.replace(..., inplace=True)` mutates a column
    # obtained by attribute access — deprecated chained-assignment pattern
    # in modern pandas; assign the replaced column back instead.
    metric_keys = {"prec": "Precision", "rec": "Recall"}
    fitting_metrics["metric"] = fitting_metrics["metric"].replace(metric_keys)
    # plots the performance data
    if show_plot:
        model_perf_plots(fitting_metrics)
    return fitting_metrics, model
def similar_users(user_id, user_features, model, N=10):
    """Return the top N users most similar to ``user_id`` by cosine
    similarity of their LightFM representations.

    Based on https://github.com/lyst/lightfm/issues/244#issuecomment-355305681

    Args:
        user_id (int): id of user to be used as reference
        user_features (scipy sparse CSR matrix): user feature matric
        model (LightFM instance): fitted LightFM model
        N (int): Number of top similar users to return

    Returns:
        pandas.DataFrame: top N most similar users with score
    """
    _, embeddings = model.get_user_representations(features=user_features)
    # Cosine similarity against the reference user's embedding.
    similarity = embeddings.dot(embeddings[user_id, :])
    norms = np.linalg.norm(embeddings, axis=1)
    norms[norms == 0] = 1e-10  # guard against division by zero
    similarity /= norms
    # Top N+1 candidates (the reference user itself is dropped below).
    top = np.argpartition(similarity, -(N + 1))[-(N + 1):]
    ranked = sorted(
        zip(top, similarity[top] / norms[user_id]), key=lambda pair: -pair[1]
    )
    return pd.DataFrame(ranked[1:], columns=["userID", "score"])
def similar_items(item_id, item_features, model, N=10):
    """Return the top N items most similar to ``item_id`` by cosine
    similarity of their LightFM representations.

    Based on https://github.com/lyst/lightfm/issues/244#issuecomment-355305681

    Args:
        item_id (int): id of item to be used as reference
        item_features (scipy sparse CSR matrix): item feature matric
        model (LightFM instance): fitted LightFM model
        N (int): Number of top similar items to return

    Returns:
        pandas.DataFrame: top N most similar items with score
    """
    _, embeddings = model.get_item_representations(features=item_features)
    # Cosine similarity against the reference item's embedding.
    similarity = embeddings.dot(embeddings[item_id, :])
    norms = np.linalg.norm(embeddings, axis=1)
    norms[norms == 0] = 1e-10  # guard against division by zero
    similarity /= norms
    # Top N+1 candidates (the reference item itself is dropped below).
    top = np.argpartition(similarity, -(N + 1))[-(N + 1):]
    ranked = sorted(
        zip(top, similarity[top] / norms[item_id]), key=lambda pair: -pair[1]
    )
    return pd.DataFrame(ranked[1:], columns=["itemID", "score"])
def prepare_test_df(test_idx, uids, iids, uid_map, iid_map, weights):
    """Build the user-item test dataframe used for evaluation.

    Args:
        test_idx (slice): slice of test indices
        uids (numpy.ndarray): Array of internal user indices
        iids (numpy.ndarray): Array of internal item indices
        uid_map (dict): Keys to map internal user indices to external ids.
        iid_map (dict): Keys to map internal item indices to external ids.
        weights (numpy.float32 coo_matrix): user-item interaction

    Returns:
        pandas.DataFrame: user-item pairs selected for testing with ratings
    """
    internal_users = uids[test_idx]
    internal_items = iids[test_idx]
    # Map internal indices back to external ids via dict-key position.
    external_users = [list(uid_map.keys())[x] for x in internal_users]
    external_items = [list(iid_map.keys())[x] for x in internal_items]
    test_df = pd.DataFrame(
        zip(internal_users, internal_items, external_users, external_items),
        columns=["uid", "iid", "userID", "itemID"],
    )
    # DOK format allows O(1) lookup of individual interaction weights.
    dok_weights = weights.todok()
    test_df["rating"] = test_df.apply(lambda row: dok_weights[row.uid, row.iid], axis=1)
    return test_df[["userID", "itemID", "rating"]]
def prepare_all_predictions(
    data,
    uid_map,
    iid_map,
    interactions,
    model,
    num_threads,
    user_features=None,
    item_features=None,
):
    """Score every unseen user-item pair with the model.

    Args:
        data (pandas df): dataframe of all users, items and ratings as loaded
        uid_map (dict): Keys to map internal user indices to external ids.
        iid_map (dict): Keys to map internal item indices to external ids.
        interactions (np.float32 coo_matrix): user-item interaction
        model (LightFM instance): fitted LightFM model
        num_threads (int): number of parallel computation threads
        user_features (np.float32 csr_matrix): User weights over features
        item_features (np.float32 csr_matrix): Item weights over features

    Returns:
        pandas.DataFrame: predictions for all user-item pairs not already
        interacted with (rating < 1)
    """
    # Cross product of every user with every item.
    item_ids = list(data.itemID.unique())
    users, items = [], []
    for user_id in data.userID.unique():
        users.extend([user_id] * len(item_ids))
        items.extend(item_ids)
    all_predictions = pd.DataFrame(data={"userID": users, "itemID": items})
    all_predictions["uid"] = all_predictions.userID.map(uid_map)
    all_predictions["iid"] = all_predictions.itemID.map(iid_map)
    # DOK format allows O(1) lookup of individual interaction weights.
    dok_weights = interactions.todok()
    all_predictions["rating"] = all_predictions.apply(
        lambda row: dok_weights[row.uid, row.iid], axis=1
    )
    # Keep only pairs without an existing interaction.
    all_predictions = all_predictions[all_predictions.rating < 1].reset_index(drop=True)
    all_predictions = all_predictions.drop("rating", axis=1)
    all_predictions["prediction"] = all_predictions.apply(
        lambda row: model.predict(
            user_ids=row["uid"],
            item_ids=[row["iid"]],
            user_features=user_features,
            item_features=item_features,
            num_threads=num_threads,
        )[0],
        axis=1,
    )
    return all_predictions[["userID", "itemID", "prediction"]]
|
{"hexsha": "2256df53bb3697f9b6cdaf8d1aa162e0530c4b8a", "size": 9363, "ext": "py", "lang": "Python", "max_stars_repo_path": "recommenders/models/lightfm/lightfm_utils.py", "max_stars_repo_name": "enowy/Recommenders", "max_stars_repo_head_hexsha": "60033231b9167438032843c23158c0c776856e0e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-10T05:47:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T05:47:19.000Z", "max_issues_repo_path": "recommenders/models/lightfm/lightfm_utils.py", "max_issues_repo_name": "enowy/Recommenders", "max_issues_repo_head_hexsha": "60033231b9167438032843c23158c0c776856e0e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2022-01-19T20:24:51.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-18T20:25:24.000Z", "max_forks_repo_path": "recommenders/models/lightfm/lightfm_utils.py", "max_forks_repo_name": "enowy/Recommenders", "max_forks_repo_head_hexsha": "60033231b9167438032843c23158c0c776856e0e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.8066914498, "max_line_length": 120, "alphanum_fraction": 0.6558795258, "include": true, "reason": "import numpy", "num_tokens": 2221}
|
// Boost.Geometry
// Copyright (c) 2021, Oracle and/or its affiliates.
// Contributed and/or modified by Adam Wulkiewicz, on behalf of Oracle
// Licensed under the Boost Software License version 1.0.
// http://www.boost.org/users/license.html
#ifndef BOOST_GEOMETRY_STRATEGIES_DISTANCE_GEOGRAPHIC_HPP
#define BOOST_GEOMETRY_STRATEGIES_DISTANCE_GEOGRAPHIC_HPP
#include <boost/geometry/strategies/distance/comparable.hpp>
#include <boost/geometry/strategies/distance/detail.hpp>
#include <boost/geometry/strategies/distance/services.hpp>
#include <boost/geometry/strategies/detail.hpp>
#include <boost/geometry/strategies/geographic/azimuth.hpp>
#include <boost/geometry/strategies/geographic/distance.hpp>
#include <boost/geometry/strategies/geographic/distance_cross_track.hpp>
#include <boost/geometry/strategies/geographic/distance_cross_track_box_box.hpp>
#include <boost/geometry/strategies/geographic/distance_cross_track_point_box.hpp>
#include <boost/geometry/strategies/geographic/distance_segment_box.hpp>
// TODO - for backwards compatibility, remove?
#include <boost/geometry/strategies/geographic/distance_andoyer.hpp>
#include <boost/geometry/strategies/geographic/distance_thomas.hpp>
#include <boost/geometry/strategies/geographic/distance_vincenty.hpp>
#include <boost/geometry/strategies/normalize.hpp>
#include <boost/geometry/strategies/relate/geographic.hpp>
namespace boost { namespace geometry
{
namespace strategies { namespace distance
{
// TODO: azimuth and normalize getters would not be needed if distance_segment_box was implemented differently
// right now it calls disjoint algorithm details.
// Umbrella distance strategy for geographic (ellipsoidal) coordinate systems.
//
// Extends the relate strategy (which stores the spheroid) with distance()
// overloads that select a concrete implementation per geometry pair, plus
// the azimuth() and normalize() getters that the distance algorithms need
// (see the TODO above: they exist because distance_segment_box calls
// disjoint algorithm details).
template
<
    typename FormulaPolicy = strategy::andoyer,
    std::size_t SeriesOrder = strategy::default_order<FormulaPolicy>::value,
    typename Spheroid = srs::spheroid<double>,
    typename CalculationType = void
>
class geographic
    : public strategies::relate::geographic<FormulaPolicy, SeriesOrder, Spheroid, CalculationType>
{
    using base_t = strategies::relate::geographic<FormulaPolicy, SeriesOrder, Spheroid, CalculationType>;

public:
    geographic() = default;

    // Construct with a specific reference spheroid (stored in the base).
    explicit geographic(Spheroid const& spheroid)
        : base_t(spheroid)
    {}

    // azimuth
    auto azimuth() const
    {
        return strategy::azimuth::geographic
            <
                FormulaPolicy, Spheroid, CalculationType
            >(base_t::m_spheroid);
    }

    // distance: point / point
    template <typename Geometry1, typename Geometry2>
    auto distance(Geometry1 const&, Geometry2 const&,
                  detail::enable_if_pp_t<Geometry1, Geometry2> * = nullptr) const
    {
        return strategy::distance::geographic
            <
                FormulaPolicy, Spheroid, CalculationType
            >(base_t::m_spheroid);
    }

    // distance: point / segment (cross-track)
    template <typename Geometry1, typename Geometry2>
    auto distance(Geometry1 const&, Geometry2 const&,
                  detail::enable_if_ps_t<Geometry1, Geometry2> * = nullptr) const
    {
        return strategy::distance::geographic_cross_track
            <
                FormulaPolicy, Spheroid, CalculationType
            >(base_t::m_spheroid);
    }

    // distance: point / box
    template <typename Geometry1, typename Geometry2>
    auto distance(Geometry1 const&, Geometry2 const&,
                  detail::enable_if_pb_t<Geometry1, Geometry2> * = nullptr) const
    {
        return strategy::distance::geographic_cross_track_point_box
            <
                FormulaPolicy, Spheroid, CalculationType
            >(base_t::m_spheroid);
    }

    // distance: segment / box
    template <typename Geometry1, typename Geometry2>
    auto distance(Geometry1 const&, Geometry2 const&,
                  detail::enable_if_sb_t<Geometry1, Geometry2> * = nullptr) const
    {
        return strategy::distance::geographic_segment_box
            <
                FormulaPolicy, Spheroid, CalculationType
            >(base_t::m_spheroid);
    }

    // distance: box / box
    template <typename Geometry1, typename Geometry2>
    auto distance(Geometry1 const&, Geometry2 const&,
                  detail::enable_if_bb_t<Geometry1, Geometry2> * = nullptr) const
    {
        return strategy::distance::geographic_cross_track_box_box
            <
                FormulaPolicy, Spheroid, CalculationType
            >(base_t::m_spheroid);
    }

    // normalize (points only) — returns the spherical point normalizer.
    template <typename Geometry>
    static auto normalize(Geometry const&,
                          std::enable_if_t
                              <
                                  util::is_point<Geometry>::value
                              > * = nullptr)
    {
        return strategy::normalize::spherical_point();
    }
};
namespace services
{
template <typename Geometry1, typename Geometry2>
struct default_strategy<Geometry1, Geometry2, geographic_tag, geographic_tag>
{
using type = strategies::distance::geographic<>;
};
template <typename FP, typename S, typename CT>
struct strategy_converter<strategy::distance::geographic<FP, S, CT> >
{
static auto get(strategy::distance::geographic<FP, S, CT> const& s)
{
return strategies::distance::geographic<FP, strategy::default_order<FP>::value, S, CT>(s.model());
}
};
// TODO - for backwards compatibility, remove?
// Maps the fixed-formula andoyer strategy onto the umbrella strategy by
// pinning strategy::andoyer as the formula policy.
template <typename S, typename CT>
struct strategy_converter<strategy::distance::andoyer<S, CT> >
{
static auto get(strategy::distance::andoyer<S, CT> const& s)
{
return strategies::distance::geographic<strategy::andoyer, strategy::default_order<strategy::andoyer>::value, S, CT>(s.model());
}
};
// TODO - for backwards compatibility, remove?
// Maps the fixed-formula thomas strategy onto the umbrella strategy by
// pinning strategy::thomas as the formula policy.
template <typename S, typename CT>
struct strategy_converter<strategy::distance::thomas<S, CT> >
{
static auto get(strategy::distance::thomas<S, CT> const& s)
{
return strategies::distance::geographic<strategy::thomas, strategy::default_order<strategy::thomas>::value, S, CT>(s.model());
}
};
// TODO - for backwards compatibility, remove?
// Maps the fixed-formula vincenty strategy onto the umbrella strategy by
// pinning strategy::vincenty as the formula policy.
template <typename S, typename CT>
struct strategy_converter<strategy::distance::vincenty<S, CT> >
{
static auto get(strategy::distance::vincenty<S, CT> const& s)
{
return strategies::distance::geographic<strategy::vincenty, strategy::default_order<strategy::vincenty>::value, S, CT>(s.model());
}
};
// Adapts the legacy point-segment cross-track strategy to the umbrella
// strategy, preserving its spheroid model.
template <typename FP, typename S, typename CT>
struct strategy_converter<strategy::distance::geographic_cross_track<FP, S, CT> >
{
static auto get(strategy::distance::geographic_cross_track<FP, S, CT> const& s)
{
return strategies::distance::geographic<FP, strategy::default_order<FP>::value, S, CT>(s.model());
}
};
// Adapts the legacy point-box cross-track strategy to the umbrella
// strategy, preserving its spheroid model.
template <typename FP, typename S, typename CT>
struct strategy_converter<strategy::distance::geographic_cross_track_point_box<FP, S, CT> >
{
static auto get(strategy::distance::geographic_cross_track_point_box<FP, S, CT> const& s)
{
return strategies::distance::geographic<FP, strategy::default_order<FP>::value, S, CT>(s.model());
}
};
// Adapts the legacy segment-box strategy to the umbrella strategy,
// preserving its spheroid model.
template <typename FP, typename S, typename CT>
struct strategy_converter<strategy::distance::geographic_segment_box<FP, S, CT> >
{
static auto get(strategy::distance::geographic_segment_box<FP, S, CT> const& s)
{
return strategies::distance::geographic<FP, strategy::default_order<FP>::value, S, CT>(s.model());
}
};
// Adapts the legacy box-box cross-track strategy to the umbrella strategy,
// preserving its spheroid model.
template <typename FP, typename S, typename CT>
struct strategy_converter<strategy::distance::geographic_cross_track_box_box<FP, S, CT> >
{
static auto get(strategy::distance::geographic_cross_track_box_box<FP, S, CT> const& s)
{
return strategies::distance::geographic<FP, strategy::default_order<FP>::value, S, CT>(s.model());
}
};
// details
// TODO: This specialization wouldn't be needed if strategy::distance::geographic_cross_track was implemented as an alias
// Converter for the internal detail:: cross-track strategy.  That strategy
// carries two extra bool template parameters (B, ECP) that the umbrella
// strategy does not take, so a derived strategy is used which returns the
// exact detail instantiation for segment-related geometry combinations.
template <typename FP, typename S, typename CT, bool B, bool ECP>
struct strategy_converter<strategy::distance::detail::geographic_cross_track<FP, S, CT, B, ECP> >
{
// Umbrella strategy with distance() overridden for pointlike/segmental,
// segmental/pointlike and segmental/segmental combinations.
struct altered_strategy
: strategies::distance::geographic<FP, strategy::default_order<FP>::value, S, CT>
{
typedef strategies::distance::geographic<FP, strategy::default_order<FP>::value, S, CT> base_t;
explicit altered_strategy(S const& s) : base_t(s) {}
// Keep the base-class overloads visible alongside the overload below.
using base_t::distance;
template <typename Geometry1, typename Geometry2>
auto distance(Geometry1 const&, Geometry2 const&,
std::enable_if_t
<
util::is_pointlike<Geometry1>::value
&& util::is_segmental<Geometry2>::value
|| util::is_segmental<Geometry1>::value
&& util::is_pointlike<Geometry2>::value
|| util::is_segmental<Geometry1>::value
&& util::is_segmental<Geometry2>::value
> * = nullptr) const
{
// Reconstruct the original detail strategy, B/ECP flags included.
return strategy::distance::detail::geographic_cross_track
<
FP, S, CT, B, ECP
>(base_t::m_spheroid);
}
};
static auto get(strategy::distance::detail::geographic_cross_track<FP, S, CT, B, ECP> const& s)
{
return altered_strategy(s.model());
}
};
} // namespace services
}} // namespace strategies::distance
}} // namespace boost::geometry
#endif // BOOST_GEOMETRY_STRATEGIES_DISTANCE_GEOGRAPHIC_HPP
|
{"hexsha": "4c9aadd91e210beb6464a4c8df4c6e5f95aa6d61", "size": 9409, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "boost/geometry/strategies/distance/geographic.hpp", "max_stars_repo_name": "pranavgo/RRT", "max_stars_repo_head_hexsha": "87148c3ddb91600f4e74f00ffa8af14b54689aa4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "boost/geometry/strategies/distance/geographic.hpp", "max_issues_repo_name": "pranavgo/RRT", "max_issues_repo_head_hexsha": "87148c3ddb91600f4e74f00ffa8af14b54689aa4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "boost/geometry/strategies/distance/geographic.hpp", "max_forks_repo_name": "pranavgo/RRT", "max_forks_repo_head_hexsha": "87148c3ddb91600f4e74f00ffa8af14b54689aa4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.8481481481, "max_line_length": 138, "alphanum_fraction": 0.674566904, "num_tokens": 2175}
|
# PyVista package initialization: re-exports the plotting/utilities/core
# namespaces and applies environment-variable driven configuration.
#
# Fix: ``os`` is now imported before its first use.  The original imported it
# only near the bottom (next to ``appdirs``) and the earlier
# ``os.environ['PYVISTA_OFF_SCREEN']`` lookup worked only because a star
# import happened to leak an ``os`` name into this namespace.
import os
import warnings

from pyvista._version import __version__
from pyvista.plotting import *
from pyvista.utilities import *
from pyvista.core import *
# Per contract with Sphinx-Gallery, this method must be available at top level
from pyvista.utilities.sphinx_gallery import _get_sg_image_scraper
import numpy as np
import scooby
import vtk
import appdirs

# get the int type from vtk
VTK_ID_TYPE_SIZE = vtk.vtkIdTypeArray().GetDataTypeSize()
ID_TYPE = np.int32  # fallback when the reported id size is neither 4 nor 8
if VTK_ID_TYPE_SIZE == 4:
    ID_TYPE = np.int32
elif VTK_ID_TYPE_SIZE == 8:
    ID_TYPE = np.int64

# Reject old VTK builds.
# NOTE(review): the condition rejects VTK 5.x itself although the message
# says "5.0 or greater" -- confirm the intended minimum version.
if vtk.vtkVersion().GetVTKMajorVersion() <= 5:
    raise AssertionError('VTK version must be 5.0 or greater.')

# catch annoying numpy/vtk future warning:
warnings.simplefilter(action='ignore', category=FutureWarning)

# A simple flag to set when generating the documentation
OFF_SCREEN = False
try:
    if os.environ['PYVISTA_OFF_SCREEN'].lower() == 'true':
        OFF_SCREEN = True
except KeyError:
    pass

# Grab system flag for anti-aliasing
try:
    rcParams['multi_samples'] = int(os.environ['PYVISTA_MULTI_SAMPLES'])
except KeyError:
    pass

# Grab system flag for auto-closing because of Panel issues
try:
    # This only sets to false if PYVISTA_AUTO_CLOSE is false
    rcParams['auto_close'] = not os.environ['PYVISTA_AUTO_CLOSE'].lower() == 'false'
except KeyError:
    pass

# A threshold for the max cells to compute a volume for when repr-ing
REPR_VOLUME_MAX_CELLS = 1e6

# Set where figures are saved
FIGURE_PATH = None

# Set up a per-user data directory for downloaded examples.
USER_DATA_PATH = appdirs.user_data_dir('pyvista')
if not os.path.exists(USER_DATA_PATH):
    os.makedirs(USER_DATA_PATH)
EXAMPLES_PATH = os.path.join(USER_DATA_PATH, 'examples')
if not os.path.exists(EXAMPLES_PATH):
    os.makedirs(EXAMPLES_PATH)

# Send VTK messages to the logging module:
send_errors_to_logging()

# Set up panel for interactive notebook rendering
try:
    if os.environ['PYVISTA_USE_PANEL'].lower() == 'false':
        rcParams['use_panel'] = False
    elif os.environ['PYVISTA_USE_PANEL'].lower() == 'true':
        rcParams['use_panel'] = True
except KeyError:
    pass

# Only initialize panel if in a Jupyter environment
if scooby.in_ipykernel():
    try:
        import panel
        panel.extension('vtk')
    except (ImportError, RuntimeError):
        rcParams['use_panel'] = False

# Set preferred plot theme
try:
    theme = os.environ['PYVISTA_PLOT_THEME'].lower()
    set_plot_theme(theme)
except KeyError:
    pass

# Set a parameter to control default print format for floats
FLOAT_FORMAT = "{:.3e}"
|
{"hexsha": "8a663d2fb5d13b1033e6d8bd900d2e40d369d68c", "size": 2601, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyvista/__init__.py", "max_stars_repo_name": "imsodin/pyvista", "max_stars_repo_head_hexsha": "af3bd7dc7b5b8551b732f6e6fa74f6675027469c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyvista/__init__.py", "max_issues_repo_name": "imsodin/pyvista", "max_issues_repo_head_hexsha": "af3bd7dc7b5b8551b732f6e6fa74f6675027469c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyvista/__init__.py", "max_forks_repo_name": "imsodin/pyvista", "max_forks_repo_head_hexsha": "af3bd7dc7b5b8551b732f6e6fa74f6675027469c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.5408163265, "max_line_length": 84, "alphanum_fraction": 0.738177624, "include": true, "reason": "import numpy", "num_tokens": 663}
|
"""Return the product of array elements over a given axis."""
from __future__ import annotations
from typing import Any, Optional, Sequence, Union
import numpy
import numpy.typing
import numpoly
from ..baseclass import ndpoly, PolyLike
from ..dispatch import implements
@implements(numpy.prod, numpy.product)
def prod(
    a: PolyLike,
    axis: Union[None, int, Sequence[int]] = None,
    dtype: Optional[numpy.typing.DTypeLike] = None,
    out: Optional[ndpoly] = None,
    keepdims: bool = False,
    **kwargs: Any,
) -> ndpoly:
    """
    Return the product of array elements over a given axis.
    Args:
        a : array_like
            Input data.
        axis : None or int or tuple of ints, optional
            Axis or axes along which a product is performed. The default,
            axis=None, will calculate the product of all the elements in the
            input array. If axis is negative it counts from the last to the
            first axis. If axis is a tuple of ints, a product is performed on
            all of the axes specified in the tuple instead of a single axis or
            all the axes as before.
        dtype : dtype, optional
            The type of the returned array, as well as of the accumulator in
            which the elements are multiplied. The dtype of `a` is used by
            default unless `a` has an integer dtype of less precision than the
            default platform integer. In that case, if `a` is signed then the
            platform integer is used while if `a` is unsigned then an unsigned
            integer of the same precision as the platform integer is used.
        out : ndarray, optional
            Alternative output array in which to place the result. It must have
            the same shape as the expected output, but the type of the output
            values will be cast if necessary.
        keepdims : bool, optional
            If this is set to True, the axes which are reduced are left in the
            result as dimensions with size one. With this option, the result
            will broadcast correctly against the input array.
        initial : scalar, optional
            The starting value for this product.
        where : array_like of bool, optional
            Elements to include in the product.
    Returns:
        An array shaped as `a` but with the specified axis removed.
        Returns a reference to `out` if specified.
    Examples:
        >>> q0, q1 = numpoly.variable(2)
        >>> poly = numpoly.polynomial([[[1, q0, q0**2],
        ...                             [q0+q1, q1, q1]]])
        >>> numpoly.prod(poly)
        polynomial(q0**3*q1**3+q0**4*q1**2)
        >>> numpoly.prod(poly, keepdims=True)
        polynomial([[[q0**3*q1**3+q0**4*q1**2]]])
        >>> numpoly.prod(poly, axis=1)
        polynomial([[q1+q0, q0*q1, q0**2*q1]])
        >>> numpoly.prod(poly, axis=2, keepdims=True)
        polynomial([[[q0**3],
                     [q1**3+q0*q1**2]]])
        >>> numpoly.prod(poly, axis=[1, 2])
        polynomial([[[q0**3*q1**3+q0**4*q1**2]]])
    """
    a = numpoly.aspolynomial(a)
    # `out` (like `dtype` and extra kwargs) is accepted for numpy API
    # compatibility only; providing it is not supported.
    assert out is None
    if keepdims:
        if axis is None:
            # Full reduction, then restore one singleton dim per input dim.
            out = _prod(numpoly.reshape(a, -1), axis=0)
            out = numpoly.reshape(out, (1,)*len(a.shape))
            return out
        elif isinstance(axis, int):
            # Normalize to a list so the multi-axis branch below handles it;
            # that branch re-inserts each reduced axis with size 1, which is
            # exactly the keepdims behavior.
            axis = [axis]
    if axis is None:
        # Flatten and reduce everything to a scalar polynomial.
        out = _prod(numpoly.reshape(a, -1), axis=0)
    elif isinstance(axis, int):
        out = _prod(a, axis=axis)
    else:
        for idx in axis:
            a = _prod(a, axis=idx)
            # Re-insert the reduced axis as length 1 so the remaining entries
            # of `axis` keep referring to the intended dimensions.
            # NOTE(review): as a side effect the reduced axes stay in the
            # result even when keepdims=False (see the last docstring
            # example); negative indices in a multi-axis `axis` would place
            # the newaxis incorrectly -- confirm intended.
            a = a[(slice(None),)*idx+(numpy.newaxis,)]
        out = a
    return out
def _prod(a: ndpoly, axis: int) -> ndpoly:
    """
    Backend for the product function.
    Reduces exactly one axis by repeatedly applying ``numpoly.multiply``,
    since polynomial multiplication cannot be delegated to ``numpy.prod``.
    Args:
        a:
            Input data.
        axis:
            The axis to take product over.
    Returns:
        An array shaped as `a` but with the specified axis removed.
    """
    # Normalize a negative axis to its non-negative equivalent.
    axis = axis+a.ndim if axis < 0 else axis
    assert a.ndim > axis, (a, axis)
    indices = (slice(None),)*axis
    # Indexing with a scalar at position `axis` drops that dimension, so each
    # slice below has ndim-1 dimensions; fold them together with multiply.
    # NOTE(review): assumes a.shape[axis] >= 1; an empty axis raises
    # IndexError here -- confirm that is acceptable for callers.
    out = a[indices+(0,)]
    for idx in range(1, a.shape[axis]):
        out = numpoly.multiply(out, a[indices+(idx,)])
    assert len(out.shape)+1 == len(a.shape)
    return out
|
{"hexsha": "35b269775df3486662fdcdd606e46910284ef2e9", "size": 4263, "ext": "py", "lang": "Python", "max_stars_repo_path": "numpoly/array_function/prod.py", "max_stars_repo_name": "jonathf/npoly", "max_stars_repo_head_hexsha": "9df4bd2a3b134e8a196e24389c0ad84c26da9662", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2019-12-13T23:54:33.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-08T22:44:25.000Z", "max_issues_repo_path": "numpoly/array_function/prod.py", "max_issues_repo_name": "jonathf/npoly", "max_issues_repo_head_hexsha": "9df4bd2a3b134e8a196e24389c0ad84c26da9662", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 54, "max_issues_repo_issues_event_min_datetime": "2019-08-25T20:03:10.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-09T08:59:27.000Z", "max_forks_repo_path": "numpoly/array_function/prod.py", "max_forks_repo_name": "jonathf/npoly", "max_forks_repo_head_hexsha": "9df4bd2a3b134e8a196e24389c0ad84c26da9662", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-03-05T12:03:28.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-07T16:56:09.000Z", "avg_line_length": 34.6585365854, "max_line_length": 79, "alphanum_fraction": 0.5890218156, "include": true, "reason": "import numpy", "num_tokens": 1081}
|
# Upscale every ``frame<N>.png`` in ``data`` with an ISR super-resolution
# model and write the results to ``data_clean``.
#
# Fix: the original loop iterated ``for _ in tqdm(range(counter))`` while the
# body referenced an undefined name ``count`` (its initialization was
# commented out), raising NameError on the first iteration.  The loop
# variable is now ``count`` itself.
import numpy as np
from PIL import Image
import os
import glob
from tqdm import tqdm
from ISR.models import RDN, RRDN

''' ============================================================================ '''
# Pretrained PSNR-optimized weights; use RRDN(weights='gans') instead for
# GAN-flavored output.
model = RDN(weights='psnr-small')

data_path = 'data'
data_clean_path = 'data_clean'

''' ============================================================================ '''
# Create the output directory if it does not already exist.
try:
    if not os.path.exists(data_clean_path):
        os.makedirs(data_clean_path)
except OSError:
    print('Error')

''' ============================================================================ '''
# Number of PNG frames to process.
counter = len(glob.glob1(data_path, "*.png"))

for count in tqdm(range(counter)):
    img = Image.open(data_path + '/frame' + str(count) + '.png')
    img = img.resize((480, 270))
    # Upscale; on memory-constrained machines use
    # model.predict(np.array(img), by_patch_of_size=50) instead.
    sr_img = model.predict(np.array(img))
    sr_img = Image.fromarray(sr_img)
    sr_img.save(data_clean_path + '/' + str(count) + '.png')
|
{"hexsha": "722399bccceaff420099a151a04c01924bc077c1", "size": 1111, "ext": "py", "lang": "Python", "max_stars_repo_path": "Scripts/predict_gen_image.py", "max_stars_repo_name": "Darkshadow9799/Super-Resolution", "max_stars_repo_head_hexsha": "f9840f7905584c8a2fa6c79df5a1ee76f935a37e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-06-27T06:48:43.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-03T18:41:50.000Z", "max_issues_repo_path": "Scripts/predict_gen_image.py", "max_issues_repo_name": "Darkshadow9799/Super-Resolution", "max_issues_repo_head_hexsha": "f9840f7905584c8a2fa6c79df5a1ee76f935a37e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Scripts/predict_gen_image.py", "max_forks_repo_name": "Darkshadow9799/Super-Resolution", "max_forks_repo_head_hexsha": "f9840f7905584c8a2fa6c79df5a1ee76f935a37e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.0975609756, "max_line_length": 84, "alphanum_fraction": 0.5148514851, "include": true, "reason": "import numpy", "num_tokens": 254}
|
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.setrecursionlimit(10000)
delta = 0.001
def logiMap(x, r):
    """One step of the logistic map: r * x * (1 - x)."""
    return r * x * (1 - x)
def logisMap(x, r):
    """Plot the logistic-map trajectory, one point per r, while sweeping r
    upward in steps of the module-level ``delta`` until r reaches 4.

    Behavior is unchanged from the recursive original (same plot calls in the
    same order, same return value), but the recursion is replaced by a loop
    so the ``sys.setrecursionlimit(10000)`` hack is no longer required
    (sweeping 2.8 -> 4.0 with delta=0.001 took ~1200 stack frames).
    """
    while r < 4:
        # Plot the current (r, x) point and give the GUI a chance to update.
        plt.plot(r, x, 'o', color='b', markersize=1)
        plt.draw()
        plt.pause(0.0001)
        # Advance r, then apply one logistic-map step at the new r,
        # exactly as the recursive version did.
        r += delta
        x = logiMap(x, r)
    return 'Complete'
logisMap(0.3,2.8)
|
{"hexsha": "b9e65b504f58919fa8961308f26412adb177729b", "size": 382, "ext": "py", "lang": "Python", "max_stars_repo_path": "reccurLogiMap.py", "max_stars_repo_name": "solothinker/miniature-octo-broccoli", "max_stars_repo_head_hexsha": "4f09139a42e603908009a6114d8c6ef5df03bdad", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "reccurLogiMap.py", "max_issues_repo_name": "solothinker/miniature-octo-broccoli", "max_issues_repo_head_hexsha": "4f09139a42e603908009a6114d8c6ef5df03bdad", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "reccurLogiMap.py", "max_forks_repo_name": "solothinker/miniature-octo-broccoli", "max_forks_repo_head_hexsha": "4f09139a42e603908009a6114d8c6ef5df03bdad", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 14.6923076923, "max_line_length": 44, "alphanum_fraction": 0.5994764398, "include": true, "reason": "import numpy", "num_tokens": 130}
|
```python
import sympy as sym
from sympy.polys import subresultants_qq_zz
sym.init_printing()
```
The Bezout matrix is a special square matrix associated with two polynomials, introduced by Sylvester (1853) and Cayley (1857) and named after Étienne Bézout. Bézoutian may also refer to the determinant of this matrix, which is equal to the resultant of the two polynomials.
The entries of the Bezout matrix are bilinear functions of the coefficients of the given polynomials. The Bezout formulation has undergone several generalizations. The most common one is Cayley's. Cayley's matrix is given by,
$$ \left|
\begin{array}{cc}
p(x) & q(x)\\
p(a)& q(a)
\end{array}
\right| = \Delta(x, a)$$
where $\Delta(x, a)$ is the determinant.
We have the polynomial:
$$ \delta(x, a) = \frac{\Delta(x,a)}{x-a}$$
The matrix is then constructed from the coefficients of the polynomial $\delta(x, a)$. Each coefficient is viewed as a polynomial of $x_1,..., x_n$.
The Bezout matrix is highly related to the Sylvester matrix and the greatest common divisor of polynomials. Unlike in Sylvester's formulation, where the resultant of $p$ and $q$ is the determinant of an $(m + n) \times (m + n)$ matrix, in the Cayley formulation, the resultant is obtained
as the determinant of a $n \times n$ matrix.
Example: Generic example
------------------------
```python
b_3, b_2, b_1, b_0 = sym.symbols("b_3, b_2, b_1, b_0")
x = sym.symbols('x')
```
```python
b = sym.IndexedBase("b")
```
```python
p = b_2 * x ** 2 + b_1 * x + b_0
q = sym.diff(p, x)
```
```python
subresultants_qq_zz.bezout(p, q, x)
```
Example: Existence of common roots
------------------------------------------
Note that if the system has a common root we are expecting the resultant/determinant to equal to zero.
**A common root exists.**
```python
# example one
p = x ** 3 +1
q = x + 1
```
```python
subresultants_qq_zz.bezout(p, q, x)
```
```python
subresultants_qq_zz.bezout(p, q, x).det()
```
```python
# example two
p = x ** 2 - 5 * x + 6
q = x ** 2 - 3 * x + 2
```
```python
subresultants_qq_zz.bezout(p, q, x)
```
```python
subresultants_qq_zz.bezout(p, q, x).det()
```
**A common root does not exist.**
```python
z = x ** 2 - 7 * x + 12
h = x ** 2 - x
```
```python
subresultants_qq_zz.bezout(z, h, x).det()
```
Dixon's Resultant
-----------------
Dixon (1908) showed how to extend this formulation to $m = 3$ polynomials in $n = 2$ variables.
In a similar manner but this time,
$$ \left|
\begin{array}{ccc}
p(x, y) & q(x, y) & h(x, y) \cr
p(\alpha, y) & q(\alpha, y) & h(\alpha, y) \cr
p(\alpha, \beta) & q(\alpha, \beta) & h(\alpha, \beta) \cr
\end{array}
\right| = \Delta(x, y, \alpha, \beta)$$
where $\Delta(x, y, \alpha, \beta)$ is the determinant.
Thus, we have the polynomial:
$$ \delta(x,y, \alpha, \beta) = \frac{\Delta(x, y, \alpha, \beta)}{(x-\alpha)(y - \beta)}$$
```python
from sympy.polys.multivariate_resultants import DixonResultant
```
Example: Generic example of Dixon $(n=2, m=3)$
---------------------------------------------------
```python
a_1, a_2, b_1, b_2, u_1, u_2, u_3 = sym.symbols('a_1, a_2, b_1, b_2, u_1, u_2, u_3')
```
```python
y = sym.symbols('y')
```
```python
p = a_1 * x ** 2 * y ** 2 + a_2 * x ** 2
q = b_1 * x ** 2 * y ** 2 + b_2 * y ** 2
h = u_1 * x + u_2 * y + u_3
```
```python
dixon = DixonResultant(variables=[x, y], polynomials=[p, q, h])
```
```python
poly = dixon.get_dixon_polynomial()
```
```python
poly
```
```python
matrix = dixon.get_dixon_matrix(poly)
```
```python
matrix
```
```python
matrix.det().factor()
```
Dixon's General Case
--------------------
[Yang et al.](https://rd.springer.com/chapter/10.1007/3-540-63104-6_11) generalized the Dixon resultant method of three polynomials with two variables to the system of $n+1$ polynomials with $n$ variables.
Example: Numerical example
--------------------
```python
p = x + y
q = x ** 2 + y ** 3
h = x ** 2 + y
```
```python
dixon = DixonResultant([p, q, h], (x, y))
```
```python
poly = dixon.get_dixon_polynomial()
poly.simplify()
```
```python
matrix = dixon.get_dixon_matrix(polynomial=poly)
matrix
```
```python
matrix.det()
```
Example: Generic example
---------
```python
a, b, c = sym.symbols('a, b, c')
```
```python
p_1 = a * x ** 2 + b * x * y + (b + c - a) * x + a * y + 3 * (c - 1)
p_2 = 2 * a ** 2 * x ** 2 + 2 * a * b * x * y + a * b * y + b ** 3
p_3 = 4 * (a - b) * x + c * (a + b) * y + 4 * a * b
```
```python
polynomials = [p_1, p_2, p_3]
```
```python
dixon = DixonResultant(polynomials, [x, y])
```
```python
poly = dixon.get_dixon_polynomial()
```
```python
size = len(poly.monoms())
size
```
```python
matrix = dixon.get_dixon_matrix(poly)
matrix
```
Example:
--------------------------------------------------------------------------------------------------
**From [Dixon resultant’s solution of systems of geodetic polynomial equations](https://rd.springer.com/content/pdf/10.1007%2Fs00190-007-0199-0.pdf)**
```python
z = sym.symbols('z')
```
```python
f = x ** 2 + y ** 2 - 1 + z * 0
g = x ** 2 + z ** 2 - 1 + y * 0
h = y ** 2 + z ** 2 - 1
```
```python
dixon = DixonResultant([f, g, h], [y, z])
```
```python
poly = dixon.get_dixon_polynomial()
```
```python
matrix = dixon.get_dixon_matrix(poly)
matrix
```
```python
matrix.det()
```
|
{"hexsha": "158feba0ef191febc900bb157cc02842e4aa8932", "size": 74443, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "examples/notebooks/Bezout_Dixon_resultant.ipynb", "max_stars_repo_name": "utkarshdeorah/sympy", "max_stars_repo_head_hexsha": "dcdf59bbc6b13ddbc329431adf72fcee294b6389", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 8323, "max_stars_repo_stars_event_min_datetime": "2015-01-02T15:51:43.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T13:13:19.000Z", "max_issues_repo_path": "examples/notebooks/Bezout_Dixon_resultant.ipynb", "max_issues_repo_name": "utkarshdeorah/sympy", "max_issues_repo_head_hexsha": "dcdf59bbc6b13ddbc329431adf72fcee294b6389", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 15102, "max_issues_repo_issues_event_min_datetime": "2015-01-01T01:33:17.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T22:53:13.000Z", "max_forks_repo_path": "examples/notebooks/Bezout_Dixon_resultant.ipynb", "max_forks_repo_name": "utkarshdeorah/sympy", "max_forks_repo_head_hexsha": "dcdf59bbc6b13ddbc329431adf72fcee294b6389", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 4490, "max_forks_repo_forks_event_min_datetime": "2015-01-01T17:48:07.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T17:24:05.000Z", "avg_line_length": 76.3517948718, "max_line_length": 10492, "alphanum_fraction": 0.7618849321, "converted": true, "num_tokens": 1787}
|
! { dg-do run }
!
! PR fortran/65792
! The evaluation of the argument in the call to new_prt_spec2
! failed to properly initialize the comp component.
! While the array contents were properly copied, the array bounds remained
! uninitialized.
!
! Contributed by Dominique D'Humieres <dominiq@lps.ens.fr>
program main
implicit none
integer, parameter :: n = 2
! A string modeled as a deferred-length allocatable array of characters.
type :: string_t
character(LEN=1), dimension(:), allocatable :: chars
end type string_t
! Wrapper type: the bug under test concerns initialization of this component
! (array bounds included) when built via a structure constructor.
type :: string_container_t
type(string_t) :: comp
end type string_container_t
type(string_t) :: prt_in, tmp, tmpa(n)
type(string_container_t) :: tmpc, tmpca(n)
integer :: i, j, k
! Run twice so deallocation/reallocation across iterations is exercised.
do i=1,2
! scalar elemental function with structure constructor
prt_in = string_t(["D"])
tmpc = new_prt_spec2 (string_container_t(prt_in))
if (any(tmpc%comp%chars .ne. ["D"])) call abort
deallocate (prt_in%chars)
deallocate(tmpc%comp%chars)
! Check that function arguments are OK too
tmpc = new_prt_spec2 (string_container_t(new_str_t(["h","e","l","l","o"])))
if (any(tmpc%comp%chars .ne. ["h","e","l","l","o"])) call abort
deallocate(tmpc%comp%chars)
end do
contains
! Identity on a string container; "impure elemental" so it accepts scalar or
! array arguments while still permitting impure effects in callers.
impure elemental function new_prt_spec2 (name) result (prt_spec)
type(string_container_t), intent(in) :: name
type(string_container_t) :: prt_spec
prt_spec = name
end function new_prt_spec2
! Build a string_t from a character array via the default type constructor.
function new_str_t (name) result (prt_spec)
character (*), intent(in), dimension (:) :: name
type(string_t) :: prt_spec
prt_spec = string_t(name)
end function new_str_t
end program main
|
{"hexsha": "a50d6965cabb317b5e064fd83bf7a6c78065a78c", "size": 1588, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "msp430-gcc-tics/msp430-gcc-7.3.1.24-source-full/gcc/gcc/testsuite/gfortran.dg/derived_constructor_comps_5.f90", "max_stars_repo_name": "TUDSSL/TICS", "max_stars_repo_head_hexsha": "575ed1b34403b435540bc946c2e6dc5b6bf13072", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-05-02T17:34:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-17T10:15:18.000Z", "max_issues_repo_path": "msp430-gcc-tics/msp430-gcc-7.3.1.24-source-full/gcc/gcc/testsuite/gfortran.dg/derived_constructor_comps_5.f90", "max_issues_repo_name": "TUDSSL/TICS", "max_issues_repo_head_hexsha": "575ed1b34403b435540bc946c2e6dc5b6bf13072", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "msp430-gcc-tics/msp430-gcc-7.3.1.24-source-full/gcc/gcc/testsuite/gfortran.dg/derived_constructor_comps_5.f90", "max_forks_repo_name": "TUDSSL/TICS", "max_forks_repo_head_hexsha": "575ed1b34403b435540bc946c2e6dc5b6bf13072", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-07-27T00:22:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-01T09:41:02.000Z", "avg_line_length": 26.4666666667, "max_line_length": 80, "alphanum_fraction": 0.6914357683, "num_tokens": 452}
|
using Colors
include("helpers/listing.jl")
isinstalled(pkg) = try Pkg.installed(pkg) != nothing catch e false end
# Build the "Typography" page of the Escher user guide.
#
# `window` is the Escher window object; the client-side assets required by
# the examples on this page ("widgets", "codemirror", "tex") are pushed onto
# `window.assets` before the UI tree is assembled. Returns the page content
# padded by 2em.
function main(window)
# Assets needed by the widgets, code listings and LaTeX examples below.
push!(window.assets, "widgets")
push!(window.assets, "codemirror")
push!(window.assets, "tex")
vbox(
title(3, "Typography"),
vskip(1em),
md"Escher provides primitives that directly map to CSS font styling properties, as well as higher-level functions which form a standard typographic scale you can use to give your documents a consistent aesthetic.",
vskip(1em),
# --- High-level typographic helpers -----------------------------------
h1("High-level Functions"),
md"""These use styles from $(Pkg.dir("Escher"))/assets/font.css by default""", # It would be better to have the served asset directory instead
vskip(1em),
h2("Block Quotes"),
md"Block quotes is a style used when quoting large sections of text. They are also available within markdown by prepending a section with `>` ",
vskip(1em),
listing("""vbox(
blockquote(md" `map` is a function that that applies a given **function** to each element of a **list**, returning a **list of results**."),
md"e.g. `map(x->x*x, 1:4)` evaluates to \$(string(map(x->x*x, 1:4)))",
)
"""),
vskip(1em),
h2("Titles and Headings"),
"Useful things you might use if writing an article like this one",
vskip(1em),
listing("""vbox(
title(4,"Title 4"),
map(n->title(n,"Title \$n"), 3:-1:1 )...,
heading(1,"Heading 1"),
map(n->heading(n,"Heading \$n"), 2:4 )...,
"Body Text",
h1( "h1(x) is short for header(1,x)" ),
h2( "h2" ),
h3( "h3" ),
h4( "h4" ),
md\"\"\"# Heading 1 in Markdown
## Heading 2 in Markdown
### Heading 3 in Markdown
#### Heading 4 in Markdown\"\"\",
)"""),
h2("Code"),
md"""To show code with syntax highlighting, you can use the `codemirror` function. Codemirror requires the `"codemirror"` asset.
Use `push!(window.assets, "codemirror")` to do this.""",
vskip(1em),
# `listing` below takes both the source text and the pre-rendered widget.
listing("""
push!(window.assets, "codemirror")
codemirror(\"\"\"
function foo()
42
end
\"\"\"
)
""",
codemirror("""
function foo()
42
end
"""
)
),
md"""Note that you also need the `"codemirror"` asset to be loaded even if you are writing code inside `md`.""",
vskip(1em),
h2("LaTeX"),
md"LaTeX strings can be rendered with the `tex` function.",
vskip(1em),
listing("""
tex("(a+b)^2=a^2+b^2+2ab")"""),
vskip(1em),
# --- Low-level styling primitives --------------------------------------
h1("Low-level Functions"),
"Sometimes you want more explicit control over your document",
vskip(1em),
h2("Weights and Types"),
md"Many classes of fonts are available, often with many weights. By default, Escher uses the [*Source Sans Pro*](http://www.google.com/fonts/specimen/Source+Sans+Pro) (sans-serif) and [*Source Code Pro*](http://www.google.com/fonts/specimen/Source+Code+Pro) (monospaced) font families for great-looking and legible type.",
md"`fontweight` can take integer multiples of 100 from 100 to 900, or values such as `bold`, `bolder`, and `lighter` ",
md"`fonttype` arguments include `serif`, `sansserif`, `slabserif`, and `monospace`.",
vskip(1em),
listing("""hbox(intersperse(hskip(1em),[
vbox(map(x->fontweight(x,"Sans \$x."), 100:200:700 )...),
vbox(map(x->fontweight(x,"Mono \$x.") |> fonttype(monospace), 300:200:700 )...),
vbox(map(x->fontweight(x,"Serif \$x.") |> fonttype(serif), 500:200:700 )...),
fontweight(bold, "Bold flavored text")
]))"""),
vskip(1em),
h2("Styling and Alignment"),
"For those time where your squiggly lines are leaning, or if you want to write on the other side",
md"Valid alignments are `raggedright`, `raggedleft`, `justifytext`, and `centertext`",
md"note that `fontcase` only changes the style, and not the content of the text. Valid arguments are `ucase` and `lcase`",
vskip(1em),
listing("""vbox(
fontstyle(italic,"italics is a CSS class used for styling text"),
emph("emphasis is an HTML tag for adding semantic emphasis to a word or phrase"),
textalign(centertext,fontcase(ucase, "Loudness equals power." )),
fontcase(lcase, "Speak softly, and carry a BIG stick." ) |> textalign(raggedleft),
lineheight(80px,"This text has a high lineheight"),
)"""),
vskip(1em),
h2("Colors and Families"),
md"You can also change the color of text with *fontcolor* and [Colors.jl](https://github.com/JuliaGraphics/Colors.jl). You may change the family with *fontfamily*",
vskip(1em),
listing("""using Colors
hbox(
map(h->fontcolor(LCHab(50,200,h),"nya "),20:20:320)...,
fontcolor(colorant"magenta","ncat :3")
) |> fontfamily("Ubuntu")"""),
vskip(1em),
h2("Size"),
md"```fontsize``` accepts many units of length such as `pt` and `px`, as well as keywords `((x)x)large|small` and `medium`",
listing("""vbox(
Escher.fontsize(xxlarge,"Big Text"),
Escher.fontsize(36pt,"Bigger Text"),
Escher.fontsize(48px,"Bigger Text"),
)"""),
vskip(1em),
# --- Borders and separators --------------------------------------------
title(3, "Embellishment"),
vskip(1em),
md"A border may be styled by many functions. `borderstyle` may accept `solid`, `dashed`, `dotted`, and `noborder`. `fillcolor` and `bordercolor` can accept any color defined in colors.jl, and `borderwidth` may accept anything of type Length such as `em` or `px`.",
listing("""greybox = container(12em,2.5em) |> fillcolor(colorant"grey") |> borderwidth(0.4em)
vbox(
greybox,
greybox |> bordercolor(colorant"orange") |> borderstyle(solid),
greybox |> borderstyle(solid),
greybox |> borderstyle(dotted),
greybox |> borderstyle(dashed),
)"""),
vskip(1em),
md"You may specify a border all at once with `border`. It takes a list of sides (optionally), a style, a width, and a color.",
listing("""image(
"https://upload.wikimedia.org/wikipedia/commons/thumb/5/5f/Utah_teapot_simple_2.png/220px-Utah_teapot_simple_2.png",
alt="Teapot",
) |> border([right,top,left,bottom],dashed,2px,colorant"#2D2E30")"""),
vskip(1em),
md"`hline` and `vline` are very thin, bordered elements that are inteded for use as a separator. It is quite useful within a flow: `vline` within a `hbox` and `hline` in a `vbox`. As with any other bordered element, they can be styled accordingly",
listing("""vbox(
paper(4,hbox(
[
flex("Column A"),
vline(),
flex("Column B")
]
)),
vskip(4em),
paper(4,vbox(
[
"Row 1",
hline() |> borderwidth(3px),
"Row 2"
]
)),
)"""),
) |> pad(2em)
end
|
{"hexsha": "252bbd1323ccdd67aacb445c6ac68f6793a2df92", "size": 7025, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/user-guide/theme.jl", "max_stars_repo_name": "JuliaPackageMirrors/Escher.jl", "max_stars_repo_head_hexsha": "72c0f85c0908b1d08c47ebe75ef045462220a197", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/user-guide/theme.jl", "max_issues_repo_name": "JuliaPackageMirrors/Escher.jl", "max_issues_repo_head_hexsha": "72c0f85c0908b1d08c47ebe75ef045462220a197", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/user-guide/theme.jl", "max_forks_repo_name": "JuliaPackageMirrors/Escher.jl", "max_forks_repo_head_hexsha": "72c0f85c0908b1d08c47ebe75ef045462220a197", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.1428571429, "max_line_length": 330, "alphanum_fraction": 0.5937366548, "num_tokens": 1918}
|
[STATEMENT]
lemma map_of_eqI:
assumes set_eq: "set (map fst xs) = set (map fst ys)"
assumes map_eq: "\<forall>k\<in>set (map fst xs). map_of xs k = map_of ys k"
shows "map_of xs = map_of ys"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. map_of xs = map_of ys
[PROOF STEP]
proof (rule ext)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. map_of xs x = map_of ys x
[PROOF STEP]
fix k
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x. map_of xs x = map_of ys x
[PROOF STEP]
show "map_of xs k = map_of ys k"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. map_of xs k = map_of ys k
[PROOF STEP]
proof (cases "map_of xs k")
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. map_of xs k = None \<Longrightarrow> map_of xs k = map_of ys k
2. \<And>a. map_of xs k = Some a \<Longrightarrow> map_of xs k = map_of ys k
[PROOF STEP]
case None
[PROOF STATE]
proof (state)
this:
map_of xs k = None
goal (2 subgoals):
1. map_of xs k = None \<Longrightarrow> map_of xs k = map_of ys k
2. \<And>a. map_of xs k = Some a \<Longrightarrow> map_of xs k = map_of ys k
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
map_of xs k = None
[PROOF STEP]
have "k \<notin> set (map fst xs)"
[PROOF STATE]
proof (prove)
using this:
map_of xs k = None
goal (1 subgoal):
1. k \<notin> set (map fst xs)
[PROOF STEP]
by (simp add: map_of_eq_None_iff)
[PROOF STATE]
proof (state)
this:
k \<notin> set (map fst xs)
goal (2 subgoals):
1. map_of xs k = None \<Longrightarrow> map_of xs k = map_of ys k
2. \<And>a. map_of xs k = Some a \<Longrightarrow> map_of xs k = map_of ys k
[PROOF STEP]
with set_eq
[PROOF STATE]
proof (chain)
picking this:
set (map fst xs) = set (map fst ys)
k \<notin> set (map fst xs)
[PROOF STEP]
have "k \<notin> set (map fst ys)"
[PROOF STATE]
proof (prove)
using this:
set (map fst xs) = set (map fst ys)
k \<notin> set (map fst xs)
goal (1 subgoal):
1. k \<notin> set (map fst ys)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
k \<notin> set (map fst ys)
goal (2 subgoals):
1. map_of xs k = None \<Longrightarrow> map_of xs k = map_of ys k
2. \<And>a. map_of xs k = Some a \<Longrightarrow> map_of xs k = map_of ys k
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
k \<notin> set (map fst ys)
[PROOF STEP]
have "map_of ys k = None"
[PROOF STATE]
proof (prove)
using this:
k \<notin> set (map fst ys)
goal (1 subgoal):
1. map_of ys k = None
[PROOF STEP]
by (simp add: map_of_eq_None_iff)
[PROOF STATE]
proof (state)
this:
map_of ys k = None
goal (2 subgoals):
1. map_of xs k = None \<Longrightarrow> map_of xs k = map_of ys k
2. \<And>a. map_of xs k = Some a \<Longrightarrow> map_of xs k = map_of ys k
[PROOF STEP]
with None
[PROOF STATE]
proof (chain)
picking this:
map_of xs k = None
map_of ys k = None
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
map_of xs k = None
map_of ys k = None
goal (1 subgoal):
1. map_of xs k = map_of ys k
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
map_of xs k = map_of ys k
goal (1 subgoal):
1. \<And>a. map_of xs k = Some a \<Longrightarrow> map_of xs k = map_of ys k
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>a. map_of xs k = Some a \<Longrightarrow> map_of xs k = map_of ys k
[PROOF STEP]
case (Some v)
[PROOF STATE]
proof (state)
this:
map_of xs k = Some v
goal (1 subgoal):
1. \<And>a. map_of xs k = Some a \<Longrightarrow> map_of xs k = map_of ys k
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
map_of xs k = Some v
[PROOF STEP]
have "k \<in> set (map fst xs)"
[PROOF STATE]
proof (prove)
using this:
map_of xs k = Some v
goal (1 subgoal):
1. k \<in> set (map fst xs)
[PROOF STEP]
by (auto simp add: dom_map_of_conv_image_fst [symmetric])
[PROOF STATE]
proof (state)
this:
k \<in> set (map fst xs)
goal (1 subgoal):
1. \<And>a. map_of xs k = Some a \<Longrightarrow> map_of xs k = map_of ys k
[PROOF STEP]
with map_eq
[PROOF STATE]
proof (chain)
picking this:
\<forall>k\<in>set (map fst xs). map_of xs k = map_of ys k
k \<in> set (map fst xs)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
\<forall>k\<in>set (map fst xs). map_of xs k = map_of ys k
k \<in> set (map fst xs)
goal (1 subgoal):
1. map_of xs k = map_of ys k
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
map_of xs k = map_of ys k
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
map_of xs k = map_of ys k
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1997, "file": null, "length": 27}
|
# python3
# pylint: disable=g-bad-file-header
# Copyright 2021 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for efficient_agent.neural_testbed.generative.nt_kernels."""
import itertools
from absl.testing import absltest
from absl.testing import parameterized
import haiku as hk
import jax
import jax.config
import jax.numpy as jnp
from neural_testbed.generative import nt_kernels
# TODO(author1): move this config update to an explicit initialize function.
# Enable 64-bit floats globally at import time. NOTE(review): presumably the
# eigenvalue/symmetry checks below need double precision — confirm.
jax.config.update('jax_enable_x64', True)
class NtKernelsTest(parameterized.TestCase):
  """Sanity checks for the benchmark kernel built by `nt_kernels`."""

  @parameterized.parameters([[s] for s in range(10)])
  def test_benchmark_kernel(self, seed: int):
    """Posterior samples drawn from the kernel must be NaN-free."""
    kernel_fn = nt_kernels.make_benchmark_kernel()
    keys = hk.PRNGSequence(seed)

    # Evaluate the kernel at random 1D inputs; jitter the diagonal so the
    # Gram matrix is numerically usable as a covariance.
    x = jax.random.normal(next(keys), [1000, 1])
    gram = kernel_fn(x, x, 'nngp')
    jittered = gram + 1e-6 * jnp.eye(len(gram))

    zero_mean = jnp.zeros(len(gram))
    for _ in range(10):
      draw = jax.random.multivariate_normal(next(keys), zero_mean, jittered)
      assert not jnp.any(jnp.isnan(draw))

  @parameterized.parameters(
      itertools.product(range(10), [1, 10], ['nngp', 'ntk']))
  def test_kernel_matrix(self, seed: int, input_dim: int, method: str):
    """Checks that the kernel matrix is symmetric and positive semi-definite."""
    kernel_fn = nt_kernels.make_benchmark_kernel()
    keys = hk.PRNGSequence(seed)

    # Evaluate the kernel at random inputs of the requested dimension.
    x = jax.random.normal(next(keys), [100, input_dim])
    gram = kernel_fn(x, x, method)

    # Symmetric within floating-point tolerance.
    assert jnp.allclose(gram, gram.T, rtol=1e-05, atol=1e-08)
    # All eigenvalues non-negative => positive semi-definite.
    assert jnp.all(jnp.linalg.eigvals(gram) >= 0)
# Standard absltest entry point when run as a script.
if __name__ == '__main__':
  absltest.main()
|
{"hexsha": "2c6b3ce6fd66b80ec0efd682295c243a0cbeb127", "size": 2702, "ext": "py", "lang": "Python", "max_stars_repo_path": "neural_testbed/generative/nt_kernels_test.py", "max_stars_repo_name": "SamuelGabriel/neural_testbed", "max_stars_repo_head_hexsha": "cc2e3de49c29f29852c8cd5885ab54fb6e664e2e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "neural_testbed/generative/nt_kernels_test.py", "max_issues_repo_name": "SamuelGabriel/neural_testbed", "max_issues_repo_head_hexsha": "cc2e3de49c29f29852c8cd5885ab54fb6e664e2e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "neural_testbed/generative/nt_kernels_test.py", "max_forks_repo_name": "SamuelGabriel/neural_testbed", "max_forks_repo_head_hexsha": "cc2e3de49c29f29852c8cd5885ab54fb6e664e2e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.775, "max_line_length": 80, "alphanum_fraction": 0.7024426351, "include": true, "reason": "import jax", "num_tokens": 672}
|
/*
* Copyright (c) 2014, Stanislav Vorobiov
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "PowerGenComponent.h"
#include "Scene.h"
#include "SceneObjectFactory.h"
#include "Settings.h"
#include "Utils.h"
#include "Const.h"
#include "FadeOutComponent.h"
#include "RenderHealthbarComponent.h"
#include "AssetManager.h"
#include "SequentialTweening.h"
#include "SingleTweening.h"
#include "af/Utils.h"
#include <boost/make_shared.hpp>
namespace af
{
/*
 * Power generator scene component, updated in the pre-render phase.
 *
 * "rc"/"lampRc" are the generator body and lamp render quads, "light" is
 * the lamp light, and "fireLight" holds the three fire lights that are
 * made visible one by one during the death sequence (see preRender()).
 */
PowerGenComponent::PowerGenComponent(const RenderQuadComponentPtr& rc,
    const RenderQuadComponentPtr& lampRc,
    const LightPtr& light,
    const LightPtr fireLight[3])
: PhasedComponent(phasePreRender),
  rc_(rc),
  lampRc_(lampRc),
  light_(light),
  dying_(false),
  wasFreezable_(false)
{
    fireLight_[0] = fireLight[0];
    fireLight_[1] = fireLight[1];
    fireLight_[2] = fireLight[2];
    /* Staggered countdowns: fire i breaks out when fireTime_[i] reaches
     * zero while dying_. The first one triggers almost immediately. */
    fireTime_[0] = 0.0001f;
    fireTime_[1] = 0.5f;
    fireTime_[2] = 1.0f;
}
/* No explicit cleanup required. */
PowerGenComponent::~PowerGenComponent()
{
}
/* Component-visitor dispatch: this component is visited as a phased one. */
void PowerGenComponent::accept(ComponentVisitor& visitor)
{
    visitor.visitPhasedComponent(shared_from_this());
}
/*
 * Per-frame update (pre-render phase).
 *
 * Two modes:
 *  - alive: wobble the generator body quad, lamp quad and lamp light
 *    vertically along the shake tweening set up in onRegister();
 *  - dying (entered when parent life() <= 0): count down the three
 *    staggered fire timers; as each expires, spawn an explosion and a
 *    looping fire particle effect at the matching fire light. Once all
 *    three have fired, restore the parent's freezable flag and remove
 *    this component.
 */
void PowerGenComponent::preRender(float dt)
{
    if (dying_) {
        bool done = true;
        for (int i = 0; i < 3; ++i) {
            if (fireTime_[i] > 0.0f) {
                done = false;
                fireTime_[i] -= dt;
                if (fireTime_[i] <= 0.0f) {
                    fireTime_[i] = 0.0f;
                    /* Lamp goes dark as soon as any fire breaks out. */
                    light_->setVisible(false);
                    SceneObjectPtr explosion = sceneObjectFactory.createExplosion1(zOrderExplosion);
                    explosion->setPos(parent()->getWorldPoint(fireLight_[i]->pos()));
                    scene()->addObject(explosion);
                    fireLight_[i]->setVisible(true);
                    /* Fire effect anchored at the fire light's local position. */
                    ParticleEffectComponentPtr pec = assetManager.getParticleEffect("fire1.p",
                        b2Vec2_zero, 0.0f, false);
                    pec->setTransform(b2Transform(fireLight_[i]->pos(), b2Rot(0.0f)));
                    pec->setFixedAngle(true);
                    pec->setZOrder(zOrderEffects);
                    pec->resetEmit();
                    parent()->addComponent(pec);
                }
            }
        }
        if (done) {
            parent()->setFreezable(wasFreezable_);
            removeFromParent();
        }
        return;
    }
    if (parent()->life() <= 0) {
        /* Entering the death sequence. Freezing is disabled — presumably so
         * the fire countdowns are not suspended mid-sequence; confirm. */
        wasFreezable_ = parent()->freezable();
        parent()->setFreezable(false);
        dying_ = true;
        /* Drop the healthbar overlay, if any, once the generator is dead. */
        RenderHealthbarComponentPtr hc = parent()->findComponent<RenderHealthbarComponent>();
        if (hc) {
            hc->removeFromParent();
        }
        return;
    }
    /* Alive: apply the current vertical shake offset to quads and light. */
    float value = tweening_->getValue(tweenTime_);
    rc_->setPos(pos_[0] + b2Vec2(0.0f, value));
    lampRc_->setPos(pos_[1] + b2Vec2(0.0f, value));
    light_->setPos(pos_[2] + b2Vec2(0.0f, value));
    tweenTime_ += dt;
}
void PowerGenComponent::onRegister()
{
pos_[0] = rc_->pos();
pos_[1] = lampRc_->pos();
pos_[2] = light_->pos();
SequentialTweeningPtr tweening = boost::make_shared<SequentialTweening>(true);
tweening->addTweening(boost::make_shared<SingleTweening>(0.05f, EaseLinear, -0.08f, 0.08f));
tweening->addTweening(boost::make_shared<SingleTweening>(0.05f, EaseLinear, 0.08f, -0.08f));
tweening_ = tweening;
tweenTime_ = getRandom(0.0f, tweening->duration());
}
void PowerGenComponent::onUnregister()
{
}
}
|
{"hexsha": "2c7b5f0bc9943ffc8e53626fb839ee0e78cbacd8", "size": 5081, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "game/PowerGenComponent.cpp", "max_stars_repo_name": "Sheph/TriggerTime", "max_stars_repo_head_hexsha": "9265dee6a178e43bf7365e3aa2f7f2ca22df074f", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 24.0, "max_stars_repo_stars_event_min_datetime": "2019-02-24T14:48:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T21:37:26.000Z", "max_issues_repo_path": "game/PowerGenComponent.cpp", "max_issues_repo_name": "Sheph/TriggerTime", "max_issues_repo_head_hexsha": "9265dee6a178e43bf7365e3aa2f7f2ca22df074f", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 3.0, "max_issues_repo_issues_event_min_datetime": "2019-02-25T20:45:09.000Z", "max_issues_repo_issues_event_max_datetime": "2019-02-28T18:05:45.000Z", "max_forks_repo_path": "game/PowerGenComponent.cpp", "max_forks_repo_name": "Sheph/TriggerTime", "max_forks_repo_head_hexsha": "9265dee6a178e43bf7365e3aa2f7f2ca22df074f", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 7.0, "max_forks_repo_forks_event_min_datetime": "2019-02-28T11:33:15.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-26T20:11:45.000Z", "avg_line_length": 32.3630573248, "max_line_length": 104, "alphanum_fraction": 0.603818146, "num_tokens": 1201}
|
function [mopt, vopt, avals, mvals, vvals] = ...
scimat_optimal_intersecting_plane(scimat, m0, v0, params)
% SCIMAT_OPTIMAL_INTERSECTING_PLANE Optimise intersection plane for SCIMAT
% segmentation mask.
%
% [MOPT, VOPT, AVALS, MVALS, VVALS] = ...
% scimat_optimal_intersecting_plane(SCIMAT, M0, V0, PARAMS)
%
% This function computes the plane that intersects a SCIMAT segmentation
% mask in a way that minimizes the segmentation area intersected by the
% plane. That is, in some sense in finds the plane more orthogonal to the
% segmented volume.
%
% (Note that the area is computed on the convex hull of the plane
% intersection with the volume.)
%
% SCIMAT is the struct with the volume that we want to intersect (see
% "help scimat" for details).
%
% M0 is the rotation centroid. This centroid will not change.
%
% V0 is a 3-vector that describes the normal vector to the initial
% intersecting plane. By default, the initial plane is horizontal.
%
% PARAMS is a struct with optimisation parameters:
%
% * PARAMS.TYPE is a string. 'local' (default) means that the
% intersected area will be minimised using multidimensional unconstrained
% nonlinear minimization (Nelder-Mead, fminsearch). 'global' means that
% area values will be systematically computed over a range of plane
% inclination values.
%
% * PARAMS.RAD can be a scalar or 2-vector:
%
% scalar: RAD is the radius of a 2D smoothing disk. The 2D image
% obtained from intersecting the 3D volume by the plane at
% each optimization iteration will be smoothed out using the
% disk before computing the area.
% 2-vector: The smoothing element is a 3D ball. Instead of smoothing
% a 2D image at each iteration, the whole 3D image volume is
% smoothed with the ball at the beginning. RAD(1) is the ball
% radius in the XY-plane. RAD(2) is the ball height.
%
% * PARAMS.RANGE (global optimisation only): Angular range. The azimuth
% of V0 will be changed RANGE(1) rad in any direction to compute
% area values. The elevation of V0 will be changed RANGE(2) rad.
% (Default RANGE(1) = RANGE(2) = 0.5236 rad = 30º).
%
% * PARAMS.N (global optimisation only): Number of azimuth (N(1)) or
% elevation (N(2)) samples. (Default N(1) = N(2) = 61).
%
% MOPT is the centroid of the optimal intersection.
%
% VOPT is the normal vector to the optimal plane.
%
% Note that because M0 and MOPT are both contained in the optimal plane,
% the duples (M0, VOPT) and (MOPT, VOPT) define the same optimal plane.
%
% AVALS is a vector with the record of area values from the optimization.
%
% MVALS is a volume with the coordinates of M at the different tested
% values. If optimisation is 'local', then MVALS is a matrix where each
% column is the centroid at each iteration step. If optimisation is
% 'global', then MVALS is a volume where MVALS(T,P,:) is the centroid for
% the normal vector VVALS(T,P,:).
%
% VVALS is a volume like MVALS, only for the normal vectors.
% Author(s): Ramon Casero <rcasero@gmail.com>, Vicente Grau
% Copyright © 2010, 2014 University of Oxford
% Version: 0.2.0
%
% University of Oxford means the Chancellor, Masters and Scholars of
% the University of Oxford, having an administrative office at
% Wellington Square, Oxford OX1 2JD, UK.
%
% This file is part of Gerardus.
%
% This program is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details. The offer of this
% program under the terms of the License is subject to the License
% being interpreted in accordance with English Law and subject to any
% action against the University of Oxford being under the jurisdiction
% of the English Courts.
%
% You should have received a copy of the GNU General Public License
% along with this program. If not, see <http://www.gnu.org/licenses/>.
%% Checks and initialization
% check arguments
narginchk(2, 4) ;
nargoutchk(0, 5) ;
% defaults
% default: horizontal initial plane (normal along z)
if (nargin < 3 || isempty(v0))
v0 = [0 0 1];
end
if (nargin < 4 || isempty(params))
params.type = 'local';
params.rad = [];
params.range = [30 30] / 180 * pi;
params.n = [61 61];
% se: structuring element for smoothing; stays empty when params.rad is
% empty
se = [];
end
if (~isfield(params, 'type'))
params.type = 'local';
end
if (~isfield(params, 'rad'))
params.rad = [];
se = [];
end
if (~isfield(params, 'range'))
params.range = [30 30] / 180 * pi;
end
if (~isfield(params, 'n'))
params.n = [61 61];
end
% prevent user entering rotation matrix instead of initial vector by
% mistake
if (size(v0, 2) ~= 1 || size(v0, 1) ~= 3)
error('V0 must be a column 3-vector')
end
% remove the dummy dimension and convert image data to double
scimat = scimat_squeeze(scimat, true);
% convert radius from real world size into number of pixels
params.rad = round(...
params.rad ./ [scimat.axis(1:length(params.rad)).spacing]);
% create disk for dilation/erosion if we are going to smooth in 2D
if (length(params.rad) == 1)
se = strel('disk', params.rad);
end
% 3D smoothing of the segmentation edges
% (morphological closing: dilation followed by erosion)
if (length(params.rad) == 2)
se = strel('ball', params.rad(1), params.rad(2));
scimat.data = imdilate(scimat.data, se);
scimat.data = imerode(scimat.data, se);
end
% generate 3D grid of coordinates
[x, y, z] = scimat_ndgrid(scimat);
% % DEBUG: compute intersection of SCIMAT volume with the initial plane
% % (if you want to visualize the image as in Seg3D, you need to do 'axis
% % xy')
% im = scimat_intersect_plane(scimat, m0, v0, x, y, z);
%% Optimisation of the intersection area
% convert Cartesian coordinates into spherical coordinates (length has to
% be one); note: we use phi for azimuth, and theta for elevation, contrary
% to Matlab's naming convention
[phi0, theta0] = cart2sph(v0(1), v0(2), v0(3));
% group spherical coordinates into vector
alpha0 = [phi0, theta0];
% init variables to keep track of the evolution of area values in the
% optimisation
avals = [];
mvals = [];
vvals = [];
% local or global optimisation
if strcmp(params.type, 'local')
% run optimisation to find minimum area; note that v0 is the only
% variable optimised, but the rest (scimat, x, y, z, m, rad, se) are
% available to segmented_area_of_intersection() because the latter is a
% subfunction
alpha = fminsearch(@segmented_area_of_intersection, alpha0);
% convert result from spherical to Carterian coordinates
[aux1 aux2 aux3] = sph2cart(alpha(1), alpha(2), 1.0);
vopt = [aux1 aux2 aux3];
% final centroid of the intersecting plane
mopt = mvals(:, end);
elseif strcmp(params.type, 'global')
% interval of azimuth angle values, phi \in [-180º, 180º] or \in
% [0, 360º]
phimin = phi0 - abs(params.range(1));
phimax = phi0 + abs(params.range(1));
% interval of elevation angle values, theta \in [-90º, 90º]
thmin = max(theta0 - abs(params.range(2)), -pi/2);
thmax = min(theta0 + abs(params.range(2)), pi/2);
% sample angle intervals
phivals = linspace(phimin, phimax, params.n(1));
thetavals = linspace(thmin, thmax, params.n(2));
% create matrices to save outputs; note that for each area value we
% need to save a 3-vector with the rotation point, and a 3-vector with
% the normal plane
avals = zeros(length(thetavals), length(phivals));
mvals = zeros(length(thetavals), length(phivals), 3);
vvals = zeros(length(thetavals), length(phivals), 3);
% compute area for each combination of elevation and azimuth angles
for T = 1:length(thetavals) % elevation
for P = 1:length(phivals) % azimuth
[a, mnew, v] = segmented_area_of_intersection(...
[phivals(P) thetavals(T)]);
% put values in output matrices
avals(T, P) = a;
mvals(T, P, :) = mnew;
vvals(T, P, :) = v;
end
end
% find minimum area
[foo, idx] = min(avals(:));
% convert linear index to multiple subscripts
[T, P] = ind2sub(size(avals), idx);
% output optimal plane
mopt = squeeze(mvals(T, P, :));
vopt = squeeze(vvals(T, P, :));
else
error(['Optimisation type not implemented: ' params.type])
end
%% Objective function (the function we are trying to minimise)
% rotate plane, intersect with image, and compute segmented area
% ALPHA = [azimuth, elevation] in rad. As a nested function it shares the
% parent workspace: reads scimat, m0, x, y, z, params, se; appends to
% avals/mvals/vvals in 'local' mode.
function [a, mnew, v] = segmented_area_of_intersection(alpha)
% convert spherical to Carterian coordinates
[aux1 aux2 aux3] = sph2cart(alpha(1), alpha(2), 1.0);
v = [aux1 aux2 aux3]';
% vector cannot be zero
if (norm(v) == 0)
error('Normal vector to plane cannot be (0,0,0)')
end
% this function cannot deal with vertical planes, because of a
% singularity
if (v(3) == 0)
error('Intersecting plane cannot be vertical')
end
% compute intersection of plane with volume
[im, zp, xp, yp] = scimat_intersect_plane(scimat, m0, v, x, y, z);
% 2D smoothing of the segmentation edges
if (length(params.rad) == 1)
im = imdilate(im, se);
im = imerode(im, se);
end
% % DEBUG: plot rotated plane
% hold off
% plot3(xp(:), yp(:), zp(:), '.r')
% find segmented voxels in the 2D cut
idx = find(im);
% get coordinates of segmented voxels
xps = xp(idx);
yps = yp(idx);
zps = zp(idx);
% % DEBUG: visualize intersection projected onto horizontal plane
% hold off
% imagesc(xp(:), yp(:), im > 0)
% hold on
% % DEBUG: compute and plot convex hull
% idx2 = convhull(xps, yps);
% vxs = xps(idx2);
% vys = yps(idx2);
% plot(vxs, vys, 'w')
% xlabel('x (m)')
% ylabel('y (m)')
% pause
% compute a rotation matrix from the Cartesian system to the
% rotated plane
rotmat = vec2rotmat(v);
% we are now seeing the rotated plane projected onto the horizontal
% plane, i.e. we see the segmentation mask in perspective.
% In order to see the true area of the segmentation mask, we need
% to change the system of coordinates so that the rotated plane
% becames the XY plane
% first, move segmented points so that centroid is at (0,0,0)...
xps = xps - m0(1);
yps = yps - m0(2);
zps = zps - m0(3);
% ...second, make the rotated plane horizontal, by inverting the
% rotation...
xyzps = [xps(:) yps(:) zps(:)] * rotmat;
xps = xyzps(:, 1);
yps = xyzps(:, 2);
zps = xyzps(:, 3);
% if everything has gone alright, then the z-coordinate of xyzp
% should be zero (+numerical errors), because the rotated plane is
% now the XY plane
assert(abs(min(zps)) < 1e-10)
assert(abs(max(zps)) < 1e-10)
% % DEBUG: visualize segmentation mask in real world coordinates
% hold off
% plot(xps + m0(1), yps + m0(2), 'r*')
% axis ij
% compute convex hull (reuse idx2): note convex hull coordinates
% are on projected space
idx2 = convhull(xps, yps);
vxs = xps(idx2);
vys = yps(idx2);
% compute x-,y-coordinates centroid and area of polygon
[mnew, a] = polycenter(vxs, vys);
mnew(3) = 0;
% the centroid is now on projected coordinates, but we need to put
% it back on the real world coordinates
mnew = rotmat * mnew';
mnew = (mnew' + m0)';
% for the global algorithm, values are recorded in a different way
if strcmp(params.type, 'local')
% kept track of optimisation evolution
avals = [avals a];
vvals = [vvals v];
mvals = [mvals mnew];
end
end
end
|
{"author": "vigente", "repo": "gerardus", "sha": "4d7c5195b826967781f1bb967872410e66b7cd3d", "save_path": "github-repos/MATLAB/vigente-gerardus", "path": "github-repos/MATLAB/vigente-gerardus/gerardus-4d7c5195b826967781f1bb967872410e66b7cd3d/matlab/FiltersToolbox/scimat_optimal_intersecting_plane.m"}
|
import Dyno

# Smoke test: parse the example "rbc.mod" model file with Dyno's modfile
# parser. NOTE(review): `rootdir` is expected to be defined by the test
# harness that includes this file — confirm.
filename = joinpath(rootdir,"examples","rbc.mod")
model_data = Dyno.modfile_parser(filename)
# print(model_data)
|
{"hexsha": "a283b94af05eafb4c22d6a5863abc7ad25b3f835", "size": 128, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/test_model_import.jl", "max_stars_repo_name": "JuliaTagBot/Dyno.jl", "max_stars_repo_head_hexsha": "4e3f89c27418f16debe791eb0c1c1e78c89f7848", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-11-02T13:18:00.000Z", "max_stars_repo_stars_event_max_datetime": "2017-11-02T13:18:00.000Z", "max_issues_repo_path": "test/test_model_import.jl", "max_issues_repo_name": "JuliaTagBot/Dyno.jl", "max_issues_repo_head_hexsha": "4e3f89c27418f16debe791eb0c1c1e78c89f7848", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-01-15T07:14:12.000Z", "max_issues_repo_issues_event_max_datetime": "2018-01-17T18:57:34.000Z", "max_forks_repo_path": "test/test_model_import.jl", "max_forks_repo_name": "JuliaTagBot/Dyno.jl", "max_forks_repo_head_hexsha": "4e3f89c27418f16debe791eb0c1c1e78c89f7848", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-02-08T11:02:20.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-08T11:02:20.000Z", "avg_line_length": 16.0, "max_line_length": 49, "alphanum_fraction": 0.765625, "num_tokens": 32}
|
from TwoDimLookup_motor import TwoDimLookup_motor
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from scipy import interpolate
import numpy as np
### Input Parameters
# C_F = 27000 # Cornering stiffness front / Schräglaufsteifigkeit vorne [N/rad] - Is already for two wheels !
# C_R = 35000 # Cornering stiffness rear / Schräglaufsteifigkeit hinten [N/rad] - Is already for two wheels !
# m = 350 # Mass vehicle + driver [kg]
# CoG_X = 0.65 # Actual position of CG (0 at front axis, 1 at rear axis)
#
# mu = 1.4 # Friction coefficient
# alpha = 7.5 # Slip angle [deg]
# DriveType = 2WD or 4WD
# res = resolution = number of data points
class GG_ESB_OneDim:
    """Speed-independent (one-dimensional) g-g diagram model.

    Builds the lateral/longitudinal acceleration envelope of a vehicle from
    a linear tyre model (cornering stiffnesses) and a friction ellipse.

    Parameters
    ----------
    C_F, C_R : cornering stiffness front/rear, both wheels per axle [N/rad]
    m        : mass of vehicle + driver [kg]
    CoG_X    : longitudinal CoG position (0 = front axle, 1 = rear axle)
    mu       : tyre friction coefficient
    alpha    : slip angle [deg]
    DriveType: '2WD' (rear-driven) or '4WD'
    """

    def __init__(self, C_F, C_R, m, CoG_X, mu, alpha, DriveType):
        self.C_F = C_F
        self.C_R = C_R
        self.m = m
        self.CoG_X = CoG_X
        self.mu = mu
        self.alpha = alpha
        self.DriveType = DriveType
        # General constants
        self.deg2rad = np.pi / 180
        self.g = 9.81

    def GG_ESB_ay_Max(self):
        """Return the maximum lateral acceleration [m/s^2].

        ay_max = (F_yF + F_yR) / m with F_y = C * alpha (linear tyre model).
        """
        FY_F = self.alpha * self.deg2rad * self.C_F
        FY_R = self.alpha * self.deg2rad * self.C_R
        FY_ovr = FY_F + FY_R
        return FY_ovr / self.m

    def GG_ESB_ax_Max(self):
        """Return the maximum longitudinal (traction) acceleration [m/s^2].

        Uses static axle loads; only the driven axle(s) contribute.

        Raises
        ------
        ValueError
            If DriveType is neither '2WD' nor '4WD'.  (Previously an
            unrecognised DriveType fell through and raised a NameError
            because FX_ovr was never assigned.)
        """
        FZ_F = (1 - self.CoG_X) * self.m * self.g   # static front axle load
        FZ_R = self.CoG_X * self.m * self.g         # static rear axle load
        FX_F = self.mu * FZ_F
        FX_R = self.mu * FZ_R
        if self.DriveType == '2WD':
            FX_ovr = FX_R                # rear-wheel drive: rear axle only
        elif self.DriveType == '4WD':
            FX_ovr = FX_F + FX_R
        else:
            raise ValueError("DriveType must be '2WD' or '4WD', got %r" % (self.DriveType,))
        return FX_ovr / self.m

    def Plot_gg(self, ay_max, ax_max):
        """Evaluate the g-g (friction ellipse) boundary in units of g.

        Plotting is currently disabled (kept below as commented-out code);
        the method only computes the envelope and returns None.

        Raises
        ------
        ValueError
            If DriveType is neither '2WD' nor '4WD' (previously ax_lower
            was simply left undefined, raising a NameError).
        """
        # Calculate values
        resolution = 1000
        ay = np.linspace((-1) * ay_max, ay_max, resolution)
        # upper (traction) boundary of the friction ellipse
        ax_upper = ((1 - (ay**2 / ay_max**2)) * ax_max**2)**0.5
        if self.DriveType == '2WD':
            # braking acts on all wheels, hence the larger magnitude — TODO
            # confirm the factor 2 against the intended brake model
            ax_lower = (-2) * ax_upper
        elif self.DriveType == '4WD':
            ax_lower = (-1) * ax_upper
        else:
            raise ValueError("DriveType must be '2WD' or '4WD', got %r" % (self.DriveType,))
        # convert to units of g
        ay = ay / self.g
        ax_U_inG = np.asarray(ax_upper) / self.g
        ax_L_inG = np.asarray(ax_lower) / self.g
        # # Plot g-g diagram
        # plt.plot(ay, ax_U_inG , 'b')
        # plt.plot(ay, ax_L_inG, 'b')
        # plt.xlabel('ay [g]' + '\n' + ('ay max: ' + str(np.round(ay_max/self.g,2)) + ' g'), fontsize = 16)
        # plt.ylabel('ax [g]' + '\n' + ('ax max: ' + str(np.round(ax_max/self.g,2)) + ' g'), fontsize = 16)
        # plt.title('g-g diagram', fontsize = 20, y=1.03)
        # plt.grid(True)
        # plt.show()
########################################################
class GG_ESB_TwoDim:
    """Speed-dependent (GGV) acceleration-envelope model with aerodynamics.

    Extends the one-dimensional g-g model with downforce (centre of
    pressure, lift coefficient) and a motor torque map supplied by
    ``TwoDimLookup_motor`` (external class — its exact semantics are not
    visible here; TODO confirm its ``ax_motor`` input/output contract).

    Parameters: C_F/C_R cornering stiffnesses [N/rad], m mass [kg],
    CoG_X / CoP_X longitudinal positions of CoG / centre of pressure
    (0 = front axle, 1 = rear axle), mu friction coefficient, alpha slip
    angle [deg], C_la lift coefficient*area [m^2], rho air density,
    DriveType '2WD'/'4WD', gearRatio, tireRadius, fr rolling resistance,
    Lift2Drag lift-to-drag ratio.
    """
    def __init__(self, C_F, C_R, m, CoG_X, mu, alpha, CoP_X, C_la, rho, DriveType, gearRatio, tireRadius, fr, Lift2Drag ):
        self.C_F = C_F
        self.C_R = C_R
        self.m = m
        self.CoG_X = CoG_X
        self.mu = mu
        self.alpha = alpha
        self.CoP_X = CoP_X # = 0.61
        self.C_la = C_la # = 3.52 m^2
        self.rho = rho
        self.DriveType = DriveType #2WD or 4WD
        self.gearRatio = gearRatio
        self.tireRadius = tireRadius
        self.fr = fr
        self.Lift2Drag = Lift2Drag
        # General constants
        self.deg2rad = np.pi/180
        self.g = 9.81
    def GGV_Map(self):
        """Build the GGV map: for each speed, the friction-ellipse upper
        (traction) and lower (braking) ax boundaries over a lateral-ay
        sweep, with traction additionally capped by the motor map.

        Returns four parallel lists-of-lists (one row per speed sample):
        ax_upper_values, ax_lower_values, ay_values, speed_values.
        """
        ax_upper_values = []
        ax_lower_values = []
        ay_values = []
        speed_values =[]
        Speed_resolution = 200
        ay_resolution = 500
        # initialize speed
        VehicleSpeed = np.linspace(0.001, 50, Speed_resolution) #Speed in m/s
        #loop through different velocities
        for i in range(len(VehicleSpeed)):
            #find maximum lateral acceleration
            ay_max = self.GG_ESB_ay_Max()
            ay = np.linspace((-1)*ay_max, ay_max, ay_resolution)
            ax_max_upper = self.GG_ESB_ax_Max_upper(VehicleSpeed[i])
            ax_up = []
            for j in range(len(ay)):
                # friction-ellipse boundary: ax^2/ax_max^2 + ay^2/ay_max^2 = 1
                ax_up.append(((1-(ay[j]**2 / ay_max**2)) * ax_max_upper**2)**0.5)
            ax_max_lower = self.GG_ESB_ax_Max_lower(VehicleSpeed[i])
            ax_low = []
            for j in range(len(ay)):
                ax_low.append(-1*((1-(ay[j]**2 / ay_max**2)) * ax_max_lower**2)**0.5)
            speed = []
            for j in range(len(ay)):
                speed.append(VehicleSpeed[i])
            ax_upper_values.append(ax_up)
            ax_lower_values.append(ax_low)
            ay_values.append(ay)
            speed_values.append(speed)
        # Load Motor map
        motor = TwoDimLookup_motor(self.gearRatio, self.tireRadius, self.CoG_X, self.m, self.CoP_X, self.C_la, self.rho, self.fr, self.Lift2Drag, self.DriveType)
        ax_motor = motor.ax_motor(speed_values)
        # Replace ax values which are higher than ax_motor
        # (traction is limited by available motor torque at that speed;
        # assumes ax_motor is indexable per speed row — TODO confirm)
        for i in range(len(ax_upper_values)):
            for j in range(len (ax_upper_values[i])):
                if ax_upper_values[i][j] > ax_motor[i]:
                    ax_upper_values[i][j] = ax_motor[i]
        return ax_upper_values, ax_lower_values, ay_values, speed_values
    def GG_ESB_ay_Max(self):
        """Return the maximum lateral acceleration [m/s^2] from the linear
        tyre model (speed independent — no aero term here)."""
        # calculate ay max
        FY_F = self.alpha * self.deg2rad * self.C_F
        FY_R = self.alpha * self.deg2rad * self.C_R
        FY_ovr = FY_F + FY_R
        ay_max = FY_ovr / self.m
        return ay_max
    def GG_ESB_ax_Max_upper(self, VehicleSpeed):
        """Return the maximum traction acceleration [m/s^2] at the given
        speed; axle loads include downforce, only driven axle(s) count."""
        # calculate ax max
        # Mass
        FZ_m_F = (1-self.CoG_X) * self.m * self.g
        FZ_m_R = self.CoG_X * self.m * self.g
        #Downforce
        FZ_d_F = (1-self.CoP_X) * self.C_la * 0.5*self.rho * VehicleSpeed**2
        FZ_d_R = (self.CoP_X) * self.C_la * 0.5*self.rho * VehicleSpeed**2
        # FZ ovr
        FZ_ovr_F = FZ_m_F + FZ_d_F
        FZ_ovr_R = FZ_m_R + FZ_d_R
        #FX
        FX_F = self.mu * FZ_ovr_F
        FX_R = self.mu * FZ_ovr_R
        # NOTE(review): an unrecognised DriveType leaves FX_ovr unassigned
        # (NameError) — only '2WD'/'4WD' are supported.
        if self.DriveType == '2WD':
            FX_ovr = FX_R
        elif self.DriveType == '4WD':
            FX_ovr = FX_F + FX_R
        ax_max_upper = FX_ovr / self.m
        return ax_max_upper
    def GG_ESB_ax_Max_lower(self, VehicleSpeed):
        """Return the maximum braking deceleration magnitude [m/s^2] at the
        given speed; braking always uses both axles regardless of
        DriveType."""
        # calculate ax max
        # Mass
        FZ_m_F = (1-self.CoG_X) * self.m * self.g
        FZ_m_R = self.CoG_X * self.m * self.g
        #Downforce
        FZ_d_F = (1-self.CoP_X) * self.C_la * 0.5*self.rho * VehicleSpeed**2
        FZ_d_R = (self.CoP_X) * self.C_la * 0.5*self.rho * VehicleSpeed**2
        # FZ ovr
        FZ_ovr_F = FZ_m_F + FZ_d_F
        FZ_ovr_R = FZ_m_R + FZ_d_R
        #FX
        FX_F = self.mu * FZ_ovr_F
        FX_R = self.mu * FZ_ovr_R
        #FX ovr
        FX_ovr = FX_F + FX_R
        ax_max_lower = FX_ovr / self.m
        return ax_max_lower
    def Plot_ggV(self, ax_upper_values, ax_lower_values, ay_values, speed_values ):
        """Plot the GGV envelope as two 3-D surfaces (traction red, braking
        default colour).  Blocks on plt.show().

        NOTE(review): fig.gca(projection='3d') was removed in newer
        matplotlib releases — confirm the pinned matplotlib version.
        """
        fig = plt.figure()
        ax = fig.gca(projection='3d')
        surf_upper = ax.plot_surface(ay_values, speed_values, ax_upper_values, color = 'r')
        surf_lower = ax.plot_surface(ay_values, speed_values, ax_lower_values)
        ax.set_xlabel('ay [m/s^2]')
        ax.set_ylabel('speed [m/s]')
        ax.set_zlabel('ax [m/s^2]')
        plt.show()
    def Plot_Long_Speed_Distance(self):
        """Simulate a straight-line launch by explicit Euler integration of
        the motor acceleration map and extract 0-100 kph and 75 m times.

        Plotting is currently disabled (commented out); the method computes
        the indices but returns None.
        """
        # Calculate motor
        VehicleSpeed = np.array([np.linspace(0.0001, 60, 200), np.zeros(200)])
        VehicleSpeed = np.transpose(VehicleSpeed).tolist()
        Motor = TwoDimLookup_motor(self.gearRatio, self.tireRadius, self.CoG_X, self.m, self.CoP_X, self.C_la, self.rho, self.fr, self.Lift2Drag, self.DriveType)
        ax_Motor = Motor.ax_motor(VehicleSpeed)
        speed = np.linspace(0.0001, 60, 200)
        # acceleration as a function of speed; zero outside the sampled range
        a = interpolate.interp1d(speed, ax_Motor, bounds_error=False, fill_value=0)
        #Calculate speed and distance
        v = [0.0001]
        s = [0]
        t = [0]
        delta_t = 0.005 # sec
        t_end = 6
        # explicit Euler step: v += a*dt, s += v*dt + 0.5*a*dt^2
        while t[-1] <= t_end:
            a_v = a(v[-1])
            v_t = a_v * delta_t + v[-1]
            s_t = 0.5 * a_v * delta_t**2 + v[-1] * delta_t + s[-1]
            v.append(v_t)
            s.append(s_t)
            t.append(t[-1] + delta_t)
        # Find important vehicle parameters
        # From 1 to 100 kph
        index1 = np.nonzero(np.asarray(v) <= 100/3.6)
        index_0to100 = np.max(index1)
        # Time for 75 m acceleration
        index2 = np.nonzero(np.asarray(s) <= 75)
        #print(index2)
        index_75m = np.max(index2)
        # Graphs
        # f, axarr = plt.subplots(2, sharex=True)
        # axarr[0].plot(t, v,'b', label = 'Vehicle speed', linewidth = 1.5)
        # axarr[0].plot(t[index_0to100], v[index_0to100], marker = '+', markersize = 15, markeredgewidth = 2.5, markerfacecolor = 'r', markeredgecolor = 'r', linestyle = 'None', label = 'Time from 0 to 100 kph: ' + str(np.round(t[index_0to100],2)) + ' s')
        # axarr[0].set_title('Vehicle Speed vs. Time')
        # axarr[0].set_xlabel('Time [s]')
        # axarr[0].set_ylabel('Vehicle speed [m/s]')
        # axarr[0].grid(True)
        # axarr[0].legend(numpoints=1, shadow=True, fancybox=True)
        #
        # axarr[1].plot(t, s, 'g', label = 'Driven Distance', linewidth = 1.5)
        # axarr[1].plot(t[index_75m], s[index_75m], marker = '+', markersize = 15, markeredgewidth = 2.5, markerfacecolor = 'r', markeredgecolor = 'r', linestyle = 'None', label = 'Time for 75 meters acceleration: ' + str(np.round(t[index_75m],2)) + ' s')
        # axarr[1].set_title('Driven Distance vs. Time')
        # axarr[1].set_xlabel('Time [s]')
        # axarr[1].set_ylabel('Driven Distance [m]')
        # axarr[1].grid(True)
        # axarr[1].legend(numpoints=1, shadow=True, fancybox=True)
        #
        # plt.show()
|
{"hexsha": "98241ad4baa33294d1ccb68f952b1b1217195b7e", "size": 10405, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/GG_ESB.py", "max_stars_repo_name": "CB1204/LapSimulation", "max_stars_repo_head_hexsha": "7d7f7c43a6bc3db3dbf02050d939da3f17647c2c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2018-02-22T16:58:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-05T18:17:56.000Z", "max_issues_repo_path": "src/GG_ESB.py", "max_issues_repo_name": "CB1204/LapSimulation", "max_issues_repo_head_hexsha": "7d7f7c43a6bc3db3dbf02050d939da3f17647c2c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/GG_ESB.py", "max_forks_repo_name": "CB1204/LapSimulation", "max_forks_repo_head_hexsha": "7d7f7c43a6bc3db3dbf02050d939da3f17647c2c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-04-15T21:07:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-11T07:41:49.000Z", "avg_line_length": 33.2428115016, "max_line_length": 257, "alphanum_fraction": 0.5283037001, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3015}
|
[STATEMENT]
lemma the_cat_parallel_Comp_app_\<bb>F[cat_parallel_cs_simps]:
assumes "g = \<bb>" and "f \<in>\<^sub>\<circ> F"
shows "g \<circ>\<^sub>A\<^bsub>\<Up>\<^sub>C \<aa> \<bb> F\<^esub> f = f"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. g \<circ>\<^sub>A\<^bsub>\<Up>\<^sub>C \<aa> \<bb> F\<^esub> f = f
[PROOF STEP]
proof-
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. g \<circ>\<^sub>A\<^bsub>\<Up>\<^sub>C \<aa> \<bb> F\<^esub> f = f
[PROOF STEP]
from assms
[PROOF STATE]
proof (chain)
picking this:
g = \<bb>
f \<in>\<^sub>\<circ> F
[PROOF STEP]
have "[g, f]\<^sub>\<circ> \<in>\<^sub>\<circ> cat_parallel_composable \<aa> \<bb> F"
[PROOF STATE]
proof (prove)
using this:
g = \<bb>
f \<in>\<^sub>\<circ> F
goal (1 subgoal):
1. [g, f]\<^sub>\<circ> \<in>\<^sub>\<circ> cat_parallel_composable \<aa> \<bb> F
[PROOF STEP]
by (cs_concl cs_intro: cat_parallel_cs_intros)
[PROOF STATE]
proof (state)
this:
[g, f]\<^sub>\<circ> \<in>\<^sub>\<circ> cat_parallel_composable \<aa> \<bb> F
goal (1 subgoal):
1. g \<circ>\<^sub>A\<^bsub>\<Up>\<^sub>C \<aa> \<bb> F\<^esub> f = f
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
[g, f]\<^sub>\<circ> \<in>\<^sub>\<circ> cat_parallel_composable \<aa> \<bb> F
[PROOF STEP]
show "g \<circ>\<^sub>A\<^bsub>\<Up>\<^sub>C \<aa> \<bb> F\<^esub> f = f"
[PROOF STATE]
proof (prove)
using this:
[g, f]\<^sub>\<circ> \<in>\<^sub>\<circ> cat_parallel_composable \<aa> \<bb> F
goal (1 subgoal):
1. g \<circ>\<^sub>A\<^bsub>\<Up>\<^sub>C \<aa> \<bb> F\<^esub> f = f
[PROOF STEP]
unfolding the_cat_parallel_components(5) assms
[PROOF STATE]
proof (prove)
using this:
[\<bb>, f]\<^sub>\<circ> \<in>\<^sub>\<circ> cat_parallel_composable \<aa> \<bb> F
goal (1 subgoal):
1. (\<lambda>gf\<in>\<^sub>\<circ>cat_parallel_composable \<aa> \<bb> F. if gf = [\<bb>, \<bb>]\<^sub>\<circ> \<Rightarrow> \<bb> | \<exists>f. gf = [\<bb>, f]\<^sub>\<circ> \<Rightarrow> gf\<lparr>1\<^sub>\<nat>\<rparr> | \<exists>f. gf = [f, \<aa>]\<^sub>\<circ> \<Rightarrow> gf\<lparr>[]\<^sub>\<circ>\<rparr> | otherwise \<Rightarrow> \<aa>) \<lparr>\<bb>, f\<rparr>\<^sub>\<bullet> = f
[PROOF STEP]
by (auto simp: nat_omega_simps)
[PROOF STATE]
proof (state)
this:
g \<circ>\<^sub>A\<^bsub>\<Up>\<^sub>C \<aa> \<bb> F\<^esub> f = f
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 1001, "file": "CZH_Elementary_Categories_czh_ecategories_CZH_ECAT_Parallel", "length": 9}
|
import matplotlib
import numpy as np
import matplotlib.pyplot as plt

# Fixed seed so the sampled histogram is reproducible.
np.random.seed(19680)

# Normal distribution parameters.
mv = 10       # mean
sigma = 15    # standard deviation
x = mv + sigma * np.random.randn(437)

num_bins = 58

fig, ax = plt.subplots()

# Histogram normalised to a probability density (density=True replaces the
# old integer flag; same behaviour, clearer intent).
n, bins, patches = ax.hist(x, num_bins, density=True)

# Overlay the matching normal PDF.  The original expression was broken:
# it took sqrt(-0.5*pi*sigma) (NaN for sigma > 0) and squared the whole
# exponential instead of the standardised deviate.  Correct PDF:
#   f(x) = 1/(sigma*sqrt(2*pi)) * exp(-0.5*((x - mu)/sigma)^2)
y = (1 / (sigma * np.sqrt(2 * np.pi))) * np.exp(-0.5 * ((bins - mv) / sigma) ** 2)
ax.plot(bins, y, '--')
ax.set_xlabel('Smarts')
ax.set_ylabel('Probability density')
fig.tight_layout()
plt.show()
|
{"hexsha": "0ada0aac2c632f50a77aed1e3db635df27e2a7b5", "size": 439, "ext": "py", "lang": "Python", "max_stars_repo_path": "hiktogram/App/hiktogram.py", "max_stars_repo_name": "GitLuisG/Mineria_de_Datos-histogramExample", "max_stars_repo_head_hexsha": "17345f73dd29d575990732fe52851079b8077dfc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-20T01:00:23.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-20T01:00:23.000Z", "max_issues_repo_path": "hiktogram/App/hiktogram.py", "max_issues_repo_name": "GitLuisG/Mineria_de_Datos-histogramExample", "max_issues_repo_head_hexsha": "17345f73dd29d575990732fe52851079b8077dfc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hiktogram/App/hiktogram.py", "max_forks_repo_name": "GitLuisG/Mineria_de_Datos-histogramExample", "max_forks_repo_head_hexsha": "17345f73dd29d575990732fe52851079b8077dfc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.56, "max_line_length": 77, "alphanum_fraction": 0.6628701595, "include": true, "reason": "import numpy", "num_tokens": 139}
|
import tensorflow as tf
import numpy as np
import menpo.io as mio
import menpo
import scipy
import functools
from pathlib import Path
from scipy.io import loadmat
from menpo.image import Image
from menpo.shape import PointCloud
from menpo.transform import Translation
import sys
from ... import utils
ResizeMethod = tf.image.ResizeMethod
def dummy_resolver(_, *args, **kwargs):
    """Return a throwaway float32 tensor of shape [1] (placeholder output)."""
    filler = tf.constant(np.random.sample([1]).astype(np.float32))
    filler.set_shape([1])
    return filler
def dummy_seq_resolver(features, *args, **kwargs):
    """Return one throwaway float32 [1] tensor per length-3 sliding window
    over the 'frames' sequence (placeholder sequence output)."""
    window = 3
    frame_values = features['frames'].values
    frame_count = tf.shape(frame_values)[0]
    filler = tf.constant(np.random.sample([1]).astype(np.float32))
    # one filler per valid window start position
    return tf.map_fn(lambda _idx: filler,
                     tf.range(frame_count - window + 1),
                     dtype=tf.float32)
def image_resolver(features, aug=False, aug_args=tf.constant([0, 0, 1, 0, 0]), key='image', output_shape=[256, 256]):
    """Decode a JPEG image feature, optionally augment it, and centre-crop
    to `output_shape`.

    `aug_args` unpacks as (do_flip, do_rotate, do_scale, h_aug_offset,
    w_aug_offset); augmentation is scale -> rotate -> flip, then the crop
    offsets are jittered by the *_aug_offset fractions.  Returns a float32
    image in [0, 1] with static shape output_shape + [3].
    """
    # load features
    image = tf.image.decode_jpeg(features[key], channels=3)
    image_height = tf.to_int32(features['%s/height'%key])
    image_width = tf.to_int32(features['%s/width'%key])
    # formation
    image = tf.reshape(image, (image_height, image_width, 3))
    image = tf.to_float(image) / 255.
    # augmentation
    if aug:
        do_flip, do_rotate, do_scale, h_aug_offset, w_aug_offset = tf.unstack(aug_args)
        # scale
        image_height = tf.to_int32(tf.to_float(image_height) * do_scale)
        image_width = tf.to_int32(tf.to_float(image_width) * do_scale)
        image = tf.image.resize_images(
            image,
            tf.stack([image_height, image_width]),
            method=ResizeMethod.BILINEAR
        )
        # rotate
        image = tf.contrib.image.rotate(image, do_rotate)
        # flip
        image = tf.cond(
            do_flip > 0.5,
            lambda: tf.image.flip_left_right(image),
            lambda: image
        )
    else:
        h_aug_offset = 0
        w_aug_offset = 0
    # crop to output_shape (centre crop plus the augmentation jitter)
    target_h = tf.to_int32(output_shape[0])
    target_w = tf.to_int32(output_shape[1])
    offset_h = tf.to_int32((image_height - target_h) / 2)
    offset_w = tf.to_int32((image_width - target_w) / 2)
    offset_h = offset_h + tf.to_int32(tf.to_float(offset_h) * h_aug_offset)
    offset_w = offset_w + tf.to_int32(tf.to_float(offset_w) * w_aug_offset)
    image = tf.image.crop_to_bounding_box(
        image, offset_h, offset_w, target_h, target_w)
    # shape definition
    image.set_shape(output_shape + [3])
    return image
def uvxyz_resolver(features, aug=False, aug_args=tf.constant([0, 0, 1, 0, 0]), dtype=tf.float32):
    """Decode a per-pixel UVXYZ map stored as raw bytes under 'uvxyz'.

    `aug`/`aug_args` are accepted for interface compatibility with the
    other resolvers in this module but no augmentation is applied here.
    Returns a tensor with static shape [256, 256, 3].
    """
    # load features
    height = tf.to_int32(features['uvxyz/height'])
    width = tf.to_int32(features['uvxyz/width'])
    uvxyz = tf.decode_raw(features['uvxyz'], dtype)
    uvxyz = tf.reshape(uvxyz, [height, width, -1])
    # NOTE: the original also decoded 'uvxyz/mask' into a local that was
    # never used; that dead graph op has been removed.
    # shape definition (resolvers in this module emit fixed 256x256 maps)
    uvxyz.set_shape([256, 256, 3])
    return uvxyz
def heatmap_resolver(features, aug=False, aug_args=tf.constant([0, 0, 1, 0, 0]), n_lms=16, flip_transformation=None, key='landmarks', output_shape=[256, 256]):
    """Render ground-truth landmarks into per-landmark Gaussian heatmaps,
    optionally augmented consistently with `image_resolver`.

    `flip_transformation` is the channel permutation applied after a
    horizontal flip (e.g. swapping left/right joints); the default is the
    identity mapping.  Returns a float tensor of static shape
    output_shape + [n_lms].
    """
    if flip_transformation is None:
        flip_transformation = list(range(n_lms))
    # load features
    n_landmarks = tf.to_int32(features['%s/count'%key])
    gt_lms = tf.decode_raw(features[key], tf.float32)
    # NOTE(review): the membership test checks the bare 'visible' key but
    # the decode reads '%s/visible' % key — confirm which key the
    # serializer actually writes.
    if 'visible' in features:
        visible = tf.to_int32(tf.decode_raw(features['%s/visible'%key], tf.int64))
    else:
        visible = tf.range(n_landmarks)
    image_height = tf.to_int32(output_shape[0])
    image_width = tf.to_int32(output_shape[1])
    # formation: rasterise (y, x) landmark pairs into one heatmap channel each
    gt_lms = tf.reshape(gt_lms, (n_landmarks, 2))
    gt_heatmap = utils.tf_lms_to_heatmap(
        gt_lms, image_height, image_width, n_landmarks, visible)
    gt_heatmap = tf.transpose(gt_heatmap, perm=[1, 2, 0])
    # augmentation (same scale -> rotate -> flip order as image_resolver)
    if aug:
        do_flip, do_rotate, do_scale, h_aug_offset, w_aug_offset = tf.unstack(aug_args)
        # scale
        image_height = tf.to_int32(tf.to_float(image_height) * do_scale)
        image_width = tf.to_int32(tf.to_float(image_width) * do_scale)
        gt_heatmap = tf.image.resize_images(
            gt_heatmap,
            tf.stack([image_height, image_width]),
            method=ResizeMethod.BILINEAR)
        # rotate
        gt_heatmap = tf.contrib.image.rotate(gt_heatmap, do_rotate)
        # flip: mirror spatially, then permute channels so left/right
        # landmark identities stay correct
        def flip_fn(gt_heatmap=gt_heatmap):
            gt_heatmap = tf.image.flip_left_right(gt_heatmap)
            flip_hm_list = []
            for idx in flip_transformation:
                flip_hm_list.append(gt_heatmap[:, :, idx])
            gt_heatmap = tf.stack(flip_hm_list, axis=2)
            return gt_heatmap
        gt_heatmap = tf.cond(
            do_flip > 0.5,
            flip_fn,
            lambda: gt_heatmap
        )
    else:
        h_aug_offset = 0
        w_aug_offset = 0
    # crop to output_shape (centre crop plus augmentation jitter)
    target_h = tf.to_int32(output_shape[0])
    target_w = tf.to_int32(output_shape[1])
    offset_h = tf.to_int32((image_height - target_h) / 2)
    offset_w = tf.to_int32((image_width - target_w) / 2)
    offset_h = offset_h + tf.to_int32(tf.to_float(offset_h) * h_aug_offset)
    offset_w = offset_w + tf.to_int32(tf.to_float(offset_w) * w_aug_offset)
    gt_heatmap = tf.image.crop_to_bounding_box(
        gt_heatmap, offset_h, offset_w, target_h, target_w)
    # shape definition
    gt_heatmap.set_shape(output_shape + [n_lms])
    return gt_heatmap
# 16-channel human-pose variant: the channel permutation re-pairs
# left/right joints after a horizontal flip.
heatmap_resolver_pose_16 = functools.partial(
    heatmap_resolver,
    n_lms=16,
    flip_transformation=[5, 4, 3, 2, 1, 0, 6, 7, 8, 9, 15, 14, 13, 12, 11, 10]
)
# 68-landmark face variant; uses heatmap_resolver's default identity flip
# mapping.
heatmap_resolver_face = functools.partial(
    heatmap_resolver,
    n_lms=68
)
def iuv_resolver(features, aug=False, aug_args=tf.constant([0, 0, 1, 0, 0]),
                 n_parts=26, from_image=False, dtype=tf.int64):
    """Decode an IUV (part-index + UV coordinates) map and expand it into a
    [256, 256, n_parts*3] tensor: one-hot part mask followed by the
    per-part masked U and V channels.

    The one-hot block is shifted by -1 before augmentation and +1 after so
    that rotation/resize padding (zeros) maps to the background part.
    """
    # load features
    image_height = tf.to_int32(features['height'])
    image_width = tf.to_int32(features['width'])
    iuv_height = tf.to_int32(features['iuv_height'])
    # FIX: the width was previously decoded from 'iuv_height' (copy/paste
    # error), which is only correct for square IUV maps.  Assumes the
    # serializer writes an 'iuv_width' feature alongside 'iuv_height' —
    # TODO confirm against the record writer.
    iuv_width = tf.to_int32(features['iuv_width'])
    if from_image:
        iuv = tf.image.decode_jpeg(features['iuv'], channels=3)
    else:
        iuv = tf.decode_raw(features['iuv'], dtype)
    iuv = tf.reshape(iuv, [iuv_height, iuv_width, -1])
    iuv_mask = tf.to_int32(iuv[..., :1])   # part index per pixel
    uv = tf.to_float(iuv[..., 1:])         # raw U, V channels
    # one hot mask
    iuv_one_hot = tf.contrib.layers.one_hot_encoding(tf.reshape(iuv_mask, [-1]), n_parts)
    iuv_one_hot = tf.reshape(iuv_one_hot, [iuv_height, iuv_width, n_parts])
    # normalised uv: U/V restricted to each part's support
    u = iuv_one_hot * uv[..., 0][..., None]
    v = iuv_one_hot * uv[..., 1][..., None]
    iuv = tf.concat([iuv_one_hot, u, v], 2)
    # shift background channel so augmentation padding lands on background
    iuv = tf.concat([
        iuv[..., :1] - 1,
        iuv[..., 1:]], 2)
    # augmentation (nearest-neighbour resize keeps the mask categorical)
    if aug:
        do_flip, do_rotate, do_scale, h_aug_offset, w_aug_offset = tf.unstack(aug_args)
        # scale
        image_height = tf.to_int32(tf.to_float(image_height) * do_scale)
        image_width = tf.to_int32(tf.to_float(image_width) * do_scale)
        iuv = tf.image.resize_images(
            iuv,
            tf.stack([image_height, image_width]),
            method=ResizeMethod.NEAREST_NEIGHBOR
        )
        # rotate
        iuv = tf.contrib.image.rotate(iuv, do_rotate)
        # flip
        iuv = tf.cond(
            do_flip > 0.5,
            lambda: tf.image.flip_left_right(iuv),
            lambda: iuv
        )
    else:
        h_aug_offset = 0
        w_aug_offset = 0
    # undo the background shift
    iuv = tf.concat([
        iuv[..., :1] + 1,
        iuv[..., 1:]], 2)
    # crop to 256 * 256 (centre crop plus augmentation jitter)
    target_h = tf.to_int32(256)
    target_w = tf.to_int32(256)
    offset_h = tf.to_int32((image_height - target_h) / 2)
    offset_w = tf.to_int32((image_width - target_w) / 2)
    offset_h = offset_h + tf.to_int32(tf.to_float(offset_h) * h_aug_offset)
    offset_w = offset_w + tf.to_int32(tf.to_float(offset_w) * w_aug_offset)
    iuv = tf.image.crop_to_bounding_box(
        iuv, offset_h, offset_w, target_h, target_w)
    # shape definition
    iuv.set_shape([256, 256, n_parts * 3])
    return iuv
def image_file_resolver(content, aug=False, aug_args=tf.constant([0, 0, 1, 0, 0])):
    """Decode raw image-file bytes, optionally augment, and centre-crop to
    256x256.

    Grayscale inputs are promoted to 3 channels; pixel values are scaled
    to [0, 1].  `aug_args` unpacks as (do_flip, do_rotate, do_scale,
    h_aug_offset, w_aug_offset), matching `image_resolver`.
    """
    image = tf.image.decode_jpeg(content)
    image_height = tf.shape(image)[0]
    image_width = tf.shape(image)[1]
    image_channels = tf.shape(image)[2]
    # promote single-channel (grayscale) images to RGB
    image = tf.cond(image_channels > 1,
                    lambda: image,
                    lambda: tf.image.grayscale_to_rgb(image))
    image = tf.to_float(image) / 255.
    # augmentation
    if aug:
        do_flip, do_rotate, do_scale, h_aug_offset, w_aug_offset = tf.unstack(aug_args)
        # scale
        image_height = tf.to_int32(tf.to_float(image_height) * do_scale)
        image_width = tf.to_int32(tf.to_float(image_width) * do_scale)
        image = tf.image.resize_images(
            image,
            tf.stack([image_height, image_width]),
            method=ResizeMethod.BILINEAR
        )
        # rotate
        image = tf.contrib.image.rotate(image, do_rotate)
        # flip
        image = tf.cond(
            do_flip > 0.5,
            lambda: tf.image.flip_left_right(image),
            lambda: image
        )
    else:
        h_aug_offset = 0
        w_aug_offset = 0
    # crop to 256 * 256 (centre crop plus augmentation jitter)
    target_h = tf.to_int32(256)
    target_w = tf.to_int32(256)
    offset_h = tf.to_int32((image_height - target_h) / 2)
    offset_w = tf.to_int32((image_width - target_w) / 2)
    offset_h = offset_h + tf.to_int32(tf.to_float(offset_h) * h_aug_offset)
    offset_w = offset_w + tf.to_int32(tf.to_float(offset_w) * w_aug_offset)
    image = tf.image.crop_to_bounding_box(
        image, offset_h, offset_w, target_h, target_w)
    # shape definition
    image.set_shape([256, 256, 3])
    return image
def image_bbox_resolver(features, aug=False, aug_args=tf.constant([0, 0, 1, 0, 0]), crop_size=321, final_size=256):
    """Decode an image, crop it to a landmark-derived bounding box resized
    to `crop_size`, optionally augment, and centre-crop to
    `final_size` x `final_size`.

    FIX: `final_size` was previously accepted but ignored (the final crop
    hard-coded 256); it is now honoured.  The default value preserves the
    old behaviour.  Assumes 'rlms' landmark coordinates are normalised to
    [0, 1] (as required by tf.image.crop_and_resize boxes) — TODO confirm.
    """
    # load features
    image = tf.image.decode_jpeg(features['image'], channels=3)
    image_height = tf.to_int32(features['height'])
    image_width = tf.to_int32(features['width'])
    n_landmarks = tf.to_int32(features['n_landmarks'])
    visible = tf.to_int32(tf.decode_raw(features['marked'], tf.int64))
    gt_lms = tf.decode_raw(features['rlms'], tf.float32)
    gt_lms = tf.reshape(gt_lms, [n_landmarks, 2])
    gt_lms_v = tf.gather(gt_lms, visible)
    # formation
    image = tf.reshape(image, (image_height, image_width, 3))
    image = tf.to_float(image) / 255.
    # tight box around the visible landmarks, inflated 1.5x about its centre
    bbox = tf.concat([
        tf.reduce_min(gt_lms_v, axis=0),
        tf.reduce_max(gt_lms_v, axis=0)
    ], 0)
    bbox = tf.reshape(bbox, [2, 2])
    centre = tf.reduce_mean(bbox, axis=0)
    bbox = (bbox - centre) * 1.5 + centre
    bbox = tf.reshape(bbox, [4])
    # clamp to the normalised [0, 1] image frame
    bbox = tf.clip_by_value(bbox, 0., 1.)
    # re-express landmarks relative to the crop (kept for parity with
    # heatmap_bbox_resolver; not used further here)
    gt_lms = (gt_lms - bbox[:2]) / (bbox[2:] - bbox[:2])
    image = tf.image.crop_and_resize(image[None, ...], bbox[None, ...], [
        0], [crop_size, crop_size])[0]
    image_height = crop_size
    image_width = crop_size
    # augmentation
    if aug:
        do_flip, do_rotate, do_scale, h_aug_offset, w_aug_offset = tf.unstack(aug_args)
        # scale
        image_height = tf.to_int32(tf.to_float(image_height) * do_scale)
        image_width = tf.to_int32(tf.to_float(image_width) * do_scale)
        image = tf.image.resize_images(
            image,
            tf.stack([image_height, image_width]),
            method=ResizeMethod.BILINEAR
        )
        # rotate
        image = tf.contrib.image.rotate(image, do_rotate)
        # flip
        image = tf.cond(
            do_flip > 0.5,
            lambda: tf.image.flip_left_right(image),
            lambda: image
        )
    else:
        h_aug_offset = 0
        w_aug_offset = 0
    # crop to final_size x final_size (centre crop plus augmentation jitter)
    target_h = tf.to_int32(final_size)
    target_w = tf.to_int32(final_size)
    offset_h = tf.to_int32((image_height - target_h) / 2)
    offset_w = tf.to_int32((image_width - target_w) / 2)
    offset_h = offset_h + tf.to_int32(tf.to_float(offset_h) * h_aug_offset)
    offset_w = offset_w + tf.to_int32(tf.to_float(offset_w) * w_aug_offset)
    image = tf.image.crop_to_bounding_box(
        image, offset_h, offset_w, target_h, target_w)
    # shape definition
    image.set_shape([final_size, final_size, 3])
    return image
def heatmap_bbox_resolver(features, aug=False, aug_args=tf.constant([0, 0, 1, 0, 0]), crop_size=321, final_size=256):
    """Render landmark heatmaps inside the same landmark-derived bounding
    box used by `image_bbox_resolver`, augment, and centre-crop to
    `final_size` x `final_size` (16 channels, pose layout).

    FIX: `final_size` was previously accepted but ignored (the final crop
    hard-coded 256); it is now honoured.  The default value preserves the
    old behaviour.  Assumes 'rlms' landmark coordinates are normalised to
    [0, 1] — TODO confirm.
    """
    # load features
    image = tf.image.decode_jpeg(features['image'], channels=3)
    image_height = tf.to_int32(features['height'])
    image_width = tf.to_int32(features['width'])
    n_landmarks = tf.to_int32(features['n_landmarks'])
    visible = tf.to_int32(tf.decode_raw(features['marked'], tf.int64))
    gt_lms = tf.decode_raw(features['rlms'], tf.float32)
    gt_lms = tf.reshape(gt_lms, [n_landmarks, 2])
    gt_lms_v = tf.gather(gt_lms, visible)
    # formation
    image = tf.reshape(image, (image_height, image_width, 3))
    image = tf.to_float(image) / 255.
    # tight box around the visible landmarks, inflated 1.5x about its centre
    bbox = tf.concat([
        tf.reduce_min(gt_lms_v, axis=0),
        tf.reduce_max(gt_lms_v, axis=0)
    ], 0)
    bbox = tf.reshape(bbox, [2, 2])
    centre = tf.reduce_mean(bbox, axis=0)
    bbox = (bbox - centre) * 1.5 + centre
    bbox = tf.reshape(bbox, [4])
    # clamp to the normalised [0, 1] image frame
    bbox = tf.clip_by_value(bbox, 0., 1.)
    # landmarks relative to the crop, then rasterised at crop_size
    gt_lms = (gt_lms - bbox[:2]) / (bbox[2:] - bbox[:2])
    image = tf.image.crop_and_resize(image[None, ...], bbox[None, ...], [
        0], [crop_size, crop_size])[0]
    gt_heatmap = utils.tf_lms_to_heatmap(
        gt_lms * [crop_size, crop_size], crop_size, crop_size, n_landmarks, visible, sigma=7)
    gt_heatmap = tf.transpose(gt_heatmap, perm=[1, 2, 0])
    image_height = crop_size
    image_width = crop_size
    # augmentation
    if aug:
        do_flip, do_rotate, do_scale, h_aug_offset, w_aug_offset = tf.unstack(aug_args)
        # scale
        image_height = tf.to_int32(tf.to_float(image_height) * do_scale)
        image_width = tf.to_int32(tf.to_float(image_width) * do_scale)
        gt_heatmap = tf.image.resize_images(
            gt_heatmap,
            tf.stack([image_height, image_width]),
            method=ResizeMethod.BILINEAR)
        # rotate
        gt_heatmap = tf.contrib.image.rotate(gt_heatmap, do_rotate)
        # flip: mirror spatially, then permute channels so left/right
        # joints keep their identities (16-joint pose layout)
        def flip_fn(gt_heatmap=gt_heatmap):
            gt_heatmap = tf.image.flip_left_right(gt_heatmap)
            flip_hm_list = []
            for idx in [5, 4, 3, 2, 1, 0, 6, 7, 8, 9, 15, 14, 13, 12, 11, 10]:
                flip_hm_list.append(gt_heatmap[:, :, idx])
            gt_heatmap = tf.stack(flip_hm_list, axis=2)
            return gt_heatmap
        gt_heatmap = tf.cond(
            do_flip > 0.5,
            flip_fn,
            lambda: gt_heatmap
        )
    else:
        h_aug_offset = 0
        w_aug_offset = 0
    # crop to final_size x final_size (centre crop plus augmentation jitter)
    target_h = tf.to_int32(final_size)
    target_w = tf.to_int32(final_size)
    offset_h = tf.to_int32((image_height - target_h) / 2)
    offset_w = tf.to_int32((image_width - target_w) / 2)
    offset_h = offset_h + tf.to_int32(tf.to_float(offset_h) * h_aug_offset)
    offset_w = offset_w + tf.to_int32(tf.to_float(offset_w) * w_aug_offset)
    gt_heatmap = tf.image.crop_to_bounding_box(
        gt_heatmap, offset_h, offset_w, target_h, target_w)
    # shape definition
    gt_heatmap.set_shape([final_size, final_size, 16])
    return gt_heatmap
def cyclegan_image_file_resolver(content, aug=False, aug_args=tf.constant([0, 0, 1])):
    """Decode image bytes for CycleGAN training: scale to [-1, 1], then
    apply the fixed random-flip / resize-to-286 / random-crop-256 pipeline.

    `aug` and `aug_args` are accepted for interface compatibility; the
    augmentation here is always applied and ignores them.
    """
    img = tf.image.decode_jpeg(content)
    n_channels = tf.shape(img)[2]
    # promote grayscale inputs to RGB
    img = tf.cond(n_channels > 1,
                  lambda: img,
                  lambda: tf.image.grayscale_to_rgb(img))
    # map [0, 255] -> [-1, 1]
    img = tf.to_float(img) / 255. * 2. - 1.
    # fixed augmentation pipeline
    img = tf.image.random_flip_left_right(img)
    img = tf.image.resize_images(img, [286, 286])
    img = tf.random_crop(img, [256, 256, 3])
    # static shape for downstream graph construction
    img.set_shape([256, 256, 3])
    return img
def paired_image_file_resolver(content, aug=False, aug_args=tf.constant([0, 0, 1])):
    """Decode a side-by-side paired image (A|B along the width) and stack
    the two halves channel-wise into a [256, 256, 6] tensor in [-1, 1].

    `aug`/`aug_args` are accepted for interface compatibility; the random
    flip / resize / crop augmentation is always applied.
    """
    image = tf.image.decode_jpeg(content)
    image_height = tf.shape(image)[0]
    image_width = tf.shape(image)[1]
    image_channels = tf.shape(image)[2]
    # promote grayscale inputs to RGB
    image = tf.cond(image_channels > 1,
                    lambda: image,
                    lambda: tf.image.grayscale_to_rgb(image))
    image_channels = 3
    # split the width into (left, right) halves and fold the pair into the
    # channel axis: [H, W, C] -> [H, 2, W/2, C] -> [H, W/2, 2, C] -> [H, W/2, 2C]
    image = tf.reshape(
        tf.transpose(
            tf.reshape(
                image, [image_height, 2, image_width // 2, image_channels]
            ), [0, 2, 1, 3]
        ), [image_height, image_width // 2, image_channels * 2])
    # map [0, 255] -> [-1, 1]
    image = tf.to_float(image) / 255. * 2. - 1.
    # augmentation
    image = tf.image.random_flip_left_right(image)
    image = tf.image.resize_images(image, [286, 286])
    image = tf.random_crop(image, [256, 256, image_channels * 2])
    # shape definition
    image.set_shape([256, 256, 6])
    return image
def paired_seq_resolver(features, aug=False, aug_args=tf.constant([0, 0, 1, 0, 0])):
    """Build sliding windows of (frame, inverted drawing) pairs.

    Frames and drawings are decoded, concatenated channel-wise (drawings
    are inverted: 1 - value), centre-cropped to 256, split into overlapping
    windows of 3 consecutive steps, and scaled to [-1, 1].  Output shape:
    [n_windows, 3, 256, 256, 6].  `aug`/`aug_args` are unused here.
    """
    frames = features['frames'].values
    drawings = features['drawings'].values
    n_data = tf.shape(frames)[0]
    sliding_window = 3
    # formating: decode and normalise to [0, 1]; invert the drawings
    frames = tf.to_float(tf.map_fn(tf.image.decode_jpeg,
                                   frames, dtype=tf.uint8)) / 255.
    drawings = 1 - \
        tf.to_float(tf.map_fn(tf.image.decode_jpeg,
                              drawings, dtype=tf.uint8)) / 255.
    paired_squence = tf.concat([frames, drawings], -1)
    image_height = tf.shape(frames)[1]
    image_width = tf.shape(frames)[2]
    # centre crop 256
    target_h = tf.to_int32(256)
    target_w = tf.to_int32(256)
    offset_h = tf.to_int32((image_height - target_h) / 2)
    offset_w = tf.to_int32((image_width - target_w) / 2)
    paired_squence = tf.image.crop_to_bounding_box(
        paired_squence, offset_h, offset_w, target_h, target_w)
    # build sliding window (one window per valid start index)
    range_indexes = tf.range(0, n_data - sliding_window + 1)
    sequences = tf.map_fn(
        lambda x: paired_squence[x:x+sliding_window], range_indexes, dtype=tf.float32)
    # map [0, 1] -> [-1, 1]
    sequences = sequences * 2 - 1
    sequences.set_shape([None, sliding_window, None, None, 6])
    return sequences
def paired_masked_seq_resolver(features, aug=False, aug_args=tf.constant([0, 0, 1, 0, 0])):
    """Like `paired_seq_resolver` but additionally concatenates a raw
    384x384x3 mask channel block per step, giving windows of shape
    [n_windows, 3, 256, 256, 9] in [-1, 1].  `aug`/`aug_args` are unused.
    """
    frames = features['frames'].values
    drawings = features['drawings'].values
    masks = features['masks']
    n_data = tf.shape(frames)[0]
    sliding_window = 3
    # formating: decode and normalise to [0, 1]; invert the drawings
    frames = tf.to_float(tf.map_fn(tf.image.decode_jpeg,
                                   frames, dtype=tf.uint8)) / 255.
    drawings = 1 - \
        tf.to_float(tf.map_fn(tf.image.decode_jpeg,
                              drawings, dtype=tf.uint8)) / 255.
    # masks are stored raw at a fixed 384x384x3 per step; remapped from
    # their stored range to [0, 1] — assumes values are in {-1, 1} or
    # [-1, 1] before the shift, TODO confirm against the serializer
    masks = tf.reshape(tf.to_float(tf.decode_raw(
        masks, tf.uint8)), [n_data, 384, 384, 3])
    masks = (masks + 1.) / 2.
    image_height = tf.shape(frames)[1]
    image_width = tf.shape(frames)[2]
    # merge by channels
    paired_squence = tf.concat([frames, drawings, masks], -1)
    # centre crop 256
    target_h = tf.to_int32(256)
    target_w = tf.to_int32(256)
    offset_h = tf.to_int32((image_height - target_h) / 2)
    offset_w = tf.to_int32((image_width - target_w) / 2)
    paired_squence = tf.image.crop_to_bounding_box(
        paired_squence, offset_h, offset_w, target_h, target_w)
    # build sliding window (one window per valid start index)
    range_indexes = tf.range(0, n_data - sliding_window + 1)
    sequences = tf.map_fn(
        lambda x: paired_squence[x:x+sliding_window], range_indexes, dtype=tf.float32)
    # map [0, 1] -> [-1, 1]
    sequences = sequences * 2 - 1
    sequences.set_shape([None, sliding_window, None, None, 9])
    return sequences
def decode_jpeg(feature, *args, **kargs):
    """Decode the raw JPEG bytes stored under the 'image' key."""
    raw_bytes = feature['image']
    return tf.image.decode_jpeg(raw_bytes)
def decode_mask(feature, *args, **kargs):
    """Decode the raw PNG bytes stored under the 'mask' key."""
    raw_bytes = feature['mask']
    return tf.image.decode_png(raw_bytes)
def array_resolver(features, aug=False, aug_args=tf.constant([0, 0, 1, 0, 0]), dtype=tf.float32, key='data', input_shape=None):
    """Decode a flat 1-D array stored as raw bytes under `key`.

    The element count comes from '<key>/size'.  `aug`/`aug_args` are
    accepted for interface compatibility and ignored.
    """
    n_elements = tf.to_int32(features['%s/size' % key])
    flat = tf.reshape(tf.decode_raw(features[key], dtype), [n_elements])
    if input_shape:
        flat.set_shape(input_shape)
    return flat
def matrix_resolver(features, aug=False, aug_args=tf.constant([0, 0, 1, 0, 0]), dtype=tf.float32, key='data', input_shape=None):
    """Decode a 2-D matrix stored as raw bytes under `key`.

    Dimensions come from '<key>/height' and '<key>/width'.  `aug`/`aug_args`
    are accepted for interface compatibility and ignored.
    """
    n_rows = tf.to_int32(features['%s/height' % key])
    n_cols = tf.to_int32(features['%s/width' % key])
    mat = tf.reshape(tf.decode_raw(features[key], dtype), [n_rows, n_cols])
    if input_shape:
        mat.set_shape(input_shape)
    return mat
def tensor_resolver(features, aug=False, aug_args=tf.constant([0, 0, 1, 0, 0]), dtype=tf.float32, key='data', input_shape=None):
    """Decode a rank-3 tensor stored as raw bytes under `key`.

    Dimensions come from '<key>/height', '<key>/width' and '<key>/depth'.
    `aug`/`aug_args` are accepted for interface compatibility and ignored.
    """
    n_rows = tf.to_int32(features['%s/height' % key])
    n_cols = tf.to_int32(features['%s/width' % key])
    n_chan = tf.to_int32(features['%s/depth' % key])
    cube = tf.reshape(tf.decode_raw(features[key], dtype),
                      [n_rows, n_cols, n_chan])
    if input_shape:
        cube.set_shape(input_shape)
    return cube
def label_resolver(features, aug=False, aug_args=tf.constant([0, 0, 1, 0, 0]), dtype=tf.float32, key='data', n_class=None, input_shape=None):
    """Decode raw label bytes from '<key>' and one-hot encode over `n_class` classes.

    The label count is read from '<key>/size'; labels are cast to int64 before
    encoding. `input_shape`, when given, pins the static shape of the labels.
    """
    n_labels = tf.to_int32(features['%s/size' % key])
    labels = tf.decode_raw(features[key], dtype)
    labels = tf.reshape(labels, [n_labels])
    labels = tf.cast(labels, tf.int64)
    # pin the static shape when the caller knows it up front
    if input_shape:
        labels.set_shape(input_shape)
    return tf.contrib.layers.one_hot_encoding(labels, n_class)
# ---------------------------------------------------------------------------
# Resolver bundles: each dict maps an output key -> the feature-decoding
# function used to produce that tensor when building a dataset pipeline.
# ---------------------------------------------------------------------------

# JPEG image with a PNG segmentation mask.
ResolveMaskedImage = {
    'inputs': decode_jpeg,
    'masks': decode_mask
}

# Masked paired frame sequences (plus a dummy stream).
ResolveMaskedPairedSeq = {
    'inputs': paired_masked_seq_resolver,
    'dummy': dummy_seq_resolver
}

# Paired frame sequences without masks.
ResolvePairedSeq = {
    'inputs': paired_seq_resolver,
    'dummy': dummy_seq_resolver
}

# Single paired image (e.g. pix2pix-style training pairs).
ResolverPairedImage = {
    'inputs': paired_image_file_resolver,
    'dummy': dummy_resolver
}

# Unpaired single images (CycleGAN-style).
ResolverImage = {
    'inputs': cyclegan_image_file_resolver,
    'dummy': dummy_resolver
}

# Image + 16-channel pose heatmap.
ResolverHMPose = {
    'inputs': image_resolver,
    'heatmap': heatmap_resolver_pose_16,
}

# Bounding-box cropped image + matching heatmap.
ResolverBBoxPose = {
    'inputs': image_bbox_resolver,
    'heatmap': heatmap_bbox_resolver
}

# Image + pose heatmap + IUV (DensePose-style) map.
ResolverIUVHM = {
    'inputs': image_resolver,
    'heatmap': heatmap_resolver_pose_16,
    'iuv': iuv_resolver
}

# Image + IUV map decoded from raw uint8 bytes rather than an image file.
ResolverIUV = {
    'inputs': image_resolver,
    'iuv': functools.partial(iuv_resolver, from_image=False, dtype=tf.uint8)
}

# Image + face-landmark heatmap.
ResolverHMFace = {
    'inputs': image_resolver,
    'heatmap': heatmap_resolver_face,
}

# Image + face heatmap + UV map.
ResolverIUVFace = {
    'inputs': image_resolver,
    'heatmap': heatmap_resolver_face,
    'uv': iuv_resolver
}
|
{"hexsha": "5a80162fdb1a0f435a3536c110279e2307b3c58a", "size": 23182, "ext": "py", "lang": "Python", "max_stars_repo_path": "deepmachine/data/provider/resolvers.py", "max_stars_repo_name": "yuxiang-zhou/deepmachine", "max_stars_repo_head_hexsha": "b8a64354f7d37664172ef79a66b1fc0a9fa0f493", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-09-04T11:12:11.000Z", "max_stars_repo_stars_event_max_datetime": "2018-09-04T11:12:11.000Z", "max_issues_repo_path": "deepmachine/data/provider/resolvers.py", "max_issues_repo_name": "yuxiang-zhou/deepmachine", "max_issues_repo_head_hexsha": "b8a64354f7d37664172ef79a66b1fc0a9fa0f493", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "deepmachine/data/provider/resolvers.py", "max_forks_repo_name": "yuxiang-zhou/deepmachine", "max_forks_repo_head_hexsha": "b8a64354f7d37664172ef79a66b1fc0a9fa0f493", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.1456436931, "max_line_length": 159, "alphanum_fraction": 0.6339832629, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 6450}
|
import serial
import time
import numpy as np
import pandas as pd
# --- Serial data logger: sample a fixed number of readings and dump to CSV ---
read_char_num = 7  # width of one serial frame in characters; don't modify this

# user-configurable acquisition settings
no_of_data_points = 20
delay_time_sec = 1
serial_port = "/dev/cu.usbmodem14101"
serial_baud = 9600

sensor_data, data_points = [], []  # collected readings and their sample indices

ser = serial.Serial(serial_port, serial_baud)
try:
    for i in range(0, no_of_data_points):
        # each frame is `read_char_num` chars holding one decimal reading
        serial_data = float(ser.read(read_char_num).decode().strip())
        print(f"The data at {i} is {serial_data}")
        sensor_data.append(serial_data)
        data_points.append(i)
        time.sleep(delay_time_sec)  # take one second delay between samples
finally:
    # BUG FIX: the port was never closed; release it even if a read/parse fails
    ser.close()
print("\n")

data_avg = round(np.average(sensor_data), 2)  # summary statistic (currently unused)

# persist the samples as a two-column CSV
sensor_data_frame = {"S No.": data_points, "Sensor_data": sensor_data}
df_w = pd.DataFrame(sensor_data_frame, columns=["S No.", "Sensor_data"])
df_w.to_csv("results.csv", index=False)
|
{"hexsha": "b8a36df063fc6523580cde0c49d42dc4f247cd94", "size": 944, "ext": "py", "lang": "Python", "max_stars_repo_path": "module2/read_serial_data.py", "max_stars_repo_name": "debjyotich/iot_class", "max_stars_repo_head_hexsha": "dca1287bcf86deac743e7a6333576a5e95f6b58c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "module2/read_serial_data.py", "max_issues_repo_name": "debjyotich/iot_class", "max_issues_repo_head_hexsha": "dca1287bcf86deac743e7a6333576a5e95f6b58c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "module2/read_serial_data.py", "max_forks_repo_name": "debjyotich/iot_class", "max_forks_repo_head_hexsha": "dca1287bcf86deac743e7a6333576a5e95f6b58c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.7647058824, "max_line_length": 72, "alphanum_fraction": 0.7203389831, "include": true, "reason": "import numpy", "num_tokens": 257}
|
'''
Created on Feb 21, 2017
@author: Ba1
'''
import argparse
import sys
from os import listdir, getcwd, makedirs
from os.path import join, isfile, isdir, basename, dirname, abspath, expanduser
import subprocess
from Bio import SeqIO
import pandas as pd
import re
from scripts.pyGFA import parseGFA
from scripts.coords2gff3 import Feature
import networkx as nx
import graphviz as gv
import functools
def parse_args(args):
    """Build the CLI parser and parse `args` (e.g. ``sys.argv[1:]``).

    Returns:
        (parser, namespace): the parser is returned too so callers can print
        help or error messages on demand.
    """
    parser = argparse.ArgumentParser(description="Multi-Reference-Genome-Assisted Scaffolding of target assembly")
    parser.add_argument('-i', '--contigs_filepath', dest='contigs_path', metavar='</path/to/contigfile>', type=argparse.FileType('rt'), required=True,
                        help='path to an contigs fasta file')
    parser.add_argument('-rg', '--reference-genomes-directory', dest='refgenomesdir', metavar='<path>', type=str,
                        default='./references',
                        help='specify a directory containing the reference genomes (multi-) fasta nucleotide sequence files')
    parser.add_argument('-spdir', '--assembly-graph-dir', dest='assemblygraph', metavar='<path>', type=str,
                        help='specify the spades output directory to parse the assembly graph file in GFA 1.0 format')
    parser.add_argument('-o', '--outdir', dest='outdirpath', metavar='</path/to/out/>', type=str,
                        default=getcwd(),
                        help='path to output file prefix')
    parser.add_argument('-DEBUGexclude', dest='exclude', metavar='<str>', type=str, default=False, nargs='+',
                        help='References with sequence headers containing any of the specified string will get deleted.')
    # BUG FIX: `metavar` is not accepted by store_const actions on Python < 3.11
    # (TypeError at parser-construction time) and is meaningless for a flag.
    parser.add_argument('-f', '--force', dest='force_flag', action='store_const', const=1, default=0,
                        help='if set, the files in the specified output directory will be overwritten')
    # BUG FIX: honour the `args` parameter instead of implicitly re-reading sys.argv
    return parser, parser.parse_args(args)
def getRefFilePaths(refgenomesdir):
    """Check reference directory path and return list of genome files.

    Accepts files with suffix .fna, .fa or .fasta.

    Raises:
        AssertionError: if `refgenomesdir` is not a directory, or contains no
            FASTA files.
    """
    # BUG FIX: both assertion messages had '{}' placeholders that were never
    # filled in with .format(), so failures printed the raw template.
    assert isdir(refgenomesdir), "{} is not a directory".format(refgenomesdir)
    accepted_suffices = (".fna", ".fa", ".fasta")
    reference_genomes = []
    for f in listdir(refgenomesdir):
        # compare the text after the last '.' against the accepted suffixes
        if isfile(join(refgenomesdir, f)) and '.' + f.rsplit('.', 1)[-1] in accepted_suffices:
            reference_genomes.append(join(refgenomesdir, f))
    assert len(reference_genomes) > 0, \
        "{} does not contain any sequence files in FASTA Format. Accepted files suffixes: {}".format(refgenomesdir, accepted_suffices)
    return reference_genomes
def parseContigs(contigs_filepath, per_base_coverages=False, pattern=False):
    """Parse the information of the nodes, create a statistics frame with coverage
    information and indicate which contigs are considered for the scaffolding process.

    Returns:
        (recordlist, df): the raw Biopython records and a DataFrame indexed by
        contig id with 'length', 'cov', 'used' and (when derivable) 'multipl'
        columns, where 'multipl' is the coverage multiplicity relative to the
        median coverage.
    """
    # BUG FIX: numpy was previously imported inside one loop branch only, so
    # `np` raised NameError later whenever that branch never executed.
    import numpy as np
    seqs = SeqIO.parse(open(contigs_filepath, "r"), "fasta")
    seq_vallist = []
    index_list = []
    recordlist = []
    for record in seqs:
        recordlist.append(record)  # Biopython Seq object
        m = re.findall(pattern, record.id)
        if m:
            # pattern is expected to capture (id, length, coverage) groups
            if m[0][0] in index_list:
                continue
            index_list.append(m[0][0])
            value_dict = {'length': int(m[0][1]), 'cov': float(m[0][2])}
            seq_vallist.append(value_dict)
        else:
            # fall back to the first run of digits in the header as the id
            m = re.findall(r'\d+', record.id)
            # BUG FIX: m[0] is the whole first digit run; m[0][0] kept only
            # its first character (e.g. '12' became '1').
            m = str(int(m[0]))
            if m in index_list:
                continue
            index_list.append(m)
            value_dict = {'length': int(len(record)), 'cov': np.nan}
            seq_vallist.append(value_dict)
    df = pd.DataFrame(seq_vallist, index=index_list)
    bools = df['length'] > 0
    df = df.assign(used=bools.values)
    if 'cov' in df.columns:
        try:
            cov_median = df['cov'].median()
            # median absolute deviation, used as a rounding tolerance below
            mad = abs(df['cov'] - cov_median).median()
            df.loc[:, 'multipl'] = pd.Series(df['cov'] / df['cov'].median(), index=df.index)
            for index, row in df.iterrows():
                if abs(round(row['multipl']) - row['multipl']) < 2 * mad:
                    # .at replaces DataFrame.set_value, which was removed in pandas 1.0
                    df.at[index, 'multipl'] = round(row['multipl'])
                else:
                    df.at[index, 'multipl'] = np.nan
        except Exception:
            # best-effort: multiplicity is optional when coverage is absent/degenerate
            df['multipl'] = np.nan
    return recordlist, df
def filterAndStoreContigs(recordlist, df, pattern=False, key='lr'):
    """Map records to contig ids, filter them by a minimum-length criterion,
    write the surviving contigs to 'contigs_filtered.fasta' and return
    (record_dict, filtered_records_dict, filtered_contigs_path).

    `key` may be a number (absolute minimum length) or 'lr' (keep contigs
    longer than the longest repetitive contig, i.e. multipl > 1).

    NOTE(review): relies on the module-level `args.outdirpath`; if `key` is
    neither numeric nor 'lr', `filtered_records_dict` is never assigned and
    this raises NameError — confirm callers only pass those two forms.
    """
    record_dict = dict()
    for seqrec in recordlist:
        m = re.findall(pattern, seqrec.id)
        if m:
            # id is the second '_'-separated token of the header (SPAdes style)
            record_dict[seqrec.id.split('_')[1]] = seqrec
        else:
            # fall back to the first digit run in the header
            k = re.findall(r'\d+', seqrec.id)
            record_dict[str(int(k[0]))] = seqrec
    # OUTPUT #
    filtered_contigs_path = join(args.outdirpath,'contigs_filtered.fasta')
    ##Filter by ###
    if isinstance(key, (int, float)): # absolute value
        print("Using absolute value as threshold for minimum contig size", key)
        filtered_records_dict = {rid:record for rid,record in record_dict.items() if len(record.seq) >= key}
    if key == 'lr': # longest repetitve
        repeats_df = df[df["multipl"] > 1.0]
        longest_repeat_length = repeats_df.max().loc['length']
        filtered_records_dict = {rid:record for rid,record in record_dict.items() if len(record.seq) >= longest_repeat_length + 1}
    #######################################################################################
    SeqIO.write(filtered_records_dict.values(), filtered_contigs_path, "fasta")
    return record_dict, filtered_records_dict, filtered_contigs_path
def parseSpadesAssemblyGraph(spadesdir, records_dict):
    """Parse the SPAdes assembly graph (GFA + FASTG + contigs.paths) and derive
    head/tail connectivity between contigs.

    Mutates each record in `records_dict` by attaching 'path', 'head' and
    'tail' attributes, writes a 'gfa_contigs.paths' summary into
    `args.outdirpath`, and returns (con_tuples_dict, unconnected):
    connection pairs of 'NODE_<id>_H/T' labels keyed by an integer, plus the
    labels of contig ends with no links in the graph.

    NOTE(review): relies on module-level `args` and the project modules
    scripts.pyGFA / Biopython being importable.
    """
    import scripts.pyGFA
    G = scripts.pyGFA.parseGFA(join(spadesdir,'assembly_graph.gfa'))
    # map FASTG edge ids -> GFA segment ids by matching sequences
    edge2seg_map = dict()
    edges_list = []
    seqs = SeqIO.parse(open(join(spadesdir, 'assembly_graph.fastg'), "r"), "fasta")
    for i, record in enumerate(seqs):
        if i % 2 == 0: # skip over the inverse edges
            edges_list.append(record) # Biopython Seq object
    for segid, seq in G.segments.items():
        for record in edges_list:
            if seq == record.seq:
                pattern = re.compile('^EDGE_([\d]+)_length_([\d]+)_cov_([\d*\.\d*]+).*')
                m = re.findall(pattern, record.id)
                edge2seg_map[m[0][0]] = segid
                break
    def otransl(b):
        # translate between bool orientation and '+'/'-' notation (both ways)
        if isinstance(b, bool):
            if b:
                return '+'
            else:
                return '-'
        if b in ("+","-"):
            if b == "+":
                return True
            else:
                return False
    current = False # pre-initialised so the later subscript use is well-defined
    # state machine over contigs.paths: a NODE_ header line sets `current`/`flag`,
    # the following line holds that contig's comma-separated edge path
    with open(join(spadesdir,'contigs.paths'), "r") as cpf:
        flag = False
        for __, line in enumerate(cpf):
            if flag == True:
                path = []
                for x in line.strip().split(','):
                    # x is '<edge-id><+/->'; translate to (segment-id, bool)
                    path.append((edge2seg_map[x[:-1]], otransl(x[-1])))
                records_dict[current].__dict__["path"] = path
                # head orientation is inverted relative to the first path edge
                records_dict[current].__dict__["head"] = (path[0][0], not path[0][1])
                records_dict[current].__dict__["tail"] = (path[-1][0],path[-1][1])
            if line.startswith('NODE_') and not line.strip().endswith("'"):
                # forward-strand header (reverse-strand headers end with "'")
                pattern = re.compile('^NODE_([\d]+)_length_([\d]+)_cov_([\d*\.\d*]+).*')
                m = re.findall(pattern, line.strip())
                current = m[0][0]
                flag = True
                continue
            flag = False
    # numeric ordering of contig ids for deterministic output
    ordering = [str(i) for i in sorted([int(rid) for rid in records_dict])]
    with open(join(args.outdirpath, "gfa_contigs.paths"), 'w') as cpaths:
        for rid in ordering:
            rec = records_dict[rid]
            cpaths.write(rec.name + '\n' + str(rec.path) + '\n')
    seg2edge_map = {v: k for k, v in edge2seg_map.items()}
    node_edges_still_uncon = {}
    def parseLinksToList(itself, link_dict, records_dict):
        """
        For each link in the link_dict associated with an edge
        go through the dictionary of contigs and collect those where
        this link links to a contigs' edge paths' head or tail edge
        """
        linklist = []
        for link in link_dict: # ignoring the CIGAR value of each link key
            for rid, rec in records_dict.items():
                if not len(rec.path) == 1:
                    if link == rec.path[0]:
                        linklist.append([itself , 'NODE_' + rid + '_H'])
                    if link == rec.path[-1]:
                        linklist.append([itself , 'NODE_' + rid + '_T'])
                else:
                    # single-edge contigs: head and tail share the edge, so the
                    # tail match is tested against the inverted orientation
                    if link == rec.path[0]:
                        linklist.append([itself , 'NODE_' + rid + '_H'])
                    if link == (rec.path[-1][0], not rec.path[-1][1]) :
                        linklist.append([itself , 'NODE_' + rid + '_T'])
        return linklist
    cons = []
    unconnected = []
    # classify each contig's four oriented ends against the GFA link table
    for rid in ordering:
        rec = records_dict[rid]
        h = rec.head
        t = rec.tail
        not_h = (rec.head[0], not rec.head[1])
        not_t = (rec.tail[0], not rec.tail[1])
        node_edges_still_uncon[rid] = [h,t,not_h,not_t]
        for link, connectedlinks in G.links.copy().items():
            if link == h:
                node_edges_still_uncon[rid][0] = False
                if not connectedlinks:
                    unconnected.append(rid + '_H ')
                else:
                    cons.append(parseLinksToList('NODE_' + rid + '_H', connectedlinks, records_dict))
            elif link == t:
                node_edges_still_uncon[rid][1] = False
                if not connectedlinks:
                    unconnected.append(rid + '_T ')
                else:
                    cons.append(parseLinksToList('NODE_' + rid + '_T', connectedlinks, records_dict))
            elif link == not_h:
                node_edges_still_uncon[rid][2] = False
            elif link == not_t:
                node_edges_still_uncon[rid][3] = False
    # flatten the nested pair lists and re-pair them into an indexed dict
    con_tuples_dict = dict()
    con_tuples = []
    for i in cons:
        con_tuples.extend([item for sublist in i for item in sublist])
    con_tuples = list(zip(*2*[iter(con_tuples)]))
    i = 0
    for tup in con_tuples:
        con_tuples_dict[i] = tup
        i += 1
    return con_tuples_dict, unconnected
def filterConnections(contig_connections, unconnected, filtered_records_dict):
    """Keep only connections and unconnected ends that refer to filtered contigs.

    A connection survives when both of its 'NODE_<id>_H/T' endpoints belong to
    a contig in `filtered_records_dict` and it is not a self-loop.
    """
    valid_labels = set()
    for cid in filtered_records_dict:
        valid_labels.add('NODE_{}_{}'.format(cid, 'H'))
        valid_labels.add('NODE_{}_{}'.format(cid, 'T'))
    kept = {idx: con for idx, con in contig_connections.items()
            if all(endpoint in valid_labels for endpoint in con) and con[0] != con[1]}
    uncon = [u for u in unconnected if 'NODE_' + u.strip() in valid_labels]
    return kept, uncon
def extractHeadTail(contigs_path, outdirpath, ht_max_length=2000, margin_length=150):
    """Run the fasta_headtail_extract.py helper to cut head/tail sequences
    (up to `ht_max_length` bp, with `margin_length` bp margins) from each contig.

    Returns:
        Path of the produced '<prefix>_HT.fasta' file.

    Raises:
        IOError: if the helper script exits with a non-zero status.
    """
    cmd = [PYTHON_PATH, join(dirname(__file__), 'scripts' ,'fasta_headtail_extract.py'),
           '-i', contigs_path,
           '-min', str(2 + 2 * int(margin_length)),
           '-l', str(ht_max_length),
           '-ms', str(int(margin_length)),
           '-o', join(outdirpath, basename(contigs_path).split('.')[0])]
    exit_code = subprocess.call(' '.join(cmd), shell=True)
    if exit_code > 0 :
        print('Error while trying to extract the head and tail sequences from fasta file:', contigs_path)
        # BUG FIX: a bare `raise` outside an except block raises
        # "RuntimeError: No active exception to re-raise"; raise IOError
        # explicitly, consistent with nucmerMapping below.
        raise IOError()
    else:
        outpath = join(outdirpath, basename(contigs_path).split('.')[0] + '_HT.fasta')
        return outpath
def nucmerMapping(contigs_path, refgenomesdir, outdir):
    """Invoke the nucmer_mapping.py helper to map contigs against every
    reference genome in `refgenomesdir`, writing results into `outdir`.

    Raises:
        IOError: if the helper script exits with a non-zero status.
    """
    helper_script = join(dirname(__file__), 'scripts' , 'nucmer_mapping.py')
    command = ' '.join([PYTHON_PATH, helper_script,
                        '-i', contigs_path, '-o', outdir,
                        '-rd', refgenomesdir])
    status = subprocess.call(command, shell=True)
    if status > 0 :
        print('Error while trying to map', contigs_path,'to reference genome sequences in', refgenomesdir)
        raise IOError()
    return
def getMaporder(refmapdir, cov = 70, alen_thresh = 1300, unique = True, htflag = False):
    '''get the mapping order of the contigs which meet the
    thresholds for each reference in the reference directory

    Parses every '*_dnadiff.mcoords' file in `refmapdir` with the project
    coords2gff3 parser and returns one dict per reference genome, mapping
    reference sequence id -> ordered list of mapped contig Features.
    '''
    file_ending_glob = '*_dnadiff.mcoords'
    import scripts.coords2gff3 as c2g
    import glob
    # build a Namespace mimicking the parser's expected CLI arguments
    args2 = argparse.Namespace()
    args2.__dict__.update({'coverage_threshold': cov,
                           'length_threshold' : alen_thresh,
                           'unique_requirement' : unique,
                           'source' : 'contig',
                           'htflag' : htflag})
    perms = []
    for ref_map_path in glob.glob(join(refmapdir, file_ending_glob)):
        # BUG FIX: each coords file was opened and never closed (handle leak);
        # the features are fully consumed before the file is closed.
        with open(ref_map_path, 'r') as coords_file:
            args2.__dict__['infilepath'] = coords_file
            feats = c2g.coordsFileParser(args2.__dict__['infilepath'], args2)
            perm = dict()
            for feat in feats.featurelst:
                if not feat.seqid in perm:
                    perm[feat.seqid] = []
                perm[feat.seqid].append(feat)
        perms.append(perm)
    return perms
def filterPerms(perms, filtered_records_dict):
    """Drop mapped features whose repr is not a head/tail label of a filtered contig.

    Returns one dict per reference genome with the same sequence keys but
    only the surviving features, preserving their order.
    """
    valid_labels = set()
    for cid in filtered_records_dict:
        valid_labels.add('NODE_{}_{}'.format(cid, 'H'))
        valid_labels.add('NODE_{}_{}'.format(cid, 'T'))
    filtered_perms = []
    for refgenome in perms:
        filtered_perms.append({seq: [feat for feat in perm if repr(feat) in valid_labels]
                               for seq, perm in refgenome.items()})
    return filtered_perms
def initGraph(cons=False):
    """Build the initial scaffold graph from assembly-graph connections.

    `cons` maps an index to a pair of 'NODE_<id>_H/T' labels; each pair
    becomes an 'ASSEMBLY' edge. Every node is then joined to its opposite
    end (head<->tail) with a heavy 'DEFAULT' edge when no such edge exists.
    Returns an (empty, when `cons` is falsy) networkx Graph.
    """
    def getShortLabel(label):
        # 'NODE_3_H' -> '3_H ' (trailing space is part of the node-label scheme)
        return label.replace('NODE_','') + ' '
    def getOppositeLabel(label):
        # swap the head/tail suffix of a short label
        if label.endswith('H '):
            return label.replace('_H ', '_T ')
        else:
            return label.replace('_T ', '_H ')
    G = nx.Graph()
    if not cons:
        print("Information from assembly graph not used")
        return G
    for con in cons.values():
        G.add_edge(getShortLabel(con[0]), getShortLabel(con[1]), weight=1, ref=('ASSEMBLY',), edgetype=1)
    # BUG FIX: snapshot the node set before the loop — add_edge below can
    # introduce brand-new opposite-end nodes, and mutating the graph while
    # iterating the live nodes view raises RuntimeError on networkx >= 2.
    for node in list(G.nodes()):
        if not G.has_edge(node, getOppositeLabel(node)):
            G.add_edge(node, getOppositeLabel(node), weight=100, ref=('DEFAULT',), edgetype=1)
    return G
def toGraph(perms, dist_thresh, G=False, htflag = False):
    """Add mapping-derived edges to the scaffold graph.

    First pass: connect consecutive mapped contig ends per reference sequence
    (same contig, or different contigs closer than `dist_thresh` bp), counting
    supporting references in the edge 'weight'/'ref' attributes. Second pass:
    tag node pairs that are three reference-supported hops apart with a
    'color' attribute, used later to draw long-range (grey) edges.

    Returns (G, adj_lists) where adj_lists maps each reference sequence to
    (label, next_label, distance) triples.
    """
    if not G:
        G = nx.Graph()
    from itertools import cycle
    def getNodeLabel(feat, htflag):
        """Gives out a 2-tuple, with the long version label plus orientation,
        id and head/tail info on the contig, as well as a shorter version
        leaving out the head and tail info"""
        if htflag:
            node_label = feat.node_id
            node_label = '_'.join(node_label.split('_')[1:]) + ' ' # replace NODE_ or CONTIG_ in front of contig label
            lab = node_label.rsplit('_')[0]
        else:
            node_label = feat.strand + '_' + feat.node_id
            node_label = '_'.join(node_label.split('_')[1:]) + ' '
            lab = node_label = node_label.replace('_', '')
        return node_label, lab
    def getInvLabel(label):
        # flip the leading strand sign of a non-htflag label
        if label.startswith('+'):
            label = '-' + label[1:]
        else:
            label = '+' + label[1:]
        return label
    adj_lists = {}
    for permdic in perms:
        for k, perm in permdic.items():
            adj_lists[k] = []
            # cycle() pairs each element with its successor, wrapping around
            all_connected = cycle(perm)
            next(all_connected) # to start with element two
            for mcon in perm:
                inverse = False
                mcon_label, lab = getNodeLabel(mcon, htflag=htflag)
                # reuse the inverted node if the graph already holds it
                if not htflag and G.has_node(getInvLabel(mcon_label)):
                    mcon_label = lab = getInvLabel(mcon_label)
                    inverse = True
                if not G.has_node(mcon_label):
                    G.add_node(mcon_label)
                nextcon = next(all_connected)
                nextcon_label, nlab = getNodeLabel(nextcon, htflag=htflag )
                if inverse:
                    nextcon_label = nlab = getInvLabel(nextcon_label)
                distance = (mcon_label, nextcon_label, abs(int(nextcon.start) - int(mcon.end)))
                adj_lists[k].append(distance)
                if nlab == lab:
                    # same contig: head-to-tail edge, unless it is a repeated
                    # identical mapping (e.g. +1_H +1_H ...), which is skipped
                    if mcon_label == nextcon_label:
                        continue
                    if G.has_edge(mcon_label, nextcon_label):
                        G[mcon_label][nextcon_label]["weight"] += 1
                        G[mcon_label][nextcon_label]["ref"] += (k,)
                    else:
                        G.add_edge(mcon_label, nextcon_label, weight=1, ref=(k,), edgetype=1)
                elif (abs(int(nextcon.start) - int(mcon.end)) < dist_thresh):
                    # different contigs: only connect when close on the reference
                    if G.has_edge(mcon_label, nextcon_label):
                        G[mcon_label][nextcon_label]["weight"] += 1.0
                        G[mcon_label][nextcon_label]["ref"] += (k,)
                    else:
                        G.add_edge(mcon_label, nextcon_label, weight=1.0, ref=(k,), edgetype=1)
    for permdic in perms: # second iteration, now going for contigs spanning two
        for k, perm in permdic.items():
            all_connected = cycle(perm)
            all_connected2 = cycle(perm)
            all_connected3 = cycle(perm)
            [next(all_connected) for _ in range(1)] # to later start with element two
            [next(all_connected2) for _ in range(2)] # to later start with element three
            [next(all_connected3) for _ in range(3)] # to later start with element four
            for mcon in perm:
                mcon_label, lab = getNodeLabel(mcon, htflag= htflag)
                nextcon = next(all_connected)
                nextcon2 = next(all_connected2)
                nextcon3 = next(all_connected3)
                nextcon_label, nlab = getNodeLabel(nextcon, htflag= htflag)
                nextcon2_label, nlab2 = getNodeLabel(nextcon2, htflag= htflag)
                nextcon3_label, nlab3 = getNodeLabel(nextcon3, htflag= htflag)
                if not htflag and not G.has_node(mcon_label):
                    mcon_label = lab = getInvLabel(mcon_label)
                    nextcon_label = nlab = getInvLabel(nextcon_label)
                    nextcon2_label = nlab2 = getInvLabel(nextcon2_label)
                    nextcon3_label = nlab3 = getInvLabel(nextcon3_label)
                # require the same reference to support all three hops
                if G.has_edge(mcon_label, nextcon_label) and k in G[mcon_label][nextcon_label]["ref"]:
                    if G.has_edge(nextcon_label, nextcon2_label) and k in G[nextcon_label][nextcon2_label]["ref"]:
                        if G.has_edge(nextcon2_label, nextcon3_label) and k in G[nextcon2_label][nextcon3_label]["ref"]:
                            # at least three distinct contigs along the chain
                            if len(set([lab,nlab,nlab2,nlab3])) > 2:
                                if "color" in G[mcon_label] and "color" in G[nextcon3_label]:
                                    G.node[mcon_label]["color"].update(nextcon3_label)
                                    G.node[nextcon3_label]["color"].update(mcon_label)
                                else:
                                    G.node[mcon_label]["color"] = set([nextcon3_label,])
                                    G.node[nextcon3_label]["color"] = set([mcon_label,])
    return G, adj_lists
def visualizeConnectivityGraph(G, outfilepath, smallest_contig_length='"default" --> smallest considered contig size in'):
    """Render the scaffold graph `G` to an SVG via graphviz.

    Node shape/colour encodes strand (+/-) and head/tail; edges are labelled
    with their vote weight. Single-partner 'color' tags from toGraph are
    materialised as grey edgetype-2 edges (this mutates `G`). Returns the
    path of the rendered file.
    """
    graph = functools.partial(gv.Graph, format='svg')
    G1 = graph()
    styles = {
        'graph': {
            'label': 'Mapped Contigs Graph, threshold_distance: ' + str(smallest_contig_length) + ' bp',
            'fontsize': '12',
            'fontcolor': 'black',
            'bgcolor': '#FFFFFF',
            'rankdir': 'BT',
            'size' : '7.75,10.25',
        },
        'nodes': {
            'fontname': 'Helvetica',
            'shape': 'rarrow',
            'fontcolor': 'black',
            'color': 'black',
            'style': 'filled',
            'fillcolor': '#e8e8e8',
        },
        'edges': {
            'style': 'solid',
            'color': 'black',
            'fontname': 'Courier',
            'fontsize': '12',
            'fontcolor': 'black',
        }
    }
    def apply_styles(graph, styles):
        # push the style dicts onto the graphviz object's default attributes
        graph.graph_attr.update(
            ('graph' in styles and styles['graph']) or {}
        )
        graph.node_attr.update(
            ('nodes' in styles and styles['nodes']) or {}
        )
        graph.edge_attr.update(
            ('edges' in styles and styles['edges']) or {}
        )
        return graph
    G1 = apply_styles(G1, styles)
    def add_nodes(graph, nodes):
        # helper kept for parity with the graphviz examples; nodes may carry attrs
        for n in nodes:
            if isinstance(n, tuple):
                graph.node(n[0], **n[1])
            else:
                graph.node(n)
        return graph
    def add_edges(graph, edges):
        for e in edges:
            if isinstance(e[0], tuple):
                graph.edge(*e[0], **e[1])
            else:
                graph.edge(*e)
        return graph
    # encode strand and head/tail into shape + fill colour
    for __, n in enumerate(G.nodes()):
        temp = 0
        if n.startswith('+'):
            temp+=1
        if '_H' in n:
            temp+=2
        if temp == 0: #-T
            G1.node(n, shape='rect', fillcolor='#779ECB')
        elif temp == 1: #+T
            G1.node(n, shape='larrow', fillcolor='#779ECB')
        elif temp == 2: #-H
            G1.node(n, shape='rect', fillcolor='#DEA5A4')
        elif temp == 3: #+H
            G1.node(n, shape='larrow', fillcolor='#DEA5A4')
    import math
    # edge width grows logarithmically with the vote weight; strong edges orange
    for __, e in enumerate(G.edges()):
        edge_weight = G[e[0]][e[1]]['weight']
        if edge_weight >= 15:
            G1.edge(*e, label= ' ' + str(edge_weight), weight=str(edge_weight),
                    color='#CB6D51', penwidth = str(math.log(edge_weight, 2) + 1 ))
        else:
            G1.edge(*e, label= ' ' + str(edge_weight), weight=str(edge_weight),
                    color='#CB99C9', penwidth = str(math.log(edge_weight, 2) + 1 ))
    # materialise unambiguous long-range 'color' tags as grey edges
    for __,n in enumerate(G.nodes()):
        if "color" in G.node[n]:
            dist_indegree = len(G.node[n]["color"])
            if dist_indegree != 1:
                G1.node(n, shape='rarrow', fillcolor='#FFFFFF')
            else:
                (con_node,) = G.node[n]["color"]
                e = (con_node, n)
                if not G.has_edge(n, con_node):
                    G.add_edge(*e, edgetype=2)
                    G1.edge( *e, color='#E2E2E2')
    filename = G1.render(filename=outfilepath)
    return filename
def buildNodeNeighborDict(G):
    """Split every node's neighbours by edge type.

    Edges with edgetype == 1 are 'pink' (mapping/assembly support), everything
    else is 'grey' (long-range colour edges). Returns a dict:
    node -> {'pink_neighbors': [...], 'grey_neighbors': [...]}.
    """
    diff_neighbors = {}
    for node, neighbordict in G.adjacency_iter():
        pink, grey = [], []
        for neighbor, edge_attrs in neighbordict.items():
            if edge_attrs['edgetype'] == 1:
                pink.append(neighbor)
            else:
                grey.append(neighbor)
        diff_neighbors[node] = {'pink_neighbors': pink,
                                'grey_neighbors': grey}
    return diff_neighbors
def getCovUsedContigDict(df):
    """Expand the per-contig 'multipl' column into head/tail-keyed dicts.

    Returns (cov_dict, used_dict): multiplicity per '<id>_H '/'<id>_T ' label,
    and a matching usage counter initialised to zero.
    """
    multipl = df['multipl'].to_dict()
    cov_dict = {}
    for suffix in ('_H ', '_T '):
        for cid, mult in multipl.items():
            cov_dict[cid + suffix] = mult
    used_dict = {label: 0 for label in cov_dict}
    return cov_dict, used_dict
def findPath(n, diff_neighbors, current_path, cov_dict, used_dict, MAX=sys.maxsize, G=False, htflag=False, guides=False):
    """Recursively extend `current_path` from node `n` through pink edges.

    Heuristic priority when several pink neighbours exist: jump to the same
    contig's opposite end, follow a unique grey hint from the pre-previous
    node, a unique assembly-graph vote, the next `guides` element, or a
    unique high-vote edge; otherwise stop. Mutates `current_path` and
    `used_dict` in place and returns the (possibly truncated) path.

    NOTE(review): `cn` is only bound when htflag is True, and `pre_prev`
    only when the path has >1 element — the later branches appear to rely on
    those preconditions holding together; confirm with callers.
    """
    new_vale = used_dict[n] + 1
    if guides and new_vale > cov_dict[n]:
        # node would be used more often than its coverage multiplicity allows
        print('COVERAGE EXCEEDED')
        return current_path
    else:
        used_dict[n] += 1
        current_path.append(n)
    if len(current_path) >= MAX or len(set(current_path)) * 3 < len(current_path) : # Protection against infinite loops
        print('INFINITE LOOP STOP')
        return current_path
    if htflag:
        # cn = the opposite end label of the current contig
        n_id, n_ht = n.split('_')
        if n_ht == 'H ':
            cn = '_'.join([n_id, 'T '])
        else:
            cn = '_'.join([n_id, 'H '])
    pn = diff_neighbors[n]['pink_neighbors'][:]
    if len(current_path) > 1:
        prev = current_path[-2]
        # never walk straight back to where we came from
        if prev in pn:
            pn.remove(prev)
        if len (current_path) > 2:
            pre_prev = current_path[-3]
        else:
            pre_prev = None
    ### Algorithm ###
    if len(current_path) > 1 and n == current_path[0]:
        print("When path returns to its starting node, break and return")
        return current_path[:-1]
    if len(pn) == 0:
        print('Reached a dead end', current_path)
        return current_path
    elif len(pn) == 1:
        # single continuation: consume a matching guide and recurse
        if guides and pn[0] == guides[0]:
            guides = guides[1:]
        return findPath(pn[0], diff_neighbors, current_path, cov_dict, used_dict, MAX=MAX, G=G, htflag=htflag, guides=guides)
    elif len(pn) > 1:
        if htflag:
            if cn in pn: # directly go to its opposite if connected
                if guides and cn == guides[0]:
                    guides = guides[1:]
                return findPath(cn, diff_neighbors, current_path, cov_dict, used_dict, MAX=MAX, G=G, htflag=htflag, guides=guides)
        assembly_pns = [p for p in pn if "ASSEMBLY" in G[n][p]["ref"]] # Is there a definitive vote from the assembly?
        highvote_pns = [p for p in pn if not G[n][p]["weight"] == 1] # only those neighbors with higher vote than 1
        if pre_prev and diff_neighbors[pre_prev]['grey_neighbors']:
            gn = diff_neighbors[pre_prev]['grey_neighbors']
            print("OK", n, pn, gn, guides)
            print("Current Path:", current_path)
            if not guides and len(gn) == 1 and gn[0] in pn: # exactly one grey hint: follow it
                return findPath(gn[0], diff_neighbors, current_path, cov_dict, used_dict, MAX=MAX, G=G,htflag=htflag, guides=guides)
            if len(assembly_pns) == 1 and guides and not assembly_pns[0] == guides[0]:
                print("Assembly graph supports an intermediate node")
                return findPath(assembly_pns[0], diff_neighbors, current_path, cov_dict, used_dict, MAX=MAX, G=G, htflag=htflag, guides=guides)
            if guides and ((guides[0] in gn and guides[0] in pn) or (not gn and guides[0] in pn) or (guides[0] in pn)):
                guidenode, guides = guides[0], guides[1:]
                print("Guide's next element same as target of grey edge from pre-previous node, or using guide because no grey edge")
                return findPath(guidenode, diff_neighbors, current_path, cov_dict, used_dict, MAX=MAX, G=G,htflag=htflag, guides=guides)
            if len(assembly_pns) == 1 and guides and not assembly_pns[0] == guides[0]:
                return findPath(assembly_pns[0], diff_neighbors, current_path, cov_dict, used_dict, MAX=MAX, G=G, htflag=htflag, guides=guides)
            if len(highvote_pns) == 1:
                return findPath(highvote_pns[0], diff_neighbors, current_path, cov_dict, used_dict, MAX=MAX, G=G, htflag=htflag, guides=guides)
        elif guides and guides[0] in pn:
            guidenode, guides = guides[0], guides[1:]
            print("Using guides to prolong path to", guidenode)
            return findPath(guidenode, diff_neighbors, current_path, cov_dict, used_dict, MAX=MAX, G=G, htflag=htflag, guides=guides)
        else:
            if guides:
                if pre_prev:
                    gn = diff_neighbors[pre_prev]['grey_neighbors']
                    print('AMBIGUOUS PINK AND NO OR MULTIPLE GREY EDGES 2', n, pn, gn, cn, current_path, guides)
                else: print('AMBIGUOUS PINK AND NO GREY EDGES', n, pn, cn, current_path, guides)
        print("No test postive, stop path elongation",current_path,n, cn, pn, pre_prev, guides)
        return current_path
def findPaths(G, df, startnodes=False, htflag=False):
"""Start at every node and run through with the developed algorithm"""
#print("startnodes",startnodes)
diff_neighbors = buildNodeNeighborDict(G)
cov_dict, used_dict = getCovUsedContigDict(df)
sorted_nodes = sorted(G.nodes(), key=lambda x: len(diff_neighbors[x]['pink_neighbors']))
MAX = len(sorted_nodes) * 2
if not startnodes:
startnodes = sorted_nodes
all_paths = []
for n in startnodes:
current_path = []
if G.has_node(n):
print("Start from", n)
all_paths.append(findPath(n, diff_neighbors, current_path, cov_dict, used_dict, G=G, MAX=MAX, htflag=htflag))
for n in sorted_nodes:
if used_dict[n] == 0:
current_path = []
all_paths.append(findPath(n, diff_neighbors, current_path, cov_dict, used_dict, G=G, htflag=htflag))
all_paths = [p for p in all_paths if p] # filter out 'None'
all_paths = [p for p in all_paths if len(p) > 1]
### try to delete duplicates and subsets
nr_paths = []
for p1 in sorted(all_paths, key=lambda x: len(x), reverse=True):
p1_str = ''.join(p1).strip()
p1_str_r = ''.join(p1[::-1]).strip()
current_paths_str = [''.join(p).strip() for p in nr_paths]
flag = True
for pstr in current_paths_str:
circ_pstr = pstr + ' ' + pstr
if not p1_str in pstr \
and not p1_str_r in pstr \
and not p1_str in circ_pstr \
and not p1_str_r in circ_pstr:
continue
else:
flag = False
break
if flag:
nr_paths.append(p1)
### try to merge the ends of paths
nr_paths = sorted(nr_paths, key=lambda x: len(x), reverse=True)
from itertools import takewhile
def longestPrefix(a, b):
return [x for (x, _) in takewhile(lambda x: x[0] == x[1], zip(a, b))]
def longestSuffix(a, b):
return longestPrefix(reversed(a), reversed(b))[::-1]
from functools import reduce
def getAllSuffixes(a):
return [a[i-len(a):] for i in reversed(range(len(a)))]
#def getAllPrefixes(a):
# return [x[::-1] for x in getAllSuffixes(a[::-1])]
def longestPrefixThatIsSuffix(a,b):
lcp = []
for suf in getAllSuffixes(b):
if len(longestPrefix(a, suf)) == len(suf):
lcp = suf
return lcp
def mergeSeqs(a, b, c = None, merge_mode = None):
#print('MMM', "MODE", merge_mode, '\nA: ', a, '\nB: ', b, '\n overlap: ', c)
if merge_mode == 'lcp':
return b[:-len(c)] + a
if merge_mode == 'inv_lcp':
return a + b[len(c):]
if merge_mode == 'lcs':
return a + list(reversed(b))[len(c):]
if merge_mode == 'inv_lcs':
return list(reversed(a))+ b[len(c):]
def findAndMerge(a, b):
d = {'lcp': longestPrefixThatIsSuffix(a, b),
'inv_lcp': longestPrefixThatIsSuffix(b, a),
'lcs': longestPrefixThatIsSuffix(list(reversed(a)), b),
'inv_lcs': longestPrefixThatIsSuffix(b, list(reversed(a)))}
m = max(d, key=lambda k: len(d[k]))
if len(d[m]) > 2:
a = mergeSeqs(a, b, c=d[m], merge_mode=m)
return a
def getContigID(contigend):
for e in contigend.split('_'):
if e.isdigit():
return e
def getInvLabel(label):
if label.endswith('_H '):
return label.replace('_H ', '_T ')
else:
return label.replace('_T ', '_H ')
    def correctScaffoldEnds(nr_paths, used_dict):
        """Ensure each path starts and ends with a complete H/T pair, tally
        how often each contig end occurs, and blank out repeated ends.

        Mutates both ``nr_paths`` and ``used_dict`` in place and returns them.
        NOTE(review): assumes every label produced by getInvLabel is already a
        key of used_dict — an unseen label would raise KeyError here; confirm
        used_dict is pre-seeded with all contig-end labels.
        """
        for i, p in enumerate(nr_paths[:]):
            # If the first two labels are not the two ends of the same contig,
            # prepend the missing partner end of the first label.
            if p[0] != getInvLabel(p[1]):
                nr_paths[i] = [getInvLabel(p[0])] + nr_paths[i]
                used_dict[nr_paths[i][0]] += 1
            # Same at the tail: append the missing partner end of the last label.
            if p[-2] != getInvLabel(p[-1]):
                nr_paths[i] += [getInvLabel(p[-1])]
                used_dict[nr_paths[i][-1]] += 1
            # Count every end of the original (pre-extension) path.
            for j, c in enumerate(p):
                used_dict[c] += 1
        marked = []
        for i, p in enumerate(nr_paths):
            for j, c in enumerate(p):
                if used_dict[c] > 1:
                    # Keep the first occurrence of a multiply-used end,
                    # replace every later occurrence by False ...
                    if c in marked:
                        nr_paths[i][j] = False
                    else:
                        marked.append(c)
            # ... then drop the blanked entries from the path.
            nr_paths[i] = [x for x in nr_paths[i] if x]
        return nr_paths, used_dict
paths = nr_paths[:]
used_dict = {k:0 for k, v in cov_dict.copy().items()}
nr_paths, used_dict = correctScaffoldEnds(list(sorted(nr_paths, key= lambda x: len(x), reverse=True)), used_dict)
pos_to_delete = []
for i, p in enumerate(nr_paths):
a = p
if i in pos_to_delete:
continue
#print('i:',i)
for j, p1 in enumerate(nr_paths[:]):
if j in pos_to_delete or i >= j :
continue
#print('j:',j)
new_a = findAndMerge(a, p1)
if len(new_a) > len(a):
a = new_a
pos_to_delete.append(j)
paths[i] = a
df1 = pd.DataFrame.from_dict(used_dict, orient='index')
df1.columns = ['count']
df = df.join(df1)
return all_paths, nr_paths, sorted([p for i, p in enumerate(paths) if not i in pos_to_delete], key=lambda x: len(x), reverse=True), df
def adjlistsToFile(adj_lists, outfilepath):
    """Write the adjacency lists to a tab-separated text file.

    Before writing, the third field of every (node, node, distance) tuple is
    replaced in place by the absolute distance corrected by twice the global
    READ_LENGTH — so the caller's dict is mutated as a side effect.
    """
    for key, entries in adj_lists.items():
        for idx in range(len(entries)):
            first, second, dist = entries[idx]
            entries[idx] = (first, second, abs(dist - 2 * READ_LENGTH))
    with open(outfilepath, 'w') as handle:
        for key, value in adj_lists.items():
            handle.write('{}\t{}\n'.format(key, value))
def checkAndConvertHTpaths(non_redundant_paths):
    """First checks whether the ordering in each path
    shows consistent pairs of H and T of the same
    contig ID. Second: converts the notion of the nodes
    to a notion containing + and - for strandedness info.

    Each input path is a list of contig-end labels of the form
    '<id>_H' / '<id>_T'. The returned paths contain one '<sign><id>'
    entry per input node, where the sign encodes the inferred strand.

    Bug fix: the original referenced an undefined name ``gr`` (the line
    assigning it was commented out), so every call raised NameError. The
    first-contig check now reads the first two path elements directly.
    """
    converted_paths = []
    for j, p in enumerate(non_redundant_paths):
        if len(p) < 2:
            # A lone contig end carries no pairing information; skip it
            # (upstream filtering already drops paths of length <= 1).
            continue
        current_id = []
        current_ht = []
        orientations = []
        # If the path starts with an unpaired contig end (first two labels
        # belong to different contigs), record its orientation separately and
        # start the paired scan at the second element so the i % 2 pairing
        # criterion lines up.
        flag = 0
        if not p[0].split('_')[0] == p[1].split('_')[0]:
            current_id.append(p[0].split('_')[0])
            if p[0].split('_')[1].startswith('T'):
                orientations.append('+')
            else:
                orientations.append('-')
            flag = 1
        for i, e in enumerate(p[flag:]):
            contig_id, ht = e.split('_')
            if i % 2 == 0:
                current_ht.append(ht)
                current_id.append(contig_id)
                # First end of a pair being a tail means the contig is
                # traversed in reverse orientation.
                if ht.startswith('T'):
                    orientations.append('-')
                else:
                    orientations.append('+')
            else:
                # Second end of the pair must be the other end of the same contig.
                if not contig_id == current_id[-1] or (contig_id == current_id[-1] and ht == current_ht[-1]):
                    print('Warning. Inconsistency in path number', j, ', at position', i)
                current_ht.append(ht)
                current_id.append(contig_id)
                if ht.startswith('T'):
                    orientations.append('-')
                else:
                    orientations.append('+')
        converted_paths.append(['{}{}'.format(orientations[i], current_id[i]) for i, __ in enumerate(current_id)])
    return converted_paths
def checkAndCountOccurences(non_redundant_paths, df):
    """Count how often each contig end occurs across all paths and compare
    the observed counts against the multiplicity predicted from coverage.

    Parameters
    ----------
    non_redundant_paths : list of list of str
        Paths of contig-end labels ('<id>_H' / '<id>_T'). May be empty
        (the original crashed with IndexError on an empty list).
    df : pandas.DataFrame
        Contig statistics indexed by contig id; must contain a 'multipl'
        column holding the predicted multiplicity per contig.

    Returns
    -------
    pandas.DataFrame
        ``df`` with an added 'count' column of observed occurrences
        (NaN for contigs never seen in a path).
    """
    from collections import Counter
    counts = Counter()
    for p in non_redundant_paths:
        counts.update(p)
    # NOTE(review): '<id>_H' and '<id>_T' collapse onto the same contig-id key,
    # so the last one seen wins; both ends of a contig should occur equally
    # often, making the overwrite harmless — confirm against path producer.
    count_dict = {k.split('_')[0]: int(v) for k, v in counts.items()}
    cov_dict = df['multipl'].to_dict()
    if count_dict:
        df1 = pd.DataFrame.from_dict(count_dict, orient='index')
        df1.columns = ['count']
        df = df.join(df1)
    else:
        # Nothing observed: still expose the 'count' column for downstream code.
        df = df.copy()
        df['count'] = float('nan')
    for k, v in cov_dict.items():
        if k in count_dict and not v == count_dict[k]:
            print('Warning: Coverage_criterion not fulfilled: contig', k, ' #predicted:', v, '#observed:', count_dict[k])
    return df
def produceScaffoldRecords(final_paths, record_dict, mode='100N'):
    """Construct the actual scaffolded contig records.

    Parameters
    ----------
    final_paths : list of list of str
        Each path is a list of signed contig ids ('+<id>' / '-<id>');
        a leading '-' means the contig sequence is reverse-complemented.
    record_dict : dict
        Maps contig id -> Bio.SeqRecord of that contig.
    mode : str
        '100N' -- join consecutive contigs with a spacer of 100 N's.
        'K127' -- join by trimming the first 127 bases of the following
                  contig (assumes a k=127 assembly overlap).

    Returns
    -------
    list of Bio.SeqRecord
        One record per scaffold, followed by every contig that was not
        used in any path.

    Bug fix: the spacer/trim was previously applied before the FIRST contig
    too, so every '100N' scaffold started with 100 N's and every 'K127'
    scaffold lost the first 127 bases of its first contig. The join logic
    now only applies between consecutive contigs (j > 0).
    """
    from Bio.Seq import Seq
    from Bio.SeqRecord import SeqRecord
    used_ids = []
    scaffolds_records = []
    for i, path in enumerate(final_paths):
        # Construct the fasta scaffold header indicating the underlying contig path.
        scaf_id = ['SCAFFOLD', str(i+1), 'path', '[']
        scaf_seq = Seq("")
        for j, e in enumerate(path):
            scaf_id.append(e)
            used_ids.append(e[1:])
            if e.startswith('-'):
                contig_seq = record_dict[e[1:]].seq.reverse_complement()
            else:
                contig_seq = record_dict[e[1:]].seq
            if j == 0:
                # First contig goes in unmodified -- no spacer or trim before it.
                scaf_seq += contig_seq
            elif mode == '100N':
                scaf_seq += Seq('N'*100)
                scaf_seq += contig_seq
            elif mode == 'K127':
                # HERE NEEDS IMPLEMENTATION OF CHECK BREAKS: if the junction is an
                # overlap break trim the k-mer overlap, else fall back to N mode.
                scaf_seq += contig_seq[127:]
        scaffolds_records.append( SeqRecord( scaf_seq, id = '_'.join(scaf_id) + '_]' + '_length_' + str(len(scaf_seq)), description=""))
    # Pass through every contig that did not end up in a scaffold.
    for k, v in record_dict.items():
        if not k in used_ids:
            scaffolds_records.append(v)
    return scaffolds_records
if __name__ == '__main__':
    ################################### TEST INPUT ########################################
    # parser, args = parse_args(['-i','/home/bardya/usr/data/Reference_Assisted_Scaffolding/Assemblies/GCF_000018445.1_ASM1844v1_genomic/assembly.spades/contigs.fasta',
    #                            '-rg', '/home/bardya/usr/data/Reference_Assisted_Scaffolding/References',
    #                            '-spdir', '/home/bardya/usr/data/Reference_Assisted_Scaffolding/Assemblies/GCF_000018445.1_ASM1844v1_genomic/assembly.spades',
    #                            '-o', '/home/bardya/usr/data/Reference_Assisted_Scaffolding/Scaffolding',
    #                            '-f'])
    parser, args = parse_args([])
    # Pipeline-wide constants.
    LENGTH_THRESHOLD = 601   # contigs shorter than this are ignored when deriving distance thresholds
    READ_LENGTH = 150        # sequencing read length; used to pad mapping margins/distances
    PYTHON_PATH = sys.executable
    #spades contig name pattern capturing (node id, length, coverage)
    # NOTE(review): the coverage group '[\d*\.\d*]+' is a character class, not
    # the intended '([\d]+\.[\d]+)' -- it happens to match, but verify.
    PATTERN = re.compile('^NODE_([\d]+)_length_([\d]+)_cov_([\d*\.\d*]+).*')
    #######################################################################################
    ################################### PREPARE OUTPUT DIR ################################
    # Create the output dir if missing; the bare except deliberately turns the
    # failed assert into a makedirs call.
    try:
        assert isdir(args.outdirpath), "Output directory does not exist"
    except:
        makedirs(args.outdirpath, 0o777)
    if not args.force_flag:
        assert len(listdir(args.outdirpath)) == 0, "Output directory not empty, try the '-f' option to force overwriting"
    args.outdirpath = abspath(expanduser(args.outdirpath))
    #######################################################################################
    ################################### GET & PARSE INPUT FILES ###########################
    #a1) contigs file
    recordlist, df = parseContigs(args.contigs_path.name, pattern=PATTERN)
    #a2) store and filter contigs
    records_dict, filtered_records_dict, filtered_contigs_filepath = filterAndStoreContigs(recordlist, df, pattern=PATTERN, key='lr')
    #b) reference genomes
    reference_genomes = getRefFilePaths(args.refgenomesdir)
    #c) (optional) spades assembly graph (needs the directory, as several files need to be parsed)
    contig_connections, unconnected = parseSpadesAssemblyGraph(args.assemblygraph, records_dict)
    # Dump the GFA-derived contig connections, ordered by numeric contig id.
    with open(join(args.outdirpath, "gfa_contig_connections.txt"), 'w') as ccon:
        for i in sorted(contig_connections.keys(), key=lambda x: int(x)):
            ccon.write("{}\n".format(contig_connections[i]))
    #######################################################################################
    ################################### Filter Contig Connections #########################
    filtered_contig_connections, filtered_unconnected = filterConnections(contig_connections, unconnected, filtered_records_dict)
    #######################################################################################
    ############################ Prepare the Contig Heads and Tails #######################
    head_tail_filepath = extractHeadTail(filtered_contigs_filepath, args.outdirpath,
                                         ht_max_length = 2000, margin_length=READ_LENGTH*1.5)
    #######################################################################################
    ############################## Perform Mapping using Nucmer ###########################
    map_outdir = join(args.outdirpath, 'mapped')
    if not isdir(map_outdir):
        makedirs(map_outdir, 0o777)
    # NOTE(review): the nucmer mapping step is disabled -- 'mapped/' must already
    # contain mapping results from a previous run; confirm this is intended.
    #nucmerMapping(head_tail_filepath, args.refgenomesdir, map_outdir)
    #######################################################################################
    #### DETERMINE MINIMAL PHYSICAL DISTANCE OF TWO CONTIG ENDS TO BE CALLED CONNECTED ####
    smallest_contig_length = min([len(rec.seq) for __,rec in filtered_records_dict.items() if len(rec.seq) >= LENGTH_THRESHOLD ])
    #######################################################################################
    ############################## Extract Contig Permutations ############################
    #TODO: Iterative approach to also include the smaller contigs, with length smaller than alen_thresh
    perms_ht = getMaporder(join(args.outdirpath, 'mapped'), cov = 90,
                           alen_thresh = 1300 + 2 * READ_LENGTH * 1.5,
                           #alen_thresh = smallest_contig_length + 2 * READ_LENGTH * 1.5,
                           unique = False, htflag = True)
    #######################################################################################
    ############################## Filter Perms according to Records Dict #################
    filtered_perms_ht = filterPerms(perms_ht, filtered_records_dict)
    #######################################################################################
    ############################## Build the Graph ########################################
    G = initGraph(filtered_contig_connections)
    G, adj_lists = toGraph(filtered_perms_ht, G=G,
                           dist_thresh = smallest_contig_length + 2 * READ_LENGTH * 1.5,
                           htflag = True)
    #######################################################################################
    #### SAVE TO FILE THE DISTANCES OF MAPPED HEAD AND TAIL REGIONS IN ALL REFERENCES #####
    adjlistsToFile(adj_lists, join(args.outdirpath, 'ht_mapping_distances.tsv'))
    #######################################################################################
    ############################## VISUALIZE THE GRAPH ####################################
    visualizeConnectivityGraph(G, join(args.outdirpath, 'ht_connectivity_graph'),
                               smallest_contig_length=smallest_contig_length)
    #######################################################################################
    ################################### FIND PATHS ########################################
    filtered_df = df[df["length"] >= smallest_contig_length]
    all_paths, overlap_paths, non_redundant_paths, filtered_df = findPaths(G, filtered_df, startnodes=filtered_unconnected,
                                                                           htflag=True)
    for p in non_redundant_paths:
        print(p)
    #######################################################################################
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    # Second pass: rebuild the graph WITHOUT the length filter and extend the
    # first-pass paths through it, using them as guides.
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    print("Starting Refinement...")
    #######################################################################################
    ## DETERMINE NEW MINIMAL PHYSICAL DISTANCE OF TWO CONTIG ENDS TO BE CALLED CONNECTED ##
    smallest_contig_length = min([len(rec.seq) for __,rec in records_dict.items() if len(rec.seq) >= LENGTH_THRESHOLD ])
    #######################################################################################
    ############################## Build 2nd Graph ########################################
    G2 = initGraph(contig_connections)
    G2, adj_lists = toGraph(perms_ht, G=G2,
                            dist_thresh = smallest_contig_length + 2 * READ_LENGTH * 1.5,
                            htflag = True)
    #######################################################################################
    ############################## VISUALIZE 2nd GRAPH ####################################
    visualizeConnectivityGraph(G2, join(args.outdirpath, 'ht_connectivity_graph2'),
                               smallest_contig_length=smallest_contig_length)
    ################################### FIND 2nd PATHS ########################################
    extended_paths = []
    diff_neighbors = buildNodeNeighborDict(G2)
    cov_dict, used_dict = getCovUsedContigDict(df)
    # Extend each guide path (longest first) through the unfiltered graph.
    for guide_path in sorted(non_redundant_paths, key=lambda x: len(x), reverse=True):
        startnode = guide_path[1]
        current_path = [guide_path[0]]
        if G2.has_node(startnode):
            used_dict[guide_path[0]] += 1
            #print(guide_path)
            # len(guide_path*3) == 3 * len(guide_path): list repetition caps path growth
            extended_paths.append(findPath(startnode, diff_neighbors, current_path, cov_dict, used_dict, MAX=len(guide_path*3), G=G2, htflag=True, guides=guide_path[2:]))
    # Start fresh paths from every node not yet used by any extension.
    for n in sorted(G2.nodes()):
        if used_dict[n] == 0:
            current_path = []
            extended_paths.append(findPath(n, diff_neighbors, current_path, cov_dict, used_dict, G=G2, htflag=True))
    for p in extended_paths:
        print(p)
    #######################################################################################
    ##### Finalize and produce output files ####################
    df = checkAndCountOccurences(extended_paths, df)
    df.to_csv(join(args.outdirpath, 'contigs_stats.tsv'), sep= '\t')
    final_paths = checkAndConvertHTpaths(extended_paths)
    print("Final_paths:")
    for p in final_paths:
        print(p)
    scaffold_records = produceScaffoldRecords(final_paths, records_dict)
    outfile = open(join(args.outdirpath, 'scaffolds.fasta'), 'w')
    for record in scaffold_records:
        SeqIO.write(record, outfile, "fasta")
    outfile.close()
    ###################### PRINT CONTIG FOLD COVERAGES & STATS ############################
    # NOTE(review): this rewrites the same contigs_stats.tsv already written
    # above with identical content -- likely redundant; confirm before removing.
    df.to_csv(join(args.outdirpath, 'contigs_stats.tsv'), sep= '\t')
|
{"hexsha": "f4e4816f1b5f047b6d60fc9d327d67e6c139e347", "size": 53714, "ext": "py", "lang": "Python", "max_stars_repo_path": "rscaffolder.py", "max_stars_repo_name": "ba1/RScaffolder", "max_stars_repo_head_hexsha": "89a9e7894c0e279a4de813689bc38728fcadd77c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-03-09T16:59:07.000Z", "max_stars_repo_stars_event_max_datetime": "2018-03-09T18:43:23.000Z", "max_issues_repo_path": "rscaffolder.py", "max_issues_repo_name": "ba1/RScaffolder", "max_issues_repo_head_hexsha": "89a9e7894c0e279a4de813689bc38728fcadd77c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rscaffolder.py", "max_forks_repo_name": "ba1/RScaffolder", "max_forks_repo_head_hexsha": "89a9e7894c0e279a4de813689bc38728fcadd77c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-07-02T07:40:11.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-02T07:40:11.000Z", "avg_line_length": 38.6153846154, "max_line_length": 170, "alphanum_fraction": 0.5147633764, "include": true, "reason": "import numpy,import networkx", "num_tokens": 12540}
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from abra.config import DEFAULT_ALPHA
from abra.stats import Samples, RateComparison
from abra.inference.frequentist.results import FrequentistTestResults
from abra.inference import FrequentistProcedure
import numpy as np
from scipy.stats import norm
class RatesRatio(FrequentistProcedure):
    """
    Runs frequentist inference procedure to test for the difference in two sample
    rates.

    Parameters
    ----------
    hypothesis : str
        the alternative hypothesis:
        - 'larger': one-tailed test, assume variation is larger than null
        - 'smaller': one-tailed test, assume variation is smaller than null
        - 'unequal': two-tailed test, variation mean is different than null
    var_assumptions : str
        whether to use pooled or unequal variance assumptions
        - 'pooled': assume the same variance
        - 'unequal': use Smith-Satterthwaite dof when calculating t-stat
    """
    def __init__(self, *args, **kwargs):
        super(RatesRatio, self).__init__(*args, **kwargs)

    def run(self, control_samples, variation_samples,
            alpha=DEFAULT_ALPHA, inference_kwargs=None):
        """
        Run the inference procedure over the samples with a selected alpha
        value. Stores the comparison on `self`; results are read back via
        `stats`, `ci`, and `make_results`.

        alpha : float in [0, 1]
            the assumed Type I error rate
        """
        # Coerce raw sequences/arrays into Samples containers.
        if isinstance(control_samples, (list, np.ndarray)):
            control_samples = Samples(control_samples)
        if isinstance(variation_samples, (list, np.ndarray)):
            variation_samples = Samples(variation_samples)
        self.alpha = alpha
        # Convention used throughout: d1 = variation (samples_a),
        # d2 = control (samples_b).
        self.comparison = RateComparison(
            samples_a=variation_samples,
            samples_b=control_samples,
            alpha=self.alpha,
            hypothesis=self.hypothesis
        )

    @property
    def stats(self):
        # Lazily computed and cached (statistic_value, p_value) pair.
        if not hasattr(self, '_stats'):
            self._stats = self.comparison.rates_test()
        return self._stats

    @property
    def ci(self):
        """
        Calculate confidence interval for rates ratio. Intervals outside of 1
        support the alternative hypothesis.

        Calculation follows Li, Tang, & Wong, 2008, using the MOVER-R method,
        and Rao Score intervals (Altman et al, 2000) for individual rate interval
        estimates (aka "FRSI")

        Returns
        -------
        CIs : list
            [(CI_lower, CI_upper), (CI_lower_percentile, CI_upper_percentile)]

        References
        ----------
        Li H.Q, Tang ML, Wong WK (2008) Confidence intervals for ratio of two
        Poisson rates using the method of variance estimates recovery.
        Biometrical Journal 50 (2008)
        Altman D., Machin D., Bryant TN. et al. (2000) "Statistics with confidence"
        (2nd). BMJ Books: Bristol.
        """
        def rao_score_interval(X, z, t):
            # individual rate interval method 2 (Altman et al., 2000)
            # X: event count, z: normal quantile, t: exposure (nobs)
            a = X + .5 * z**2.
            b = z * np.sqrt(X + .25 * z**2.)
            return (a - b) / t, (a + b) / t
        if self.hypothesis == 'larger':
            z = norm.ppf(1 - self.alpha)
        elif self.hypothesis == 'smaller':
            # NOTE(review): for 'smaller', norm.ppf(self.alpha) is NEGATIVE,
            # which flips the (lower, upper) order returned by
            # rao_score_interval (b < 0). Confirm this is intended; the
            # 'larger' branch uses the positive quantile.
            z = norm.ppf(self.alpha)
        elif self.hypothesis == 'unequal':
            z = np.abs(norm.ppf(1 - self.alpha / 2.))
        # d2 is the control group, d1 the variation group (set in `run`).
        control = self.comparison.d2
        variation = self.comparison.d1
        X1, t1 = control.data.sum(), control.nobs
        X2, t2 = variation.data.sum(), variation.nobs
        lam_1 = X1 / t1
        lam_2 = X2 / t2
        # Despite the name, this is the PRODUCT of the two rate estimates,
        # as required by the MOVER-R bounds below.
        lam_2_lam_1 = lam_2 * lam_1
        # (l2, u2): control-rate interval; (l1, u1): variation-rate interval.
        l2, u2 = rao_score_interval(X1, z, t1)
        l1, u1 = rao_score_interval(X2, z, t2)
        # MOVER-R lower bound for the ratio lam_2 / lam_1
        # (comment cites Gu et al, 2008 but the docstring cites Li/Tang/Wong
        # 2008 -- same MOVER family; confirm the intended reference).
        # Gu et al, 2008; Eq 3
        L = (lam_2_lam_1 - np.sqrt(lam_2_lam_1 ** 2 - l1 * (2 * lam_2 - l1) * (u2 * (2 * lam_1 - u2)))) / (u2 * (2 * lam_1 - u2))
        # Gu et al, 2008; Eq 4
        U = (lam_2_lam_1 + np.sqrt(lam_2_lam_1 ** 2 - u1 * (2 * lam_2 - u1) * (l2 * (2 * lam_1 - l2)))) / (l2 * (2 * lam_1 - l2))
        return [(L, U), self.ci_percents]

    def make_results(self):
        """
        Package up inference results
        """
        statistic_value, p_value = self.stats
        accept_hypothesis = self.accept_hypothesis(statistic_value)
        # control = d2, variation = d1, matching the assignment in `run`.
        return FrequentistTestResults(
            control=self.comparison.d2,
            variation=self.comparison.d1,
            delta=self.comparison.delta,
            delta_relative=self.comparison.delta_relative,
            effect_size=self.comparison.effect_size,
            alpha=self.comparison.alpha,
            confidence_interval=self.ci,
            test_statistic=self.test_statistic,
            statistic_value=statistic_value,
            p_value=p_value,
            hypothesis=self.hypothesis_text,
            accept_hypothesis=accept_hypothesis,
            inference_procedure=self,
            power=self.comparison.power
        )
|
{"hexsha": "f4ce2b48d251b495499839f4f05e7b1a9b3a86ba", "size": 4934, "ext": "py", "lang": "Python", "max_stars_repo_path": "abra/inference/frequentist/rates.py", "max_stars_repo_name": "quizlet/abracadabra", "max_stars_repo_head_hexsha": "eda599bd02f14b96efdc521f53132d93c9100ede", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 24, "max_stars_repo_stars_event_min_datetime": "2020-06-12T16:12:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-01T12:25:38.000Z", "max_issues_repo_path": "abra/inference/frequentist/rates.py", "max_issues_repo_name": "quizlet/abracadabra", "max_issues_repo_head_hexsha": "eda599bd02f14b96efdc521f53132d93c9100ede", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 20, "max_issues_repo_issues_event_min_datetime": "2020-06-12T06:26:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:57:51.000Z", "max_forks_repo_path": "abra/inference/frequentist/rates.py", "max_forks_repo_name": "quizlet/abracadabra", "max_forks_repo_head_hexsha": "eda599bd02f14b96efdc521f53132d93c9100ede", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-06-14T12:14:11.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-28T15:36:44.000Z", "avg_line_length": 34.7464788732, "max_line_length": 130, "alphanum_fraction": 0.6009323064, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1240}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.