text
stringlengths 0
1.25M
| meta
stringlengths 47
1.89k
|
|---|---|
\section{Defining Engineering Design}
\newthought{Over the years}, there has been many attempts to define Engineering Design.
This has been a non-trivial task and is mainly due to the increasingly complex and multi-disciplinary nature of design.
An element that continue to increase over the years with the rate of technological development.
\marginnote{fielden report}In the first two years of the Bath Engineering Degree, we will be focusing on the design of mechanical components, systems and machines.
Therefore, we will be using Fielden's definition of Engineering Design:
\begin{center}
``The use of scientific principles, technical information and imagination in the definition of a mechanical structure, machine or system to perform pre-specified function with the maximum economy and efficiency.''~\cite{fielden1963}
\end{center}
\citeauthor{pahl2013}~\cite{pahl2013} also provides an interesting perspective on Engineering Design and how it forms the bridge between the sciences (\cref{fig-ed}). It is being able to analyse and critique designs both objectively and subjectively, which is key to being a Design Engineer.
\begin{figure*}
\centering
\includestandalone[width=0.75\textwidth, mode=buildnew]{02_engineering_design/cross-over}
\caption[Positioning Engineering Design within the sciences]{Positioning Engineering Design within the sciences~\citep{pahl2013}}\label{fig-ed}
\end{figure*}
|
{"hexsha": "0b53bd608532bb87fdc5af943520cc7d7c03a305", "size": 1427, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "02_engineering_design/section.tex", "max_stars_repo_name": "JamesGopsill/DesignAndMakeCourseNotes", "max_stars_repo_head_hexsha": "ac52e37b77ad2200088677efef5356b8ecaac990", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "02_engineering_design/section.tex", "max_issues_repo_name": "JamesGopsill/DesignAndMakeCourseNotes", "max_issues_repo_head_hexsha": "ac52e37b77ad2200088677efef5356b8ecaac990", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "02_engineering_design/section.tex", "max_forks_repo_name": "JamesGopsill/DesignAndMakeCourseNotes", "max_forks_repo_head_hexsha": "ac52e37b77ad2200088677efef5356b8ecaac990", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 71.35, "max_line_length": 291, "alphanum_fraction": 0.8037841626, "num_tokens": 318}
|
function chanind = selectchannels(this, channels)
% Method for getting channel indices based on labels and/or types
% FORMAT res = selectchannels(this, label)
% this - MEEG object
% channels - string or cell array of labels that may also include
% 'all', or types ('EEG', 'MEG' etc.)
%
% res - vector of channel indices matching labels
%__________________________________________________________________________
% Copyright (C) 2010-2012 Wellcome Trust Centre for Neuroimaging
% Vladimir Litvak
% $Id: selectchannels.m 7253 2018-02-04 17:20:57Z vladimir $
if ischar(channels)
channels = {channels};
end
chanind = [];
for i = 1:numel(channels)
if strncmpi('regexp_', channels{i}, 7)
re = channels{i}(8:end);
match = regexp(chanlabels(this), re);
chanind = [chanind find(~cellfun('isempty', match))];
else
cind = indchannel(this, channels{i});
if ~isempty(cind)
chanind = [chanind cind];
elseif ismember(upper(channels{i}), ...
{'ALL','MEG', 'MEGPLANAR', 'MEGMAG', 'MEGGRAD', 'MEGCOMB','EEG',...
'EOG', 'ECG', 'EMG', 'LFP', 'SRC', 'PHYS', 'ILAM', 'OTHER', 'REF', 'REFMAG', 'REFGRAD'})
chanind = [chanind indchantype(this, upper(channels{i}))];
end
end
if any(size(chanind) == 0)
chanind = [];
end
end
chanind = unique(chanind);
|
{"author": "spm", "repo": "spm12", "sha": "3085dac00ac804adb190a7e82c6ef11866c8af02", "save_path": "github-repos/MATLAB/spm-spm12", "path": "github-repos/MATLAB/spm-spm12/spm12-3085dac00ac804adb190a7e82c6ef11866c8af02/@meeg/selectchannels.m"}
|
mutable struct DQMCStack{
GreensElType <: Number,
HoppingElType <: Number,
GreensMatType <: AbstractArray{GreensElType},
HoppingMatType <: AbstractArray{HoppingElType},
InteractionMatType <: AbstractArray
} <: AbstractDQMCStack
u_stack::Vector{GreensMatType}
d_stack::Vector{Vector{Float64}}
t_stack::Vector{GreensMatType}
Ul::GreensMatType
Ur::GreensMatType
Dl::Vector{Float64}
Dr::Vector{Float64}
Tl::GreensMatType
Tr::GreensMatType
pivot::Vector{Int64}
tempv::Vector{GreensElType}
tempvf::Vector{Float64}
greens::GreensMatType
greens_temp::GreensMatType
tmp1::GreensMatType
tmp2::GreensMatType
ranges::Array{UnitRange, 1}
n_elements::Int
current_slice::Int # running internally over 0:mc.parameters.slices+1, where 0 and mc.parameters.slices+1 are artifcial to prepare next sweep direction.
direction::Int
# # -------- Global update backup
# gb_u_stack::Array{GreensElType, 3}
# gb_d_stack::Matrix{Float64}
# gb_t_stack::Array{GreensElType, 3}
# gb_greens::GreensMatType
# gb_log_det::Float64
# gb_conf::Array{Float64, 3}
# # --------
# preallocated, reused arrays
curr_U::GreensMatType
eV::InteractionMatType
# hopping matrices (mu included)
hopping_matrix::HoppingMatType
hopping_matrix_exp::HoppingMatType
hopping_matrix_exp_inv::HoppingMatType
hopping_matrix_exp_squared::HoppingMatType
hopping_matrix_exp_inv_squared::HoppingMatType
# checkerboard hopping matrices
checkerboard::Matrix{Int} # src, trg, bondid
groups::Vector{UnitRange}
n_groups::Int
chkr_hop_half::Vector{SparseMatrixCSC{HoppingElType, Int64}}
chkr_hop_half_inv::Vector{SparseMatrixCSC{HoppingElType, Int64}}
chkr_hop_half_dagger::Vector{SparseMatrixCSC{HoppingElType, Int64}}
chkr_hop::Vector{SparseMatrixCSC{HoppingElType, Int64}} # without prefactor 0.5 in matrix exponentials
chkr_hop_inv::Vector{SparseMatrixCSC{HoppingElType, Int64}}
chkr_hop_dagger::Vector{SparseMatrixCSC{HoppingElType, Int64}}
chkr_mu_half::SparseMatrixCSC{HoppingElType, Int64}
chkr_mu_half_inv::SparseMatrixCSC{HoppingElType, Int64}
chkr_mu::SparseMatrixCSC{HoppingElType, Int64}
chkr_mu_inv::SparseMatrixCSC{HoppingElType, Int64}
function DQMCStack{GET, HET, GMT, HMT, IMT}() where {
GET<:Number, HET<:Number,
GMT<:AbstractArray{GET}, HMT<:AbstractArray{HET}, IMT<:AbstractArray
}
@assert isconcretetype(GET);
@assert isconcretetype(HET);
@assert isconcretetype(GMT);
@assert isconcretetype(HMT);
@assert isconcretetype(IMT);
@assert eltype(GMT) == GET;
@assert eltype(HMT) == HET;
new{GET, HET, GMT, HMT, IMT}()
end
end
# type helpers
geltype(::DQMCStack{GET, HET, GMT, HMT, IMT}) where {GET, HET, GMT, HMT, IMT} = GET
heltype(::DQMCStack{GET, HET, GMT, HMT, IMT}) where {GET, HET, GMT, HMT, IMT} = HET
gmattype(::DQMCStack{GET, HET, GMT, HMT, IMT}) where {GET, HET, GMT, HMT, IMT} = GMT
hmattype(::DQMCStack{GET, HET, GMT, HMT, IMT}) where {GET, HET, GMT, HMT, IMT} = HMT
imattype(::DQMCStack{GET, HET, GMT, HMT, IMT}) where {GET, HET, GMT, HMT, IMT} = IMT
geltype(mc::DQMC) = geltype(mc.stack)
heltype(mc::DQMC) = heltype(mc.stack)
gmattype(mc::DQMC) = gmattype(mc.stack)
hmattype(mc::DQMC) = hmattype(mc.stack)
imattype(mc::DQMC) = imattype(mc.stack)
################################################################################
### Stack Initialization
################################################################################
function initialize_stack(mc::DQMC, ::DQMCStack)
GreensElType = geltype(mc)
GreensMatType = gmattype(mc)
HoppingElType = heltype(mc)
N = length(lattice(mc))
flv = nflavors(mc.model)
mc.stack.n_elements = convert(Int, mc.parameters.slices / mc.parameters.safe_mult) + 1
mc.stack.u_stack = [GreensMatType(undef, flv*N, flv*N) for _ in 1:mc.stack.n_elements]
mc.stack.d_stack = [zeros(Float64, flv*N) for _ in 1:mc.stack.n_elements]
mc.stack.t_stack = [GreensMatType(undef, flv*N, flv*N) for _ in 1:mc.stack.n_elements]
mc.stack.greens = GreensMatType(undef, flv*N, flv*N)
mc.stack.greens_temp = GreensMatType(undef, flv*N, flv*N)
# used in calculate_greens
# do not change in slice_matrices.jl or interaction_matrix_exp!
mc.stack.Ul = GreensMatType(I, flv*N, flv*N)
mc.stack.Ur = GreensMatType(I, flv*N, flv*N)
mc.stack.Tl = GreensMatType(I, flv*N, flv*N)
mc.stack.Tr = GreensMatType(I, flv*N, flv*N)
mc.stack.Dl = ones(Float64, flv*N)
mc.stack.Dr = ones(Float64, flv*N)
# can be changed anywhere
mc.stack.pivot = Vector{Int64}(undef, flv*N)
mc.stack.tempv = Vector{GreensElType}(undef, flv*N)
mc.stack.tempvf = Vector{Float64}(undef, flv*N)
# can be changed anywhere
mc.stack.tmp1 = GreensMatType(undef, flv*N, flv*N)
mc.stack.tmp2 = GreensMatType(undef, flv*N, flv*N)
# # Global update backup
# mc.stack.gb_u_stack = zero(mc.stack.u_stack)
# mc.stack.gb_d_stack = zero(mc.stack.d_stack)
# mc.stack.gb_t_stack = zero(mc.stack.t_stack)
# mc.stack.gb_greens = zero(mc.stack.greens)
# mc.stack.gb_log_det = 0.
# mc.stack.gb_conf = zero(mc.conf)
mc.stack.ranges = UnitRange[]
for i in 1:mc.stack.n_elements - 1
push!(mc.stack.ranges, 1 + (i - 1) * mc.parameters.safe_mult:i * mc.parameters.safe_mult)
end
mc.stack.curr_U = GreensMatType(undef, flv*N, flv*N)
mc.stack.eV = init_interaction_matrix(mc.model)
nothing
end
# hopping
function init_hopping_matrices(mc::DQMC{M,CB}, m::Model) where {M, CB<:Checkerboard}
init_hopping_matrix_exp(mc, m)
CB <: CheckerboardTrue && init_checkerboard_matrices(mc, m)
nothing
end
function init_hopping_matrix_exp(mc::DQMC, m::Model)
N = length(lattice(m))
flv = nflavors(m)
dtau = mc.parameters.delta_tau
T = hopping_matrix(mc, m)
size(T) == (flv*N, flv*N) || error("Hopping matrix should have size "*
"$((flv*N, flv*N)) but has size $(size(T)) .")
mc.stack.hopping_matrix = T
mc.stack.hopping_matrix_exp = exp(-0.5 * dtau * T)
mc.stack.hopping_matrix_exp_inv = exp(0.5 * dtau * T)
mc.stack.hopping_matrix_exp_squared = mc.stack.hopping_matrix_exp * mc.stack.hopping_matrix_exp
mc.stack.hopping_matrix_exp_inv_squared = mc.stack.hopping_matrix_exp_inv * mc.stack.hopping_matrix_exp_inv
nothing
end
# checkerboard
rem_eff_zeros!(X::AbstractArray) = map!(e -> abs.(e)<1e-15 ? zero(e) : e,X,X)
function init_checkerboard_matrices(mc::DQMC, m::Model)
s = mc.stack
l = lattice(m)
flv = nflavors(m)
H = heltype(mc)
N = length(l)
dtau = mc.parameters.delta_tau
mu = m.mu
s.checkerboard, s.groups, s.n_groups = build_checkerboard(l)
n_grps = s.n_groups
cb = s.checkerboard
T = reshape(hopping_matrix(mc, m), (N, flv, N, flv))
s.chkr_hop_half = Vector{SparseMatrixCSC{H, Int}}(undef, n_grps)
s.chkr_hop_half_inv = Vector{SparseMatrixCSC{H, Int}}(undef, n_grps)
s.chkr_hop = Vector{SparseMatrixCSC{H, Int}}(undef, n_grps)
s.chkr_hop_inv = Vector{SparseMatrixCSC{H, Int}}(undef, n_grps)
for (g, gr) in enumerate(s.groups)
Tg = zeros(H, N, flv, N, flv)
for i in gr
src, trg = cb[1:2,i]
for f1 in 1:flv, f2 in 1:flv
Tg[trg, f1, src, f2] = T[trg, f1, src, f2]
end
end
Tgg = reshape(Tg, (N*flv, N*flv))
s.chkr_hop_half[g] = sparse(rem_eff_zeros!(exp(-0.5 * dtau * Tgg)))
s.chkr_hop_half_inv[g] = sparse(rem_eff_zeros!(exp(0.5 * dtau * Tgg)))
s.chkr_hop[g] = sparse(rem_eff_zeros!(exp(- dtau * Tgg)))
s.chkr_hop_inv[g] = sparse(rem_eff_zeros!(exp(dtau * Tgg)))
end
s.chkr_hop_half_dagger = adjoint.(s.chkr_hop_half)
s.chkr_hop_dagger = adjoint.(s.chkr_hop)
mus = diag(reshape(T, (N*flv, N*flv)))
s.chkr_mu_half = spdiagm(0 => exp.(-0.5 * dtau * mus))
s.chkr_mu_half_inv = spdiagm(0 => exp.(0.5 * dtau * mus))
s.chkr_mu = spdiagm(0 => exp.(-dtau * mus))
s.chkr_mu_inv = spdiagm(0 => exp.(dtau * mus))
# hop_mat_exp_chkr = foldl(*,s.chkr_hop_half) * sqrt.(s.chkr_mu)
# r = effreldiff(s.hopping_matrix_exp,hop_mat_exp_chkr)
# r[find(x->x==zero(x),hop_mat_exp_chkr)] = 0.
# println("Checkerboard - Exact ≈ ", round(maximum(absdiff(s.hopping_matrix_exp,hop_mat_exp_chkr)), 4))
nothing
end
"""
build_stack(mc::DQMC)
Build slice matrix stack from scratch.
"""
@bm function build_stack(mc::DQMC, ::DQMCStack)
copyto!(mc.stack.u_stack[1], I)
mc.stack.d_stack[1] .= one(eltype(mc.stack.d_stack[1]))
copyto!(mc.stack.t_stack[1], I)
@inbounds for i in 1:length(mc.stack.ranges)
add_slice_sequence_left(mc, i)
end
mc.stack.current_slice = mc.parameters.slices + 1
mc.stack.direction = -1
nothing
end
@bm function reverse_build_stack(mc::DQMC, ::DQMCStack)
copyto!(mc.stack.u_stack[end], I)
mc.stack.d_stack[end] .= one(eltype(mc.stack.d_stack[end]))
copyto!(mc.stack.t_stack[end], I)
@inbounds for i in length(mc.stack.ranges):-1:1
add_slice_sequence_right(mc, i)
end
mc.stack.current_slice = 0
mc.stack.direction = 1
nothing
end
################################################################################
### Slice matrix stack manipulations/updates
################################################################################
"""
add_slice_sequence_left(mc::DQMC, idx)
Computes the next `mc.parameters.safe_mult` slice matrix products from the current `idx`
and writes them to `idx+1`. The index `idx` does not refer to the slice index,
but `mc.parameters.safe_mult` times the slice index.
"""
@bm function add_slice_sequence_left(mc::DQMC, idx::Int)
@inbounds begin
copyto!(mc.stack.curr_U, mc.stack.u_stack[idx])
# println("Adding slice seq left $idx = ", mc.stack.ranges[idx])
for slice in mc.stack.ranges[idx]
multiply_slice_matrix_left!(mc, mc.model, slice, mc.stack.curr_U)
end
vmul!(mc.stack.tmp1, mc.stack.curr_U, Diagonal(mc.stack.d_stack[idx]))
udt_AVX_pivot!(
mc.stack.u_stack[idx + 1], mc.stack.d_stack[idx + 1], mc.stack.tmp1,
mc.stack.pivot, mc.stack.tempv
)
vmul!(mc.stack.t_stack[idx + 1], mc.stack.tmp1, mc.stack.t_stack[idx])
end
end
"""
add_slice_sequence_right(mc::DQMC, idx)
Computes the next `mc.parameters.safe_mult` slice matrix products from the current
`idx+1` and writes them to `idx`. The index `idx` does not refer to the slice
index, but `mc.parameters.safe_mult` times the slice index.
"""
@bm function add_slice_sequence_right(mc::DQMC, idx::Int)
@inbounds begin
copyto!(mc.stack.curr_U, mc.stack.u_stack[idx + 1])
for slice in reverse(mc.stack.ranges[idx])
multiply_daggered_slice_matrix_left!(mc, mc.model, slice, mc.stack.curr_U)
end
vmul!(mc.stack.tmp1, mc.stack.curr_U, Diagonal(mc.stack.d_stack[idx + 1]))
udt_AVX_pivot!(
mc.stack.u_stack[idx], mc.stack.d_stack[idx], mc.stack.tmp1, mc.stack.pivot, mc.stack.tempv
)
vmul!(mc.stack.t_stack[idx], mc.stack.tmp1, mc.stack.t_stack[idx + 1])
end
end
################################################################################
### Green's function calculation
################################################################################
"""
calculate_greens_AVX!(Ul, Dl, Tl, Ur, Dr, Tr, G[, pivot, temp])
Calculates the effective Greens function matrix `G` from two UDT decompositions
`Ul, Dl, Tl` and `Ur, Dr, Tr`. Additionally a `pivot` vector can be given. Note
that all inputs will be overwritten.
The UDT should follow from a set of slice_matrix multiplications, such that
`Ur, Dr, Tr = udt(B(slice)' ⋯ B(M)')` and `Ul, Dl, Tl = udt(B(slice-1) ⋯ B(1))`.
The computed Greens function is then given as `G = inv(I + Ul Dl Tl Tr Dr Ur)`
and computed here.
`Ul, Tl, Ur, Tr, G` should be square matrices, `Dl, Dr` real Vectors (from
Diagonal matrices), `pivot` an integer Vector and `temp` a Vector with the same
element type as the matrices.
"""
@bm function calculate_greens_AVX!(
Ul, Dl, Tl, Ur, Dr, Tr, G::AbstractArray{T},
pivot = Vector{Int64}(undef, length(Dl)),
temp = Vector{T}(undef, length(Dl))
) where T
# @bm "B1" begin
# Used: Ul, Dl, Tl, Ur, Dr, Tr
# TODO: [I + Ul Dl Tl Tr^† Dr Ur^†]^-1
# Compute: Dl * ((Tl * Tr) * Dr) -> Tr * Dr * G (UDT)
vmul!(G, Tl, adjoint(Tr))
vmul!(Tr, G, Diagonal(Dr))
vmul!(G, Diagonal(Dl), Tr)
udt_AVX_pivot!(Tr, Dr, G, pivot, temp, Val(false)) # Dl available
# end
# @bm "B2" begin
# Used: Ul, Ur, G, Tr, Dr (Ul, Ur, Tr unitary (inv = adjoint))
# TODO: [I + Ul Tr Dr G Ur^†]^-1
# = [(Ul Tr) ((Ul Tr)^-1 (G Ur^†) + Dr) (G Ur)]^-1
# = Ur G^-1 [(Ul Tr)^† Ur G^-1 + Dr]^-1 (Ul Tr)^†
# Compute: Ul Tr -> Tl
# (Ur G^-1) -> Ur
# ((Ul Tr)^† Ur G^-1) -> Tr
vmul!(Tl, Ul, Tr)
rdivp!(Ur, G, Ul, pivot) # requires unpivoted udt decompostion (Val(false))
vmul!(Tr, adjoint(Tl), Ur)
# end
# @bm "B3" begin
# Used: Tl, Ur, Tr, Dr
# TODO: Ur [Tr + Dr]^-1 Tl^† -> Ur [Tr]^-1 Tl^†
rvadd!(Tr, Diagonal(Dr))
# end
# @bm "B4" begin
# Used: Ur, Tr, Tl
# TODO: Ur [Tr]^-1 Tl^† -> Ur [Ul Dr Tr]^-1 Tl^†
# -> Ur Tr^-1 Dr^-1 Ul^† Tl^† -> Ur Tr^-1 Dr^-1 (Tl Ul)^†
# Compute: Ur Tr^-1 -> Ur, Tl Ul -> Tr
udt_AVX_pivot!(Ul, Dr, Tr, pivot, temp, Val(false)) # Dl available
rdivp!(Ur, Tr, G, pivot) # requires unpivoted udt decompostion (false)
vmul!(Tr, Tl, Ul)
# end
# @bm "B5" begin
@turbo for i in eachindex(Dr)
Dl[i] = 1.0 / Dr[i]
end
# end
# @bm "B6" begin
# Used: Ur, Tr, Dl, Ul, Tl
# TODO: (Ur Dl) Tr^† -> G
vmul!(Ul, Ur, Diagonal(Dl))
vmul!(G, Ul, adjoint(Tr))
# end
end
"""
calculate_greens(mc::DQMC)
Computes the effective greens function from the current state of the stack and
saves the result to `mc.stack.greens`.
This assumes the `mc.stack.Ul, mc.stack.Dl, mc.stack.Tl = udt(B(slice-1) ⋯ B(1))` and
`mc.stack.Ur, mc.stack.Dr, mc.stack.Tr = udt(B(slice)' ⋯ B(M)')`.
This should only used internally.
"""
@bm function calculate_greens(mc::DQMC, output::AbstractMatrix = mc.stack.greens)
calculate_greens_AVX!(
mc.stack.Ul, mc.stack.Dl, mc.stack.Tl,
mc.stack.Ur, mc.stack.Dr, mc.stack.Tr,
output, mc.stack.pivot, mc.stack.tempv
)
output
end
"""
calculate_greens(mc::DQMC, slice[, output=mc.stack.greens, safe_mult])
Compute the effective equal-time greens function from scratch at a given `slice`.
This does not invalidate the stack, but it does overwrite `mc.stack.greens`.
"""
@bm function calculate_greens(
mc::DQMC, slice::Int, output::AbstractMatrix = mc.stack.greens,
conf::AbstractArray = mc.conf, safe_mult::Int = mc.parameters.safe_mult
)
copyto!(mc.stack.curr_U, I)
copyto!(mc.stack.Ur, I)
mc.stack.Dr .= one(eltype(mc.stack.Dr))
copyto!(mc.stack.Tr, I)
# Calculate Ur,Dr,Tr=B(slice)' ... B(M)'
if slice+1 <= mc.parameters.slices
start = slice+1
stop = mc.parameters.slices
for k in reverse(start:stop)
if mod(k,safe_mult) == 0
multiply_daggered_slice_matrix_left!(mc, mc.model, k, mc.stack.curr_U, conf)
vmul!(mc.stack.tmp1, mc.stack.curr_U, Diagonal(mc.stack.Dr))
udt_AVX_pivot!(mc.stack.curr_U, mc.stack.Dr, mc.stack.tmp1, mc.stack.pivot, mc.stack.tempv)
copyto!(mc.stack.tmp2, mc.stack.Tr)
vmul!(mc.stack.Tr, mc.stack.tmp1, mc.stack.tmp2)
else
multiply_daggered_slice_matrix_left!(mc, mc.model, k, mc.stack.curr_U, conf)
end
end
vmul!(mc.stack.tmp1, mc.stack.curr_U, Diagonal(mc.stack.Dr))
udt_AVX_pivot!(mc.stack.Ur, mc.stack.Dr, mc.stack.tmp1, mc.stack.pivot, mc.stack.tempv)
copyto!(mc.stack.tmp2, mc.stack.Tr)
vmul!(mc.stack.Tr, mc.stack.tmp1, mc.stack.tmp2)
end
copyto!(mc.stack.curr_U, I)
copyto!(mc.stack.Ul, I)
mc.stack.Dl .= one(eltype(mc.stack.Dl))
copyto!(mc.stack.Tl, I)
# Calculate Ul,Dl,Tl=B(slice-1) ... B(1)
if slice >= 1
start = 1
stop = slice
for k in start:stop
if mod(k,safe_mult) == 0
multiply_slice_matrix_left!(mc, mc.model, k, mc.stack.curr_U, conf)
vmul!(mc.stack.tmp1, mc.stack.curr_U, Diagonal(mc.stack.Dl))
udt_AVX_pivot!(mc.stack.curr_U, mc.stack.Dl, mc.stack.tmp1, mc.stack.pivot, mc.stack.tempv)
copyto!(mc.stack.tmp2, mc.stack.Tl)
vmul!(mc.stack.Tl, mc.stack.tmp1, mc.stack.tmp2)
else
multiply_slice_matrix_left!(mc, mc.model, k, mc.stack.curr_U, conf)
end
end
vmul!(mc.stack.tmp1, mc.stack.curr_U, Diagonal(mc.stack.Dl))
udt_AVX_pivot!(mc.stack.Ul, mc.stack.Dl, mc.stack.tmp1, mc.stack.pivot, mc.stack.tempv)
copyto!(mc.stack.tmp2, mc.stack.Tl)
vmul!(mc.stack.Tl, mc.stack.tmp1, mc.stack.tmp2)
end
return calculate_greens(mc, output)
end
################################################################################
### Stack update
################################################################################
# Green's function propagation
@inline @bm function wrap_greens!(mc::DQMC, gf, curr_slice::Int, direction::Int)
if direction == -1
multiply_slice_matrix_inv_left!(mc, mc.model, curr_slice - 1, gf)
multiply_slice_matrix_right!(mc, mc.model, curr_slice - 1, gf)
else
multiply_slice_matrix_left!(mc, mc.model, curr_slice, gf)
multiply_slice_matrix_inv_right!(mc, mc.model, curr_slice, gf)
end
nothing
end
@bm function propagate(mc::DQMC)
@inbounds if mc.stack.direction == 1
if mod(mc.stack.current_slice, mc.parameters.safe_mult) == 0
mc.stack.current_slice +=1 # slice we are going to
if mc.stack.current_slice == 1
copyto!(mc.stack.Ur, mc.stack.u_stack[1])
copyto!(mc.stack.Dr, mc.stack.d_stack[1])
copyto!(mc.stack.Tr, mc.stack.t_stack[1])
copyto!(mc.stack.u_stack[1], I)
mc.stack.d_stack[1] .= one(eltype(mc.stack.d_stack[1]))
copyto!(mc.stack.t_stack[1], I)
copyto!(mc.stack.Ul, mc.stack.u_stack[1])
copyto!(mc.stack.Dl, mc.stack.d_stack[1])
copyto!(mc.stack.Tl, mc.stack.t_stack[1])
calculate_greens(mc) # greens_1 ( === greens_{m+1} )
elseif 1 < mc.stack.current_slice <= mc.parameters.slices
idx = Int((mc.stack.current_slice - 1)/mc.parameters.safe_mult)
copyto!(mc.stack.Ur, mc.stack.u_stack[idx+1])
copyto!(mc.stack.Dr, mc.stack.d_stack[idx+1])
copyto!(mc.stack.Tr, mc.stack.t_stack[idx+1])
add_slice_sequence_left(mc, idx)
copyto!(mc.stack.Ul, mc.stack.u_stack[idx+1])
copyto!(mc.stack.Dl, mc.stack.d_stack[idx+1])
copyto!(mc.stack.Tl, mc.stack.t_stack[idx+1])
if mc.parameters.check_propagation_error
copyto!(mc.stack.greens_temp, mc.stack.greens)
end
# Should this be mc.stack.greens_temp?
# If so, shouldn't this only run w/ mc.parameters.all_checks = true?
wrap_greens!(mc, mc.stack.greens_temp, mc.stack.current_slice - 1, 1)
calculate_greens(mc) # greens_{slice we are propagating to}
if mc.parameters.check_propagation_error
# OPT: could probably be optimized through explicit loop
greensdiff = maximum(abs.(mc.stack.greens_temp - mc.stack.greens))
if greensdiff > 1e-7
push!(mc.analysis.propagation_error, greensdiff)
mc.parameters.silent || @printf(
"->%d \t+1 Propagation instability\t %.1e\n",
mc.stack.current_slice, greensdiff
)
end
end
else # we are going to mc.parameters.slices+1
idx = mc.stack.n_elements - 1
add_slice_sequence_left(mc, idx)
mc.stack.direction = -1
mc.stack.current_slice = mc.parameters.slices+1 # redundant
propagate(mc)
end
else
# Wrapping
wrap_greens!(mc, mc.stack.greens, mc.stack.current_slice, 1)
mc.stack.current_slice += 1
end
else # mc.stack.direction == -1
if mod(mc.stack.current_slice-1, mc.parameters.safe_mult) == 0
mc.stack.current_slice -= 1 # slice we are going to
if mc.stack.current_slice == mc.parameters.slices
copyto!(mc.stack.Ul, mc.stack.u_stack[end])
copyto!(mc.stack.Dl, mc.stack.d_stack[end])
copyto!(mc.stack.Tl, mc.stack.t_stack[end])
copyto!(mc.stack.u_stack[end], I)
mc.stack.d_stack[end] .= one(eltype(mc.stack.d_stack[end]))
copyto!(mc.stack.t_stack[end], I)
copyto!(mc.stack.Ur, mc.stack.u_stack[end])
copyto!(mc.stack.Dr, mc.stack.d_stack[end])
copyto!(mc.stack.Tr, mc.stack.t_stack[end])
calculate_greens(mc) # greens_{mc.parameters.slices+1} === greens_1
# wrap to greens_{mc.parameters.slices}
wrap_greens!(mc, mc.stack.greens, mc.stack.current_slice + 1, -1)
elseif 0 < mc.stack.current_slice < mc.parameters.slices
idx = Int(mc.stack.current_slice / mc.parameters.safe_mult) + 1
copyto!(mc.stack.Ul, mc.stack.u_stack[idx])
copyto!(mc.stack.Dl, mc.stack.d_stack[idx])
copyto!(mc.stack.Tl, mc.stack.t_stack[idx])
add_slice_sequence_right(mc, idx)
copyto!(mc.stack.Ur, mc.stack.u_stack[idx])
copyto!(mc.stack.Dr, mc.stack.d_stack[idx])
copyto!(mc.stack.Tr, mc.stack.t_stack[idx])
if mc.parameters.check_propagation_error
copyto!(mc.stack.greens_temp, mc.stack.greens)
end
calculate_greens(mc)
if mc.parameters.check_propagation_error
# OPT: could probably be optimized through explicit loop
greensdiff = maximum(abs.(mc.stack.greens_temp - mc.stack.greens))
if greensdiff > 1e-7
push!(mc.analysis.propagation_error, greensdiff)
mc.parameters.silent || @printf(
"->%d \t-1 Propagation instability\t %.1e\n",
mc.stack.current_slice, greensdiff
)
end
end
wrap_greens!(mc, mc.stack.greens, mc.stack.current_slice + 1, -1)
else # we are going to 0
idx = 1
add_slice_sequence_right(mc, idx)
mc.stack.direction = 1
mc.stack.current_slice = 0 # redundant
propagate(mc)
end
else
# Wrapping
wrap_greens!(mc, mc.stack.greens, mc.stack.current_slice, -1)
mc.stack.current_slice -= 1
end
end
nothing
end
|
{"hexsha": "55ea4ab9b810aedd0b31c726734e4ac29de7471c", "size": 24077, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/flavors/DQMC/stack.jl", "max_stars_repo_name": "crstnbr/MonteCarlo.jl", "max_stars_repo_head_hexsha": "1c3a678a9991ce9770222e658aee358d39f3693e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 121, "max_stars_repo_stars_event_min_datetime": "2018-03-06T16:43:50.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-08T18:01:19.000Z", "max_issues_repo_path": "src/flavors/DQMC/stack.jl", "max_issues_repo_name": "crstnbr/MonteCarlo.jl", "max_issues_repo_head_hexsha": "1c3a678a9991ce9770222e658aee358d39f3693e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 88, "max_issues_repo_issues_event_min_datetime": "2018-08-08T12:36:44.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-16T14:46:36.000Z", "max_forks_repo_path": "src/flavors/DQMC/stack.jl", "max_forks_repo_name": "crstnbr/MonteCarlo.jl", "max_forks_repo_head_hexsha": "1c3a678a9991ce9770222e658aee358d39f3693e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 14, "max_forks_repo_forks_event_min_datetime": "2018-02-05T16:17:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-07T16:13:07.000Z", "avg_line_length": 37.0986132512, "max_line_length": 156, "alphanum_fraction": 0.5923495452, "num_tokens": 6950}
|
import argparse, time, os
from networks import create_model, define_net
import numpy as np
import torch
import imageio
from data import common
import options.options as option
from utils import util
from solvers import SRSolver
from data import create_dataloader
from data import create_dataset
networks= {
"which_model": "SRFBN",
"num_features": 64,
"in_channels": 3,
"out_channels": 3,
"num_steps": 4,
"num_groups": 6
}
def run(pretrained_path, output_path, model_name='SRFBN', scale=4, degrad='BI', opt='options/test/test_SRFBN_example.json'):
opt = option.parse(opt)
opt = option.dict_to_nonedict(opt)
# model = create_model(opt)
model = define_net({
"scale": scale,
"which_model": "SRFBN",
"num_features": 64,
"in_channels": 3,
"out_channels": 3,
"num_steps": 4,
"num_groups": 6
})
img = common.read_img('./results/LR/MyImage/chip.png', 'img')
np_transpose = np.ascontiguousarray(img.transpose((2, 0, 1)))
tensor = torch.from_numpy(np_transpose).float()
lr_tensor = torch.unsqueeze(tensor, 0)
checkpoint = torch.load(pretrained_path)
if 'state_dict' in checkpoint.keys():
checkpoint = checkpoint['state_dict']
load_func = model.load_state_dict
load_func(checkpoint)
torch.save(model, './model.pt')
with torch.no_grad():
SR = model(lr_tensor)[0]
# visuals = np.transpose(SR.data[0].float().cpu().numpy(), (1, 2, 0)).astype(np.uint8)
visuals = np.transpose(SR.data[0].float().cpu().numpy(), (1, 2, 0)).astype(np.uint8)
imageio.imwrite(output_path, visuals)
|
{"hexsha": "aed3e895c483e9593e2df1ee3ddbbce6f874647e", "size": 1642, "ext": "py", "lang": "Python", "max_stars_repo_path": "run.py", "max_stars_repo_name": "thekevinscott/SRFBN_CVPR19", "max_stars_repo_head_hexsha": "be4bd02e77c0f40419cc8d885febbec517ef9234", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "run.py", "max_issues_repo_name": "thekevinscott/SRFBN_CVPR19", "max_issues_repo_head_hexsha": "be4bd02e77c0f40419cc8d885febbec517ef9234", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "run.py", "max_forks_repo_name": "thekevinscott/SRFBN_CVPR19", "max_forks_repo_head_hexsha": "be4bd02e77c0f40419cc8d885febbec517ef9234", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.3103448276, "max_line_length": 124, "alphanum_fraction": 0.6626065773, "include": true, "reason": "import numpy", "num_tokens": 449}
|
module Stat
import Gadfly
import StatsBase
import Contour
using Colors
using Compat
using Compose
using DataArrays
using DataStructures
using Distributions
using Hexagons
using Loess
using CoupledFields # It is registered in METADATA.jl
using IndirectArrays
import Gadfly: Scale, Coord, input_aesthetics, output_aesthetics,
default_scales, isconcrete, setfield!, discretize_make_ia
import KernelDensity
# import Distributions: Uniform, Distribution, qqbuild
import IterTools: chain, distinct
import Compat.Iterators: cycle, product
include("bincount.jl")
# Apply a series of statistics.
#
# Args:
# stats: Statistics to apply in order.
# scales: Scales used by the plot.
# aes: A Aesthetics instance.
#
# Returns:
# Nothing, modifies aes.
#
function apply_statistics(stats::Vector{Gadfly.StatisticElement},
scales::Dict{Symbol, Gadfly.ScaleElement},
coord::Gadfly.CoordinateElement,
aes::Gadfly.Aesthetics)
for stat in stats
apply_statistic(stat, scales, coord, aes)
end
nothing
end
struct Nil <: Gadfly.StatisticElement end
const nil = Nil
struct Identity <: Gadfly.StatisticElement end
apply_statistic(stat::Identity,
scales::Dict{Symbol, Gadfly.ScaleElement},
coord::Gadfly.CoordinateElement,
aes::Gadfly.Aesthetics) = nothing
const identity = Identity
# Determine bounds of bars positioned at the given values.
function barminmax(vals, iscontinuous::Bool)
minvalue, maxvalue = extrema(vals)
span_type = typeof((maxvalue - minvalue) / 1.0)
barspan = one(span_type)
if iscontinuous && length(vals) > 1
sorted_vals = sort(vals)
T = typeof(sorted_vals[2] - sorted_vals[1])
z = zero(T)
minspan = z
for i in 2:length(vals)
span = sorted_vals[i] - sorted_vals[i-1]
if span > z && (span < minspan || minspan == z)
minspan = span
end
end
barspan = minspan
end
position_type = promote_type(typeof(barspan/2.0), eltype(vals))
minvals = Array{position_type}(length(vals))
maxvals = Array{position_type}(length(vals))
for (i, x) in enumerate(vals)
minvals[i] = x - barspan/2.0
maxvals[i] = x + barspan/2.0
end
return minvals, maxvals
end
struct RectbinStatistic <: Gadfly.StatisticElement end
const rectbin = RectbinStatistic
input_aesthetics(stat::RectbinStatistic) = [:x, :y]
output_aesthetics(stat::RectbinStatistic) = [:xmin, :xmax, :ymin, :ymax]
function apply_statistic(stat::RectbinStatistic,
scales::Dict{Symbol, Gadfly.ScaleElement},
coord::Gadfly.CoordinateElement,
aes::Gadfly.Aesthetics)
Gadfly.assert_aesthetics_defined("RectbinStatistic", aes, :x, :y)
isxcontinuous = haskey(scales, :x) && isa(scales[:x], Scale.ContinuousScale)
isycontinuous = haskey(scales, :y) && isa(scales[:y], Scale.ContinuousScale)
xminvals, xmaxvals = barminmax(aes.x, isxcontinuous)
yminvals, ymaxvals = barminmax(aes.y, isycontinuous)
aes.xmin = xminvals
aes.xmax = xmaxvals
aes.ymin = yminvals
aes.ymax = ymaxvals
if !isxcontinuous
aes.pad_categorical_x = Nullable(false)
end
if !isycontinuous
aes.pad_categorical_y = Nullable(false)
end
end
struct BarStatistic <: Gadfly.StatisticElement
position::Symbol # :dodge or :stack
orientation::Symbol # :horizontal or :vertical
end
BarStatistic(; position=:stack, orientation=:vertical) = BarStatistic(position, orientation)
input_aesthetics(stat::BarStatistic) = stat.orientation == :vertical ? [:x] : [:y]
output_aesthetics(stat::BarStatistic) =
stat.orientation == :vertical ? [:xmin, :xmax] : [:ymin, :ymax]
default_scales(stat::BarStatistic) = stat.orientation == :vertical ?
[Gadfly.Scale.y_continuous()] : [Gadfly.Scale.x_continuous()]
const bar = BarStatistic
function apply_statistic(stat::BarStatistic,
scales::Dict{Symbol, Gadfly.ScaleElement},
coord::Gadfly.CoordinateElement,
aes::Gadfly.Aesthetics)
if stat.orientation == :horizontal
in(:y, Gadfly.defined_aesthetics(aes)) || return
var = :y
othervar = :x
minvar = :ymin
maxvar = :ymax
viewminvar = :xviewmin
viewmaxvar = :xviewmax
other_viewminvar = :yviewmin
other_viewmaxvar = :yviewmax
labelvar = :x_label
else
in(:x, Gadfly.defined_aesthetics(aes)) || return
var = :x
othervar = :y
minvar = :xmin
maxvar = :xmax
viewminvar = :yviewmin
viewmaxvar = :yviewmax
other_viewminvar = :xviewmin
other_viewmaxvar = :xviewmax
labelvar = :y_label
end
vals = getfield(aes, var)
if isempty(vals)
setfield!(aes, minvar, Float64[1.0])
setfield!(aes, maxvar, Float64[1.0])
setfield!(aes, var, Float64[1.0])
setfield!(aes, othervar, Float64[0.0])
return
end
iscontinuous = haskey(scales, var) && isa(scales[var], Scale.ContinuousScale)
if getfield(aes, minvar) == nothing || getfield(aes, maxvar) == nothing
minvals, maxvals = barminmax(vals, iscontinuous)
setfield!(aes, minvar, minvals)
setfield!(aes, maxvar, maxvals)
end
z = zero(eltype(getfield(aes, othervar)))
if getfield(aes, viewminvar) == nothing && z < minimum(getfield(aes, othervar))
setfield!(aes, viewminvar, z)
elseif getfield(aes, viewmaxvar) == nothing && z > maximum(getfield(aes, othervar))
setfield!(aes, viewmaxvar, z)
end
if stat.position == :stack && aes.color !== nothing
groups = Dict{Any,Float64}()
for (x, y) in zip(getfield(aes, othervar), vals)
Gadfly.isconcrete(x) || continue
if !haskey(groups, y)
groups[y] = Float64(x)
else
groups[y] += Float64(x)
end
end
viewmin, viewmax = extrema(values(groups))
aes_viewminvar = getfield(aes, viewminvar)
if aes_viewminvar === nothing || aes_viewminvar > viewmin
setfield!(aes, viewminvar, viewmin)
end
aes_viewmaxvar = getfield(aes, viewmaxvar)
if aes_viewmaxvar === nothing || aes_viewmaxvar < viewmax
setfield!(aes, viewmaxvar, viewmax)
end
end
if !iscontinuous
if stat.orientation == :horizontal
aes.pad_categorical_y = Nullable(false)
else
aes.pad_categorical_x = Nullable(false)
end
end
end
struct HistogramStatistic <: Gadfly.StatisticElement
minbincount::Int
maxbincount::Int
position::Symbol # :dodge or :stack
orientation::Symbol
density::Bool
end
function HistogramStatistic(; bincount=nothing,
minbincount=3,
maxbincount=150,
position=:stack,
orientation=:vertical,
density=false)
if bincount != nothing
HistogramStatistic(bincount, bincount, position, orientation, density)
else
HistogramStatistic(minbincount, maxbincount, position, orientation, density)
end
end
input_aesthetics(stat::HistogramStatistic) = stat.orientation == :vertical ? [:x] : [:y]
output_aesthetics(stat::HistogramStatistic) =
stat.orientation == :vertical ? [:x, :y, :xmin, :xmax] : [:y, :x, :ymin, :ymax]
default_scales(stat::HistogramStatistic) = stat.orientation == :vertical ?
[Gadfly.Scale.y_continuous()] : [Gadfly.Scale.x_continuous()]
const histogram = HistogramStatistic
function apply_statistic(stat::HistogramStatistic,
scales::Dict{Symbol, Gadfly.ScaleElement},
coord::Gadfly.CoordinateElement,
aes::Gadfly.Aesthetics)
if stat.orientation == :horizontal
var = :y
othervar = :x
minvar = :ymin
maxvar = :ymax
viewminvar = :xviewmin
viewmaxvar = :xviewmax
labelvar = :x_label
else
var = :x
othervar = :y
minvar = :xmin
maxvar = :xmax
viewminvar = :yviewmin
viewmaxvar = :yviewmax
labelvar = :y_label
end
Gadfly.assert_aesthetics_defined("HistogramStatistic", aes, var)
vals = getfield(aes, var)
stat.minbincount > stat.maxbincount && error("Histogram minbincount > maxbincount")
if isempty(getfield(aes, var))
setfield!(aes, minvar, Float64[1.0])
setfield!(aes, maxvar, Float64[1.0])
setfield!(aes, var, Float64[1.0])
setfield!(aes, othervar, Float64[0.0])
return
end
if haskey(scales, var) && isa(scales[var], Scale.DiscreteScale)
isdiscrete = true
x_min, x_max = extrema(vals)
d = x_max - x_min + 1
bincounts = zeros(Int, d)
for x in vals
bincounts[x - x_min + 1] += 1
end
x_min -= 0.5 # adjust the left side of the bar
binwidth = 1.0
else
x_min = Gadfly.concrete_minimum(vals)
isdiscrete = false
if estimate_distinct_proportion(vals) <= 0.9
value_set = sort!(collect(Set(vals[Bool[Gadfly.isconcrete(v) for v in vals]])))
d, bincounts, x_max = choose_bin_count_1d_discrete(
vals, value_set, stat.minbincount, stat.maxbincount)
else
d, bincounts, x_max = choose_bin_count_1d(
vals, stat.minbincount, stat.maxbincount)
end
if stat.density
x_min = Gadfly.concrete_minimum(vals)
span = x_max - x_min
binwidth = span / d
bincounts = bincounts ./ (sum(bincounts) * binwidth)
end
binwidth = (x_max - x_min) / d
end
if aes.color === nothing
T = typeof(x_min + 1*binwidth)
setfield!(aes, othervar, Array{Float64}(d))
setfield!(aes, minvar, Array{T}(d))
setfield!(aes, maxvar, Array{T}(d))
setfield!(aes, var, Array{T}(d))
for j in 1:d
getfield(aes, minvar)[j] = x_min + (j - 1) * binwidth
getfield(aes, maxvar)[j] = x_min + j * binwidth
getfield(aes, var)[j] = x_min + (j - 0.5) * binwidth
getfield(aes, othervar)[j] = bincounts[j]
end
else
groups = Dict()
for (x, c) in zip(vals, cycle(aes.color))
Gadfly.isconcrete(x) || continue
if !haskey(groups, c)
groups[c] = Float64[x]
else
push!(groups[c], x)
end
end
T = typeof(x_min + 1*binwidth)
setfield!(aes, minvar, Array{T}(d * length(groups)))
setfield!(aes, maxvar, Array{T}(d * length(groups)))
setfield!(aes, var, Array{T}(d * length(groups)))
setfield!(aes, othervar, Array{Float64}(d * length(groups)))
colors = Array{RGB{Float32}}(d * length(groups))
x_span = x_max - x_min
stack_height = zeros(Int, d)
for (i, (c, xs)) in enumerate(groups)
fill!(bincounts, 0)
for x in xs
Gadfly.isconcrete(x) || continue
if isdiscrete
bincounts[round(Int,x)] += 1
else
bin = max(1, min(d, (ceil(Int, (x - x_min) / binwidth))))
bincounts[bin] += 1
end
end
if stat.density
binwidth = x_span / d
bincounts = bincounts ./ (sum(bincounts) * binwidth)
end
stack_height += bincounts[1:d]
if isdiscrete
for j in 1:d
idx = (i-1)*d + j
getfield(aes, var)[idx] = j
getfield(aes, othervar)[idx] = bincounts[j]
colors[idx] = c
end
else
for j in 1:d
idx = (i-1)*d + j
getfield(aes, minvar)[idx] = x_min + (j - 1) * binwidth
getfield(aes, maxvar)[idx] = x_min + j * binwidth
getfield(aes, var)[idx] = x_min + (j - 0.5) * binwidth
getfield(aes, othervar)[idx] = bincounts[j]
colors[idx] = c
end
end
end
if stat.position == :stack
viewmax = Float64(maximum(stack_height))
aes_viewmax = getfield(aes, viewmaxvar)
if aes_viewmax === nothing || aes_viewmax < viewmax
setfield!(aes, viewmaxvar, viewmax)
end
end
aes.color = discretize_make_ia(colors)
end
getfield(aes, viewminvar) === nothing && setfield!(aes, viewminvar, 0.0)
if haskey(scales, othervar)
data = Gadfly.Data()
setfield!(data, othervar, getfield(aes, othervar))
setfield!(data, viewmaxvar, getfield(aes, viewmaxvar))
Scale.apply_scale(scales[othervar], [aes], data)
# See issue #560. Stacked histograms on a non-linear y scale are a strange
# thing. After some discussion, the least confusing thing is to make the stack
# partitioned linearly. Here we make that adjustment.
if stat.position == :stack && aes.color != nothing
# A little trickery to figure out the scale stack height.
data = Gadfly.Data()
setfield!(data, othervar, stack_height)
scaled_stackheight_aes = Gadfly.Aesthetics()
Scale.apply_scale(scales[othervar], [scaled_stackheight_aes], data)
scaled_stackheight = getfield(scaled_stackheight_aes, othervar)
othervals = getfield(aes, othervar)
for j in 1:d
naive_stackheight = 0
for i in 1:length(groups)
idx = (i-1)*d + j
naive_stackheight += othervals[idx]
end
naive_stackheight == 0 && continue
for i in 1:length(groups)
idx = (i-1)*d + j
othervals[idx] = scaled_stackheight[j] * othervals[idx] / naive_stackheight
end
end
end
else
setfield!(aes, labelvar, Scale.identity_formatter)
end
end
struct Density2DStatistic <: Gadfly.StatisticElement
n::Tuple{Int,Int} # Number of points sampled
bw::Tuple{Real,Real} # Bandwidth used for the kernel density estimation
levels::Union{Int,Vector,Function}
end
Density2DStatistic(; n=(256,256), bandwidth=(-Inf,-Inf), levels=15) =
Density2DStatistic(n, bandwidth, levels)
const density2d = Density2DStatistic
input_aesthetics(stat::Density2DStatistic) = [:x, :y]
output_aesthetics(stat::Density2DStatistic) = [:x, :y, :z]
default_scales(::Density2DStatistic) = [Gadfly.Scale.y_continuous()]
function apply_statistic(stat::Density2DStatistic,
scales::Dict{Symbol, Gadfly.ScaleElement},
coord::Gadfly.CoordinateElement,
aes::Gadfly.Aesthetics)
Gadfly.assert_aesthetics_defined("Density2DStatistic", aes, :x, :y)
window = (stat.bw[1] <= 0.0 ? KernelDensity.default_bandwidth(aes.x) : stat.bw[1],
stat.bw[2] <= 0.0 ? KernelDensity.default_bandwidth(aes.y) : stat.bw[2])
k = KernelDensity.kde((aes.x,aes.y), bandwidth=window, npoints=stat.n)
aes.z = k.density
aes.x = collect(k.x)
aes.y = collect(k.y)
apply_statistic(ContourStatistic(levels=stat.levels), scales, coord, aes)
end
struct DensityStatistic <: Gadfly.StatisticElement
# Number of points sampled
n::Int
# Bandwidth used for the kernel density estimation
bw::Real
end
DensityStatistic(; n=256, bandwidth=-Inf) = DensityStatistic(n, bandwidth)
const density = DensityStatistic
input_aesthetics(stat::DensityStatistic) = [:x]
output_aesthetics(stat::DensityStatistic) = [:x, :y]
default_scales(::DensityStatistic) = [Gadfly.Scale.y_continuous()]
function apply_statistic(stat::DensityStatistic,
scales::Dict{Symbol, Gadfly.ScaleElement},
coord::Gadfly.CoordinateElement,
aes::Gadfly.Aesthetics)
Gadfly.assert_aesthetics_defined("DensityStatistic", aes, :x)
if aes.color === nothing
isa(aes.x[1], Real) || error("Kernel density estimation only works on Real types.")
x_f64 = collect(Float64, aes.x)
window = stat.bw <= 0.0 ? KernelDensity.default_bandwidth(x_f64) : stat.bw
f = KernelDensity.kde(x_f64, bandwidth=window, npoints=stat.n)
aes.x = collect(Float64, f.x)
aes.y = f.density
else
groups = Dict()
for (x, c) in zip(aes.x, cycle(aes.color))
if !haskey(groups, c)
groups[c] = Float64[x]
else
push!(groups[c], x)
end
end
colors = Array{RGB{Float32}}(0)
aes.x = Array{Float64}(0)
aes.y = Array{Float64}(0)
for (c, xs) in groups
window = stat.bw <= 0.0 ? KernelDensity.default_bandwidth(xs) : stat.bw
f = KernelDensity.kde(xs, bandwidth=window, npoints=stat.n)
append!(aes.x, f.x)
append!(aes.y, f.density)
for _ in 1:length(f.x)
push!(colors, c)
end
end
aes.color = discretize_make_ia(colors)
end
aes.y_label = Gadfly.Scale.identity_formatter
end
struct Histogram2DStatistic <: Gadfly.StatisticElement
xminbincount::Int
xmaxbincount::Int
yminbincount::Int
ymaxbincount::Int
end
function Histogram2DStatistic(; xbincount=nothing,
xminbincount=3,
xmaxbincount=150,
ybincount=nothing,
yminbincount=3,
ymaxbincount=150)
if xbincount != nothing
xminbincount = xbincount
xmaxbincount = xbincount
end
if ybincount != nothing
yminbincount = ybincount
ymaxbincount = ybincount
end
Histogram2DStatistic(xminbincount, xmaxbincount, yminbincount, ymaxbincount)
end
input_aesthetics(stat::Histogram2DStatistic) = [:x, :y]
output_aesthetics(stat::Histogram2DStatistic) = [:xmin, :ymax, :ymin, :ymax, :color]
default_scales(::Histogram2DStatistic, t::Gadfly.Theme=Gadfly.current_theme()) =
[t.continuous_color_scale]
const histogram2d = Histogram2DStatistic
function apply_statistic(stat::Histogram2DStatistic,
scales::Dict{Symbol, Gadfly.ScaleElement},
coord::Gadfly.CoordinateElement,
aes::Gadfly.Aesthetics)
Gadfly.assert_aesthetics_defined("Histogram2DStatistic", aes, :x, :y)
x_min, x_max = Gadfly.concrete_minimum(aes.x), Gadfly.concrete_maximum(aes.x)
y_min, y_max = Gadfly.concrete_minimum(aes.y), Gadfly.concrete_maximum(aes.y)
if haskey(scales, :x) && isa(scales[:x], Scale.DiscreteScale)
x_categorial = true
xminbincount = x_max - x_min + 1
xmaxbincount = xminbincount
else
x_categorial = false
xminbincount = stat.xminbincount
xmaxbincount = stat.xmaxbincount
end
if haskey(scales, :y) && isa(scales[:y], Scale.DiscreteScale)
y_categorial = true
yminbincount = y_max - y_min + 1
ymaxbincount = yminbincount
else
y_categorial = false
yminbincount = stat.yminbincount
ymaxbincount = stat.ymaxbincount
end
dy, dx, bincounts = choose_bin_count_2d(aes.x, aes.y,
xminbincount, xmaxbincount,
yminbincount, ymaxbincount)
wx = x_categorial ? 1 : (x_max - x_min) / dx
wy = y_categorial ? 1 : (y_max - y_min) / dy
n = 0
for cnt in bincounts
if cnt > 0
n += 1
end
end
if x_categorial
aes.x = Array{Int64}(n)
else
aes.xmin = Array{Float64}(n)
aes.xmax = Array{Float64}(n)
end
if y_categorial
aes.y = Array{Int64}(n)
else
aes.ymin = Array{Float64}(n)
aes.ymax = Array{Float64}(n)
end
k = 1
for i in 1:dy, j in 1:dx
cnt = bincounts[i, j]
if cnt > 0
if x_categorial
aes.x[k] = x_min + (j - 1)
else
aes.xmin[k] = x_min + (j - 1) * wx
aes.xmax[k] = x_min + j * wx
end
if y_categorial
aes.y[k] = y_min + (i - 1)
else
aes.ymin[k] = y_min + (i - 1) * wy
aes.ymax[k] = y_min + i * wy
end
k += 1
end
end
@assert k - 1 == n
haskey(scales, :color) || error("Histogram2DStatistic requires a color scale.")
color_scale = scales[:color]
typeof(color_scale) <: Scale.ContinuousColorScale ||
error("Histogram2DStatistic requires a continuous color scale.")
aes.color_key_title = "Count"
data = Gadfly.Data()
data.color = Array{Int}(n)
k = 1
for cnt in transpose(bincounts)
if cnt > 0
data.color[k] = cnt
k += 1
end
end
if x_categorial
aes.xmin, aes.xmax = barminmax(aes.x, false)
aes.x = discretize_make_ia(aes.x)
aes.pad_categorical_x = Nullable(false)
end
if y_categorial
aes.ymin, aes.ymax = barminmax(aes.y, false)
aes.y = discretize_make_ia(aes.y)
aes.pad_categorical_y = Nullable(false)
end
Scale.apply_scale(color_scale, [aes], data)
nothing
end
# Find reasonable places to put tick marks and grid lines.
struct TickStatistic <: Gadfly.StatisticElement
in_vars::Vector{Symbol}
out_var::AbstractString
granularity_weight::Float64
simplicity_weight::Float64
coverage_weight::Float64
niceness_weight::Float64
# fixed ticks, or nothing
ticks::Union{Symbol, AbstractArray}
end
@deprecate xticks(ticks) xticks(ticks=ticks)
xticks(; ticks=:auto,
granularity_weight=1/4,
simplicity_weight=1/6,
coverage_weight=1/3,
niceness_weight=1/4) =
TickStatistic([:x, :xmin, :xmax, :xintercept], "x",
granularity_weight, simplicity_weight, coverage_weight, niceness_weight, ticks)
@deprecate yticks(ticks) yticks(ticks=ticks)
yticks(; ticks=:auto,
granularity_weight=1/4,
simplicity_weight=1/6,
coverage_weight=1/3,
niceness_weight=1/4) =
TickStatistic([:y, :ymin, :ymax, :yintercept, :middle, :lower_hinge, :upper_hinge,
:lower_fence, :upper_fence], "y",
granularity_weight, simplicity_weight, coverage_weight, niceness_weight, ticks)
# Apply a tick statistic.
# Args:
# stat: statistic.
# aes: aesthetics.
# Returns:
# nothing
# Modifies:
# aes
#
function apply_statistic(stat::TickStatistic,
scales::Dict{Symbol, Gadfly.ScaleElement},
coord::Gadfly.CoordinateElement,
aes::Gadfly.Aesthetics)
isa(stat.ticks, Symbol) && stat.ticks != :auto &&
error("Invalid value $(stat.ticks) for ticks parameter.")
isa(coord, Coord.SubplotGrid) &&
error("TickStatistic cannot be applied to subplot coordinates.")
# don't clobber existing ticks
getfield(aes, Symbol(stat.out_var, "tick")) == nothing || return
in_group_var = Symbol(stat.out_var, "group")
minval, maxval = nothing, nothing
in_vals = Any[]
categorical = (:x in stat.in_vars && Scale.iscategorical(scales, :x)) ||
(:y in stat.in_vars && Scale.iscategorical(scales, :y))
for var in stat.in_vars
categorical && !in(var,[:x,:y]) && continue
vals = getfield(aes, var)
if vals != nothing && eltype(vals) != Function
if minval == nothing
minval = first(vals)
end
if maxval == nothing
maxval = first(vals)
end
T = promote_type(typeof(minval), typeof(maxval))
T = promote_type(T, eltype(vals))
minval = convert(T, minval)
maxval = convert(T, maxval)
if stat.out_var == "x"
dsize = aes.xsize === nothing ? [nothing] : aes.xsize
elseif stat.out_var == "y"
dsize = aes.ysize === nothing ? [nothing] : aes.ysize
else
dsize = [nothing]
end
size = aes.size === nothing ? [nothing] : aes.size
minval, maxval = apply_statistic_typed(minval, maxval, vals, size, dsize)
push!(in_vals, vals)
end
end
isempty(in_vals) && return
in_vals = chain(in_vals...)
# consider forced tick marks
if stat.ticks != :auto
minval = min(minval, minimum(stat.ticks))
maxval = max(maxval, maximum(stat.ticks))
end
# TODO: handle the outliers aesthetic
n = Gadfly.concrete_length(in_vals)
# check the x/yviewmin/max pesudo-aesthetics
if stat.out_var == "x"
if aes.xviewmin != nothing
minval = min(minval, aes.xviewmin)
end
if aes.xviewmax != nothing
maxval = max(maxval, aes.xviewmax)
end
elseif stat.out_var == "y"
if aes.yviewmin != nothing
minval = min(minval, aes.yviewmin)
end
if aes.yviewmax != nothing
maxval = max(maxval, aes.yviewmax)
end
end
# take into account a forced viewport in cartesian coordinates.
strict_span = false
if typeof(coord) == Coord.Cartesian
if stat.out_var == "x"
if coord.xmin !== nothing
minval = coord.xmin
strict_span = true
end
if coord.xmax !== nothing
maxval = coord.xmax
strict_span = true
end
elseif stat.out_var == "y"
if coord.ymin !== nothing
minval = coord.ymin
strict_span = true
end
if coord.ymax !== nothing
maxval = coord.ymax
strict_span = true
end
end
end
# all the input values in order.
if stat.ticks != :auto
grids = ticks = stat.ticks
viewmin = minval
viewmax = maxval
tickvisible = fill(true, length(ticks))
tickscale = fill(1.0, length(ticks))
elseif categorical
ticks = Set{Int}()
for val in in_vals
val>0 && push!(ticks, round(Int, val))
end
ticks = Int[t for t in ticks]
sort!(ticks)
grids = (ticks .- 0.5)[2:end]
viewmin = minimum(ticks)
viewmax = maximum(ticks)
tickvisible = fill(true, length(ticks))
tickscale = fill(1.0, length(ticks))
else
minval, maxval = promote(minval, maxval)
ticks, viewmin, viewmax = Gadfly.optimize_ticks(minval, maxval, extend_ticks=true,
granularity_weight=stat.granularity_weight,
simplicity_weight=stat.simplicity_weight,
coverage_weight=stat.coverage_weight,
niceness_weight=stat.niceness_weight,
strict_span=strict_span)
grids = ticks
multiticks = Gadfly.multilevel_ticks(viewmin - (viewmax - viewmin),
viewmax + (viewmax - viewmin))
tickcount = length(ticks) + sum([length(ts) for ts in values(multiticks)])
tickvisible = Array{Bool}(tickcount)
tickscale = Array{Float64}(tickcount)
i = 1
for t in ticks
tickscale[i] = 1.0
tickvisible[i] = viewmin <= t <= viewmax
i += 1
end
for (scale, ts) in multiticks, t in ts
push!(ticks, t)
tickvisible[i] = false
tickscale[i] = scale
i += 1
end
end
# We use the first label function we find for any of the aesthetics. I'm not
# positive this is the right thing to do, or would would be.
labeler = getfield(aes, Symbol(stat.out_var, "_label"))
setfield!(aes, Symbol(stat.out_var, "tick"), ticks)
setfield!(aes, Symbol(stat.out_var, "grid"), grids)
setfield!(aes, Symbol(stat.out_var, "tick_label"), labeler)
setfield!(aes, Symbol(stat.out_var, "tickvisible"), tickvisible)
setfield!(aes, Symbol(stat.out_var, "tickscale"), tickscale)
viewmin_var = Symbol(stat.out_var, "viewmin")
if getfield(aes, viewmin_var) === nothing || getfield(aes, viewmin_var) > viewmin
setfield!(aes, viewmin_var, viewmin)
end
viewmax_var = Symbol(stat.out_var, "viewmax")
if getfield(aes, viewmax_var) === nothing || getfield(aes, viewmax_var) < viewmax
setfield!(aes, viewmax_var, viewmax)
end
nothing
end
function apply_statistic_typed(minval::T, maxval::T, vals, size, dsize) where T
# for (val, s, ds) in zip(vals, cycle(size), cycle(dsize))
lensize = length(size)
lendsize = length(dsize)
for (i, val) in enumerate(vals)
(!Gadfly.isconcrete(val) || !isfinite(val)) && continue
s = size[mod1(i, lensize)]
ds = dsize[mod1(i, lendsize)]
minval, maxval = minvalmaxval(minval, maxval, convert(T, val), s, ds)
end
minval, maxval
end
function apply_statistic_typed(minval::T, maxval::T, vals::DataArray{T}, size, dsize) where T
lensize = length(size)
lendsize = length(dsize)
for i = 1:length(vals)
vals.na[i] && continue
val::T = vals.data[i]
s = size[mod1(i, lensize)]
ds = dsize[mod1(i, lendsize)]
minval, maxval = minvalmaxval(minval, maxval, val, s, ds)
end
minval, maxval
end
function minvalmaxval(minval::T, maxval::T, val, s, ds) where T
if val < minval || !isfinite(minval)
minval = val
end
if val > maxval || !isfinite(maxval)
maxval = val
end
if s != nothing && typeof(s) <: AbstractFloat
minval = min(minval, val - s)::T
maxval = max(maxval, val + s)::T
end
if ds != nothing
minval = min(minval, val - ds)::T
maxval = max(maxval, val + ds)::T
end
minval, maxval
end
struct BoxplotStatistic <: Gadfly.StatisticElement
method::Union{Symbol, Vector}
end
BoxplotStatistic(; method=:tukey) = BoxplotStatistic(method)
input_aesthetics(stat::BoxplotStatistic) = [:x, :y]
output_aesthetics(stat::BoxplotStatistic) =
[:x, :middle, :lower_hinge, :upper_hinge, :lower_fence, :upper_fence, :outliers]
const boxplot = BoxplotStatistic
function apply_statistic(stat::BoxplotStatistic,
scales::Dict{Symbol, Gadfly.ScaleElement},
coord::Gadfly.CoordinateElement,
aes::Gadfly.Aesthetics)
if aes.y === nothing
Gadfly.assert_aesthetics_defined("BoxplotStatistic", aes,
:x, :lower_hinge, :upper_hinge, :lower_fence, :upper_fence)
aes_color = aes.color === nothing ? [nothing] : aes.color
groups = Any[]
for (x, c) in zip(aes.x, cycle(aes_color))
push!(groups, (x, c))
end
if aes.color !== nothing
aes.color = discretize_make_ia([c for (x, c) in groups],
filter(!ismissing, aes.color.values))
end
return
end
if aes.x === nothing
aes_x = [1]
aes.x_label = x -> fill("", length(x))
else
aes_x = aes.x
end
aes_color = aes.color === nothing ? [nothing] : aes.color
T = isempty(aes.y) ? eltype(aes.y) : typeof(aes.y[1] / 1)
groups = DefaultOrderedDict(() -> T[])
for (x, y, c) in zip(cycle(aes_x), aes.y, cycle(aes_color))
push!(groups[(x, c)], y)
end
if aes.y != nothing
m = length(groups)
aes.x = Array{eltype(aes.x)}(m)
aes.middle = Array{T}(m)
aes.lower_hinge = Array{T}(m)
aes.upper_hinge = Array{T}(m)
aes.lower_fence = Array{T}(m)
aes.upper_fence = Array{T}(m)
aes.outliers = Vector{T}[]
for (i, ((x, c), ys)) in enumerate(groups)
sort!(ys)
aes.x[i] = x
if stat.method == :tukey
aes.lower_hinge[i], aes.middle[i], aes.upper_hinge[i] =
quantile(ys, [0.25, 0.5, 0.75])
iqr = aes.upper_hinge[i] - aes.lower_hinge[i]
idx = searchsortedfirst(ys, aes.lower_hinge[i] - 1.5iqr)
aes.lower_fence[i] = ys[idx]
idx = searchsortedlast(ys, aes.upper_hinge[i] + 1.5iqr)
aes.upper_fence[i] = ys[idx]
elseif isa(stat.method, Vector)
qs = stat.method
if length(qs) != 5
error("Stat.boxplot requires exactly five quantiles.")
end
aes.lower_fence[i], aes.lower_hinge[i], aes.middle[i],
aes.upper_hinge[i], aes.upper_fence[i] = quantile!(ys, qs)
else
error("Invalid method specified for State.boxplot")
end
push!(aes.outliers,
filter(y -> y < aes.lower_fence[i] || y > aes.upper_fence[i], ys))
end
end
if length(aes.x) > 1 && (haskey(scales, :x) && isa(scales[:x], Scale.ContinuousScale))
xmin, xmax = minimum(aes.x), maximum(aes.x)
minspan = minimum([xj - xi for (xi, xj) in zip(aes.x[1:end-1], aes.x[2:end])])
xviewmin = xmin - minspan / 2
xviewmax = xmax + minspan / 2
if aes.xviewmin === nothing || aes.xviewmin > xviewmin
aes.xviewmin = xviewmin
end
if aes.xviewmax === nothing || aes.xviewmax < xviewmax
aes.xviewmax = xviewmax
end
end
if isa(aes_x, IndirectArray)
aes.x = discretize_make_ia(aes.x, aes_x.values)
end
if aes.color !== nothing
aes.color = discretize_make_ia(RGB{Float32}[c for (x, c) in keys(groups)],
aes.color.values)
end
nothing
end
struct SmoothStatistic <: Gadfly.StatisticElement
method::Symbol
smoothing::Float64
end
SmoothStatistic(; method=:loess, smoothing=0.75) = SmoothStatistic(method, smoothing)
const smooth = SmoothStatistic
input_aesthetics(::SmoothStatistic) = [:x, :y]
output_aesthetics(::SmoothStatistic) = [:x, :y]
function apply_statistic(stat::SmoothStatistic,
scales::Dict{Symbol, Gadfly.ScaleElement},
coord::Gadfly.CoordinateElement,
aes::Gadfly.Aesthetics)
Gadfly.assert_aesthetics_defined("Stat.smooth", aes, :x, :y)
Gadfly.assert_aesthetics_equal_length("Stat.smooth", aes, :x, :y)
stat.method in [:loess,:lm] ||
error("The only Stat.smooth methods currently supported are loess and lm.")
max_num_steps = 750
aes_color = aes.color === nothing ? [nothing] : aes.color
groups = Dict(c => (eltype(aes.x)[], eltype(aes.y)[]) for c in unique(aes_color))
for (x, y, c) in zip(aes.x, aes.y, cycle(aes_color))
push!(groups[c][1], x)
push!(groups[c][2], y)
end
local xs, ys, xsp
aes.x = eltype(aes.x)[]
# For aes.y returning a Float is ok if `y` is an Int or a Float
# There does not seem to be strong demand for other types of `y`
aes.y = Float64[]
colors = eltype(aes_color)[]
for (c, (xv, yv)) in groups
x_min, x_max = minimum(xv), maximum(xv)
x_min == x_max && error("Stat.smooth requires more than one distinct x value")
try
xs = Float64.( eltype(xv) <: Dates.TimeType ? Dates.value.(xv) : xv )
ys = Float64.( eltype(yv) <: Dates.TimeType ? Dates.value.(yv) : yv )
catch e
error("Stat.loess and Stat.lm require that x and y be bound to arrays of plain numbers.")
end
nudge = 1e-5 * (x_max - x_min)
dx = (x_max-x_min)*(1/max_num_steps)
# For a Date, dx might be 0 days, so correct
# For Ints, correct dx
if isa(xv[1], Date)
dx = max(dx, Dates.Day(1))
elseif isa(xv[1], Int)
dx = ceil(Int, dx)
nudge = 0
end
steps = collect((x_min + nudge):dx:(x_max - nudge))
xsp = Float64.( eltype(steps) <: Dates.TimeType ? Dates.value.(steps) : steps )
if stat.method == :loess
smoothys = Loess.predict(loess(xs, ys, span=stat.smoothing), xsp)
elseif stat.method == :lm
lmcoeff = linreg(xs,ys)
smoothys = lmcoeff[2].*xsp .+ lmcoeff[1]
end
# New aes
append!(aes.x, steps)
append!(aes.y, smoothys)
append!(colors, fill(c, length(steps)))
end
if !(aes.color===nothing)
aes.color = discretize_make_ia(colors)
end
end
struct HexBinStatistic <: Gadfly.StatisticElement
xbincount::Int
ybincount::Int
end
HexBinStatistic(; xbincount=50, ybincount=50) = HexBinStatistic(xbincount, ybincount)
const hexbin = HexBinStatistic
function apply_statistic(stat::HexBinStatistic,
scales::Dict{Symbol, Gadfly.ScaleElement},
coord::Gadfly.CoordinateElement,
aes::Gadfly.Aesthetics)
xmin, xmax = minimum(aes.x), maximum(aes.x)
ymin, ymax = minimum(aes.y), maximum(aes.y)
xspan, yspan = xmax - xmin, ymax - ymin
xsize = xspan / stat.xbincount
ysize = yspan / stat.ybincount
counts = Dict{(Tuple{Int, Int}), Int}()
for (x, y) in zip(aes.x, aes.y)
h = convert(HexagonOffsetOddR, cube_round(x - xmin + xspan/2,
y - ymin + yspan/2,
xsize, ysize))
idx = (h.q, h.r)
if !haskey(counts, idx)
counts[idx] = 1
else
counts[idx] += 1
end
end
N = length(counts)
aes.x = Array{Float64}(N)
aes.y = Array{Float64}(N)
data = Gadfly.Data()
data.color = Array{Int}(N)
k = 1
for (idx, cnt) in counts
x, y = center(HexagonOffsetOddR(idx[1], idx[2]), xsize, ysize,
xmin - xspan/2, ymin - yspan/2)
aes.x[k] = x
aes.y[k] = y
data.color[k] = cnt
k += 1
end
aes.xsize = [xsize]
aes.ysize = [ysize]
color_scale = scales[:color]
typeof(color_scale) <: Scale.ContinuousColorScale ||
error("HexBinGeometry requires a continuous color scale.")
aes.color_key_title = "Count"
Scale.apply_scale(color_scale, [aes], data)
end
default_scales(::HexBinStatistic, t::Gadfly.Theme) = [t.continuous_color_scale]
struct StepStatistic <: Gadfly.StatisticElement
direction::Symbol
end
StepStatistic(; direction=:hv) = StepStatistic(direction)
const step = StepStatistic
input_aesthetics(::StepStatistic) = [:x, :y]
output_aesthetics(::StepStatistic) = [:x, :y]
function apply_statistic(stat::StepStatistic,
scales::Dict{Symbol, Gadfly.ScaleElement},
coord::Gadfly.CoordinateElement,
aes::Gadfly.Aesthetics)
Gadfly.assert_aesthetics_defined("StepStatistic", aes, :x)
Gadfly.assert_aesthetics_defined("StepStatistic", aes, :y)
Gadfly.assert_aesthetics_equal_length("StepStatistic", aes, :x, :y)
p = sortperm(aes.x, alg=MergeSort)
permute!(aes.x, p)
permute!(aes.y, p)
aes.group != nothing && permute!(aes.group, p)
aes.color != nothing && permute!(aes.color, p)
if aes.group != nothing
Gadfly.assert_aesthetics_equal_length("StepStatistic", aes, :x, :group)
permute!(aes.x, p)
permute!(aes.y, p)
permute!(aes.group, p)
aes.color != nothing && permute!(aes.color, p)
end
if aes.color != nothing
Gadfly.assert_aesthetics_equal_length("StepStatistic", aes, :x, :color)
# TODO: use this when we switch to 0.4
# sortperm!(p, aes.color, alg=MergeSort, lt=Gadfly.color_isless)
p = sortperm(aes.color, alg=MergeSort, lt=Gadfly.color_isless)
permute!(aes.x, p)
permute!(aes.y, p)
permute!(aes.color, p)
aes.group != nothing && permute!(aes.group, p)
end
x_step = Array{eltype(aes.x)}(0)
y_step = Array{eltype(aes.y)}(0)
color_step = aes.color == nothing ? nothing : Array{eltype(aes.color)}(0)
group_step = aes.group == nothing ? nothing : Array{eltype(aes.group)}(0)
i = 1
i_offset = 1
while true
u = i_offset + div(i - 1, 2) + (isodd(i) || stat.direction != :hv ? 0 : 1)
v = i_offset + div(i - 1, 2) + (isodd(i) || stat.direction != :vh ? 0 : 1)
(u > length(aes.x) || v > length(aes.y)) && break
if (aes.color != nothing &&
(aes.color[u] != aes.color[i_offset] || aes.color[v] != aes.color[i_offset])) ||
(aes.group != nothing &&
(aes.group[u] != aes.color[i_offset] || aes.color[v] != aes.group[i_offset]))
i_offset = max(u, v)
i = 1
else
push!(x_step, aes.x[u])
push!(y_step, aes.y[v])
aes.color != nothing && push!(color_step, aes.color[i_offset])
aes.group != nothing && push!(group_step, aes.group[i_offset])
i += 1
end
end
aes.x = x_step
aes.y = y_step
aes.color = color_step
aes.group = group_step
end
struct FunctionStatistic <: Gadfly.StatisticElement
# Number of points to evaluate the function at
num_samples::Int
end
FunctionStatistic(; num_samples=250) = FunctionStatistic(num_samples)
const func = FunctionStatistic
default_scales(::FunctionStatistic) = [Gadfly.Scale.x_continuous(), Gadfly.Scale.y_continuous()]
input_aesthetics(::FunctionStatistic) = [:y, :xmin, :xmax]
output_aesthetics(::FunctionStatistic) = [:x, :y, :group]
function apply_statistic(stat::FunctionStatistic,
scales::Dict{Symbol, Gadfly.ScaleElement},
coord::Gadfly.CoordinateElement,
aes::Gadfly.Aesthetics)
Gadfly.assert_aesthetics_defined("FunctionStatistic", aes, :y)
Gadfly.assert_aesthetics_defined("FunctionStatistic", aes, :xmin)
Gadfly.assert_aesthetics_defined("FunctionStatistic", aes, :xmax)
Gadfly.assert_aesthetics_equal_length("FunctionStatistic", aes, :xmin, :xmax)
aes.x = Array{Float64}(length(aes.y) * stat.num_samples)
ys = Array{Float64}(length(aes.y) * stat.num_samples)
i = 1
for (f, xmin, xmax) in zip(aes.y, cycle(aes.xmin), cycle(aes.xmax))
for x in linspace(xmin, xmax, stat.num_samples)
aes.x[i] = x
ys[i] = f(x)
i += 1
end
end
# color was bound explicitly
if aes.color != nothing
func_color = aes.color
aes.color = Array{eltype(aes.color)}(length(aes.y) * stat.num_samples)
groups = DataArray(Int, length(aes.y) * stat.num_samples)
for i in 1:length(aes.y)
aes.color[1+(i-1)*stat.num_samples:i*stat.num_samples] = func_color[i]
groups[1+(i-1)*stat.num_samples:i*stat.num_samples] = i
end
aes.group = discretize_make_ia(groups)
elseif length(aes.y) > 1 && haskey(scales, :color)
data = Gadfly.Data()
data.color = Array{AbstractString}(length(aes.y) * stat.num_samples)
groups = DataArray(Int, length(aes.y) * stat.num_samples)
for i in 1:length(aes.y)
fname = "f<sub>$(i)</sub>"
data.color[1+(i-1)*stat.num_samples:i*stat.num_samples] = fname
groups[1+(i-1)*stat.num_samples:i*stat.num_samples] = i
end
Scale.apply_scale(scales[:color], [aes], data)
aes.group = discretize_make_ia(groups)
end
data = Gadfly.Data()
data.y = ys
Scale.apply_scale(scales[:y], [aes], data)
end
struct ContourStatistic <: Gadfly.StatisticElement
levels::Union{Int,Vector,Function}
samples::Int
end
ContourStatistic(; levels=15, samples=150) = ContourStatistic(levels, samples)
input_aesthetics(::ContourStatistic) = [:z, :xmin, :xmax, :ymin, :ymax]
output_aesthetics(::ContourStatistic) = [:x, :y, :color, :group]
const contour = ContourStatistic
default_scales(::ContourStatistic, t::Gadfly.Theme=Gadfly.current_theme()) =
[Gadfly.Scale.z_func(), Gadfly.Scale.x_continuous(), Gadfly.Scale.y_continuous(),
t.continuous_color_scale]
function apply_statistic(stat::ContourStatistic,
scales::Dict{Symbol, Gadfly.ScaleElement},
coord::Gadfly.CoordinateElement,
aes::Gadfly.Aesthetics)
xs = aes.x === nothing ? nothing : convert(Vector{Float64}, aes.x)
ys = aes.y === nothing ? nothing : convert(Vector{Float64}, aes.y)
if typeof(aes.z) <: Function
if xs == nothing && aes.xmin != nothing && aes.xmax != nothing
xs = linspace(aes.xmin[1], aes.xmax[1], stat.samples)
end
if ys == nothing && aes.ymin != nothing && aes.ymax != nothing
ys = linspace(aes.ymin[1], aes.ymax[1], stat.samples)
end
zs = Float64[aes.z(x, y) for x in xs, y in ys]
elseif typeof(aes.z) <: Matrix
zs = convert(Matrix{Float64}, aes.z)
if xs == nothing
xs = collect(Float64, 1:size(zs)[1])
end
if ys == nothing
ys = collect(Float64, 1:size(zs)[2])
end
size(zs) != (length(xs), length(ys)) &&
error("Stat.contour requires dimension of z to be length(x) by length(y)")
else
error("Stat.contour requires either a matrix or a function")
end
levels = Float64[]
contour_xs = eltype(xs)[]
contour_ys = eltype(ys)[]
stat_levels = typeof(stat.levels) <: Function ? stat.levels(zs) : stat.levels
groups = discretize_make_ia(Int[])
group = 0
for level in Contour.levels(Contour.contours(xs, ys, zs, stat_levels))
for line in Contour.lines(level)
xc, yc = Contour.coordinates(line)
append!(contour_xs, xc)
append!(contour_ys, yc)
for _ in 1:length(xc)
push!(groups, group)
push!(levels, Contour.level(level))
end
group += 1
end
end
aes.group = groups
color_scale = get(scales, :color, Gadfly.Scale.color_continuous_gradient())
Scale.apply_scale(color_scale, [aes], Gadfly.Data(color=levels))
Scale.apply_scale(scales[:x], [aes], Gadfly.Data(x=contour_xs))
Scale.apply_scale(scales[:y], [aes], Gadfly.Data(y=contour_ys))
end
struct QQStatistic <: Gadfly.StatisticElement end
input_aesthetics(::QQStatistic) = [:x, :y]
output_aesthetics(::QQStatistic) = [:x, :y]
const qq = QQStatistic
default_scales(::QQStatistic) =
[Gadfly.Scale.x_continuous(), Gadfly.Scale.y_continuous]
function apply_statistic(stat::QQStatistic,
scales::Dict{Symbol, Gadfly.ScaleElement},
coord::Gadfly.CoordinateElement,
aes::Gadfly.Aesthetics)
Gadfly.assert_aesthetics_defined("Stat.qq", aes, :x, :y)
Gadfly.assert_aesthetics_undefined("State.qq", aes, :color)
# NOTES:
#
# apply_scales happens before apply_statistics, so we need to handle in
# apply_scales the Distributions that might be bound to x and y... By
# analogy with Stat.func, we can add a check in apply_statistic which defers
# application. Stat.func though requires an ARRAY of Functions, and doesn't
# work on naked functions bound to aes.y. If we want to bind Distributions,
# we'd need to extend the types that are allowed for aes.y/.x (e.g. change
# type of Aesthetics fields x and y). Right now these are of type
# NumericalOrCategoricalAesthetic. The .x and .y fields are the _only_
# place where this type is used, but I'm not sure if there's a reason that
# changing this typealias would be a bad idea...for now I've just used a
# direct `@compat(Union{NumericalOrCategoricalAesthetic, Distribution})`.
#
# TODO:
#
# Grouping by color etc.?
# a little helper function to convert either numeric or distribution
# variables to a format suitable to input to qqbuild.
toVecOrDist = v -> typeof(v) <: Distribution ? v : convert(Vector{Float64}, v)
# check and convert :x and :y to proper types for input to qqbuild
local xs, ys
try
(xs, ys) = map(toVecOrDist, (aes.x, aes.y))
catch e
error("Stat.qq requires that x and y be bound to either a Distribution or to arrays of plain numbers.")
end
qqq = qqbuild(xs, ys)
aes.x = qqq.qx
aes.y = qqq.qy
# apply_scale to Distribution-bound aesthetics is deferred, so re-apply here
# (but only for Distribution, numeric data is already scaled). Only one of
# :x or :y can be a Distribution since qqbuild will throw an error for two
# Distributions.
data = Gadfly.Data()
if typeof(xs) <: Distribution
data.x = aes.x
Scale.apply_scale(scales[:x], [aes], data)
elseif typeof(ys) <: Distribution
data.y = aes.y
Scale.apply_scale(scales[:y], [aes], data)
end
end
struct ViolinStatistic <: Gadfly.StatisticElement
# Number of points sampled
n::Int
end
ViolinStatistic() = ViolinStatistic(300)
input_aesthetics(::ViolinStatistic) = [:x, :y, :width]
const violin = ViolinStatistic
function apply_statistic(stat::ViolinStatistic,
scales::Dict{Symbol, Gadfly.ScaleElement},
coord::Gadfly.CoordinateElement,
aes::Gadfly.Aesthetics)
isa(aes.y[1], Real) || error("Kernel density estimation only works on Real types.")
grouped_y = Dict(1=>aes.y)
grouped_color = Dict{Int, Gadfly.ColorOrNothing}(1=>nothing)
ux = unique(aes.x)
uxflag = length(ux) < length(aes.x)
colorflag = aes.color != nothing
uxflag && (grouped_y = Dict(x=>aes.y[aes.x.==x] for x in ux))
grouped_color = (colorflag ? Dict(x=>first(aes.color[aes.x.==x]) for x in ux) :
uxflag && Dict(x=>nothing for x in ux) )
aes.x = Array{Float64}(0)
aes.y = Array{Float64}(0)
aes.width = Array{Float64}(0)
colors = eltype(aes.color)[]
for (x, ys) in grouped_y
window = stat.n > 1 ? KernelDensity.default_bandwidth(ys) : 0.1
f = KernelDensity.kde(ys, bandwidth=window, npoints=stat.n)
append!(aes.x, fill(x, length(f.x)))
append!(aes.y, f.x)
append!(aes.width, f.density)
append!(colors, fill(grouped_color[x], length(f.x)))
end
colorflag && (aes.color = colors)
pad = 0.1
maxwidth = maximum(aes.width)
broadcast!(*, aes.width, aes.width, 1 - pad)
broadcast!(/, aes.width, aes.width, maxwidth)
end
struct JitterStatistic <: Gadfly.StatisticElement
vars::Vector{Symbol}
range::Float64
seed::UInt32
end
JitterStatistic(vars; range=0.8, seed=0x0af5a1f7) = JitterStatistic(vars, range, seed)
x_jitter(; range=0.8, seed=0x0af5a1f7) = JitterStatistic([:x], range=range, seed=seed)
y_jitter(; range=0.8, seed=0x0af5a1f7) = JitterStatistic([:y], range=range, seed=seed)
input_aesthetics(stat::JitterStatistic) = stat.vars
output_aesthetics(stat::JitterStatistic) = stat.vars
function minimum_span(vars::Vector{Symbol}, aes::Gadfly.Aesthetics)
span = nothing
for var in vars
data = getfield(aes, var)
length(data) < 2 && continue
dataspan = data[2] - data[1]
T = eltype(data)
z = convert(T, zero(T))
sorteddata = sort(data)
for δ in diff(sorteddata)
if δ != z && (δ < dataspan || dataspan == z)
dataspan = δ
end
end
if span == nothing || (dataspan != nothing && dataspan < span)
span = dataspan
end
end
return span
end
function apply_statistic(stat::JitterStatistic,
scales::Dict{Symbol, Gadfly.ScaleElement},
coord::Gadfly.CoordinateElement,
aes::Gadfly.Aesthetics)
span = minimum_span(stat.vars, aes)
span == nothing && return
rng = MersenneTwister(stat.seed)
for var in stat.vars
data = getfield(aes, var)
outdata = Array{Float64}(size(data))
broadcast!(+, outdata, data, stat.range * (rand(rng, length(data)) - 0.5) .* span)
setfield!(aes, var, outdata)
end
end
# Bin mean returns the mean of x and y in n bins of x
struct BinMeanStatistic <: Gadfly.StatisticElement
n::Int
end
BinMeanStatistic(; n=20) = BinMeanStatistic(n)
const binmean = BinMeanStatistic
input_aesthetics(::BinMeanStatistic) = [:x, :y]
output_aesthetics(::BinMeanStatistic) = [:x, :y]
function apply_statistic(stat::BinMeanStatistic,
scales::Dict{Symbol, Gadfly.ScaleElement},
coord::Gadfly.CoordinateElement,
aes::Gadfly.Aesthetics)
Gadfly.assert_aesthetics_defined("Stat.binmean", aes, :x, :y)
breaks = quantile(aes.x, collect(1:stat.n)/stat.n)
Tx = eltype(aes.x)
Ty = eltype(aes.y)
if aes.color === nothing
(aes.x, aes.y) = mean_by_group(aes.x, aes.y, breaks)
else
groups = Dict()
for (x, y, c) in zip(aes.x, aes.y, cycle(aes.color))
if !haskey(groups, c)
xs = append!(Tx[], collect(Tx, aes.x))
ys = append!(Ty[], collect(Ty, aes.y))
groups[c] = Array[xs, ys]
else
push!(groups[c][1], x)
push!(groups[c][2], y)
end
end
colors = Array{RGB{Float32}}(0)
aes.x = Array{Tx}(0)
aes.y = Array{Ty}(0)
for (c, v) in groups
(fx, fy) = mean_by_group(v[1], v[2], breaks)
append!(aes.x, fx)
append!(aes.y, fy)
for _ in 1:length(fx)
push!(colors, c)
end
end
aes.color = discretize_make_ia(colors)
end
end
function mean_by_group(x::Vector{Tx}, y::Vector{Ty}, breaks::Vector{Float64}) where {Tx, Ty}
count = zeros(Int64, length(breaks))
totalx = zeros(Tx, length(breaks))
totaly = zeros(Ty, length(breaks))
for i in 1:length(x)
refs = searchsortedfirst(breaks, x[i])
count[refs] += 1
totalx[refs] += x[i]
totaly[refs] += y[i]
end
subset = count .> 0
count = count[subset]
return (totalx[subset] ./ count, totaly[subset] ./ count)
end
struct EnumerateStatistic <: Gadfly.StatisticElement
var::Symbol
end
input_aesthetics(stat::EnumerateStatistic) = [stat.var]
output_aesthetics(stat::EnumerateStatistic) = [stat.var]
function default_scales(stat::EnumerateStatistic)
if stat.var == :y
return [Gadfly.Scale.y_continuous()]
elseif stat.var == :x
return [Gadfly.Scale.x_continuous()]
else
return Gadfly.ScaleElement[]
end
end
const x_enumerate = EnumerateStatistic(:x)
const y_enumerate = EnumerateStatistic(:y)
function apply_statistic(stat::EnumerateStatistic,
scales::Dict{Symbol, Gadfly.ScaleElement},
coord::Gadfly.CoordinateElement,
aes::Gadfly.Aesthetics)
has_x = aes.x != nothing
has_y = aes.y != nothing
if stat.var == :x && !has_x && has_y
aes.x = collect(1:length(aes.y))
elseif stat.var == :y && !has_y && has_x
aes.y = collect(1:length(aes.x))
end
end
### Vector Field Statistic
struct VecFieldStatistic <: Gadfly.StatisticElement
smoothness::Float64
scale::Float64
samples::Int64
end
VecFieldStatistic(; smoothness=1.0, scale=1.0, samples=20) =
VecFieldStatistic(smoothness, scale, samples)
input_aesthetics(stat::VecFieldStatistic) = [:z, :x, :y, :color]
output_aesthetics(stat::VecFieldStatistic) = [:x, :y, :xend, :yend, :color]
default_scales(stat::VecFieldStatistic, t::Gadfly.Theme=Gadfly.current_theme()) =
[Gadfly.Scale.z_func(), Gadfly.Scale.x_continuous(), Gadfly.Scale.y_continuous(),
t.continuous_color_scale ]
const vectorfield = VecFieldStatistic
function apply_statistic(stat::VecFieldStatistic,
scales::Dict{Symbol, Gadfly.ScaleElement},
coord::Gadfly.CoordinateElement,
aes::Gadfly.Aesthetics)
xs = aes.x === nothing ? nothing : Float64.(aes.x)
ys = aes.y === nothing ? nothing : Float64.(aes.y)
if isa(aes.z, Function)
if xs == nothing && aes.xmin != nothing && aes.xmax != nothing
xs = linspace(aes.xmin[1], aes.xmax[1], stat.samples)
end
if ys == nothing && aes.ymin != nothing && aes.ymax != nothing
ys = linspace(aes.ymin[1], aes.ymax[1], stat.samples)
end
zs = Float64[aes.z(x, y) for x in xs, y in ys]
elseif isa(aes.z, Matrix)
zs = Float64.(aes.z)
if xs == nothing
xs = collect(Float64, 1:size(zs)[1])
end
if ys == nothing
ys = collect(Float64, 1:size(zs)[2])
end
if size(zs) != (length(xs), length(ys))
error("Stat.vectorfield requires dimension of z to be length(x) by length(y)")
end
else
error("Stat.vectorfield requires either a matrix or a function")
end
X = vcat([[x y] for x in xs, y in ys]...)
Z = vec(zs)
# The next two lines make use of the package CoupledFields.jl
kpars = GaussianKP(X)
∇g = hcat(gradvecfield([stat.smoothness -7.0], X, Z[:,1:1], kpars)...)'
vecf = [X-∇g*stat.scale X+∇g*stat.scale]
aes.z = nothing
aes.x = vecf[:,1]
aes.y = vecf[:,2]
aes.xend = vecf[:,3]
aes.yend = vecf[:,4]
color_scale = get(scales, :color, Gadfly.Scale.color_continuous_gradient())
Scale.apply_scale(color_scale, [aes], Gadfly.Data(color=Z))
end
### Hair Statistic
struct HairStatistic <: Gadfly.StatisticElement
intercept
orientation::Symbol # :horizontal or :vertical like BarStatistic
end
HairStatistic(;intercept=0.0, orientation=:vertical) = HairStatistic(intercept, orientation)
input_aesthetics(stat::HairStatistic) = [:x, :y]
output_aesthetics(stat::HairStatistic) = [:x, :y, :xend, :yend]
default_scales(stat::HairStatistic) = [Gadfly.Scale.x_continuous(), Gadfly.Scale.y_continuous()]
const hair = HairStatistic
function apply_statistic(stat::HairStatistic,
scales::Dict{Symbol, Gadfly.ScaleElement},
coord::Gadfly.CoordinateElement,
aes::Gadfly.Aesthetics)
if stat.orientation == :vertical
aes.xend = aes.x
aes.yend = fill(stat.intercept, length(aes.y))
else
aes.yend = aes.y
aes.xend = fill(stat.intercept, length(aes.x))
end
end
### Ellipse Statistic
struct EllipseStatistic <: Gadfly.StatisticElement
distribution::Type{<:ContinuousMultivariateDistribution}
levels::Vector{<:AbstractFloat}
nsegments::Int
end
function EllipseStatistic(;
distribution::(Type{<:ContinuousMultivariateDistribution})=MvNormal,
levels::Vector{Float64}=[0.95],
nsegments::Int=51 )
return EllipseStatistic(distribution, levels, nsegments)
end
Gadfly.input_aesthetics(stat::EllipseStatistic) = [:x, :y]
Gadfly.output_aesthetics(stat::EllipseStatistic) = [:x, :y]
Gadfly.default_scales(stat::EllipseStatistic) = [Gadfly.Scale.x_continuous(), Gadfly.Scale.y_continuous()]
const ellipse = EllipseStatistic
function Gadfly.Stat.apply_statistic(stat::EllipseStatistic,
scales::Dict{Symbol, Gadfly.ScaleElement},
coord::Gadfly.CoordinateElement,
aes::Gadfly.Aesthetics)
Dat = [aes.x aes.y]
grouped_xy = Dict(1=>Dat)
grouped_color = Dict{Int, Gadfly.ColorOrNothing}(1=>nothing)
colorflag = aes.color != nothing
aes.group = (colorflag ? aes.color : aes.group)
if aes.group != nothing
ug = unique(aes.group)
grouped_xy = Dict(g=>Dat[aes.group.==g,:] for g in ug)
grouped_color = Dict(g=>first(aes.group[aes.group.==g]) for g in ug)
end
levels = Float64[]
colors = eltype(aes.color)[]
ellipse_x = eltype(Dat)[]
ellipse_y = eltype(Dat)[]
dfn = 2
θ = 2π*(0:stat.nsegments)/stat.nsegments
n = length(θ)
for (g, data) in grouped_xy
dfd = size(data,1)-1
dhat = fit(stat.distribution, data')
Σ½ = chol(cov(dhat))
rv = sqrt.(dfn*[quantile(FDist(dfn,dfd), p) for p in stat.levels])
ellxy = [cos.(θ) sin.(θ)] * Σ½
μ = mean(dhat)
for r in rv
append!(ellipse_x, r*ellxy[:,1].+μ[1])
append!(ellipse_y, r*ellxy[:,2].+μ[2])
append!(colors, fill(grouped_color[g], n))
append!(levels, fill(r, n))
end
end
aes.group = discretize_make_ia(levels)
colorflag && (aes.color = colors)
aes.x = ellipse_x
aes.y = ellipse_y
end
end # module Stat
|
{"hexsha": "f176246a10d67b81a7338f72000ac74af8bd3f12", "size": 61450, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/statistics.jl", "max_stars_repo_name": "Mattriks/Gadfly.jl", "max_stars_repo_head_hexsha": "d31554bba68194793e1c6e5afbda57111433d1c5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2016-08-05T02:06:53.000Z", "max_stars_repo_stars_event_max_datetime": "2016-08-05T02:06:53.000Z", "max_issues_repo_path": "src/statistics.jl", "max_issues_repo_name": "Mattriks/Gadfly.jl", "max_issues_repo_head_hexsha": "d31554bba68194793e1c6e5afbda57111433d1c5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/statistics.jl", "max_forks_repo_name": "Mattriks/Gadfly.jl", "max_forks_repo_head_hexsha": "d31554bba68194793e1c6e5afbda57111433d1c5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2016-08-05T02:08:54.000Z", "max_forks_repo_forks_event_max_datetime": "2016-08-05T02:08:54.000Z", "avg_line_length": 32.63409453, "max_line_length": 111, "alphanum_fraction": 0.5923352319, "num_tokens": 16631}
|
//
// Created by Alex Beccaro on 18/01/18.
//
#define BOOST_TEST_DYN_LINK
#include <boost/test/unit_test.hpp>
#include "../../src/problems/1-50/18/problem18.hpp"
BOOST_AUTO_TEST_SUITE( Problem18 )
BOOST_AUTO_TEST_CASE( Solution ) {
auto res = problems::problem18::solve();
BOOST_CHECK_EQUAL(res, 1074);
}
BOOST_AUTO_TEST_SUITE_END()
|
{"hexsha": "047b6c5abfed54160fc7cb2eea5f4f733b33421e", "size": 360, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "tests/1-50/test_problem18.cpp", "max_stars_repo_name": "abeccaro/project-euler", "max_stars_repo_head_hexsha": "c3b124bb973dc3a1cf29e8c96c3e70c8816d5fa3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2019-12-25T10:17:15.000Z", "max_stars_repo_stars_event_max_datetime": "2019-12-25T10:17:15.000Z", "max_issues_repo_path": "tests/1-50/test_problem18.cpp", "max_issues_repo_name": "abeccaro/project-euler", "max_issues_repo_head_hexsha": "c3b124bb973dc3a1cf29e8c96c3e70c8816d5fa3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/1-50/test_problem18.cpp", "max_forks_repo_name": "abeccaro/project-euler", "max_forks_repo_head_hexsha": "c3b124bb973dc3a1cf29e8c96c3e70c8816d5fa3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.5, "max_line_length": 51, "alphanum_fraction": 0.7, "num_tokens": 92}
|
"""
Bitmap manipulation
Friedrich Schotte, Hun Sun Cho, Dec 2008 - 5 Sep 2009
"""
version = "1.2"
def grow_bitmap(mask,count=1):
"""Extents the area where the pixels have to value 1 by one pixel in each
direction, including diagnonal by the number of pixels given by the
parameter 'count'.
If count is 1 or ommited a single pixel grows to nine pixels.
"""
from numpy import array,zeros
if count < 1: return mask
if count > 1: mask = grow_bitmap(mask,count-1)
w,h = mask.shape
mask2 = zeros((w,h),mask.dtype)
mask2 |= mask
mask2[0:w,0:h-1] |= mask[0:w,1:h] # move up by 1 pixel
mask2[0:w,1:h] |= mask[0:w,0:h-1] # move down by 1 pixel
mask2[0:w-1,0:h] |= mask[1:w,0:h] # move to the left by 1 pixel
mask2[1:w,0:h] |= mask[0:w-1,0:h] # move to the right by 1 pixel
mask2[0:w-1,0:h-1] |= mask[1:w,1:h] # move left and up by 1 pixel
mask2[0:w-1,1:h] |= mask[1:w,0:h-1] # move left and down by 1 pixel
mask2[1:w,0:h-1] |= mask[0:w-1,1:h] # move up and up by 1 pixel
mask2[1:w,1:h] |= mask[0:w-1,0:h-1] # move up and down by 1 pixel
return mask2
def within(image,x,y):
w,h = image.shape
return (0 <= x < h and 0 <= y < w)
def flood_fill(image,border_color,x,y,value):
"Flood fill on a region of non-border_color pixels."
if not within(image,x,y) or image[x,y] == border_color: return
edge = [(x,y)]
image [x,y] = value
while edge:
newedge = []
for (x,y) in edge:
for (s,t) in ((x+1,y), (x-1,y), (x,y+1), (x,y-1)):
if within(image,s,t) and \
image[s,t] not in (border_color,value):
image[s,t] = value
newedge.append((s,t))
edge = newedge
if __name__ == "__main__": # for testing..
from numpy import *
from pylab import *
from time import time
mask = zeros((2048,2048),int8)
mask[924:1124,924:2048] = 1 # to be filled
mask[924:1124,524:724] = 1 # not to be filled
t = time()
flood_fill(mask,0,1024,1024,2)
print time()-t
imshow(mask.T,interpolation='nearest')
show()
|
{"hexsha": "81e7ef85695e772590d68f6adf6a82d89edff6a0", "size": 2139, "ext": "py", "lang": "Python", "max_stars_repo_path": "grow_bitmap.py", "max_stars_repo_name": "bopopescu/Lauecollect", "max_stars_repo_head_hexsha": "60ae2b05ea8596ba0decf426e37aeaca0bc8b6be", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "grow_bitmap.py", "max_issues_repo_name": "bopopescu/Lauecollect", "max_issues_repo_head_hexsha": "60ae2b05ea8596ba0decf426e37aeaca0bc8b6be", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-10-22T21:28:31.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-22T21:39:12.000Z", "max_forks_repo_path": "grow_bitmap.py", "max_forks_repo_name": "bopopescu/Lauecollect", "max_forks_repo_head_hexsha": "60ae2b05ea8596ba0decf426e37aeaca0bc8b6be", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-06-06T15:06:46.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-20T02:03:22.000Z", "avg_line_length": 31.9253731343, "max_line_length": 77, "alphanum_fraction": 0.5815801777, "include": true, "reason": "from numpy", "num_tokens": 723}
|
# Empirical estimation of CDF and PDF
## Empirical CDF
struct ECDF{T <: AbstractVector{<:Real}}
sorted_values::T
end
function (ecdf::ECDF)(x::Real)
searchsortedlast(ecdf.sorted_values, x) / length(ecdf.sorted_values)
end
function (ecdf::ECDF)(v::RealVector)
ord = sortperm(v)
m = length(v)
r = similar(ecdf.sorted_values, m)
r0 = 0
i = 1
n = length(ecdf.sorted_values)
for x in ecdf.sorted_values
while i <= m && x > v[ord[i]]
r[ord[i]] = r0
i += 1
end
r0 += 1
if i > m
break
end
end
while i <= m
r[ord[i]] = n
i += 1
end
return r / n
end
"""
ecdf(X)
Return an empirical cumulative distribution function (ECDF) based on a vector of samples
given in `X`.
Note: this function that returns a callable composite type, which can then be applied to
evaluate CDF values on other samples.
`extrema`, `minimum`, and `maximum` are supported to for obtaining the range over which
function is inside the interval ``(0,1)``; the function is defined for the whole real line.
"""
ecdf(X::RealVector{T}) where T<:Real = ECDF(sort(X))
minimum(ecdf::ECDF) = first(ecdf.sorted_values)
maximum(ecdf::ECDF) = last(ecdf.sorted_values)
extrema(ecdf::ECDF) = (minimum(ecdf), maximum(ecdf))
|
{"hexsha": "3e9c31ef58142b7bdff1b72693410007a6bc6b06", "size": 1326, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/empirical.jl", "max_stars_repo_name": "jgoldfar/StatsBase.jl", "max_stars_repo_head_hexsha": "f4567cb9f5a8bd00c146eadae781bdf1b467938a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-02-27T00:22:00.000Z", "max_stars_repo_stars_event_max_datetime": "2019-02-27T00:22:00.000Z", "max_issues_repo_path": "src/empirical.jl", "max_issues_repo_name": "jgoldfar/StatsBase.jl", "max_issues_repo_head_hexsha": "f4567cb9f5a8bd00c146eadae781bdf1b467938a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-07-20T17:06:53.000Z", "max_issues_repo_issues_event_max_datetime": "2018-07-24T17:33:55.000Z", "max_forks_repo_path": "src/empirical.jl", "max_forks_repo_name": "jgoldfar/StatsBase.jl", "max_forks_repo_head_hexsha": "f4567cb9f5a8bd00c146eadae781bdf1b467938a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2017-04-29T08:53:23.000Z", "max_forks_repo_forks_event_max_datetime": "2017-04-29T08:53:23.000Z", "avg_line_length": 23.2631578947, "max_line_length": 91, "alphanum_fraction": 0.6304675716, "num_tokens": 385}
|
# rvt.blend quick TEST
import rvt.default
import rvt.vis
import rvt.blend
import numpy as np
# test blend combination archeological (VAT), general
#####
# manual blending, custom raster numpy arrays
# if you create_layer and don't input image or image_path then if vis_method is correct it automatically
# calculates visualization in render_all_images
layers_manual = rvt.blend.BlenderCombination()
input_dem_path = r"test_data\TM1_564_146.tif"
layers_manual.add_dem_path(dem_path=input_dem_path)
output_blend_path = r"test_data\TM1_564_146_test_blend_manual.tif"
dict_arr_res = rvt.default.get_raster_arr(input_dem_path)
input_dem_arr = dict_arr_res["array"]
x_res = dict_arr_res["resolution"][0]
y_res = dict_arr_res["resolution"][1]
# layers;vis_method;norm;min;max;blending_mode;opacity
# 1;svf;value;0.7;1.0;multiply;25
svf_dict = rvt.vis.sky_view_factor(dem=input_dem_arr, resolution=x_res, compute_svf=True, compute_opns=True)
svf_arr = svf_dict["svf"]
layers_manual.create_layer(vis_method="Sky-View Factor", normalization="value", minimum=0.7, maximum=1,
blend_mode="multiply", opacity=25,
image=svf_arr) # you could also input image_path
# 2;opns_pos;value;68;93;overlay;50
opns_arr = svf_dict["opns"]
layers_manual.create_layer(vis_method="Openness - Positive", normalization="value", minimum=68, maximum=93,
blend_mode="overlay",
opacity=50, image=opns_arr)
# 3;slope;value;0;50;luminosity;50
slope_dict = rvt.vis.slope_aspect(dem=input_dem_arr, resolution_x=x_res, resolution_y=y_res, output_units="degree",
ve_factor=1)
slope_arr = slope_dict["slope"]
layers_manual.create_layer(vis_method="Slope gradient", normalization="value", minimum=0, maximum=50,
blend_mode="luminosity",
opacity=50, image=slope_arr)
# # 4;hillshade;value;0;1;normal;100
hillshade_arr = rvt.vis.hillshade(dem=input_dem_arr, resolution_x=x_res, resolution_y=y_res)
layers_manual.create_layer(vis_method="Hillshade", normalization="value", minimum=0, maximum=1, blend_mode="normal",
opacity=100, image=hillshade_arr)
# 5;None
layers_manual.create_layer(vis_method=None)
# you can save to GeoTif if save_render_path presented else it only returns array
render_arr = layers_manual.render_all_images(save_render_path=output_blend_path)
# you can save layers combination to .json file, be aware image and image_path won't be saved
# this is a problem when vis_method is non rvt visualization(is not correct)!
layers_manual.save_to_file(r"settings\blender_custom_layers.json")
#####
#####
# automatic blending, blending from blender_file with values from default.DefaultValues class
# when save_visualizations=False, blending save every needed visualization in GeoTif in dem_path directory
input_dem_path = r"test_data\TM1_564_146.tif"
# Example file (for file_path) in dir settings: blender_file_example.txt
blender_file = r"settings\blender_file_example.json"
output_blend_path = r"test_data\TM1_564_146_test_blend_automatic.tif"
layers_auto = rvt.blend.BlenderCombination()
default = rvt.default.DefaultValues()
default.read_default_from_file(r"settings\default_settings.json")
layers_auto.read_from_file(file_path=blender_file) # build BlenderCombination from file
# when building_blender from file single BlenderLayer image and image_path are None
layers_auto.add_dem_path(input_dem_path) # needed when save_visualizations is True and save_rander_path is not None
# render_all_images reads images simultaneously if layer (BlenderLayer) image is None and image_path is None it
# calculates them
layers_auto.render_all_images(default=default, save_visualizations=True, save_render_path=output_blend_path,
save_float=True, save_8bit=True)
#####
#####
# automatic blending, blending from blender_file with values from default.DefaultValues class
# when save_visualizations=False, blending doesn't save every visualization, it calculates it when needed
layers_auto.add_dem_arr(dem_arr=input_dem_arr, dem_resolution=x_res) # needed when save_visualizations is False
rendered_arr = layers_auto.render_all_images(save_visualizations=False)
#####
|
{"hexsha": "fbe15b1833219f41cabaf91d0387af58dafa54b1", "size": 4291, "ext": "py", "lang": "Python", "max_stars_repo_path": "test_blend.py", "max_stars_repo_name": "H4estu/RVT_py", "max_stars_repo_head_hexsha": "6dc408f495c455c2b5d88f552f22d4496d288fb2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2020-10-12T06:10:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T15:33:00.000Z", "max_issues_repo_path": "test_blend.py", "max_issues_repo_name": "H4estu/RVT_py", "max_issues_repo_head_hexsha": "6dc408f495c455c2b5d88f552f22d4496d288fb2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-12-14T06:42:12.000Z", "max_issues_repo_issues_event_max_datetime": "2021-06-10T19:25:03.000Z", "max_forks_repo_path": "test_blend.py", "max_forks_repo_name": "H4estu/RVT_py", "max_forks_repo_head_hexsha": "6dc408f495c455c2b5d88f552f22d4496d288fb2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-06-10T19:00:22.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-10T19:00:22.000Z", "avg_line_length": 51.0833333333, "max_line_length": 116, "alphanum_fraction": 0.7678862736, "include": true, "reason": "import numpy", "num_tokens": 1026}
|
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 26 18:53:22 2020
@author: Lenovo
"""
"""
Horizontal analysis of Limit Order Book:
Given an initial order book status, the Horizontal Tests will simulate the first arrived order 10000 or even more times,
and verify the distribution of each type of order.
"""
import numpy as np
import pandas as pd
import time
import math
import os
import sys
sys.path.append(os.path.pardir)
# from LOB.LOB import LimitOrderBook
#os.chdir("C:\\Users\\Lenovo\\Desktop")
from OMS.OMS import OrderManagementSystem
from ZIAgent.ZIAgent import ZIAgent
class HorizontalAnalysis:
def __init__(self,SampleSize,OMSTest,ZIAgentTest):
self.SampleSize = SampleSize
self.OMSTest = OMSTest
self.ZIAgentTest = ZIAgentTest
self.TICK_SIZE = ZIAgentTest.TICK_SIZE
self.qtysize = ZIAgentTest.qtysize
self.PRICE_A_START = OMSTest.ask
self.PRICE_B_START = OMSTest.bid
self.MU_Est = ZIAgentTest.MU
self.LAMBDA_Est = ZIAgentTest.LAMBDA
self.THETA_Est = ZIAgentTest.THETA
def OrderCollection(self, SampleSize, OMSTest, ZIAgentTest):
k = 0
OrderIssued = [[]]
ArrivalTime = []
while(k < SampleSize):
ZIAgentTest.Execute(OMSTest) # execute ZIAgent generator
OrderIssued.append(ZIAgentTest.OrderIssued)
ArrivalTime.append(ZIAgentTest.OrderArrivalTime)
k += 1
OrderIssued.pop(0)
return(ArrivalTime, OrderIssued)
def OrderClassify(self, Ordertype,Direction,OrderIssued,ArrivalTime):
if (Ordertype == 'limit' and Direction =='buy'):
OrderRec = []
for i in range(len(ArrivalTime)):
if (OrderIssued[i][1]== 'limit' and OrderIssued[i][2]== 'buy'):
distance = round((self.PRICE_A_START-OrderIssued[i][4])/self.TICK_SIZE)
OrderRec.append([distance, ArrivalTime[i]])
else:
OrderRec = OrderRec
elif (Ordertype == 'limit' and Direction =='sell'):
OrderRec = []
for i in range(len(ArrivalTime)):
if (OrderIssued[i][1]== 'limit' and OrderIssued[i][2]== 'sell'):
distance = round((OrderIssued[i][4]-self.PRICE_B_START)/self.TICK_SIZE)
OrderRec.append([distance, ArrivalTime[i]])
else:
OrderRec = OrderRec
elif (Ordertype == 'cancel' and Direction =='buy'):
OrderRec = []
for i in range(len(ArrivalTime)):
if (OrderIssued[i][1]== 'cancel' and OrderIssued[i][2]== 'buy'):
distance = round((self.PRICE_A_START-OrderIssued[i][4])/self.TICK_SIZE)
OrderRec.append([distance, ArrivalTime[i]])
else:
OrderRec = OrderRec
elif (Ordertype == 'cancel' and Direction =='sell'):
OrderRec = []
for i in range(len(ArrivalTime)):
if (OrderIssued[i][1]== 'cancel' and OrderIssued[i][2]== 'sell'):
distance = round((OrderIssued[i][4]-self.PRICE_B_START)/self.TICK_SIZE)
OrderRec.append([distance, ArrivalTime[i]])
else:
OrderRec = OrderRec
elif (Ordertype == 'market' and Direction =='buy'):
OrderRec = []
for i in range(len(ArrivalTime)):
if (OrderIssued[i][1]== 'market' and OrderIssued[i][2]== 'buy'):
OrderRec.append(ArrivalTime[i])
else:
OrderRec = OrderRec
elif (Ordertype == 'market' and Direction =='sell'):
OrderRec = []
for i in range(len(ArrivalTime)):
if (OrderIssued[i][1]== 'market' and OrderIssued[i][2]== 'sell'):
OrderRec.append(ArrivalTime[i])
else:
OrderRec = OrderRec
return(OrderRec)
def LimitOrderAnalysis(self, Ordertype,Direction,OrderIssued,ArrivalTime):
# Ordertype should be "limit" or "cancel"
OrderRec = self.OrderClassify(Ordertype,Direction,OrderIssued,ArrivalTime)
limitOrderCount = [[], [], [], [], [], []]
# only collect distance from 1 to 6
for i in range(len(OrderRec)):
j = int(round(OrderRec[i][0]))
limitOrderCount[j-1].append(OrderRec[i][1])
OrderNum = pd.DataFrame(columns=('Distance', 'OrderNum'))
OrderTime = pd.DataFrame(columns=('Distance', 'ArrivalTime_Mean', 'ArrivalTime_Std'))
for i in range(len(limitOrderCount)):
OrderNum.loc[i] = [i+1,len(limitOrderCount[i])]
OrderTime.loc[i] = [i+1, np.mean(limitOrderCount[i]),np.std(limitOrderCount[i])]
OrderStat = pd.merge(OrderNum,OrderTime)
return(OrderNum,OrderTime,OrderStat)
def MarketOrderAnalysis(self, Ordertype,Direction,OrderIssued,ArrivalTime):
OrderRec = self.OrderClassify(Ordertype,Direction,OrderIssued,ArrivalTime)
OrderNum = pd.DataFrame(columns=('Distance', 'OrderNum'))
OrderTime = pd.DataFrame(columns=('Distance', 'ArrivalTime_Mean', 'ArrivalTime_Std'))
OrderNum.loc[0] = [0,len(OrderRec)]
OrderTime.loc[0] = [0, np.mean(OrderRec),np.std(OrderRec)]
OrderStat = pd.merge(OrderNum,OrderTime)
return(OrderNum,OrderTime,OrderStat)
def BirthRate(self,distance, Lambda, Mu):
if distance > 5:
return 0
elif distance > 0 and distance <= 5:
return Lambda[distance -1]
elif distance == 0:
return Mu
def DeathRate(self,distance, Theta):
if distance > 5:
return Theta[-1]
elif distance <= 5:
return Theta[distance-1]
def AskOrderStat(self, OMSTest,OrderIssued,ArrivalTime):
AskOrder = pd.DataFrame(columns=('Distance', 'Qty','BirthRate',"DeathRate"))
Distance_Ask = []
birthrate = []
for i in range(len(OMSTest.ask_book)):
dis = int(round((OMSTest.ask_book[i][0]-self.PRICE_B_START)/self.TICK_SIZE))
Distance_Ask.append(dis)
Distance_Ask_Max = max(Distance_Ask)
for i in range(Distance_Ask_Max+1):
birthrate.append(self.BirthRate(i, self.LAMBDA_Est, self.MU_Est))
AskOrder['Distance'] = list(range(Distance_Ask_Max+1))
AskOrder['BirthRate'] = birthrate
for i in range(len(OMSTest.ask_book)):
distance = int(round((OMSTest.ask_book[i][0]-self.PRICE_B_START)/self.TICK_SIZE))
for j in (range(Distance_Ask_Max+1)):
if distance == j:
qty = int(round(OMSTest.ask_book[i][1]/self.qtysize))
deathrate = self.DeathRate(distance,self.THETA_Est)*qty
AskOrder.Qty.iloc[j] = qty
AskOrder.DeathRate.iloc[j] = deathrate
AskOrder = AskOrder.fillna(0)
limitsell = self.LimitOrderAnalysis('limit','sell',OrderIssued,ArrivalTime)[0]
marketsell = self.MarketOrderAnalysis('market','sell',OrderIssued,ArrivalTime)[0]
cancelsell = self.LimitOrderAnalysis('cancel','sell',OrderIssued,ArrivalTime)[0]
cancel0 = pd.DataFrame(columns=('Distance', 'CanceledNum'))
cancel0.loc[0] = [0,0] # cancel number of marker order should be zero
cancelsell.columns = ['Distance', 'CanceledNum']
CanceledOrder = pd.concat([cancel0,cancelsell], axis=0 ,ignore_index=True)
GenOrder = pd.concat([marketsell,limitsell], axis=0 ,ignore_index=True)
GenOrder.columns = ['Distance', 'GeneratedNum']
OrderNum = pd.merge(GenOrder,CanceledOrder)
askorderstat = pd.merge(AskOrder,OrderNum)
return(askorderstat)
def BidOrderStat(self, OMSTest,OrderIssued,ArrivalTime):
BidOrder = pd.DataFrame(columns=('Distance', 'Qty','BirthRate',"DeathRate"))
Distance_Bid = []
birthrate = []
for i in range(len(OMSTest.bid_book)):
dis = int(round((self.PRICE_A_START-OMSTest.bid_book[i][0])/self.TICK_SIZE))
Distance_Bid.append(dis)
Distance_Bid_Max = max(Distance_Bid)
for i in range(Distance_Bid_Max+1):
birthrate.append(self.BirthRate(i, self.LAMBDA_Est, self.MU_Est))
BidOrder['Distance'] = list(range(Distance_Bid_Max+1))
BidOrder['BirthRate'] = birthrate
for i in range(len(OMSTest.bid_book)):
distance = int(round((self.PRICE_A_START-OMSTest.bid_book[i][0])/self.TICK_SIZE))
for j in (range(Distance_Bid_Max+1)):
if distance == j:
qty = int(round(OMSTest.bid_book[i][1]/self.qtysize))
deathrate = self.DeathRate(distance,self.THETA_Est)*qty
BidOrder.Qty.iloc[j] = qty
BidOrder.DeathRate.iloc[j] = deathrate
BidOrder = BidOrder.fillna(0)
limitbuy = self.LimitOrderAnalysis('limit','buy',OrderIssued,ArrivalTime)[0]
marketbuy = self.MarketOrderAnalysis('market','buy',OrderIssued,ArrivalTime)[0]
cancelbuy = self.LimitOrderAnalysis('cancel','buy',OrderIssued,ArrivalTime)[0]
cancel0 = pd.DataFrame(columns=('Distance', 'CanceledNum'))
cancel0.loc[0] = [0,0] # cancel number of marker order should be zero
cancelbuy.columns = ['Distance', 'CanceledNum']
CanceledOrder = pd.concat([cancel0,cancelbuy], axis=0 ,ignore_index=True)
GenOrder = pd.concat([marketbuy,limitbuy], axis=0 ,ignore_index=True)
GenOrder.columns = ['Distance', 'GeneratedNum']
OrderNum = pd.merge(GenOrder,CanceledOrder)
bidorderstat = pd.merge(BidOrder,OrderNum)
return(bidorderstat)
def ResultTest(self, OrderType, Direction, OMSTest,OrderIssued,ArrivalTime):
askorder = self.AskOrderStat(OMSTest,OrderIssued,ArrivalTime)
bidorder = self.BidOrderStat(OMSTest,OrderIssued,ArrivalTime)
TotalRate = round(sum(askorder.BirthRate)+sum(askorder.DeathRate)\
+sum(bidorder.BirthRate)+sum(bidorder.DeathRate),2)
Result = pd.DataFrame(columns=('Distance', 'TheoProb','RealProb', 'TheoStd', 'z_value','P_or_F'))
if (Direction == 'buy' and OrderType == 'birth'):
Result['Distance'] = bidorder['Distance']
Result['RealProb'] = bidorder.GeneratedNum/self.SampleSize #
Result['TheoProb'] = bidorder.BirthRate/TotalRate #
elif (Direction == 'buy' and OrderType == 'death'):
Result['Distance'] = bidorder['Distance']
Result['RealProb'] = bidorder.CanceledNum/self.SampleSize #
Result['TheoProb'] = bidorder.DeathRate/TotalRate #
elif (Direction == 'sell' and OrderType == 'birth'):
Result['Distance'] = askorder['Distance']
Result['RealProb'] = askorder.GeneratedNum/self.SampleSize #
Result['TheoProb'] = askorder.BirthRate/TotalRate #
elif (Direction == 'sell' and OrderType == 'death'):
Result['Distance'] = askorder['Distance']
Result['RealProb'] = askorder.CanceledNum/self.SampleSize #
Result['TheoProb'] = askorder.DeathRate/TotalRate #
Result['TheoStd'] = ((1-Result.TheoProb)*Result.TheoProb)**0.5 #
Result = Result.fillna(0)
for i in range(len(Result['Distance'])):
if Result.TheoProb.loc[i] > 0:
diff = (Result.TheoProb.loc[i]-Result.RealProb.loc[i])
Result.z_value.loc[i] = (diff*math.sqrt(self.SampleSize))/Result.TheoStd.loc[i]
for i in range(len(Result['z_value'])):
if abs(Result.z_value.loc[i]) < 1.96:
Result.P_or_F.loc[i] = 'PASS'
elif abs(Result.z_value.loc[i]) >= 1.96:
Result.P_or_F.loc[i] = 'FAIL'
Result = Result.drop(columns = ['TheoStd'])
return(Result)
if __name__ == "__main__":
MAX_PRICE_LEVELS = 200 # total number of price grids
TICK_SIZE = 0.1 # usually 1 or 0.1 or 0.01
QTYSIZE = 1000
PRICE_START = 9.7
PRICE_A_START = 10.0
PRICE_B_START = 9.8
# parameters according to Cont paper
MU_Est = 0.94
LAMBDA_Est = [1.85,1.51,1.09,0.88,0.77]
THETA_Est = [0.71,0.81,0.68,0.56,0.47]
CurrentTime = 0
OrderCount = 0
ZIOrderBook = [[]]
SampleSize = 1000000
OMSTest = OrderManagementSystem(PRICE_A_START,PRICE_B_START,TICK_SIZE,5,PRICE_START)
ZIAgentTest = ZIAgent(NAME = "ZIagent",
OMSinput = OMSTest,
MAX_PRICE_LEVELS = MAX_PRICE_LEVELS,
TICK_SIZE = TICK_SIZE,
#QTYSIZE = qtysize,
MU = MU_Est,
LAMBDA = LAMBDA_Est,
THETA = THETA_Est,
CurrentTime = CurrentTime,
OrderCount = OrderCount,
ZIOrderBook = ZIOrderBook)
HorizontalTest = HorizontalAnalysis(SampleSize,OMSTest,ZIAgentTest)
starttime=time.perf_counter()
OrderBookSample = HorizontalTest.OrderCollection(SampleSize,OMSTest,ZIAgentTest)
arrival_time = OrderBookSample[0]
order_issued = OrderBookSample[1]
endtime=time.perf_counter()
print("the running cost:",endtime-starttime, " seconds")
### start analysis
print("Ask Book Statistics:")
print(HorizontalTest.AskOrderStat(OMSTest,order_issued,arrival_time))
print("Bid Book Statistics:")
print(HorizontalTest.BidOrderStat(OMSTest,order_issued,arrival_time))
print("Limit&Market Buy Order Test:")
print(HorizontalTest.ResultTest('birth', 'buy', OMSTest, order_issued, arrival_time))
print("Limit&Market Sell Order Test:")
print(HorizontalTest.ResultTest('birth', 'sell', OMSTest, order_issued, arrival_time))
print("Cancel Buy Order Test:")
print(HorizontalTest.ResultTest('death', 'buy', OMSTest, order_issued, arrival_time))
print("Cancel Sell Order Test:")
print(HorizontalTest.ResultTest('death', 'sell', OMSTest, order_issued, arrival_time))
|
{"hexsha": "5063fae5b967016ce06b15efff5da14aff5e821b", "size": 15151, "ext": "py", "lang": "Python", "max_stars_repo_path": "ZIAgent/HorizontalAnalysis.py", "max_stars_repo_name": "HKUST-DB-Capstone2020/Market-Agent-Simulation", "max_stars_repo_head_hexsha": "227514a118d6ebcdc81f9948b1a21af71492ca40", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2020-04-22T16:17:47.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-02T03:26:50.000Z", "max_issues_repo_path": "ZIAgent/HorizontalAnalysis.py", "max_issues_repo_name": "HKUST-DB-Capstone2020/Market-Agent-Simulation", "max_issues_repo_head_hexsha": "227514a118d6ebcdc81f9948b1a21af71492ca40", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-04-10T14:40:44.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-31T09:20:44.000Z", "max_forks_repo_path": "ZIAgent/HorizontalAnalysis.py", "max_forks_repo_name": "TSKC13/Market-Agent-Simulation", "max_forks_repo_head_hexsha": "29c4b6caf92110957d455698c6d18462b6f7dd12", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2020-04-10T14:38:01.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-02T03:26:52.000Z", "avg_line_length": 42.0861111111, "max_line_length": 122, "alphanum_fraction": 0.571381427, "include": true, "reason": "import numpy", "num_tokens": 3722}
|
# Import packages
from datetime import date
from pathlib import Path
import numpy as np
import pandas as pd
# Import data
import_fp = Path("data/main/cornelia-raw.csv")
dataset = pd.read_csv(import_fp, encoding="utf-8", sep=";")
# Fix pseudo-NaN values
dataset.loc[:, "actor_first_name"] = (dataset.loc[:, "actor_first_name"]
.replace(to_replace = ["[NN]",
"anonymous"],
value = np.nan))
# Fix date columns format multiplicity
date_cols = ["date_day", "date_month", "date_year"]
for date_col in date_cols:
notna_mask = dataset.loc[:, date_col].notna()
dataset.loc[notna_mask, date_col] = (dataset
.loc[notna_mask, date_col]
.astype(int) # To drop right zeros
.astype(str)
.str.lstrip("0")) # To drop left zeros
# Fix 'source_entry' format multiplicity
alpha_mask = dataset.loc[:, "source_entry"].str[-1].str.isalpha()
dataset.loc[alpha_mask, "source_entry"] = (dataset
.loc[alpha_mask, "source_entry"]
.str[-1])
dataset.loc[~alpha_mask, "source_entry"] = ""
dataset.loc[:, "source_entry"] = ((dataset["date_day"].astype(str)
+ dataset["date_month"].astype(str)
+ dataset["date_year"].astype(str)
+ dataset["source_entry"])
.str.replace("nannan", "0000"))
# Fix ENG/NL duality
replace_dict = {
"role": {
"apotheker": "pharmacist",
"vergulder": "gilder",
"glasschilder": "glass painter",
"gelaesschryver": "glass painter",
"goudslager": "goldsmith",
"plaatslager": "plate craftsman"
},
"status": {
"leermeester": "tutor",
"meesterszoon": "master's son",
"ouderman": "dean",
"recognitie": "non-sworn in master",
"cortosie": "non-sworn in master"
}
}
dataset.loc[:, ["role", "status"]] = (dataset.loc[:, ["role", "status"]]
.replace(to_replace=replace_dict))
# Enrich 'role' column
# Create an 'actor role' dataframe that holds the role info for all the actors
actor_role = dataset.loc[:, ["actor_id","role"]]
actor_role = (actor_role
.loc[actor_role["role"] != "member", ["actor_id", "role"]]
.groupby("actor_id")
.first()
.reset_index())
# Join with the original dataset
dataset = (dataset
.merge(actor_role, on="actor_id")
.drop(["role_x"], axis=1)
.rename({"role_y": "role"}, axis=1))
# Export data
export_fp = Path("data/cleaned/cornelia-cleaned.csv")
dataset.to_csv(export_fp, encoding="utf-8",
sep=";", index=False)
|
{"hexsha": "1e1fd69073613b6a97b87b75d5e79a40d145cd17", "size": 3015, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/data-cleaning/clean-main.py", "max_stars_repo_name": "ejgenc/data-analysis_cornelia", "max_stars_repo_head_hexsha": "e1c855aec786427ad18a28274895719fab7987ef", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-29T07:39:55.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-29T07:39:55.000Z", "max_issues_repo_path": "src/data-cleaning/clean-main.py", "max_issues_repo_name": "ejgenc/data-analysis_cornelia", "max_issues_repo_head_hexsha": "e1c855aec786427ad18a28274895719fab7987ef", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-11-14T12:10:32.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-14T12:10:32.000Z", "max_forks_repo_path": "src/data-cleaning/clean-main.py", "max_forks_repo_name": "ejgenc/cornelia-dataset", "max_forks_repo_head_hexsha": "e1c855aec786427ad18a28274895719fab7987ef", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.2222222222, "max_line_length": 79, "alphanum_fraction": 0.5187396352, "include": true, "reason": "import numpy", "num_tokens": 688}
|
import sys
import torch
import torch.nn as nn
from torchvision import transforms
sys.path.append("/opt/cocoapi/PythonAPI")
from data_loader_wrapper import DataLoaderWrapper
from model import EncoderCNN, DecoderRNN
import math
## TODO #1: Select appropriate values for the Python variables below.
batch_size = 64
vocab_threshold = 5
vocab_from_file = True
embed_size = 512 # 512
hidden_size = 512 # 512
num_epochs = 1000 # number of training epochs
save_every = 1 # determines frequency of saving model weights
print_every = 100 # determines window for printing average loss
log_file = "training_log.txt" # name of file with saved training loss and perplexity
transform_train = transforms.Compose(
[
transforms.Resize(256), # smaller edge of image resized to 256
transforms.RandomCrop(224), # get 224x224 crop from random location
transforms.RandomHorizontalFlip(), # horizontally flip image with probability=0.5
transforms.ToTensor(), # convert the PIL Image to a tensor
transforms.Normalize(
(0.485, 0.456, 0.406), # normalize image for pre-trained model
(0.229, 0.224, 0.225),
),
]
)
# Build data loader.
data_loader_wrapper = DataLoaderWrapper(
transform=transform_train,
batch_size_for_training=batch_size,
vocab_threshold=vocab_threshold,
vocab_from_file=vocab_from_file,
num_workers=4,
)
# The size of the vocabulary.
vocab_size = len(data_loader_wrapper.dataset_for_training.vocab)
# Initialize the encoder and decoder.
encoder = EncoderCNN(embed_size)
decoder = DecoderRNN(embed_size, hidden_size, vocab_size)
# Move models to GPU if CUDA is available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device = torch.device("cpu") # overwrite it with cpu
encoder.to(device)
decoder.to(device)
# Define the loss function.
criterion = (
nn.CrossEntropyLoss().cuda() if torch.cuda.is_available() else nn.CrossEntropyLoss()
)
criterion = nn.CrossEntropyLoss() # overwrite it with cpu
# TODO #3: Specify the learnable parameters of the model.
params = list(encoder.embed.parameters()) + list(decoder.parameters())
# TODO #4: Define the optimizer.
optimizer = torch.optim.Adam(params, lr=0.001)
# Set the total number of training steps per epoch.
total_step = math.ceil(
len(data_loader_wrapper.dataset_for_training.caption_lengths)
/ data_loader_wrapper.dataset_for_training.batch_size
)
# <a id='step2'></a>
# ## Step 2: Train your Model
#
# Once you have executed the code cell in **Step 1**, the training procedure below should run without issue.
#
# It is completely fine to leave the code cell below as-is without modifications to train your model. However, if you would like to modify the code used to train the model below, you must ensure that your changes are easily parsed by your reviewer. In other words, make sure to provide appropriate comments to describe how your code works!
#
# You may find it useful to load saved weights to resume training. In that case, note the names of the files containing the encoder and decoder weights that you'd like to load (`encoder_file` and `decoder_file`). Then you can load the weights by using the lines below:
#
# ```python
# # Load pre-trained weights before resuming training.
# encoder.load_state_dict(torch.load(os.path.join('../models', encoder_file)))
# decoder.load_state_dict(torch.load(os.path.join('../models', decoder_file)))
# ```
#
# While trying out parameters, make sure to take extensive notes and record the settings that you used in your various training runs. In particular, you don't want to encounter a situation where you've trained a model for several hours but can't remember what settings you used :).
#
# ### A Note on Tuning Hyperparameters
#
# To figure out how well your model is doing, you can look at how the training loss and perplexity evolve during training - and for the purposes of this project, you are encouraged to amend the hyperparameters based on this information.
#
# However, this will not tell you if your model is overfitting to the training data, and, unfortunately, overfitting is a problem that is commonly encountered when training image captioning models.
#
# For this project, you need not worry about overfitting. **This project does not have strict requirements regarding the performance of your model**, and you just need to demonstrate that your model has learned **_something_** when you generate captions on the test data. For now, we strongly encourage you to train your model for the suggested 3 epochs without worrying about performance; then, you should immediately transition to the next notebook in the sequence (**3_Inference.ipynb**) to see how your model performs on the test data. If your model needs to be changed, you can come back to this notebook, amend hyperparameters (if necessary), and re-train the model.
#
# That said, if you would like to go above and beyond in this project, you can read about some approaches to minimizing overfitting in section 4.3.1 of [this paper](http://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=7505636). In the next (optional) step of this notebook, we provide some guidance for assessing the performance on the validation dataset.
# In[ ]:
import numpy as np
import os
# Open the training log file.
f = open(log_file, "w")
for epoch in range(1, num_epochs + 1):
for i_step in range(1, total_step + 1):
# Obtain the batch.
images, captions = next(
iter(data_loader_wrapper.get_data_loader_for_training())
)
# Move batch of images and captions to GPU if CUDA is available.
images = images.to(device)
captions = captions.to(device)
# Zero the gradients.
decoder.zero_grad()
encoder.zero_grad()
# Pass the inputs through the CNN-RNN model.
features = encoder(images)
outputs = decoder(features, captions)
# Calculate the batch loss.
loss = criterion(outputs.view(-1, vocab_size), captions.view(-1))
# Backward pass.
loss.backward()
# Update the parameters in the optimizer.
optimizer.step()
# Get training statistics.
stats = (
f"Epoch [{epoch}/{num_epochs}], "
f"Step [{i_step}/{total_step}], "
f"Loss: {loss.item():.4f}, "
f"Perplexity: {np.exp(loss.item()):5.4f}"
)
# Print training statistics (on same line).
print("\r" + stats, end="")
sys.stdout.flush()
# Print training statistics to file.
f.write(stats + "\n")
f.flush()
# Print training statistics (on different line).
if i_step % print_every == 0:
print("\r" + stats)
# Close the training log file.
f.close()
# <a id='step3'></a>
# ## Step 3: (Optional) Validate your Model
#
# To assess potential overfitting, one approach is to assess performance on a validation set. If you decide to do this **optional** task, you are required to first complete all of the steps in the next notebook in the sequence (**3_Inference.ipynb**); as part of that notebook, you will write and test code (specifically, the `sample` method in the `DecoderRNN` class) that uses your RNN decoder to generate captions. That code will prove incredibly useful here.
#
# If you decide to validate your model, please do not edit the data loader in **data_loader.py**. Instead, create a new file named **data_loader_val.py** containing the code for obtaining the data loader for the validation data. You can access:
# - the validation images at filepath `'/opt/cocoapi/images/train2014/'`, and
# - the validation image caption annotation file at filepath `'/opt/cocoapi/annotations/captions_val2014.json'`.
#
# The suggested approach to validating your model involves creating a json file such as [this one](https://github.com/cocodataset/cocoapi/blob/master/results/captions_val2014_fakecap_results.json) containing your model's predicted captions for the validation images. Then, you can write your own script or use one that you [find online](https://github.com/tylin/coco-caption) to calculate the BLEU score of your model. You can read more about the BLEU score, along with other evaluation metrics (such as TEOR and Cider) in section 4.1 of [this paper](https://arxiv.org/pdf/1411.4555.pdf). For more information about how to use the annotation file, check out the [website](http://cocodataset.org/#download) for the COCO dataset.
# In[ ]:
# (Optional) TODO: Validate your model.
|
{"hexsha": "3e9eea5af9fa34bdc1976e261e58a39ee6266894", "size": 8584, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/2_Training.py", "max_stars_repo_name": "hogansung/udacity-computer-vision-nanodegree-program-project-2", "max_stars_repo_head_hexsha": "3c9cfb42532f5149003017acb9c950c0d2d7499b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/2_Training.py", "max_issues_repo_name": "hogansung/udacity-computer-vision-nanodegree-program-project-2", "max_issues_repo_head_hexsha": "3c9cfb42532f5149003017acb9c950c0d2d7499b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/2_Training.py", "max_forks_repo_name": "hogansung/udacity-computer-vision-nanodegree-program-project-2", "max_forks_repo_head_hexsha": "3c9cfb42532f5149003017acb9c950c0d2d7499b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.652173913, "max_line_length": 730, "alphanum_fraction": 0.726584343, "include": true, "reason": "import numpy", "num_tokens": 1962}
|
"""
$(SIGNATURES)
Update flow variables with finite volume formulation
"""
function step!(fwL::X, w::X, prim::AV{X}, fwR::X, a, dx, RES, AVG) where {X<:FN} # scalar
#--- store W^n and calculate H^n,\tau^n ---#
w_old = deepcopy(w)
#--- update W^{n+1} ---#
w += (fwL - fwR) / dx
prim .= conserve_prim(w, a)
#--- record residuals ---#
RES += (w - w_old)^2
AVG += abs(w)
return w, RES, AVG
end
"""
$(SIGNATURES)
"""
function step!(
fwL::X,
w::Y,
prim::Y,
fwR::X,
γ,
dx,
RES,
AVG,
) where {X<:AA{<:FN,1},Y<:AA{<:FN,1}} # 1D0F
#--- store W^n and calculate H^n,\tau^n ---#
w_old = deepcopy(w)
#--- update W^{n+1} ---#
@. w += (fwL - fwR) / dx
prim .= conserve_prim(w, γ)
#--- record residuals ---#
@. RES += (w - w_old)^2
@. AVG += abs(w)
return nothing
end
"""
$(SIGNATURES)
Mixture
"""
function step!(
fwL::T1,
w::T2,
prim::T2,
fwR::T1,
γ,
mi,
ni,
me,
ne,
Kn,
dx,
dt,
RES,
AVG,
) where {T1<:AA{<:FN,2},T2<:AA{<:FN,2}}
#--- update conservative flow variables ---#
# w^n
w_old = deepcopy(w)
prim_old = deepcopy(prim)
# flux -> w^{n+1}
@. w += (fwL - fwR) / dx
prim .= mixture_conserve_prim(w, γ)
# temperature protection
if prim[end, 1] < 0
@warn "negative temperature update of component 1"
w .= w_old
prim .= prim_old
elseif prim[end, 2] < 0
@warn "negative temperature update of component 2"
w .= w_old
prim .= prim_old
end
# source -> w^{n+1}
# explicit
tau = aap_hs_collision_time(prim, mi, ni, me, ne, Kn)
mprim = aap_hs_prim(prim, tau, mi, ni, me, ne, Kn)
mw = mixture_prim_conserve(mprim, γ)
for k in axes(w, 2)
@. w[:, k] += (mw[:, k] - w_old[:, k]) * dt / tau[k]
end
prim .= mixture_conserve_prim(w, γ)
#--- record residuals ---#
@. RES += (w_old - w)^2
@. AVG += abs(w)
end
"""
$(SIGNATURES)
1D1F1V
"""
function step!(
fwL::T1,
ffL::T2,
w::T3,
prim::T3,
f::T4,
fwR::T1,
ffR::T2,
u::T5,
weights::T5,
γ,
μᵣ,
ω,
Pr,
dx,
dt,
RES,
AVG,
collision = :bgk::Symbol,
) where {T1<:AA{<:FN,1},T2<:AA{<:FN,1},T3<:AA{<:FN,1},T4<:AA{<:FN,1},T5<:AA{<:FN,1}}
#--- store W^n and calculate H^n,\tau^n ---#
w_old = deepcopy(w)
if collision == :shakhov
q = heat_flux(f, prim, u, weights)
M_old = maxwellian(u, prim)
S = shakhov(u, M_old, q, prim, Pr)
else
S = zeros(axes(f))
end
#--- update W^{n+1} ---#
@. w += (fwL - fwR) / dx
prim .= conserve_prim(w, γ)
#--- record residuals ---#
@. RES += (w - w_old)^2
@. AVG += abs(w)
#--- calculate M^{n+1} and tau^{n+1} ---#
M = maxwellian(u, prim)
M .+= S
τ = vhs_collision_time(prim, μᵣ, ω)
#--- update distribution function ---#
for i in eachindex(u)
f[i] = (f[i] + (ffL[i] - ffR[i]) / dx + dt / τ * M[i]) / (1.0 + dt / τ)
end
end
"""
$(SIGNATURES)
1D1F3V
"""
function step!(
fwL::T1,
ffL::T2,
w::T3,
prim::T3,
f::T4,
fwR::T1,
ffR::T2,
uVelo::T5,
vVelo::T5,
wVelo::T5, # avoid conflict with w
weights::T5,
γ,
μᵣ,
ω,
Pr,
dx,
dt,
RES,
AVG,
collision = :bgk::Symbol,
) where {T1<:AA{<:FN,1},T2<:AA{<:FN,3},T3<:AA{<:FN,1},T4<:AA{<:FN,3},T5<:AA{<:FN,3}}
#--- store W^n and calculate shakhov term ---#
w_old = deepcopy(w)
if collision == :shakhov
q = heat_flux(f, prim, uVelo, vVelo, wVelo, weights)
M_old = maxwellian(uVelo, vVelo, wVelo, prim)
S = shakhov(uVelo, vVelo, wVelo, M_old, q, prim, Pr, K)
else
S = zeros(axes(f))
end
#--- update W^{n+1} ---#
@. w += (fwL - fwR) / dx
prim .= conserve_prim(w, γ)
#--- record residuals ---#
@. RES += (w - w_old)^2
@. AVG += abs(w)
#--- calculate M^{n+1} and tau^{n+1} ---#
M = maxwellian(uVelo, vVelo, wVelo, prim)
M .+= S
τ = vhs_collision_time(prim, μᵣ, ω)
#--- update distribution function ---#
for k in axes(wVelo, 3), j in axes(vVelo, 2), i in axes(uVelo, 1)
f[i, j, k] =
(f[i, j, k] + (ffL[i, j, k] - ffR[i, j, k]) / dx + dt / τ * M[i, j, k]) /
(1.0 + dt / τ)
end
end
"""
$(SIGNATURES)
1D1F3V @ FSM
"""
function step!(
fwL::T1,
ffL::T2,
w::T3,
prim::T3,
f::T4,
fwR::T1,
ffR::T2,
γ,
Kn_bz,
nm,
phi,
psi,
phipsi,
dx,
dt,
RES,
AVG,
collision = :fsm::Symbol,
) where {T1<:AA{<:FN,1},T2<:AA{<:FN,3},T3<:AA{<:FN,1},T4<:AA{<:FN,3}}
@assert collision == :fsm
w_old = deepcopy(w)
@. w += (fwL - fwR) / dx
prim .= conserve_prim(w, γ)
@. RES += (w - w_old)^2
@. AVG += abs(w)
Q = zero(f[:, :, :])
boltzmann_fft!(Q, f, Kn_bz, nm, phi, psi, phipsi)
for k in axes(f, 3), j in axes(f, 2), i in axes(f, 1)
f[i, j, k] += (ffL[i, j, k] - ffR[i, j, k]) / dx + dt * Q[i, j, k]
end
end
"""
$(SIGNATURES)
1D2F1V
"""
function step!(
fwL::T1,
fhL::T2,
fbL::T2,
w::T3,
prim::T3,
h::T4,
b::T4,
fwR::T1,
fhR::T2,
fbR::T2,
u::T5,
weights::T5,
K,
γ,
μᵣ,
ω,
Pr,
dx,
dt,
RES,
AVG,
collision = :bgk::Symbol,
) where {T1<:AA{<:FN,1},T2<:AA{<:FN,1},T3<:AA{<:FN,1},T4<:AA{<:FN,1},T5<:AA{<:FN,1}}
#--- store W^n and calculate shakhov term ---#
w_old = deepcopy(w)
if collision == :shakhov
q = heat_flux(h, b, prim, u, weights)
MH_old = maxwellian(u, prim)
MB_old = MH_old .* K ./ (2.0 * prim[end])
SH, SB = shakhov(u, MH_old, MB_old, q, prim, Pr, K)
else
SH = zeros(axes(h))
SB = zeros(axes(b))
end
#--- update W^{n+1} ---#
@. w += (fwL - fwR) / dx
prim .= conserve_prim(w, γ)
#--- record residuals ---#
@. RES += (w - w_old)^2
@. AVG += abs(w)
#--- calculate M^{n+1} and tau^{n+1} ---#
MH = maxwellian(u, prim)
MB = MH .* K ./ (2.0 * prim[end])
MH .+= SH
MB .+= SB
τ = vhs_collision_time(prim, μᵣ, ω)
#--- update distribution function ---#
for i in eachindex(u)
h[i] = (h[i] + (fhL[i] - fhR[i]) / dx + dt / τ * MH[i]) / (1.0 + dt / τ)
b[i] = (b[i] + (fbL[i] - fbR[i]) / dx + dt / τ * MB[i]) / (1.0 + dt / τ)
end
end
"""
$(SIGNATURES)
1D2F1V @ Mixture
"""
function step!(
fwL::T1,
fhL::T2,
fbL::T2,
w::T3,
prim::T3,
h::T4,
b::T4,
fwR::T1,
fhR::T2,
fbR::T2,
u::T5,
weights::T5,
inK,
γ,
mi,
ni,
me,
ne,
Kn,
Pr,
dx,
dt,
RES,
AVG,
collision = :bgk::Symbol,
) where {T1<:AA{<:FN,2},T2<:AA{<:FN,2},T3<:AA{<:FN,2},T4<:AA{<:FN,2},T5<:AA{<:FN,2}}
#--- update conservative flow variables ---#
# w^n
w_old = deepcopy(w)
prim_old = deepcopy(prim)
# flux -> w^{n+1}
@. w += (fwL - fwR) / dx
prim .= mixture_conserve_prim(w, γ)
# temperature protection
if prim[end, 1] < 0
@warn "negative temperature update of component 1"
w .= w_old
prim .= prim_old
elseif prim[end, 2] < 0
@warn "negative temperature update of component 2"
w .= w_old
prim .= prim_old
end
# source -> w^{n+1}
#=
# DifferentialEquations.jl
tau = get_tau(prim, mi, ni, me, ne, Kn)
for j in axes(w, 2)
prob = ODEProblem(aap_hs_diffeq!,
vcat(w[1:end,j,1], w[1:end,j,2]),
dt,
(tau[1], tau[2], mi, ni, me, ne, Kn, γ)
)
sol = solve(prob, Rosenbrock23())
w[:,j,1] .= sol[end][1:end÷2]
w[:,j,2] .= sol[end][end÷2+1:end]
end
prim .= mixture_conserve_prim(w, γ)
=#
# explicit
tau = aap_hs_collision_time(prim, mi, ni, me, ne, Kn)
mprim = aap_hs_prim(prim, tau, mi, ni, me, ne, Kn)
mw = mixture_prim_conserve(mprim, γ)
for k in axes(w, 2)
@. w[:, k] += (mw[:, k] - w_old[:, k]) * dt / tau[k]
end
prim .= mixture_conserve_prim(w, γ)
#--- update particle distribution function ---#
# flux -> f^{n+1}
@. h += (fhL - fhR) / dx
@. b += (fbL - fbR) / dx
# source -> f^{n+1}
tau = aap_hs_collision_time(prim, mi, ni, me, ne, Kn)
# interspecies interaction
#mprim = deepcopy(prim)
mprim = aap_hs_prim(prim, tau, mi, ni, me, ne, Kn)
H = mixture_maxwellian(u, mprim)
B = similar(H)
for j in axes(B, 2)
B[:, j] = H[:, j] * inK / (2.0 * mprim[end, j])
end
# BGK term
for k in axes(h, 2)
@. h[:, k] = (h[:, k] + dt / tau[k] * H[:, k]) / (1.0 + dt / tau[k])
@. b[:, k] = (b[:, k] + dt / tau[k] * B[:, k]) / (1.0 + dt / tau[k])
end
#--- record residuals ---#
@. RES += (w_old - w)^2
@. AVG += abs(w)
end
"""
$(SIGNATURES)
1D3F1V @ Rykov
"""
function step!(
fwL::T1,
fhL::T2,
fbL::T2,
frL::T2,
w::T3,
prim::T3,
h::T4,
b::T4,
r::T4,
fwR::T1,
fhR::T2,
fbR::T2,
frR::T2,
u::T5,
weights::T5,
K,
Kr,
μᵣ,
ω,
Pr,
T₀,
Z₀,
σ,
ω0,
ω1,
dx,
dt,
RES,
AVG,
collision = :rykov::Symbol,
) where {T1<:AA{<:FN,1},T2<:AA{<:FN,1},T3<:AA{<:Real,1},T4<:AA{<:FN,1},T5<:AA{<:FN,1}}
#--- store W^n and calculate shakhov term ---#
w_old = deepcopy(w)
if collision == :rykov
q = heat_flux(h, b, r, prim, u, weights)
else
q = zeros(2)
end
#--- update W^{n+1} ---#
@. w += (fwL - fwR) / dx
MHT = similar(h)
MBT = similar(b)
MRT = similar(r)
MHR = similar(h)
MBR = similar(b)
MRR = similar(r)
maxwellian!(MHT, MBT, MRT, MHR, MBR, MRR, u, prim, K, Kr)
τ_old = vhs_collision_time(prim[1:end-1], μᵣ, ω)
Zr = rykov_zr(1.0 / prim[4], T₀, Z₀)
Er0_old = 0.5 * sum(@. weights * ((1.0 / Zr) * MRR + (1.0 - 1.0 / Zr) * MRT))
w[4] += dt * (Er0_old - w_old[4]) / τ_old
prim .= conserve_prim(w, K, Kr)
#--- record residuals ---#
@. RES += (w - w_old)^2
@. AVG += abs(w)
#--- calculate M^{n+1} and tau^{n+1} ---#
maxwellian!(MHT, MBT, MRT, MHR, MBR, MRR, u, prim, K, Kr)
SHT = similar(h)
SBT = similar(b)
SRT = similar(r)
SHR = similar(h)
SBR = similar(b)
SRR = similar(r)
rykov!(
SHT,
SBT,
SRT,
SHR,
SBR,
SRR,
u,
MHT,
MBT,
MRT,
MHR,
MBR,
MRR,
q,
prim,
Pr,
K,
σ,
ω0,
ω1,
)
MH = (1.0 - 1.0 / Zr) * (MHT + SHT) + 1.0 / Zr * (MHR + SHR)
MB = (1.0 - 1.0 / Zr) * (MBT + SBT) + 1.0 / Zr * (MBR + SBR)
MR = (1.0 - 1.0 / Zr) * (MRT + SRT) + 1.0 / Zr * (MRR + SRR)
τ = vhs_collision_time(prim[1:end-1], μᵣ, ω)
#--- update distribution function ---#
for i in eachindex(u)
h[i] = (h[i] + (fhL[i] - fhR[i]) / dx + dt / τ * MH[i]) / (1.0 + dt / τ)
b[i] = (b[i] + (fbL[i] - fbR[i]) / dx + dt / τ * MB[i]) / (1.0 + dt / τ)
r[i] = (r[i] + (frL[i] - frR[i]) / dx + dt / τ * MR[i]) / (1.0 + dt / τ)
end
end
"""
$(SIGNATURES)
1D4F1V
"""
function step!(
KS::T,
faceL::Interface1D4F,
cell::ControlVolume1D4F,
faceR::Interface1D4F,
dx,
dt,
RES,
AVG,
collision = :bgk::Symbol,
isMHD = true::Bool,
) where {T<:AbstractSolverSet}
#--- update conservative flow variables: step 1 ---#
# w^n
w_old = deepcopy(cell.w)
prim_old = deepcopy(cell.prim)
# flux -> w^{n+1}
@. cell.w += (faceL.fw - faceR.fw) / dx
cell.prim .= mixture_conserve_prim(cell.w, KS.gas.γ)
# temperature protection
if cell.prim[5, 1] < 0
@warn ("ion temperature update is negative")
cell.w .= w_old
cell.prim .= prim_old
elseif cell.prim[5, 2] < 0
@warn ("electron temperature update is negative")
cell.w .= w_old
cell.prim .= prim_old
end
# source -> w^{n+1}
if isMHD == false
#=
# DifferentialEquations.jl
tau = get_tau(cell.prim, KS.gas.mi, KS.gas.ni, KS.gas.me, KS.gas.ne, KS.gas.Kn[1])
for j in axes(wRan, 2)
prob = ODEProblem( mixture_source,
vcat(cell.w[1:5,j,1], cell.w[1:5,j,2]),
dt,
(tau[1], tau[2], KS.gas.mi, KS.gas.ni, KS.gas.me, KS.gas.ne, KS.gas.Kn[1], KS.gas.γ) )
sol = solve(prob, Rosenbrock23())
cell.w[1:5,j,1] .= sol[end][1:5]
cell.w[1:5,j,2] .= sol[end][6:10]
for k=1:2
cell.prim[:,j,k] .= conserve_prim(cell.w[:,j,k], KS.gas.γ)
end
end
=#
# explicit
tau = aap_hs_collision_time(
cell.prim,
KS.gas.mi,
KS.gas.ni,
KS.gas.me,
KS.gas.ne,
KS.gas.Kn[1],
)
mprim = aap_hs_prim(
cell.prim,
tau,
KS.gas.mi,
KS.gas.ni,
KS.gas.me,
KS.gas.ne,
KS.gas.Kn[1],
)
mw = mixture_prim_conserve(mprim, KS.gas.γ)
for k = 1:2
@. cell.w[:, k] += (mw[:, k] - w_old[:, k]) * dt / tau[k]
end
cell.prim .= mixture_conserve_prim(cell.w, KS.gas.γ)
end
#--- update electromagnetic variables ---#
# flux -> E^{n+1} & B^{n+1}
cell.E[1] -= dt * (faceL.femR[1] + faceR.femL[1]) / dx
cell.E[2] -= dt * (faceL.femR[2] + faceR.femL[2]) / dx
cell.E[3] -= dt * (faceL.femR[3] + faceR.femL[3]) / dx
cell.B[1] -= dt * (faceL.femR[4] + faceR.femL[4]) / dx
cell.B[2] -= dt * (faceL.femR[5] + faceR.femL[5]) / dx
cell.B[3] -= dt * (faceL.femR[6] + faceR.femL[6]) / dx
cell.ϕ -= dt * (faceL.femR[7] + faceR.femL[7]) / dx
cell.ψ -= dt * (faceL.femR[8] + faceR.femL[8]) / dx
for i = 1:3
if 1 ∈ vcat(isnan.(cell.E), isnan.(cell.B))
@warn "NaN electromagnetic update"
end
end
# source -> ϕ
#@. cell.ϕ += dt * (cell.w[1,:,1] / KS.gas.mi - cell.w[1,:,2] / KS.gas.me) / (KS.gas.lD^2 * KS.gas.rL)
# source -> U^{n+1}, E^{n+1} and B^{n+1}
mr = KS.gas.mi / KS.gas.me
A, b = em_coefficients(cell.prim, cell.E, cell.B, mr, KS.gas.lD, KS.gas.rL, dt)
x = A \ b
#--- calculate lorenz force ---#
cell.lorenz[1, 1] =
0.5 * (
x[1] + cell.E[1] + (cell.prim[3, 1] + x[5]) * cell.B[3] -
(cell.prim[4, 1] + x[6]) * cell.B[2]
) / KS.gas.rL
cell.lorenz[2, 1] =
0.5 * (
x[2] + cell.E[2] + (cell.prim[4, 1] + x[6]) * cell.B[1] -
(cell.prim[2, 1] + x[4]) * cell.B[3]
) / KS.gas.rL
cell.lorenz[3, 1] =
0.5 * (
x[3] + cell.E[3] + (cell.prim[2, 1] + x[4]) * cell.B[2] -
(cell.prim[3, 1] + x[5]) * cell.B[1]
) / KS.gas.rL
cell.lorenz[1, 2] =
-0.5 *
(
x[1] + cell.E[1] + (cell.prim[3, 2] + x[8]) * cell.B[3] -
(cell.prim[4, 2] + x[9]) * cell.B[2]
) *
mr / KS.gas.rL
cell.lorenz[2, 2] =
-0.5 *
(
x[2] + cell.E[2] + (cell.prim[4, 2] + x[9]) * cell.B[1] -
(cell.prim[2, 2] + x[7]) * cell.B[3]
) *
mr / KS.gas.rL
cell.lorenz[3, 2] =
-0.5 *
(
x[3] + cell.E[3] + (cell.prim[2, 2] + x[7]) * cell.B[2] -
(cell.prim[3, 2] + x[8]) * cell.B[1]
) *
mr / KS.gas.rL
cell.E[1] = x[1]
cell.E[2] = x[2]
cell.E[3] = x[3]
#--- update conservative flow variables: step 2 ---#
cell.prim[2, 1] = x[4]
cell.prim[3, 1] = x[5]
cell.prim[4, 1] = x[6]
cell.prim[2, 2] = x[7]
cell.prim[3, 2] = x[8]
cell.prim[4, 2] = x[9]
cell.w .= mixture_prim_conserve(cell.prim, KS.gas.γ)
#--- update particle distribution function ---#
# flux -> f^{n+1}
@. cell.h0 += (faceL.fh0 - faceR.fh0) / dx
@. cell.h1 += (faceL.fh1 - faceR.fh1) / dx
@. cell.h2 += (faceL.fh2 - faceR.fh2) / dx
@. cell.h3 += (faceL.fh3 - faceR.fh3) / dx
# force -> f^{n+1} : step 1
for j in axes(cell.h0, 2)
_h0 = @view cell.h0[:, j]
_h1 = @view cell.h1[:, j]
_h2 = @view cell.h2[:, j]
_h3 = @view cell.h3[:, j]
shift_pdf!(_h0, cell.lorenz[1, j], KS.vSpace.du[1, j], dt)
shift_pdf!(_h1, cell.lorenz[1, j], KS.vSpace.du[1, j], dt)
shift_pdf!(_h2, cell.lorenz[1, j], KS.vSpace.du[1, j], dt)
shift_pdf!(_h3, cell.lorenz[1, j], KS.vSpace.du[1, j], dt)
end
# force -> f^{n+1} : step 2
for k in axes(cell.h1, 3)
@. cell.h3[:, k] +=
2.0 * dt * cell.lorenz[2, k] * cell.h1[:, k] +
(dt * cell.lorenz[2, k])^2 * cell.h0[:, k] +
2.0 * dt * cell.lorenz[3, k] * cell.h2[:, k] +
(dt * cell.lorenz[3, k])^2 * cell.h0[:, k]
@. cell.h2[:, k] += dt * cell.lorenz[3, k] * cell.h0[:, k]
@. cell.h1[:, k] += dt * cell.lorenz[2, k] * cell.h0[:, k]
end
# source -> f^{n+1}
tau = aap_hs_collision_time(
cell.prim,
KS.gas.mi,
KS.gas.ni,
KS.gas.me,
KS.gas.ne,
KS.gas.Kn[1],
)
# interspecies interaction
if isMHD == true
prim = deepcopy(cell.prim)
else
prim = aap_hs_prim(
cell.prim,
tau,
KS.gas.mi,
KS.gas.ni,
KS.gas.me,
KS.gas.ne,
KS.gas.Kn[1],
)
end
g = mixture_maxwellian(KS.vSpace.u, prim)
# BGK term
Mu, Mv, Mw, MuL, MuR = mixture_gauss_moments(prim, KS.gas.K)
for k in axes(cell.h0, 2)
@. cell.h0[:, k] = (cell.h0[:, k] + dt / tau[k] * g[:, k]) / (1.0 + dt / tau[k])
@. cell.h1[:, k] =
(cell.h1[:, k] + dt / tau[k] * Mv[1, k] * g[:, k]) / (1.0 + dt / tau[k])
@. cell.h2[:, k] =
(cell.h2[:, k] + dt / tau[k] * Mw[1, k] * g[:, k]) / (1.0 + dt / tau[k])
@. cell.h3[:, k] =
(cell.h3[:, k] + dt / tau[k] * (Mv[2, k] + Mw[2, k]) * g[:, k]) /
(1.0 + dt / tau[k])
end
#--- record residuals ---#
@. RES += (w_old - cell.w)^2
@. AVG += abs(cell.w)
end
"""
$(SIGNATURES)
1D3F2V
"""
function step!(
KS::T,
faceL::Interface1D3F,
cell::ControlVolume1D3F,
faceR::Interface1D3F,
dx,
dt,
RES,
AVG,
collision = :bgk::Symbol,
isMHD = true::Bool,
) where {T<:AbstractSolverSet}
#--- update conservative flow variables: step 1 ---#
# w^n
w_old = deepcopy(cell.w)
prim_old = deepcopy(cell.prim)
# flux -> w^{n+1}
@. cell.w += (faceL.fw - faceR.fw) / dx
cell.prim .= mixture_conserve_prim(cell.w, KS.gas.γ)
# temperature protection
if cell.prim[end, 1] < 0
@warn ("ion temperature update is negative")
cell.w .= w_old
cell.prim .= prim_old
elseif cell.prim[end, 2] < 0
@warn ("electron temperature update is negative")
cell.w .= w_old
cell.prim .= prim_old
end
# source -> w^{n+1}
if isMHD == false
#=
# DifferentialEquations.jl
tau = get_tau(cell.prim, KS.gas.mi, KS.gas.ni, KS.gas.me, KS.gas.ne, KS.gas.Kn[1])
for j in axes(wRan, 2)
prob = ODEProblem( mixture_source,
vcat(cell.w[1:5,j,1], cell.w[1:5,j,2]),
dt,
(tau[1], tau[2], KS.gas.mi, KS.gas.ni, KS.gas.me, KS.gas.ne, KS.gas.Kn[1], KS.gas.γ) )
sol = solve(prob, Rosenbrock23())
cell.w[1:5,j,1] .= sol[end][1:5]
cell.w[1:5,j,2] .= sol[end][6:10]
for k=1:2
cell.prim[:,j,k] .= conserve_prim(cell.w[:,j,k], KS.gas.γ)
end
end
=#
# explicit
tau = aap_hs_collision_time(
cell.prim,
KS.gas.mi,
KS.gas.ni,
KS.gas.me,
KS.gas.ne,
KS.gas.Kn[1],
)
mprim = aap_hs_prim(
cell.prim,
tau,
KS.gas.mi,
KS.gas.ni,
KS.gas.me,
KS.gas.ne,
KS.gas.Kn[1],
)
mw = mixture_prim_conserve(mprim, KS.gas.γ)
for k in axes(cell.w, 2)
@. cell.w[:, k] += (mw[:, k] - w_old[:, k]) * dt / tau[k]
end
cell.prim .= mixture_conserve_prim(cell.w, KS.gas.γ)
end
#--- update electromagnetic variables ---#
# flux -> E^{n+1} & B^{n+1}
cell.E[1] -= dt * (faceL.femR[1] + faceR.femL[1]) / dx
cell.E[2] -= dt * (faceL.femR[2] + faceR.femL[2]) / dx
cell.E[3] -= dt * (faceL.femR[3] + faceR.femL[3]) / dx
cell.B[1] -= dt * (faceL.femR[4] + faceR.femL[4]) / dx
cell.B[2] -= dt * (faceL.femR[5] + faceR.femL[5]) / dx
cell.B[3] -= dt * (faceL.femR[6] + faceR.femL[6]) / dx
cell.ϕ -= dt * (faceL.femR[7] + faceR.femL[7]) / dx
cell.ψ -= dt * (faceL.femR[8] + faceR.femL[8]) / dx
for i = 1:3
if 1 ∈ vcat(isnan.(cell.E), isnan.(cell.B))
@warn "electromagnetic update is NaN"
end
end
# source -> ϕ
#@. cell.ϕ += dt * (cell.w[1,:,1] / KS.gas.mi - cell.w[1,:,2] / KS.gas.me) / (KS.gas.lD^2 * KS.gas.rL)
# source -> U^{n+1}, E^{n+1} and B^{n+1}
mr = KS.gas.mi / KS.gas.me
A, b = em_coefficients(cell.prim, cell.E, cell.B, mr, KS.gas.lD, KS.gas.rL, dt)
x = A \ b
#--- calculate lorenz force ---#
cell.lorenz[1, 1] =
0.5 * (
x[1] + cell.E[1] + (cell.prim[3, 1] + x[5]) * cell.B[3] -
(cell.prim[4, 1] + x[6]) * cell.B[2]
) / KS.gas.rL
cell.lorenz[2, 1] =
0.5 * (
x[2] + cell.E[2] + (cell.prim[4, 1] + x[6]) * cell.B[1] -
(cell.prim[2, 1] + x[4]) * cell.B[3]
) / KS.gas.rL
cell.lorenz[3, 1] =
0.5 * (
x[3] + cell.E[3] + (cell.prim[2, 1] + x[4]) * cell.B[2] -
(cell.prim[3, 1] + x[5]) * cell.B[1]
) / KS.gas.rL
cell.lorenz[1, 2] =
-0.5 *
(
x[1] + cell.E[1] + (cell.prim[3, 2] + x[8]) * cell.B[3] -
(cell.prim[4, 2] + x[9]) * cell.B[2]
) *
mr / KS.gas.rL
cell.lorenz[2, 2] =
-0.5 *
(
x[2] + cell.E[2] + (cell.prim[4, 2] + x[9]) * cell.B[1] -
(cell.prim[2, 2] + x[7]) * cell.B[3]
) *
mr / KS.gas.rL
cell.lorenz[3, 2] =
-0.5 *
(
x[3] + cell.E[3] + (cell.prim[2, 2] + x[7]) * cell.B[2] -
(cell.prim[3, 2] + x[8]) * cell.B[1]
) *
mr / KS.gas.rL
cell.E[1] = x[1]
cell.E[2] = x[2]
cell.E[3] = x[3]
#--- update conservative flow variables: step 2 ---#
cell.prim[2, 1] = x[4]
cell.prim[3, 1] = x[5]
cell.prim[4, 1] = x[6]
cell.prim[2, 2] = x[7]
cell.prim[3, 2] = x[8]
cell.prim[4, 2] = x[9]
cell.w .= mixture_prim_conserve(cell.prim, KS.gas.γ)
#--- update particle distribution function ---#
# flux -> f^{n+1}
@. cell.h0 += (faceL.fh0 - faceR.fh0) / dx
@. cell.h1 += (faceL.fh1 - faceR.fh1) / dx
@. cell.h2 += (faceL.fh2 - faceR.fh2) / dx
# force -> f^{n+1} : step 1
for j in axes(cell.h0, 3) # component
for i in axes(cell.h0, 2) # v
_h0 = @view cell.h0[:, i, j]
_h1 = @view cell.h1[:, i, j]
_h2 = @view cell.h2[:, i, j]
shift_pdf!(_h0, cell.lorenz[1, j], KS.vSpace.du[1, i, j], dt)
shift_pdf!(_h1, cell.lorenz[1, j], KS.vSpace.du[1, i, j], dt)
shift_pdf!(_h2, cell.lorenz[1, j], KS.vSpace.du[1, i, j], dt)
end
end
for j in axes(cell.h0, 3) # component
for i in axes(cell.h0, 1) # u
_h0 = @view cell.h0[i, :, j]
_h1 = @view cell.h1[i, :, j]
_h2 = @view cell.h2[i, :, j]
shift_pdf!(_h0, cell.lorenz[2, j], KS.vSpace.dv[i, 1, j], dt)
shift_pdf!(_h1, cell.lorenz[2, j], KS.vSpace.dv[i, 1, j], dt)
shift_pdf!(_h2, cell.lorenz[2, j], KS.vSpace.dv[i, 1, j], dt)
end
end
# force -> f^{n+1} : step 2
for k in axes(cell.h1, 3)
@. cell.h2[:, :, k] +=
2.0 * dt * cell.lorenz[3, k] * cell.h1[:, :, k] +
(dt * cell.lorenz[3, k])^2 * cell.h0[:, :, k]
@. cell.h1[:, :, k] += dt * cell.lorenz[3, k] * cell.h0[:, :, k]
end
# source -> f^{n+1}
tau = aap_hs_collision_time(
cell.prim,
KS.gas.mi,
KS.gas.ni,
KS.gas.me,
KS.gas.ne,
KS.gas.Kn[1],
)
# interspecies interaction
if isMHD == true
prim = deepcopy(cell.prim)
else
prim = aap_hs_prim(
cell.prim,
tau,
KS.gas.mi,
KS.gas.ni,
KS.gas.me,
KS.gas.ne,
KS.gas.Kn[1],
)
end
H0 = similar(KS.vSpace.u)
H1 = similar(H0)
H2 = similar(H0)
for k in axes(H0, 3)
H0[:, :, k] .= maxwellian(KS.vSpace.u[:, :, k], KS.vSpace.v[:, :, k], prim[:, k])
@. H1[:, :, k] = H0[:, :, k] * prim[4, k]
@. H2[:, :, k] = H0[:, :, k] * (prim[4, k]^2 + 1.0 / (2.0 * prim[5, k]))
end
# BGK term
for k in axes(cell.h0, 3)
@. cell.h0[:, :, k] =
(cell.h0[:, :, k] + dt / tau[k] * H0[:, :, k]) / (1.0 + dt / tau[k])
@. cell.h1[:, :, k] =
(cell.h1[:, :, k] + dt / tau[k] * H1[:, :, k]) / (1.0 + dt / tau[k]) # NOTICE the h1 here is h2 in 1d4f case
@. cell.h2[:, :, k] =
(cell.h2[:, :, k] + dt / tau[k] * H2[:, :, k]) / (1.0 + dt / tau[k]) # NOTICE the h2 here is h3 in 1d4f case
end
#--- record residuals ---#
@. RES += (w_old - cell.w)^2
@. AVG += abs(cell.w)
end
"""
$(SIGNATURES)
2D0F @ triangle
"""
function step!(
w::T1,
prim::T1,
fw1::T1,
fw2::T1,
fw3::T1,
γ,
Δs,
dirc::T2,
RES,
AVG,
) where {T1<:AV{<:FN},T2<:AV{<:Real}}
#--- store W^n and calculate shakhov term ---#
w_old = deepcopy(w)
#--- update W^{n+1} ---#
@. w -= (fw1 * dirc[1] + fw2 * dirc[2] + fw3 * dirc[3]) / Δs
prim .= conserve_prim(w, γ)
#--- record residuals ---#
@. RES += (w - w_old)^2
@. AVG += abs(w)
end
"""
$(SIGNATURES)
2D0F @ quadrilateral
"""
function step!(
w::T1,
prim::T1,
fwL::T1,
fwR::T1,
fwD::T1,
fwU::T1,
γ,
Δs,
RES,
AVG,
collision = :bgk,
) where {T1<:AA{<:FN,1}}
#--- store W^n and calculate shakhov term ---#
w_old = deepcopy(w)
#--- update W^{n+1} ---#
@. w += (fwL - fwR + fwD - fwU) / Δs
prim .= conserve_prim(w, γ)
#--- record residuals ---#
@. RES += (w - w_old)^2
@. AVG += abs(w)
end
"""
$(SIGNATURES)
2D1F2V
"""
function step!(
w::T1,
prim::T1,
h::T2,
fwL::T1,
fhL::T2,
fwR::T1,
fhR::T2,
fwD::T1,
fhD::T2,
fwU::T1,
fhU::T2,
u::T3,
v::T3,
weights::T3,
γ,
μᵣ,
ω,
Pr,
Δs,
dt,
RES,
AVG,
collision = :bgk,
) where {T1<:AA{<:FN,1},T2<:AA{<:FN,2},T3<:AA{<:FN,2}}
#--- store W^n and calculate shakhov term ---#
w_old = deepcopy(w)
if collision == :shakhov
q = heat_flux(h, b, prim, u, v, weights)
MH_old = maxwellian(u, v, prim)
SH = shakhov(u, v, MH_old, q, prim, Pr)
else
SH = zero(h)
end
#--- update W^{n+1} ---#
@. w += (fwL - fwR + fwD - fwU) / Δs
prim .= conserve_prim(w, γ)
#--- record residuals ---#
@. RES += (w - w_old)^2
@. AVG += abs(w)
#--- calculate M^{n+1} and tau^{n+1} ---#
MH = maxwellian(u, v, prim)
MH .+= SH
τ = vhs_collision_time(prim, μᵣ, ω)
#--- update distribution function ---#
for j in axes(v, 2), i in axes(u, 1)
h[i, j] =
(
h[i, j] +
(fhL[i, j] - fhR[i, j] + fhD[i, j] - fhU[i, j]) / Δs +
dt / τ * MH[i, j]
) / (1.0 + dt / τ)
end
end
"""
$(SIGNATURES)
2D1F2V @ triangle
"""
function step!(
w::T1,
prim::T1,
f::T2,
fw1::T1,
ff1::T2,
fw2::T1,
ff2::T2,
fw3::T1,
ff3::T2,
u::T3,
v::T3,
weights::T3,
K,
γ,
μᵣ,
ω,
Pr,
Δs,
dirc::T4,
dt,
RES,
AVG,
collision = :bgk,
) where {T1<:AV{<:FN},T2<:AA{<:FN,2},T3<:AA{<:FN,2},T4<:AV{<:Real}}
#--- store W^n and calculate shakhov term ---#
w_old = deepcopy(w)
if collision == :shakhov
q = heat_flux(h, b, prim, u, v, weights)
M_old = maxwellian(u, v, prim)
S = shakhov(u, v, M_old, q, prim, Pr, K)
else
S = zero(f)
end
#--- update W^{n+1} ---#
@. w -= (fw1 * dirc[1] + fw2 * dirc[2] + fw3 * dirc[3]) / Δs
prim .= conserve_prim(w, γ)
#--- record residuals ---#
@. RES += (w - w_old)^2
@. AVG += abs(w)
#--- calculate M^{n+1} and tau^{n+1} ---#
M = maxwellian(u, v, prim)
M .+= S
τ = vhs_collision_time(prim, μᵣ, ω)
#--- update distribution function ---#
for j in axes(v, 2), i in axes(u, 1)
f[i, j] =
(
f[i, j] -
(ff1[i, j] * dirc[1] + ff2[i, j] * dirc[2] + ff3[i, j] * dirc[3]) / Δs +
dt / τ * M[i, j]
) / (1.0 + dt / τ)
end
end
"""
$(SIGNATURES)
2D2F2V
"""
function step!(
w::T1,
prim::T1,
h::T2,
b::T2,
fwL::T1,
fhL::T2,
fbL::T2,
fwR::T1,
fhR::T2,
fbR::T2,
fwD::T1,
fhD::T2,
fbD::T2,
fwU::T1,
fhU::T2,
fbU::T2,
u::T3,
v::T3,
weights::T3,
K,
γ,
μᵣ,
ω,
Pr,
Δs,
dt,
RES,
AVG,
collision = :bgk,
) where {T1<:AA{<:FN,1},T2<:AA{<:FN,2},T3<:AA{<:FN,2}}
#--- store W^n and calculate shakhov term ---#
w_old = deepcopy(w)
if collision == :shakhov
q = heat_flux(h, b, prim, u, v, weights)
MH_old = maxwellian(u, v, prim)
MB_old = MH_old .* K ./ (2.0 * prim[end])
SH, SB = shakhov(u, v, MH_old, MB_old, q, prim, Pr, K)
else
SH = zero(h)
SB = zero(b)
end
#--- update W^{n+1} ---#
@. w += (fwL - fwR + fwD - fwU) / Δs
prim .= conserve_prim(w, γ)
#--- record residuals ---#
@. RES += (w - w_old)^2
@. AVG += abs(w)
#--- calculate M^{n+1} and tau^{n+1} ---#
MH = maxwellian(u, v, prim)
MB = MH .* K ./ (2.0 * prim[end])
MH .+= SH
MB .+= SB
τ = vhs_collision_time(prim, μᵣ, ω)
#--- update distribution function ---#
for j in axes(v, 2), i in axes(u, 1)
h[i, j] =
(
h[i, j] +
(fhL[i, j] - fhR[i, j] + fhD[i, j] - fhU[i, j]) / Δs +
dt / τ * MH[i, j]
) / (1.0 + dt / τ)
b[i, j] =
(
b[i, j] +
(fbL[i, j] - fbR[i, j] + fbD[i, j] - fbU[i, j]) / Δs +
dt / τ * MB[i, j]
) / (1.0 + dt / τ)
end
end
"""
$(SIGNATURES)
2D2F2V @ triangle
"""
function step!(
w::T1,
prim::T1,
h::T2,
b::T2,
fw1::T1,
fh1::T2,
fb1::T2,
fw2::T1,
fh2::T2,
fb2::T2,
fw3::T1,
fh3::T2,
fb3::T2,
u::T3,
v::T3,
weights::T3,
K,
γ,
μᵣ,
ω,
Pr,
Δs,
dirc::T4,
dt,
RES,
AVG,
collision = :bgk,
) where {T1<:AV{<:FN},T2<:AA{<:FN,2},T3<:AA{<:FN,2},T4<:AV{<:Real}}
#--- store W^n and calculate shakhov term ---#
w_old = deepcopy(w)
if collision == :shakhov
q = heat_flux(h, b, prim, u, v, weights)
MH_old = maxwellian(u, v, prim)
MB_old = MH_old .* K ./ (2.0 * prim[end])
SH, SB = shakhov(u, v, MH_old, MB_old, q, prim, Pr, K)
else
SH = zero(h)
SB = zero(b)
end
#--- update W^{n+1} ---#
@. w -= (fw1 * dirc[1] + fw2 * dirc[2] + fw3 * dirc[3]) / Δs
prim .= conserve_prim(w, γ)
#--- record residuals ---#
@. RES += (w - w_old)^2
@. AVG += abs(w)
#--- calculate M^{n+1} and tau^{n+1} ---#
MH = maxwellian(u, v, prim)
MB = MH .* K ./ (2.0 * prim[end])
MH .+= SH
MB .+= SB
τ = vhs_collision_time(prim, μᵣ, ω)
#--- update distribution function ---#
for j in axes(v, 2), i in axes(u, 1)
h[i, j] =
(
h[i, j] -
(fh1[i, j] * dirc[1] + fh2[i, j] * dirc[2] + fh3[i, j] * dirc[3]) / Δs +
dt / τ * MH[i, j]
) / (1.0 + dt / τ)
b[i, j] =
(
b[i, j] -
(fb1[i, j] * dirc[1] + fb2[i, j] * dirc[2] + fb3[i, j] * dirc[3]) / Δs +
dt / τ * MB[i, j]
) / (1.0 + dt / τ)
end
end
"""
$(SIGNATURES)
"""
function step!(
w::T3,
prim::T3,
f::T4,
fwL::T1,
ffL::T2,
fwR::T1,
ffR::T2,
fwD::T1,
ffD::T2,
fwU::T1,
ffU::T2,
γ,
Kn_bz,
nm,
phi,
psi,
phipsi,
Δs,
dt,
RES,
AVG,
collision = :fsm::Symbol,
) where {T1<:AA{<:FN,1},T2<:AA{<:FN,3},T3<:AA{<:FN,1},T4<:AA{<:FN,3}}
@assert collision == :fsm
w_old = deepcopy(w)
@. w += (fwL - fwR + fwD - fwU) / Δs
prim .= conserve_prim(w, γ)
@. RES += (w - w_old)^2
@. AVG += abs(w)
Q = zero(f[:, :, :])
boltzmann_fft!(Q, f, Kn_bz, nm, phi, psi, phipsi)
for k in axes(f, 3), j in axes(f, 2), i in axes(f, 1)
f[i, j, k] +=
(ffL[i, j, k] - ffR[i, j, k] + ffD[i, j, k] - ffU[i, j, k]) / Δs +
dt * Q[i, j, k]
end
end
|
{"hexsha": "2ee637199f48f9c55982e9e77d09e10ee382506a", "size": 33527, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Solver/solver_step.jl", "max_stars_repo_name": "vavrines/KineticBase.jl", "max_stars_repo_head_hexsha": "d00cefe073346a3bab3b4d3577a95631e320dc9f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Solver/solver_step.jl", "max_issues_repo_name": "vavrines/KineticBase.jl", "max_issues_repo_head_hexsha": "d00cefe073346a3bab3b4d3577a95631e320dc9f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Solver/solver_step.jl", "max_forks_repo_name": "vavrines/KineticBase.jl", "max_forks_repo_head_hexsha": "d00cefe073346a3bab3b4d3577a95631e320dc9f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.9951989026, "max_line_length": 120, "alphanum_fraction": 0.4462671876, "num_tokens": 13295}
|
""" gdsfactory loads a configuration from 3 files, high priority overwrites low priority:
1. A config.yml found in the current working directory (highest priority)
2. ~/.gdsfactory/config.yml specific for the machine
3. the default_config in pp/config.py (lowest priority)
`CONFIG` has all the paths that we do not care
`conf` has all the useful info
"""
__version__ = "2.0.0"
from typing import Any
import os
import io
import json
import subprocess
import pathlib
from pprint import pprint
import logging
import numpy as np
from omegaconf import OmegaConf
from git import Repo
connections = {} # global variable to store connections in a dict
home = pathlib.Path.home()
cwd = pathlib.Path.cwd()
module_path = pathlib.Path(__file__).parent.absolute()
repo_path = module_path.parent
home_path = pathlib.Path.home() / ".gdsfactory"
home_path.mkdir(exist_ok=True)
cwd_config = cwd / "config.yml"
module_config = module_path / "config.yml"
home_config = home_path / "config.yml"
config_base = OmegaConf.load(
io.StringIO(
"""
tech:
name: generic
cache_url:
with_settings_label: False
add_pins: True
wg_expanded_width: 2.5
taper_length: 35.0
grid_unit: 1e-6
grid_resolution: 1e-9
bend_radius: 10.0
"""
)
)
try:
config_cwd = OmegaConf.load(cwd_config)
except Exception:
config_cwd = OmegaConf.create()
try:
config_home = OmegaConf.load(home_config)
except Exception:
config_home = OmegaConf.create()
conf = OmegaConf.merge(config_base, config_home, config_cwd)
conf.version = __version__
try:
conf["git_hash"] = Repo(repo_path).head.object.hexsha
except Exception:
conf["git_hash"] = None
CONFIG = dict(
config_path=cwd_config.absolute(),
repo_path=repo_path,
module_path=module_path,
gdsdir=module_path / "gds",
font_path=module_path / "gds" / "alphabet.gds",
masks_path=repo_path / "mask",
version=__version__,
home=home,
cwd=cwd,
)
mask_name = "notDefined"
if conf.get("mask"):
mask_name = conf["mask"]["name"]
mask_config_directory = cwd
build_directory = mask_config_directory / "build"
CONFIG["devices_directory"] = mask_config_directory / "devices"
CONFIG["mask_gds"] = mask_config_directory / "build" / "mask" / (mask_name + ".gds")
else:
build_directory = home_path / "build"
mask_config_directory = home_path / "build"
CONFIG["custom_components"] = conf.custom_components
CONFIG["gdslib"] = conf.gdslib or repo_path / "gdslib"
CONFIG["sp"] = CONFIG["gdslib"] / "sp"
CONFIG["gds"] = CONFIG["gdslib"] / "gds"
CONFIG["gdslib_test"] = home_path / "gdslib_test"
CONFIG["build_directory"] = build_directory
CONFIG["gds_directory"] = build_directory / "devices"
CONFIG["cache_doe_directory"] = build_directory / "cache_doe"
CONFIG["doe_directory"] = build_directory / "doe"
CONFIG["mask_directory"] = build_directory / "mask"
CONFIG["mask_gds"] = build_directory / "mask" / (mask_name + ".gds")
CONFIG["mask_config_directory"] = mask_config_directory
CONFIG["gdspath"] = build_directory / "gds.gds"
CONFIG["samples_path"] = module_path / "samples"
CONFIG["netlists"] = module_path / "samples" / "netlists"
CONFIG["components_path"] = module_path / "components"
if "gds_resources" in CONFIG:
CONFIG["gds_resources"] = CONFIG["masks_path"] / CONFIG["gds_resources"]
build_directory.mkdir(exist_ok=True)
CONFIG["gds_directory"].mkdir(exist_ok=True)
CONFIG["doe_directory"].mkdir(exist_ok=True)
CONFIG["mask_directory"].mkdir(exist_ok=True)
CONFIG["gdslib_test"].mkdir(exist_ok=True)
logging.basicConfig(
filename=CONFIG["build_directory"] / "log.log",
filemode="w",
format="%(name)s - %(levelname)s - %(message)s",
)
logging.warning("This will get logged to a file")
def print_config(key=None):
if key:
if CONFIG.get(key):
print(CONFIG[key])
else:
print(f"`{key}` key not found in {cwd_config}")
else:
pprint(CONFIG)
def complex_encoder(z):
if isinstance(z, pathlib.Path):
return str(z)
else:
type_name = type(z)
raise TypeError(f"Object {z} of type {type_name} is not serializable")
def write_config(config, json_out_path):
with open(json_out_path, "w") as f:
json.dump(config, f, indent=2, sort_keys=True, default=complex_encoder)
def call_if_func(f: Any, **kwargs) -> Any:
return f(**kwargs) if callable(f) else f
def get_git_hash():
""" Get the current git hash """
try:
with open(os.devnull, "w") as shutup:
return (
subprocess.check_output(["git", "rev-parse", "HEAD"], stderr=shutup)
.decode("utf-8")
.strip("\n")
)
except subprocess.CalledProcessError:
return "not_a_git_repo"
GRID_RESOLUTION = conf.tech.grid_resolution
GRID_PER_UNIT = conf.tech.grid_unit / GRID_RESOLUTION
GRID_ROUNDING_RESOLUTION = int(np.log10(GRID_PER_UNIT))
BEND_RADIUS = conf.tech.bend_radius
TAPER_LENGTH = conf.tech.taper_length
WG_EXPANDED_WIDTH = conf.tech.wg_expanded_width
materials = {
"si": "Si (Silicon) - Palik",
"sio2": "SiO2 (Glass) - Palik",
"sin": "Si3N4 (Silicon Nitride) - Phillip",
}
if __name__ == "__main__":
# print(conf)
# print_config("gdslib")
# print_config()
# print(CONFIG["git_hash"])
print(CONFIG["sp"])
# print(CONFIG)
|
{"hexsha": "0ae30e1fd45f8b967371f421b2e0466964b97466", "size": 5359, "ext": "py", "lang": "Python", "max_stars_repo_path": "pp/config.py", "max_stars_repo_name": "smartalecH/gdsfactory", "max_stars_repo_head_hexsha": "66dfbf740704f1a6155f4812a1d9483ccf5c116c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pp/config.py", "max_issues_repo_name": "smartalecH/gdsfactory", "max_issues_repo_head_hexsha": "66dfbf740704f1a6155f4812a1d9483ccf5c116c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pp/config.py", "max_forks_repo_name": "smartalecH/gdsfactory", "max_forks_repo_head_hexsha": "66dfbf740704f1a6155f4812a1d9483ccf5c116c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.3418367347, "max_line_length": 89, "alphanum_fraction": 0.6915469304, "include": true, "reason": "import numpy", "num_tokens": 1393}
|
function elliptic_ea ( a )
!*****************************************************************************80
!
!! ELLIPTIC_EA evaluates the complete elliptic integral E(A).
!
! Discussion:
!
! The value is computed using Carlson elliptic integrals:
!
! E(a) = RF ( 0, 1-sin^2(a), 1 ) - 1/3 sin^2(a) RD ( 0, 1-sin^2(a), 1 ).
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 30 May 2018
!
! Author:
!
! John Burkardt
!
! Parameters:
!
! Input, real ( kind = 8 ) A, the argument.
!
! Output, real ( kind = 8 ) ELLIPTIC_EA, the function value.
!
implicit none
real ( kind = 8 ) a
real ( kind = 8 ) elliptic_ea
real ( kind = 8 ) errtol
integer ( kind = 4 ) ierr
real ( kind = 8 ) k
real ( kind = 8 ), parameter :: r8_pi = 3.141592653589793D+00
real ( kind = 8 ) rd
real ( kind = 8 ) rf
real ( kind = 8 ) value
real ( kind = 8 ) x
real ( kind = 8 ) y
real ( kind = 8 ) z
k = sin ( a * r8_pi / 180.0D+00 )
x = 0.0D+00
y = ( 1.0D+00 - k ) * ( 1.0D+00 + k )
z = 1.0D+00
errtol = 1.0D-03
value = rf ( x, y, z, errtol, ierr ) &
- k * k * rd ( x, y, z, errtol, ierr ) / 3.0D+00
elliptic_ea = value
return
end
subroutine elliptic_ea_values ( n_data, x, fx )
!*****************************************************************************80
!
!! ELLIPTIC_EA_VALUES returns values of the complete elliptic integral E(A).
!
! Discussion:
!
! This is one form of what is sometimes called the complete elliptic
! integral of the second kind.
!
! The function is defined by the formula:
!
! E(A) = integral ( 0 <= T <= PI/2 )
! sqrt ( 1 - sin ( A )^2 * sin ( T )^2 ) dT
!
! In Mathematica, the function can be evaluated by:
!
! EllipticE[(Sin[Pi*a/180])^2]
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 19 August 2004
!
! Author:
!
! John Burkardt
!
! Reference:
!
! Milton Abramowitz, Irene Stegun,
! Handbook of Mathematical Functions,
! National Bureau of Standards, 1964,
! ISBN: 0-486-61272-4,
! LC: QA47.A34.
!
! Stephen Wolfram,
! The Mathematica Book,
! Fourth Edition,
! Cambridge University Press, 1999,
! ISBN: 0-521-64314-7,
! LC: QA76.95.W65.
!
! Parameters:
!
! Input/output, integer ( kind = 4 ) N_DATA. The user sets N_DATA to 0
! before the first call. On each call, the routine increments N_DATA by 1,
! and returns the corresponding data; when there is no more data, the
! output value of N_DATA will be 0 again.
!
! Output, real ( kind = 8 ) X, the argument of the function, measured
! in degrees.
!
! Output, real ( kind = 8 ) FX, the value of the function.
!
implicit none
integer ( kind = 4 ), parameter :: n_max = 19
real ( kind = 8 ) fx
real ( kind = 8 ), save, dimension ( n_max ) :: fx_vec = (/ &
1.570796326794897D+00, &
1.567809073977622D+00, &
1.558887196601596D+00, &
1.544150496914673D+00, &
1.523799205259774D+00, &
1.498114928422116D+00, &
1.467462209339427D+00, &
1.432290969306756D+00, &
1.393140248523812D+00, &
1.350643881047676D+00, &
1.305539094297794D+00, &
1.258679624779997D+00, &
1.211056027568459D+00, &
1.163827964493139D+00, &
1.118377737969864D+00, &
1.076405113076403D+00, &
1.040114395706010D+00, &
1.012663506234396D+00, &
1.000000000000000D+00 /)
integer ( kind = 4 ) n_data
real ( kind = 8 ) x
real ( kind = 8 ), save, dimension ( n_max ) :: x_vec = (/ &
0.0D+00, &
5.0D+00, &
10.0D+00, &
15.0D+00, &
20.0D+00, &
25.0D+00, &
30.0D+00, &
35.0D+00, &
40.0D+00, &
45.0D+00, &
50.0D+00, &
55.0D+00, &
60.0D+00, &
65.0D+00, &
70.0D+00, &
75.0D+00, &
80.0D+00, &
85.0D+00, &
90.0D+00 /)
if ( n_data < 0 ) then
n_data = 0
end if
n_data = n_data + 1
if ( n_max < n_data ) then
n_data = 0
x = 0.0D+00
fx = 0.0D+00
else
x = x_vec(n_data)
fx = fx_vec(n_data)
end if
return
end
function elliptic_ek ( k )
!*****************************************************************************80
!
!! ELLIPTIC_EK evaluates the complete elliptic integral E(K).
!
! Discussion:
!
! The value is computed using Carlson elliptic integrals:
!
! E(k) = RF ( 0, 1-k^2, 1 ) - 1/3 k^2 RD ( 0, 1-k^2, 1 ).
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 30 May 2018
!
! Author:
!
! John Burkardt
!
! Parameters:
!
! Input, real ( kind = 8 ) K, the argument.
!
! Output, real ( kind = 8 ) ELLIPTIC_EK, the function value.
!
implicit none
real ( kind = 8 ) elliptic_ek
real ( kind = 8 ) errtol
integer ( kind = 4 ) ierr
real ( kind = 8 ) k
real ( kind = 8 ) rd
real ( kind = 8 ) rf
real ( kind = 8 ) value
real ( kind = 8 ) x
real ( kind = 8 ) y
real ( kind = 8 ) z
x = 0.0D+00
y = ( 1.0D+00 - k ) * ( 1.0D+00 + k )
z = 1.0D+00
errtol = 1.0D-03
value = rf ( x, y, z, errtol, ierr ) &
- k * k * rd ( x, y, z, errtol, ierr ) / 3.0D+00
elliptic_ek = value
return
end
subroutine elliptic_ek_values ( n_data, x, fx )
!*****************************************************************************80
!
!! ELLIPTIC_EK_VALUES returns values of the complete elliptic integral E(K).
!
! Discussion:
!
! This is one form of what is sometimes called the complete elliptic
! integral of the second kind.
!
! The function is defined by the formula:
!
! E(K) = integral ( 0 <= T <= PI/2 )
! sqrt ( 1 - K^2 * sin ( T )^2 ) dT
!
! In Mathematica, the function can be evaluated by:
!
! EllipticE[m]
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 29 May 2018
!
! Author:
!
! John Burkardt
!
! Reference:
!
! Milton Abramowitz, Irene Stegun,
! Handbook of Mathematical Functions,
! National Bureau of Standards, 1964,
! ISBN: 0-486-61272-4,
! LC: QA47.A34.
!
! Stephen Wolfram,
! The Mathematica Book,
! Fourth Edition,
! Cambridge University Press, 1999,
! ISBN: 0-521-64314-7,
! LC: QA76.95.W65.
!
! Parameters:
!
! Input/output, integer ( kind = 4 ) N_DATA. The user sets N_DATA to 0
! before the first call. On each call, the routine increments N_DATA by 1,
! and returns the corresponding data; when there is no more data, the
! output value of N_DATA will be 0 again.
!
! Output, real ( kind = 8 ) X, the argument of the function.
!
! Output, real ( kind = 8 ) FX, the value of the function.
!
implicit none
integer ( kind = 4 ), parameter :: n_max = 21
real ( kind = 8 ) fx
real ( kind = 8 ), save, dimension ( n_max ) :: fx_vec = (/ &
1.570796326794897D+00, &
1.550973351780472D+00, &
1.530757636897763D+00, &
1.510121832092819D+00, &
1.489035058095853D+00, &
1.467462209339427D+00, &
1.445363064412665D+00, &
1.422691133490879D+00, &
1.399392138897432D+00, &
1.375401971871116D+00, &
1.350643881047676D+00, &
1.325024497958230D+00, &
1.298428035046913D+00, &
1.270707479650149D+00, &
1.241670567945823D+00, &
1.211056027568459D+00, &
1.178489924327839D+00, &
1.143395791883166D+00, &
1.104774732704073D+00, &
1.060473727766278D+00, &
1.000000000000000D+00 /)
integer ( kind = 4 ) n_data
real ( kind = 8 ) x
real ( kind = 8 ), save, dimension ( n_max ) :: x_vec = (/ &
0.0000000000000000D+00, &
0.2236067977499790D+00, &
0.3162277660168379D+00, &
0.3872983346207417D+00, &
0.4472135954999579D+00, &
0.5000000000000000D+00, &
0.5477225575051661D+00, &
0.5916079783099616D+00, &
0.6324555320336759D+00, &
0.6708203932499369D+00, &
0.7071067811865476D+00, &
0.7416198487095663D+00, &
0.7745966692414834D+00, &
0.8062257748298550D+00, &
0.8366600265340756D+00, &
0.8660254037844386D+00, &
0.8944271909999159D+00, &
0.9219544457292888D+00, &
0.9486832980505138D+00, &
0.9746794344808963D+00, &
1.0000000000000000D+00 /)
if ( n_data < 0 ) then
n_data = 0
end if
n_data = n_data + 1
if ( n_max < n_data ) then
n_data = 0
x = 0.0D+00
fx = 0.0D+00
else
x = x_vec(n_data)
fx = fx_vec(n_data)
end if
return
end
function elliptic_em ( m )
!*****************************************************************************80
!
!! ELLIPTIC_EM evaluates the complete elliptic integral E(M).
!
! Discussion:
!
! The value is computed using Carlson elliptic integrals:
!
! E(m) = RF ( 0, 1-m, 1 ) - 1/3 m RD ( 0, 1-m, 1 ).
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 30 May 2018
!
! Author:
!
! John Burkardt
!
! Parameters:
!
! Input, real ( kind = 8 ) M, the argument.
!
! Output, real ( kind = 8 ) ELLIPTIC_EM, the function value.
!
implicit none
real ( kind = 8 ) elliptic_em
real ( kind = 8 ) errtol
integer ( kind = 4 ) ierr
real ( kind = 8 ) m
real ( kind = 8 ) rd
real ( kind = 8 ) rf
real ( kind = 8 ) value
real ( kind = 8 ) x
real ( kind = 8 ) y
real ( kind = 8 ) z
x = 0.0D+00
y = 1.0D+00 - m
z = 1.0D+00
errtol = 1.0D-03
value = rf ( x, y, z, errtol, ierr ) &
- m * rd ( x, y, z, errtol, ierr ) / 3.0D+00
elliptic_em = value
return
end
subroutine elliptic_em_values ( n_data, x, fx )
!*****************************************************************************80
!
!! ELLIPTIC_EM_VALUES returns values of the complete elliptic integral E(M).
!
! Discussion:
!
! This is one form of what is sometimes called the complete elliptic
! integral of the second kind.
!
! The function is defined by the formula:
!
! E(M) = integral ( 0 <= T <= PI/2 )
! sqrt ( 1 - M * sin ( T )^2 ) dT
!
! In Mathematica, the function can be evaluated by:
!
! EllipticE[m]
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 14 August 2004
!
! Author:
!
! John Burkardt
!
! Reference:
!
! Milton Abramowitz, Irene Stegun,
! Handbook of Mathematical Functions,
! National Bureau of Standards, 1964,
! ISBN: 0-486-61272-4,
! LC: QA47.A34.
!
! Stephen Wolfram,
! The Mathematica Book,
! Fourth Edition,
! Cambridge University Press, 1999,
! ISBN: 0-521-64314-7,
! LC: QA76.95.W65.
!
! Parameters:
!
! Input/output, integer ( kind = 4 ) N_DATA. The user sets N_DATA to 0
! before the first call. On each call, the routine increments N_DATA by 1,
! and returns the corresponding data; when there is no more data, the
! output value of N_DATA will be 0 again.
!
! Output, real ( kind = 8 ) X, the argument of the function.
!
! Output, real ( kind = 8 ) FX, the value of the function.
!
implicit none
integer ( kind = 4 ), parameter :: n_max = 21
real ( kind = 8 ) fx
real ( kind = 8 ), save, dimension ( n_max ) :: fx_vec = (/ &
1.570796326794897D+00, &
1.550973351780472D+00, &
1.530757636897763D+00, &
1.510121832092819D+00, &
1.489035058095853D+00, &
1.467462209339427D+00, &
1.445363064412665D+00, &
1.422691133490879D+00, &
1.399392138897432D+00, &
1.375401971871116D+00, &
1.350643881047676D+00, &
1.325024497958230D+00, &
1.298428035046913D+00, &
1.270707479650149D+00, &
1.241670567945823D+00, &
1.211056027568459D+00, &
1.178489924327839D+00, &
1.143395791883166D+00, &
1.104774732704073D+00, &
1.060473727766278D+00, &
1.000000000000000D+00 /)
integer ( kind = 4 ) n_data
real ( kind = 8 ) x
real ( kind = 8 ), save, dimension ( n_max ) :: x_vec = (/ &
0.00D+00, &
0.05D+00, &
0.10D+00, &
0.15D+00, &
0.20D+00, &
0.25D+00, &
0.30D+00, &
0.35D+00, &
0.40D+00, &
0.45D+00, &
0.50D+00, &
0.55D+00, &
0.60D+00, &
0.65D+00, &
0.70D+00, &
0.75D+00, &
0.80D+00, &
0.85D+00, &
0.90D+00, &
0.95D+00, &
1.00D+00 /)
if ( n_data < 0 ) then
n_data = 0
end if
n_data = n_data + 1
if ( n_max < n_data ) then
n_data = 0
x = 0.0D+00
fx = 0.0D+00
else
x = x_vec(n_data)
fx = fx_vec(n_data)
end if
return
end
function elliptic_fa ( a )
!*****************************************************************************80
!
!! ELLIPTIC_FA evaluates the complete elliptic integral F(A).
!
! Discussion:
!
! The value is computed using Carlson elliptic integrals:
!
! F(a) = RF ( 0, 1-sin^2(a), 1 ).
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 29 May 2018
!
! Author:
!
! John Burkardt
!
! Parameters:
!
! Input, real ( kind = 8 ) A, the argument.
!
! Output, real ( kind = 8 ) ELLIPTIC_FA, the function value.
!
implicit none
real ( kind = 8 ) a
real ( kind = 8 ) elliptic_fa
real ( kind = 8 ) errtol
integer ( kind = 4 ) ierr
real ( kind = 8 ), parameter :: r8_pi = 3.141592653589793D+00
real ( kind = 8 ) rf
real ( kind = 8 ) value
real ( kind = 8 ) x
real ( kind = 8 ) y
real ( kind = 8 ) z
x = 0.0D+00
y = 1.0D+00 - ( sin ( a * r8_pi / 180.0 ) ) ** 2
z = 1.0D+00
errtol = 1.0D-03
value = rf ( x, y, z, errtol, ierr )
elliptic_fa = value
return
end
subroutine elliptic_fa_values ( n_data, x, fx )
!*****************************************************************************80
!
!! ELLIPTIC_FA_VALUES returns values of the complete elliptic integral F(A).
!
! Discussion:
!
! This is one form of what is sometimes called the complete elliptic integral
! of the first kind.
!
! The function is defined by the formula:
!
! F(A) = integral ( 0 <= T <= PI/2 )
! dT / sqrt ( 1 - sin ( A )^2 * sin ( T )^2 )
!
! In Mathematica, the function can be evaluated by:
!
! EllipticK[(Sin[a*Pi/180])^2]
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 19 August 2004
!
! Author:
!
! John Burkardt
!
! Reference:
!
! Milton Abramowitz, Irene Stegun,
! Handbook of Mathematical Functions,
! National Bureau of Standards, 1964,
! ISBN: 0-486-61272-4,
! LC: QA47.A34.
!
! Stephen Wolfram,
! The Mathematica Book,
! Fourth Edition,
! Cambridge University Press, 1999,
! ISBN: 0-521-64314-7,
! LC: QA76.95.W65.
!
! Parameters:
!
! Input/output, integer ( kind = 4 ) N_DATA. The user sets N_DATA to 0
! before the first call. On each call, the routine increments N_DATA by 1,
! and returns the corresponding data; when there is no more data, the
! output value of N_DATA will be 0 again.
!
! Output, real ( kind = 8 ) X, the argument of the function, measured
! in degrees.
!
! Output, real ( kind = 8 ) FX, the value of the function.
!
implicit none
integer ( kind = 4 ), parameter :: n_max = 18
real ( kind = 8 ) fx
real ( kind = 8 ), save, dimension ( n_max ) :: fx_vec = (/ &
0.1570796326794897D+01, &
0.1573792130924768D+01, &
0.1582842804338351D+01, &
0.1598142002112540D+01, &
0.1620025899124204D+01, &
0.1648995218478530D+01, &
0.1685750354812596D+01, &
0.1731245175657058D+01, &
0.1786769134885021D+01, &
0.1854074677301372D+01, &
0.1935581096004722D+01, &
0.2034715312185791D+01, &
0.2156515647499643D+01, &
0.2308786798167196D+01, &
0.2504550079001634D+01, &
0.2768063145368768D+01, &
0.3153385251887839D+01, &
0.3831741999784146D+01 /)
integer ( kind = 4 ) n_data
real ( kind = 8 ) x
real ( kind = 8 ), save, dimension ( n_max ) :: x_vec = (/ &
0.0D+00, &
5.0D+00, &
10.0D+00, &
15.0D+00, &
20.0D+00, &
25.0D+00, &
30.0D+00, &
35.0D+00, &
40.0D+00, &
45.0D+00, &
50.0D+00, &
55.0D+00, &
60.0D+00, &
65.0D+00, &
70.0D+00, &
75.0D+00, &
80.0D+00, &
85.0D+00 /)
if ( n_data < 0 ) then
n_data = 0
end if
n_data = n_data + 1
if ( n_max < n_data ) then
n_data = 0
x = 0.0D+00
fx = 0.0D+00
else
x = x_vec(n_data)
fx = fx_vec(n_data)
end if
return
end
function elliptic_fk ( k )
!*****************************************************************************80
!
!! ELLIPTIC_FK evaluates the complete elliptic integral F(K).
!
! Discussion:
!
! The value is computed using Carlson elliptic integrals:
!
! F(k) = RF ( 0, 1-k^2, 1 ).
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 29 May 2018
!
! Author:
!
! John Burkardt
!
! Parameters:
!
! Input, real ( kind = 8 ) K, the argument.
!
! Output, real ( kind = 8 ) ELLIPTIC_FK, the function value.
!
implicit none
real ( kind = 8 ) elliptic_fk
real ( kind = 8 ) errtol
integer ( kind = 4 ) ierr
real ( kind = 8 ) k
real ( kind = 8 ) rf
real ( kind = 8 ) value
real ( kind = 8 ) x
real ( kind = 8 ) y
real ( kind = 8 ) z
x = 0.0D+00
y = ( 1.0D+00 - k ) * ( 1.0D+00 + k )
z = 1.0D+00
errtol = 1.0D-03
value = rf ( x, y, z, errtol, ierr )
elliptic_fk = value
return
end
subroutine elliptic_fk_values ( n_data, x, fx )
!*****************************************************************************80
!
!! ELLIPTIC_FK_VALUES returns values of the complete elliptic integral F(K).
!
! Discussion:
!
! This is one form of what is sometimes called the complete elliptic
! integral of the first kind.
!
! The function is defined by the formula:
!
! F(K) = integral ( 0 <= T <= PI/2 )
! dT / sqrt ( 1 - K^2 * sin ( T )^2 )
!
! In Mathematica, the function can be evaluated by:
!
! EllipticK[k^2]
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 10 August 2004
!
! Author:
!
! John Burkardt
!
! Reference:
!
! Milton Abramowitz, Irene Stegun,
! Handbook of Mathematical Functions,
! National Bureau of Standards, 1964,
! ISBN: 0-486-61272-4,
! LC: QA47.A34.
!
! Stephen Wolfram,
! The Mathematica Book,
! Fourth Edition,
! Cambridge University Press, 1999,
! ISBN: 0-521-64314-7,
! LC: QA76.95.W65.
!
! Parameters:
!
! Input/output, integer ( kind = 4 ) N_DATA. The user sets N_DATA to 0
! before the first call. On each call, the routine increments N_DATA by 1,
! and returns the corresponding data; when there is no more data, the
! output value of N_DATA will be 0 again.
!
! Output, real ( kind = 8 ) X, the argument of the function.
!
! Output, real ( kind = 8 ) FX, the value of the function.
!
implicit none
integer ( kind = 4 ), parameter :: n_max = 20
real ( kind = 8 ) fx
real ( kind = 8 ), save, dimension ( n_max ) :: fx_vec = (/ &
1.570796326794897D+00, &
1.591003453790792D+00, &
1.612441348720219D+00, &
1.635256732264580D+00, &
1.659623598610528D+00, &
1.685750354812596D+00, &
1.713889448178791D+00, &
1.744350597225613D+00, &
1.777519371491253D+00, &
1.813883936816983D+00, &
1.854074677301372D+00, &
1.898924910271554D+00, &
1.949567749806026D+00, &
2.007598398424376D+00, &
2.075363135292469D+00, &
2.156515647499643D+00, &
2.257205326820854D+00, &
2.389016486325580D+00, &
2.578092113348173D+00, &
2.908337248444552D+00 /)
integer ( kind = 4 ) n_data
real ( kind = 8 ) x
real ( kind = 8 ), save, dimension ( n_max ) :: x_vec = (/ &
0.0000000000000000D+00, &
0.2236067977499790D+00, &
0.3162277660168379D+00, &
0.3872983346207417D+00, &
0.4472135954999579D+00, &
0.5000000000000000D+00, &
0.5477225575051661D+00, &
0.5916079783099616D+00, &
0.6324555320336759D+00, &
0.6708203932499369D+00, &
0.7071067811865476D+00, &
0.7416198487095663D+00, &
0.7745966692414834D+00, &
0.8062257748298550D+00, &
0.8366600265340756D+00, &
0.8660254037844386D+00, &
0.8944271909999159D+00, &
0.9219544457292888D+00, &
0.9486832980505138D+00, &
0.9746794344808963D+00 /)
if ( n_data < 0 ) then
n_data = 0
end if
n_data = n_data + 1
if ( n_max < n_data ) then
n_data = 0
x = 0.0D+00
fx = 0.0D+00
else
x = x_vec(n_data)
fx = fx_vec(n_data)
end if
return
end
function elliptic_fm ( m )
!*****************************************************************************80
!
!! ELLIPTIC_FM evaluates the complete elliptic integral F(M).
!
! Discussion:
!
! The value is computed using Carlson elliptic integrals:
!
! F(m) = RF ( 0, 1-m, 1 ).
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 29 May 2018
!
! Author:
!
! John Burkardt
!
! Parameters:
!
! Input, real ( kind = 8 ) M, the argument.
!
! Output, real ( kind = 8 ) ELLIPTIC_FM, the function value.
!
implicit none
real ( kind = 8 ) elliptic_fm
real ( kind = 8 ) errtol
integer ( kind = 4 ) ierr
real ( kind = 8 ) m
real ( kind = 8 ) rf
real ( kind = 8 ) value
real ( kind = 8 ) x
real ( kind = 8 ) y
real ( kind = 8 ) z
x = 0.0D+00
y = 1.0D+00 - m
z = 1.0D+00
errtol = 1.0D-03
value = rf ( x, y, z, errtol, ierr )
elliptic_fm = value
return
end
subroutine elliptic_fm_values ( n_data, x, fx )
!*****************************************************************************80
!
!! ELLIPTIC_FM_VALUES returns values of the complete elliptic integral F(M).
!
! Discussion:
!
! This is one form of what is sometimes called the complete elliptic
! integral of the first kind.
!
! The function is defined by the formula:
!
! F(M) = integral ( 0 <= T <= PI/2 )
! dT / sqrt ( 1 - M * sin ( T )^2 )
!
! In Mathematica, the function can be evaluated by:
!
! EllipticK[m]
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 10 August 2004
!
! Author:
!
! John Burkardt
!
! Reference:
!
! Milton Abramowitz, Irene Stegun,
! Handbook of Mathematical Functions,
! National Bureau of Standards, 1964,
! ISBN: 0-486-61272-4,
! LC: QA47.A34.
!
! Stephen Wolfram,
! The Mathematica Book,
! Fourth Edition,
! Cambridge University Press, 1999,
! ISBN: 0-521-64314-7,
! LC: QA76.95.W65.
!
! Parameters:
!
! Input/output, integer ( kind = 4 ) N_DATA. The user sets N_DATA to 0
! before the first call. On each call, the routine increments N_DATA by 1,
! and returns the corresponding data; when there is no more data, the
! output value of N_DATA will be 0 again.
!
! Output, real ( kind = 8 ) X, the argument of the function.
!
! Output, real ( kind = 8 ) FX, the value of the function.
!
implicit none
integer ( kind = 4 ), parameter :: n_max = 20
real ( kind = 8 ) fx
real ( kind = 8 ), save, dimension ( n_max ) :: fx_vec = (/ &
1.570796326794897D+00, &
1.591003453790792D+00, &
1.612441348720219D+00, &
1.635256732264580D+00, &
1.659623598610528D+00, &
1.685750354812596D+00, &
1.713889448178791D+00, &
1.744350597225613D+00, &
1.777519371491253D+00, &
1.813883936816983D+00, &
1.854074677301372D+00, &
1.898924910271554D+00, &
1.949567749806026D+00, &
2.007598398424376D+00, &
2.075363135292469D+00, &
2.156515647499643D+00, &
2.257205326820854D+00, &
2.389016486325580D+00, &
2.578092113348173D+00, &
2.908337248444552D+00 /)
integer ( kind = 4 ) n_data
real ( kind = 8 ) x
real ( kind = 8 ), save, dimension ( n_max ) :: x_vec = (/ &
0.00D+00, &
0.05D+00, &
0.10D+00, &
0.15D+00, &
0.20D+00, &
0.25D+00, &
0.30D+00, &
0.35D+00, &
0.40D+00, &
0.45D+00, &
0.50D+00, &
0.55D+00, &
0.60D+00, &
0.65D+00, &
0.70D+00, &
0.75D+00, &
0.80D+00, &
0.85D+00, &
0.90D+00, &
0.95D+00 /)
if ( n_data < 0 ) then
n_data = 0
end if
n_data = n_data + 1
if ( n_max < n_data ) then
n_data = 0
x = 0.0D+00
fx = 0.0D+00
else
x = x_vec(n_data)
fx = fx_vec(n_data)
end if
return
end
function elliptic_inc_ea ( phi, a )
!*****************************************************************************80
!
!! ELLIPTIC_INC_EA evaluates the incomplete elliptic integral E(PHI,A).
!
! Discussion:
!
! The value is computed using Carlson elliptic integrals:
!
! k = sin ( a * pi / 180 )
! E(phi,a) =
! sin ( phi ) RF ( cos^2 ( phi ), 1-k^2 sin^2 ( phi ), 1 )
! - 1/3 k^2 sin^3 ( phi ) RD ( cos^2 ( phi ), 1-k^2 sin^2 ( phi ), 1 ).
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 30 May 2018
!
! Author:
!
! John Burkardt
!
! Parameters:
!
! Input, real ( kind = 8 ) PHI, A, the argument.
! 0 <= PHI <= PI/2.
! 0 <= sin^2 ( A * pi / 180 ) * sin^2(PHI) <= 1.
!
! Output, real ( kind = 8 ) ELLIPTIC_INC_EA, the function value.
!
implicit none
real ( kind = 8 ) a
real ( kind = 8 ) cp
real ( kind = 8 ) elliptic_inc_ea
real ( kind = 8 ) errtol
integer ( kind = 4 ) ierr
real ( kind = 8 ) k
real ( kind = 8 ) phi
real ( kind = 8 ), parameter :: r8_pi = 3.141592653589793D+00
real ( kind = 8 ) rd
real ( kind = 8 ) rf
real ( kind = 8 ) sp
real ( kind = 8 ) value
real ( kind = 8 ) value1
real ( kind = 8 ) value2
real ( kind = 8 ) x
real ( kind = 8 ) y
real ( kind = 8 ) z
k = sin ( a * r8_pi / 180.0D+00 )
cp = cos ( phi )
sp = sin ( phi )
x = cp * cp
y = ( 1.0D+00 - k * sp ) * ( 1.0D+00 + k * sp )
z = 1.0D+00
errtol = 1.0D-03
value1 = rf ( x, y, z, errtol, ierr )
if ( ierr /= 0 ) then
write ( *, '(a)' ) ''
write ( *, '(a)' ) 'ELLIPTIC_INC_EA - Fatal error!'
write ( *, '(a,i2)' ) ' RF returned IERR = ', ierr
stop 1
end if
value2 = rd ( x, y, z, errtol, ierr )
if ( ierr /= 0 ) then
write ( *, '(a)' ) ''
write ( *, '(a)' ) 'ELLIPTIC_INC_EA - Fatal error!'
write ( *, '(a,i2)' ) ' RD returned IERR = ', ierr
stop 1
end if
value = sp * value1 - k ** 2 * sp ** 3 * value2 / 3.0D+00
elliptic_inc_ea = value
return
end
subroutine elliptic_inc_ea_values ( n_data, phi, a, ea )
!*****************************************************************************80
!
!! ELLIPTIC_INC_EA_VALUES: values of the incomplete elliptic integral E(PHI,A).
!
! Discussion:
!
! This is one form of the incomplete elliptic integral of the second kind.
!
! E(PHI,A) = integral ( 0 <= T <= PHI )
! sqrt ( 1 - sin^2 ( A ) * sin^2 ( T ) ) dT
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 24 June 2018
!
! Author:
!
! John Burkardt
!
! Reference:
!
! Milton Abramowitz, Irene Stegun,
! Handbook of Mathematical Functions,
! US Department of Commerce, 1964.
!
! Stephen Wolfram,
! The Mathematica Book,
! Fourth Edition,
! Wolfram Media / Cambridge University Press, 1999.
!
! Parameters:
!
! Input/output, integer ( kind = 4 ) N_DATA. The user sets N_DATA to 0
! before the first call. On each call, the routine increments N_DATA by 1,
! and returns the corresponding data when there is no more data, the
! output value of N_DATA will be 0 again.
!
! Output, real ( kind = 8 ) PHI, A, the arguments of the function.
!
! Output, real ( kind = 8 ) EA, the value of the function.
!
implicit none
integer ( kind = 4 ), parameter :: n_max = 20
real ( kind = 8 ) a
real ( kind = 8 ) ea
integer ( kind = 4 ) n_data
real ( kind = 8 ) phi
real ( kind = 8 ), save, dimension ( n_max ) :: a_vec = (/ &
123.0821233267548D+00, &
11.26931745051486D+00, &
-94.88806452075445D+00, &
-99.71407853545323D+00, &
57.05881039324191D+00, &
-19.71363287074183D+00, &
56.31230299738043D+00, &
-91.55605346417718D+00, &
-27.00654574696468D+00, &
-169.2293728595904D+00, &
61.96859564803047D+00, &
-158.7324398933148D+00, &
105.0883958999383D+00, &
-48.95883872360177D+00, &
-42.58568835110901D+00, &
11.65603284687828D+00, &
-8.398113719173338D+00, &
17.69362213019626D+00, &
73.8803420626852D+00, &
-69.82492339645128D+00 /)
real ( kind = 8 ), save, dimension ( n_max ) :: ea_vec = (/ &
0.3384181367348019D+00, &
1.292924624509506D+00, &
0.6074183768796306D+00, &
0.3939726730783567D+00, &
0.06880814097089803D+00, &
0.0969436473376824D+00, &
0.6025937791452033D+00, &
0.9500549494837583D+00, &
1.342783372140486D+00, &
0.1484915631401388D+00, &
1.085432887050926D+00, &
0.1932136916085597D+00, &
0.3983689593057807D+00, &
0.1780054133336934D+00, &
1.164525270273536D+00, &
1.080167047541845D+00, &
1.346684963830312D+00, &
1.402100272685504D+00, &
0.2928091845544553D+00, &
0.5889342583405707D+00 /)
real ( kind = 8 ), save, dimension ( n_max ) :: phi_vec = (/ &
0.3430906586047127D+00, &
1.302990057703935D+00, &
0.6523628380743488D+00, &
0.4046022501376546D+00, &
0.06884642871852312D+00, &
0.0969609046794745D+00, &
0.630370432896175D+00, &
1.252375418911598D+00, &
1.409796082144801D+00, &
0.1485105463502483D+00, &
1.349466184634646D+00, &
0.1933711786970301D+00, &
0.4088829927466769D+00, &
0.1785430666405224D+00, &
1.292588374416351D+00, &
1.087095515757691D+00, &
1.352794600489329D+00, &
1.432530166308616D+00, &
0.2968093345769761D+00, &
0.6235880396594726D+00 /)
if ( n_data < 0 ) then
n_data = 0
end if
n_data = n_data + 1
if ( n_max < n_data ) then
n_data = 0
a = 0.0D+00
ea = 0.0D+00
phi = 0.0D+00
else
a = a_vec(n_data)
ea = ea_vec(n_data)
phi = phi_vec(n_data)
end if
return
end
function elliptic_inc_ek ( phi, k )
!*****************************************************************************80
!
!! ELLIPTIC_INC_EK evaluates the incomplete elliptic integral E(PHI,K).
!
! Discussion:
!
! The value is computed using Carlson elliptic integrals:
!
! E(phi,k) =
! sin ( phi ) RF ( cos^2 ( phi ), 1-k^2 sin^2 ( phi ), 1 )
! - 1/3 k^2 sin^3 ( phi ) RD ( cos^2 ( phi ), 1-k^2 sin^2 ( phi ), 1 ).
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 30 May 2018
!
! Author:
!
! John Burkardt
!
! Parameters:
!
! Input, real ( kind = 8 ) PHI, K, the argument.
! 0 <= PHI <= PI/2.
! 0 <= K^2 * sin^2(PHI) <= 1.
!
! Output, real ( kind = 8 ) ELLIPTIC_INC_EK, the function value.
!
implicit none
real ( kind = 8 ) cp
real ( kind = 8 ) elliptic_inc_ek
real ( kind = 8 ) errtol
integer ( kind = 4 ) ierr
real ( kind = 8 ) k
real ( kind = 8 ) phi
real ( kind = 8 ), parameter :: r8_pi = 3.141592653589793D+00
real ( kind = 8 ) rd
real ( kind = 8 ) rf
real ( kind = 8 ) sp
real ( kind = 8 ) value
real ( kind = 8 ) value1
real ( kind = 8 ) value2
real ( kind = 8 ) x
real ( kind = 8 ) y
real ( kind = 8 ) z
cp = cos ( phi )
sp = sin ( phi )
x = cp * cp
y = ( 1.0D+00 - k * sp ) * ( 1.0D+00 + k * sp )
z = 1.0D+00
errtol = 1.0D-03
value1 = rf ( x, y, z, errtol, ierr )
if ( ierr /= 0 ) then
write ( *, '(a)' ) ''
write ( *, '(a)' ) 'ELLIPTIC_INC_EK - Fatal error!'
write ( *, '(a,i2)' ) ' RF returned IERR = ', ierr
stop 1
end if
value2 = rd ( x, y, z, errtol, ierr )
if ( ierr /= 0 ) then
write ( *, '(a)' ) ''
write ( *, '(a)' ) 'ELLIPTIC_INC_EK - Fatal error!'
write ( *, '(a,i2)' ) ' RD returned IERR = ', ierr
stop 1
end if
value = sp * value1 - k ** 2 * sp ** 3 * value2 / 3.0D+00
elliptic_inc_ek = value
return
end
subroutine elliptic_inc_ek_values ( n_data, phi, k, ek )
!*****************************************************************************80
!
!! ELLIPTIC_INC_EK_VALUES: values of the incomplete elliptic integral E(PHI,K).
!
! Discussion:
!
! This is the incomplete elliptic integral of the second kind.
!
! E(PHI,K) = integral ( 0 <= T <= PHI )
! sqrt ( 1 - K^2 * sin ( T )^2 ) dT
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 22 June 2018
!
! Author:
!
! John Burkardt
!
! Reference:
!
! Milton Abramowitz, Irene Stegun,
! Handbook of Mathematical Functions,
! US Department of Commerce, 1964.
!
! Stephen Wolfram,
! The Mathematica Book,
! Fourth Edition,
! Wolfram Media / Cambridge University Press, 1999.
!
! Parameters:
!
! Input/output, integer ( kind = 4 ) N_DATA. The user sets N_DATA to 0
! before the first call. On each call, the routine increments N_DATA by 1,
! and returns the corresponding data when there is no more data, the
! output value of N_DATA will be 0 again.
!
! Output, real ( kind = 8 ) PHI, K, the arguments.
!
! Output, real ( kind = 8 ) EK, the function value.
!
implicit none
integer ( kind = 4 ), parameter :: n_max = 20
real ( kind = 8 ) ek
real ( kind = 8 ) k
integer ( kind = 4 ) n_data
real ( kind = 8 ) phi
real ( kind = 8 ), save, dimension ( n_max ) :: ek_vec = (/ &
0.2852345328295404D+00, &
1.298690225567921D+00, &
0.5508100202571943D+00, &
0.3575401358115371D+00, &
0.06801307805507453D+00, &
0.09679584980231837D+00, &
0.6003112504412838D+00, &
0.8996717721794724D+00, &
1.380715261453875D+00, &
0.1191644625202453D+00, &
1.196994838171557D+00, &
0.1536260979667945D+00, &
0.3546768920544152D+00, &
0.1758756066650882D+00, &
1.229819109410569D+00, &
1.08381066114337D+00, &
1.35023378157378D+00, &
1.419775884709218D+00, &
0.2824895528020034D+00, &
0.5770427720982867D+00 /)
real ( kind = 8 ), save, dimension ( n_max ) :: k_vec = (/ &
2.712952582080266D+00, &
0.1279518954120547D+00, &
-1.429437513650137D+00, &
-1.981659235625333D+00, &
3.894801879555818D+00, &
-1.042486024983672D+00, &
0.8641142168759754D+00, &
-1.049058412826877D+00, &
-0.3024062128402472D+00, &
-6.574288841527263D+00, &
0.6987397421988888D+00, &
-5.12558591600033D+00, &
2.074947853793764D+00, &
-1.670886158426681D+00, &
-0.4843595000931672D+00, &
0.1393061679635559D+00, &
-0.0946527302537008D+00, &
0.1977207111754007D+00, &
1.788159919089993D+00, &
-1.077780624681256D+00 /)
real ( kind = 8 ), save, dimension ( n_max ) :: phi_vec = (/ &
0.3430906586047127D+00, &
1.302990057703935D+00, &
0.6523628380743488D+00, &
0.4046022501376546D+00, &
0.06884642871852312D+00, &
0.0969609046794745D+00, &
0.630370432896175D+00, &
1.252375418911598D+00, &
1.409796082144801D+00, &
0.1485105463502483D+00, &
1.349466184634646D+00, &
0.1933711786970301D+00, &
0.4088829927466769D+00, &
0.1785430666405224D+00, &
1.292588374416351D+00, &
1.087095515757691D+00, &
1.352794600489329D+00, &
1.432530166308616D+00, &
0.2968093345769761D+00, &
0.6235880396594726D+00 /)
if ( n_data < 0 ) then
n_data = 0
end if
n_data = n_data + 1
if ( n_max < n_data ) then
n_data = 0
ek = 0.0D+00
k = 0.0D+00
phi = 0.0D+00
else
ek = ek_vec(n_data)
k = k_vec(n_data)
phi = phi_vec(n_data)
end if
return
end
function elliptic_inc_em ( phi, m )
!*****************************************************************************80
!
!! ELLIPTIC_INC_EM evaluates the incomplete elliptic integral E(PHI,M).
!
! Discussion:
!
! The value is computed using Carlson elliptic integrals:
!
! E(phi,m) =
! sin ( phi ) RF ( cos^2 ( phi ), 1-m sin^2 ( phi ), 1 )
! - 1/3 m sin^3 ( phi ) RD ( cos^2 ( phi ), 1-m sin^2 ( phi ), 1 ).
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 30 May 2018
!
! Author:
!
! John Burkardt
!
! Parameters:
!
! Input, real ( kind = 8 ) PHI, K, the argument.
! 0 <= PHI <= PI/2.
! 0 <= M * sin^2(PHI) <= 1.
!
! Output, real ( kind = 8 ) ELLIPTIC_INC_EM, the function value.
!
implicit none
real ( kind = 8 ) cp
real ( kind = 8 ) elliptic_inc_em
real ( kind = 8 ) errtol
integer ( kind = 4 ) ierr
real ( kind = 8 ) m
real ( kind = 8 ) phi
real ( kind = 8 ), parameter :: r8_pi = 3.141592653589793D+00
real ( kind = 8 ) rd
real ( kind = 8 ) rf
real ( kind = 8 ) sp
real ( kind = 8 ) value
real ( kind = 8 ) value1
real ( kind = 8 ) value2
real ( kind = 8 ) x
real ( kind = 8 ) y
real ( kind = 8 ) z
cp = cos ( phi )
sp = sin ( phi )
x = cp * cp
y = 1.0D+00 - m * sp * sp
z = 1.0D+00
errtol = 1.0D-03
value1 = rf ( x, y, z, errtol, ierr )
if ( ierr /= 0 ) then
write ( *, '(a)' ) ''
write ( *, '(a)' ) 'ELLIPTIC_INC_EM - Fatal error!'
write ( *, '(a,i2)' ) ' RF returned IERR = ', ierr
stop 1
end if
value2 = rd ( x, y, z, errtol, ierr )
if ( ierr /= 0 ) then
write ( *, '(a)' ) ''
write ( *, '(a)' ) 'ELLIPTIC_INC_EM - Fatal error!'
write ( *, '(a,i2)' ) ' RD returned IERR = ', ierr
stop 1
end if
value = sp * value1 - m * sp ** 3 * value2 / 3.0D+00
elliptic_inc_em = value
return
end
subroutine elliptic_inc_em_values ( n_data, phi, m, em )
!*****************************************************************************80
!
!! ELLIPTIC_INC_EM_VALUES: values of the incomplete elliptic integral E(PHI,M).
!
! Discussion:
!
! This is the incomplete elliptic integral of the second kind.
!
! E(PHI,M) = integral ( 0 <= T <= PHI )
! sqrt ( 1 - M * sin ( T )^2 ) dT
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 24 June 2018
!
! Author:
!
! John Burkardt
!
! Reference:
!
! Milton Abramowitz, Irene Stegun,
! Handbook of Mathematical Functions,
! US Department of Commerce, 1964.
!
! Stephen Wolfram,
! The Mathematica Book,
! Fourth Edition,
! Wolfram Media / Cambridge University Press, 1999.
!
! Parameters:
!
! Input/output, integer ( kind = 4 ) N_DATA. The user sets N_DATA to 0
! before the first call. On each call, the routine increments N_DATA by 1,
! and returns the corresponding data when there is no more data, the
! output value of N_DATA will be 0 again.
!
! Output, real ( kind = 8 ) PHI, M, the arguments.
!
! Output, real ( kind = 8 ) EM, the function value.
!
implicit none
integer ( kind = 4 ), parameter :: n_max = 20
real ( kind = 8 ) em
real ( kind = 8 ) m
integer ( kind = 4 ) n_data
real ( kind = 8 ) phi
real ( kind = 8 ), save, dimension ( n_max ) :: em_vec = (/ &
0.2732317284159052D+00, &
1.124749725099781D+00, &
0.6446601913679151D+00, &
0.3968902354370061D+00, &
0.06063960799944668D+00, &
0.08909411577948728D+00, &
0.532402014802015D+00, &
1.251888640660265D+00, &
1.28897116191626D+00, &
0.1481718153599732D+00, &
1.038090185639913D+00, &
0.1931275771541276D+00, &
0.3304419611986801D+00, &
0.167394796063963D+00, &
1.214501175324736D+00, &
0.9516560179840655D+00, &
1.203682959526176D+00, &
1.206426326185419D+00, &
0.2522791382096692D+00, &
0.6026499038720986D+00 /)
real ( kind = 8 ), save, dimension ( n_max ) :: m_vec = (/ &
8.450689756874594D+00, &
0.6039878267930615D+00, &
0.1794126658351454D+00, &
0.7095689301026752D+00, &
133.9643389059188D+00, &
47.96621393936416D+00, &
2.172070586163255D+00, &
0.002038130569431913D+00, &
0.3600036705339421D+00, &
0.6219544540067304D+00, &
0.8834215943508453D+00, &
0.2034290670379481D+00, &
5.772526076430922D+00, &
11.14853902343298D+00, &
0.2889238477277305D+00, &
0.7166617182589116D+00, &
0.4760623731559658D+00, &
0.6094948502068943D+00, &
8.902276887883076D+00, &
0.5434439226321253D+00 /)
real ( kind = 8 ), save, dimension ( n_max ) :: phi_vec = (/ &
0.3430906586047127D+00, &
1.302990057703935D+00, &
0.6523628380743488D+00, &
0.4046022501376546D+00, &
0.06884642871852312D+00, &
0.0969609046794745D+00, &
0.630370432896175D+00, &
1.252375418911598D+00, &
1.409796082144801D+00, &
0.1485105463502483D+00, &
1.349466184634646D+00, &
0.1933711786970301D+00, &
0.4088829927466769D+00, &
0.1785430666405224D+00, &
1.292588374416351D+00, &
1.087095515757691D+00, &
1.352794600489329D+00, &
1.432530166308616D+00, &
0.2968093345769761D+00, &
0.6235880396594726D+00 /)
if ( n_data < 0 ) then
n_data = 0
end if
n_data = n_data + 1
if ( n_max < n_data ) then
n_data = 0
em = 0.0D+00
m = 0.0D+00
phi = 0.0D+00
else
em = em_vec(n_data)
m = m_vec(n_data)
phi = phi_vec(n_data)
end if
return
end
function elliptic_inc_fa ( phi, a )
!*****************************************************************************80
!
!! ELLIPTIC_INC_FA evaluates the incomplete elliptic integral F(PHI,A).
!
! Discussion:
!
! The value is computed using Carlson elliptic integrals:
!
! k = sin ( a * pi / 180 )
! F(phi,k) = sin(phi) * RF ( cos^2 ( phi ), 1-k^2 sin^2 ( phi ), 1 )
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 24 June 2018
!
! Author:
!
! John Burkardt
!
! Parameters:
!
! Input, real ( kind = 8 ) PHI, A, the argument.
! 0 <= PHI <= PI/2.
! 0 <= sin^2 ( A * pi / 180 ) * sin^2(PHI) <= 1.
!
! Output, real ( kind = 8 ) ELLIPTIC_INC_FA, the function value.
!
implicit none
real ( kind = 8 ) a
real ( kind = 8 ) cp
real ( kind = 8 ) elliptic_inc_fa
real ( kind = 8 ) errtol
integer ( kind = 4 ) ierr
real ( kind = 8 ) k
real ( kind = 8 ) phi
real ( kind = 8 ), parameter :: r8_pi = 3.141592653589793D+00
real ( kind = 8 ) rf
real ( kind = 8 ) sp
real ( kind = 8 ) value
real ( kind = 8 ) x
real ( kind = 8 ) y
real ( kind = 8 ) z
k = sin ( a * r8_pi / 180.0D+00 )
cp = cos ( phi )
sp = sin ( phi )
x = cp * cp
y = ( 1.0D+00 - k * sp ) * ( 1.0D+00 + k * sp )
z = 1.0D+00
errtol = 1.0D-03
value = rf ( x, y, z, errtol, ierr )
if ( ierr /= 0 ) then
write ( *, '(a)' ) ''
write ( *, '(a)' ) 'ELLIPTIC_INC_FA - Fatal error!'
write ( *, '(a,i2)' ) ' RF returned IERR = ', ierr
stop 1
end if
elliptic_inc_fa = sp * value
return
end
subroutine elliptic_inc_fa_values ( n_data, phi, a, fa )
!*****************************************************************************80
!
!! ELLIPTIC_INC_FA_VALUES: values of the incomplete elliptic integral F(PHI,A).
!
! Discussion:
!
! This is the incomplete elliptic integral of the first kind.
!
! F(PHI,A) = integral ( 0 <= T <= PHI )
! dT / sqrt ( 1 - sin^2 ( A ) * sin^2 ( T ) )
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 22 June 2018
!
! Author:
!
! John Burkardt
!
! Reference:
!
! Milton Abramowitz, Irene Stegun,
! Handbook of Mathematical Functions,
! US Department of Commerce, 1964.
!
! Stephen Wolfram,
! The Mathematica Book,
! Fourth Edition,
! Wolfram Media / Cambridge University Press, 1999.
!
! Parameters:
!
! Input/output, integer ( kind = 4 ) N_DATA. The user sets N_DATA to 0
! before the first call. On each call, the routine increments N_DATA by 1,
! and returns the corresponding data when there is no more data, the
! output value of N_DATA will be 0 again.
!
! Output, real ( kind = 8 ) PHI, A, the arguments.
!
! Output, real ( kind = 8 ) FA, the function value.
!
implicit none
integer ( kind = 4 ), parameter :: n_max = 20
real ( kind = 8 ) a
real ( kind = 8 ) fa
integer ( kind = 4 ) n_data
real ( kind = 8 ) phi
real ( kind = 8 ), save, dimension ( n_max ) :: a_vec = (/ &
123.0821233267548D+00, &
11.26931745051486D+00, &
-94.88806452075445D+00, &
-99.71407853545323D+00, &
57.05881039324191D+00, &
-19.71363287074183D+00, &
56.31230299738043D+00, &
-91.55605346417718D+00, &
-27.00654574696468D+00, &
-169.2293728595904D+00, &
61.96859564803047D+00, &
-158.7324398933148D+00, &
105.0883958999383D+00, &
-48.95883872360177D+00, &
-42.58568835110901D+00, &
11.65603284687828D+00, &
-8.398113719173338D+00, &
17.69362213019626D+00, &
73.8803420626852D+00, &
-69.82492339645128D+00 /)
real ( kind = 8 ), save, dimension ( n_max ) :: fa_vec = (/ &
0.3478806460316299D+00, &
1.313180577009584D+00, &
0.7037956689264326D+00, &
0.4157626844675118D+00, &
0.06888475483285136D+00, &
0.09697816754845832D+00, &
0.6605394722518515D+00, &
1.82758346036751D+00, &
1.482258783392487D+00, &
0.1485295339221232D+00, &
1.753800062701494D+00, &
0.193528896465351D+00, &
0.4199100508706138D+00, &
0.1790836490491233D+00, &
1.446048832279763D+00, &
1.094097652100984D+00, &
1.358947908427035D+00, &
1.46400078231538D+00, &
0.3009092014525799D+00, &
0.6621341112075102D+00 /)
real ( kind = 8 ), save, dimension ( n_max ) :: phi_vec = (/ &
0.3430906586047127D+00, &
1.302990057703935D+00, &
0.6523628380743488D+00, &
0.4046022501376546D+00, &
0.06884642871852312D+00, &
0.0969609046794745D+00, &
0.630370432896175D+00, &
1.252375418911598D+00, &
1.409796082144801D+00, &
0.1485105463502483D+00, &
1.349466184634646D+00, &
0.1933711786970301D+00, &
0.4088829927466769D+00, &
0.1785430666405224D+00, &
1.292588374416351D+00, &
1.087095515757691D+00, &
1.352794600489329D+00, &
1.432530166308616D+00, &
0.2968093345769761D+00, &
0.6235880396594726D+00 /)
if ( n_data < 0 ) then
n_data = 0
end if
n_data = n_data + 1
if ( n_max < n_data ) then
n_data = 0
a = 0.0D+00
fa = 0.0D+00
phi = 0.0D+00
else
a = a_vec(n_data)
fa = fa_vec(n_data)
phi = phi_vec(n_data)
end if
return
end
function elliptic_inc_fk ( phi, k )
!*****************************************************************************80
!
!! ELLIPTIC_INC_FK evaluates the incomplete elliptic integral F(PHI,K).
!
! Discussion:
!
! The value is computed using Carlson elliptic integrals:
!
! F(phi,k) = sin(phi) * RF ( cos^2 ( phi ), 1-k^2 sin^2 ( phi ), 1 )
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 24 June 2018
!
! Author:
!
! John Burkardt
!
! Parameters:
!
! Input, real ( kind = 8 ) PHI, K, the argument.
! 0 <= PHI <= PI/2.
! 0 <= K^2 * sin^2(PHI) <= 1.
!
! Output, real ( kind = 8 ) ELLIPTIC_INC_FK, the function value.
!
implicit none
real ( kind = 8 ) cp
real ( kind = 8 ) elliptic_inc_fk
real ( kind = 8 ) errtol
integer ( kind = 4 ) ierr
real ( kind = 8 ) k
real ( kind = 8 ) phi
real ( kind = 8 ), parameter :: r8_pi = 3.141592653589793D+00
real ( kind = 8 ) rf
real ( kind = 8 ) sp
real ( kind = 8 ) value
real ( kind = 8 ) x
real ( kind = 8 ) y
real ( kind = 8 ) z
cp = cos ( phi )
sp = sin ( phi )
x = cp * cp
y = ( 1.0D+00 - k * sp ) * ( 1.0D+00 + k * sp )
z = 1.0D+00
errtol = 1.0D-03
value = rf ( x, y, z, errtol, ierr )
if ( ierr /= 0 ) then
write ( *, '(a)' ) ''
write ( *, '(a)' ) 'ELLIPTIC_INC_FK - Fatal error!'
write ( *, '(a,i2)' ) ' RF returned IERR = ', ierr
stop 1
end if
elliptic_inc_fk = sp * value
return
end
subroutine elliptic_inc_fk_values ( n_data, phi, k, fk )
!*****************************************************************************80
!
!! ELLIPTIC_INC_FK_VALUES: values of the incomplete elliptic integral F(PHI,K).
!
! Discussion:
!
! This is the incomplete elliptic integral of the first kind.
!
! F(PHI,K) = integral ( 0 <= T <= PHI )
! dT / sqrt ( 1 - K^2 * sin ( T )^2 )
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 22 June 2018
!
! Author:
!
! John Burkardt
!
! Reference:
!
! Milton Abramowitz, Irene Stegun,
! Handbook of Mathematical Functions,
! US Department of Commerce, 1964.
!
! Stephen Wolfram,
! The Mathematica Book,
! Fourth Edition,
! Wolfram Media / Cambridge University Press, 1999.
!
! Parameters:
!
! Input/output, integer ( kind = 4 ) N_DATA. The user sets N_DATA to 0
! before the first call. On each call, the routine increments N_DATA by 1,
! and returns the corresponding data when there is no more data, the
! output value of N_DATA will be 0 again.
!
! Output, real ( kind = 8 ) PHI, K, the arguments.
!
! Output, real ( kind = 8 ) FK, the value of the function.
!
implicit none
integer ( kind = 4 ), parameter :: n_max = 20
real ( kind = 8 ) fk
real ( kind = 8 ) k
integer ( kind = 4 ) n_data
real ( kind = 8 ) phi
real ( kind = 8 ), save, dimension ( n_max ) :: fk_vec = (/ &
0.4340870330108736D+00, &
1.307312511398114D+00, &
0.8005154258533936D+00, &
0.4656721451084328D+00, &
0.06969849613441773D+00, &
0.09712646708750489D+00, &
0.6632598061016007D+00, &
2.2308677858579D+00, &
1.439846282888019D+00, &
0.2043389243773096D+00, &
1.537183574881771D+00, &
0.2749229901565622D+00, &
0.4828388342828284D+00, &
0.1812848567886627D+00, &
1.360729522341841D+00, &
1.09039680912027D+00, &
1.355363051581808D+00, &
1.445462819732441D+00, &
0.3125355489354676D+00, &
0.6775731623807174D+00 /)
real ( kind = 8 ), save, dimension ( n_max ) :: k_vec = (/ &
2.712952582080266D+00, &
0.1279518954120547D+00, &
-1.429437513650137D+00, &
-1.981659235625333D+00, &
3.894801879555818D+00, &
-1.042486024983672D+00, &
0.8641142168759754D+00, &
-1.049058412826877D+00, &
-0.3024062128402472D+00, &
-6.574288841527263D+00, &
0.6987397421988888D+00, &
-5.12558591600033D+00, &
2.074947853793764D+00, &
-1.670886158426681D+00, &
-0.4843595000931672D+00, &
0.1393061679635559D+00, &
-0.0946527302537008D+00, &
0.1977207111754007D+00, &
1.788159919089993D+00, &
-1.077780624681256D+00 /)
real ( kind = 8 ), save, dimension ( n_max ) :: phi_vec = (/ &
0.3430906586047127D+00, &
1.302990057703935D+00, &
0.6523628380743488D+00, &
0.4046022501376546D+00, &
0.06884642871852312D+00, &
0.0969609046794745D+00, &
0.630370432896175D+00, &
1.252375418911598D+00, &
1.409796082144801D+00, &
0.1485105463502483D+00, &
1.349466184634646D+00, &
0.1933711786970301D+00, &
0.4088829927466769D+00, &
0.1785430666405224D+00, &
1.292588374416351D+00, &
1.087095515757691D+00, &
1.352794600489329D+00, &
1.432530166308616D+00, &
0.2968093345769761D+00, &
0.6235880396594726D+00 /)
if ( n_data < 0 ) then
n_data = 0
end if
n_data = n_data + 1
if ( n_max < n_data ) then
n_data = 0
fk = 0.0D+00
k = 0.0D+00
phi = 0.0D+00
else
fk = fk_vec(n_data)
k = k_vec(n_data)
phi = phi_vec(n_data)
end if
return
end
function elliptic_inc_fm ( phi, m )
!*****************************************************************************80
!
!! ELLIPTIC_INC_FM evaluates the incomplete elliptic integral F(PHI,M).
!
! Discussion:
!
! The value is computed using Carlson elliptic integrals:
!
! F(phi,m) = sin(phi) * RF ( cos^2 ( phi ), 1-m sin^2 ( phi ), 1 )
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 24 June 2018
!
! Author:
!
! John Burkardt
!
! Parameters:
!
! Input, real ( kind = 8 ) PHI, M, the argument.
! 0 <= PHI <= PI/2.
! 0 <= M * sin^2(PHI) <= 1.
!
! Output, real ( kind = 8 ) ELLIPTIC_INC_FM, the function value.
!
implicit none
real ( kind = 8 ) cp
real ( kind = 8 ) elliptic_inc_fm
real ( kind = 8 ) errtol
integer ( kind = 4 ) ierr
real ( kind = 8 ) m
real ( kind = 8 ) phi
real ( kind = 8 ), parameter :: r8_pi = 3.141592653589793D+00
real ( kind = 8 ) rf
real ( kind = 8 ) sp
real ( kind = 8 ) value
real ( kind = 8 ) x
real ( kind = 8 ) y
real ( kind = 8 ) z
cp = cos ( phi )
sp = sin ( phi )
x = cp * cp
y = 1.0D+00 - m * sp ** 2
z = 1.0D+00
errtol = 1.0D-03
value = rf ( x, y, z, errtol, ierr )
if ( ierr /= 0 ) then
write ( *, '(a)' ) ''
write ( *, '(a)' ) 'ELLIPTIC_INC_FM - Fatal error!'
write ( *, '(a,i2)' ) ' RF returned IERR = ', ierr
stop 1
end if
elliptic_inc_fm = sp * value
return
end
subroutine elliptic_inc_fm_values ( n_data, phi, m, fm )
!*****************************************************************************80
!
!! ELLIPTIC_INC_FM_VALUES: values of the incomplete elliptic integral F(PHI,M).
!
! Discussion:
!
! This is the incomplete elliptic integral of the first kind.
!
! F(PHI,M) = integral ( 0 <= T <= PHI )
! dT / sqrt ( 1 - M * sin ( T )^2 )
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 22 June 2018
!
! Author:
!
! John Burkardt
!
! Reference:
!
! Milton Abramowitz, Irene Stegun,
! Handbook of Mathematical Functions,
! US Department of Commerce, 1964.
!
! Stephen Wolfram,
! The Mathematica Book,
! Fourth Edition,
! Wolfram Media / Cambridge University Press, 1999.
!
! Parameters:
!
! Input/output, integer ( kind = 4 ) N_DATA. The user sets N_DATA to 0
! before the first call. On each call, the routine increments N_DATA by 1,
! and returns the corresponding data when there is no more data, the
! output value of N_DATA will be 0 again.
!
! Output, real ( kind = 8 ) PHI, M, the arguments.
!
! Output, real ( kind = 8 ) FM, the value of the function.
!
implicit none
integer ( kind = 4 ), parameter :: n_max = 20
real ( kind = 8 ) fm
real ( kind = 8 ) m
integer ( kind = 4 ) n_data
real ( kind = 8 ) phi
real ( kind = 8 ), save, dimension ( n_max ) :: fm_vec = (/ &
0.4804314075855023D+00, &
1.535634981092025D+00, &
0.6602285297476601D+00, &
0.4125884303785135D+00, &
0.07964566007155376D+00, &
0.1062834070535258D+00, &
0.7733990864393913D+00, &
1.252862499892228D+00, &
1.549988686611532D+00, &
0.1488506735822822D+00, &
1.892229900799662D+00, &
0.1936153327753556D+00, &
0.5481932935424454D+00, &
0.1911795073571756D+00, &
1.379225069349756D+00, &
1.261282453331402D+00, &
1.535239838525378D+00, &
1.739782418156071D+00, &
0.3616930047198503D+00, &
0.6458627645916422D+00 /)
real ( kind = 8 ), save, dimension ( n_max ) :: m_vec = (/ &
8.450689756874594D+00, &
0.6039878267930615D+00, &
0.1794126658351454D+00, &
0.7095689301026752D+00, &
133.9643389059188D+00, &
47.96621393936416D+00, &
2.172070586163255D+00, &
0.002038130569431913D+00, &
0.3600036705339421D+00, &
0.6219544540067304D+00, &
0.8834215943508453D+00, &
0.2034290670379481D+00, &
5.772526076430922D+00, &
11.14853902343298D+00, &
0.2889238477277305D+00, &
0.7166617182589116D+00, &
0.4760623731559658D+00, &
0.6094948502068943D+00, &
8.902276887883076D+00, &
0.5434439226321253D+00 /)
real ( kind = 8 ), save, dimension ( n_max ) :: phi_vec = (/ &
0.3430906586047127D+00, &
1.302990057703935D+00, &
0.6523628380743488D+00, &
0.4046022501376546D+00, &
0.06884642871852312D+00, &
0.0969609046794745D+00, &
0.630370432896175D+00, &
1.252375418911598D+00, &
1.409796082144801D+00, &
0.1485105463502483D+00, &
1.349466184634646D+00, &
0.1933711786970301D+00, &
0.4088829927466769D+00, &
0.1785430666405224D+00, &
1.292588374416351D+00, &
1.087095515757691D+00, &
1.352794600489329D+00, &
1.432530166308616D+00, &
0.2968093345769761D+00, &
0.6235880396594726D+00 /)
if ( n_data < 0 ) then
n_data = 0
end if
n_data = n_data + 1
if ( n_max < n_data ) then
n_data = 0
fm = 0.0D+00
m = 0.0D+00
phi = 0.0D+00
else
fm = fm_vec(n_data)
m = m_vec(n_data)
phi = phi_vec(n_data)
end if
return
end
function elliptic_inc_pia ( phi, n, a )
!*****************************************************************************80
!
!! ELLIPTIC_INC_PIA evaluates the incomplete elliptic integral Pi(PHI,N,A).
!
! Discussion:
!
! The value is computed using Carlson elliptic integrals:
!
! Pi(PHI,N,A) = integral ( 0 <= T <= PHI )
! dT / (1 - N sin^2(T) ) sqrt ( 1 - sin^2(A*pi/180) * sin ( T )^2 )
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 24 June 2018
!
! Author:
!
! John Burkardt
!
! Parameters:
!
! Input, real ( kind = 8 ) PHI, N, A, the arguments.
!
! Output, real ( kind = 8 ) ELLIPTIC_INC_PIA, the function value.
!
implicit none
real ( kind = 8 ) a
real ( kind = 8 ) cp
real ( kind = 8 ) elliptic_inc_pia
real ( kind = 8 ) errtol
integer ( kind = 4 ) ierr
real ( kind = 8 ) k
real ( kind = 8 ) n
real ( kind = 8 ) p
real ( kind = 8 ) phi
real ( kind = 8 ), parameter :: r8_pi = 3.141592653589793D+00
real ( kind = 8 ) rf
real ( kind = 8 ) rj
real ( kind = 8 ) sp
real ( kind = 8 ) value
real ( kind = 8 ) value1
real ( kind = 8 ) value2
real ( kind = 8 ) x
real ( kind = 8 ) y
real ( kind = 8 ) z
k = sin ( a * r8_pi / 180.0D+00 )
cp = cos ( phi )
sp = sin ( phi )
x = cp * cp
y = ( 1.0D+00 - k * sp ) * ( 1.0D+00 + k * sp )
z = 1.0D+00
p = 1.0D+00 - n * sp ** 2
errtol = 1.0D-03
value1 = rf ( x, y, z, errtol, ierr )
if ( ierr /= 0 ) then
write ( *, '(a)' ) ''
write ( *, '(a)' ) 'ELLIPTIC_INC_PIA - Fatal error!'
write ( *, '(a,i2)' ) ' RF returned IERR = ', ierr
stop 1
end if
value2 = rj ( x, y, z, p, errtol, ierr )
if ( ierr /= 0 ) then
write ( *, '(a)' ) ''
write ( *, '(a)' ) 'ELLIPTIC_INC_PIA - Fatal error!'
write ( *, '(a,i2)' ) ' RJ returned IERR = ', ierr
stop 1
end if
value = sp * value1 + n * sp ** 3 * value2 / 3.0D+00
elliptic_inc_pia = value
return
end
subroutine elliptic_inc_pia_values ( n_data, phi, n, a, pia )
!*****************************************************************************80
!
!! ELLIPTIC_INC_PIA_VALUES: values of incomplete elliptic integral Pi(PHI,N,A).
!
! Discussion:
!
! This is the incomplete elliptic integral of the third kind.
!
! Pi(PHI,N,A) = integral ( 0 <= T <= PHI )
! dT / (1 - N sin^2(T) ) sqrt ( 1 - sin^2(A) * sin ( T )^2 )
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 22 June 2018
!
! Author:
!
! John Burkardt
!
! Reference:
!
! Milton Abramowitz, Irene Stegun,
! Handbook of Mathematical Functions,
! US Department of Commerce, 1964.
!
! Stephen Wolfram,
! The Mathematica Book,
! Fourth Edition,
! Wolfram Media / Cambridge University Press, 1999.
!
! Parameters:
!
! Input/output, integer ( kind = 4 ) N_DATA. The user sets N_DATA to 0
! before the first call. On each call, the routine increments N_DATA by 1,
! and returns the corresponding data when there is no more data, the
! output value of N_DATA will be 0 again.
!
! Output, real ( kind = 8 ) PHI, N, A, the arguments of the function.
!
! Output, real ( kind = 8 ) PIA, the value of the function.
!
implicit none
integer ( kind = 4 ), parameter :: n_max = 20
real ( kind = 8 ) a
real ( kind = 8 ) n
integer ( kind = 4 ) n_data
real ( kind = 8 ) phi
real ( kind = 8 ) pia
real ( kind = 8 ), save, dimension ( n_max ) :: a_vec = (/ &
88.87822485052908D+00, &
-86.55208740039521D+00, &
-116.6195703112117D+00, &
-9.742878017582015D+00, &
65.73480919446207D+00, &
-115.0387719677141D+00, &
124.9421177735846D+00, &
-89.78704401263703D+00, &
-98.42673771271734D+00, &
-53.74936192418378D+00, &
68.28047574440727D+00, &
20.82174673810708D+00, &
-29.1042364797769D+00, &
-37.80176710944693D+00, &
-55.81173355852393D+00, &
-37.66594589748672D+00, &
-80.09408170610219D+00, &
52.23806528467412D+00, &
74.30945212430545D+00, &
-17.22920703094039D+00 /)
real ( kind = 8 ), save, dimension ( n_max ) :: n_vec = (/ &
8.064681366127422D+00, &
-0.2840588974558835D+00, &
-5.034023488967104D+00, &
-1.244606253942751D+00, &
1.465981775919188D+00, &
95338.12857321106D+00, &
-44.43130633436311D+00, &
-0.8029374966926196D+00, &
5.218883222649502D+00, &
2.345821782626782D+00, &
0.157358332363011D+00, &
1.926593468907062D+00, &
6.113982855261652D+00, &
1.805710621498681D+00, &
-0.4072847419780592D+00, &
-0.9416404038595624D+00, &
0.7009655305226739D+00, &
-1.019830985340273D+00, &
-0.4510798219577842D+00, &
0.6028821390092596D+00 /)
real ( kind = 8 ), save, dimension ( n_max ) :: phi_vec = (/ &
0.3430906586047127D+00, &
0.8823091382756705D+00, &
0.4046022501376546D+00, &
0.9958310121985398D+00, &
0.630370432896175D+00, &
0.002887706662908567D+00, &
0.1485105463502483D+00, &
1.320800086884777D+00, &
0.4088829927466769D+00, &
0.552337007372852D+00, &
1.087095515757691D+00, &
0.7128175949111615D+00, &
0.2968093345769761D+00, &
0.2910907344062498D+00, &
0.9695030752034163D+00, &
1.122288759723523D+00, &
1.295911610809573D+00, &
1.116491437736542D+00, &
1.170719322533712D+00, &
1.199360682338851D+00 /)
real ( kind = 8 ), save, dimension ( n_max ) :: pia_vec = (/ &
0.7099335174334724D+00, &
0.9601963779142505D+00, &
0.3362852532098376D+00, &
0.7785343427543768D+00, &
0.857889755214478D+00, &
0.004630772344931844D+00, &
0.1173842687902911D+00, &
1.505788070660267D+00, &
0.7213264194624553D+00, &
0.8073261799642218D+00, &
1.402853811110838D+00, &
1.259245331474513D+00, &
0.3779079263971614D+00, &
0.3088493910496766D+00, &
0.9782829177005183D+00, &
0.9430491574504173D+00, &
3.320796277384155D+00, &
0.9730988737054799D+00, &
1.301988094953789D+00, &
1.64558360445259D+00 /)
if ( n_data < 0 ) then
n_data = 0
end if
n_data = n_data + 1
if ( n_max < n_data ) then
n_data = 0
a = 0.0D+00
n = 0.0D+00
phi = 0.0D+00
pia = 0.0D+00
else
a = a_vec(n_data)
n = n_vec(n_data)
phi = phi_vec(n_data)
pia = pia_vec(n_data)
end if
return
end
function elliptic_inc_pik ( phi, n, k )
!*****************************************************************************80
!
!! ELLIPTIC_INC_PIK evaluates the incomplete elliptic integral Pi(PHI,N,K).
!
! Discussion:
!
! The value is computed using Carlson elliptic integrals:
!
! Pi(PHI,N,K) = integral ( 0 <= T <= PHI )
! dT / (1 - N sin^2(T) ) sqrt ( 1 - k^2 * sin ( T )^2 )
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 24 June 2018
!
! Author:
!
! John Burkardt
!
! Parameters:
!
! Input, real ( kind = 8 ) PHI, N, K, the arguments.
!
! Output, real ( kind = 8 ) ELLIPTIC_INC_PIK, the function value.
!
implicit none
real ( kind = 8 ) cp
real ( kind = 8 ) elliptic_inc_pik
real ( kind = 8 ) errtol
integer ( kind = 4 ) ierr
real ( kind = 8 ) k
real ( kind = 8 ) n
real ( kind = 8 ) p
real ( kind = 8 ) phi
real ( kind = 8 ), parameter :: r8_pi = 3.141592653589793D+00
real ( kind = 8 ) rf
real ( kind = 8 ) rj
real ( kind = 8 ) sp
real ( kind = 8 ) value
real ( kind = 8 ) value1
real ( kind = 8 ) value2
real ( kind = 8 ) x
real ( kind = 8 ) y
real ( kind = 8 ) z
cp = cos ( phi )
sp = sin ( phi )
x = cp * cp
y = ( 1.0D+00 - k * sp ) * ( 1.0D+00 + k * sp )
z = 1.0D+00
p = 1.0D+00 - n * sp ** 2
errtol = 1.0D-03
value1 = rf ( x, y, z, errtol, ierr )
if ( ierr /= 0 ) then
write ( *, '(a)' ) ''
write ( *, '(a)' ) 'ELLIPTIC_INC_PIK - Fatal error!'
write ( *, '(a,i2)' ) ' RF returned IERR = ', ierr
stop 1
end if
value2 = rj ( x, y, z, p, errtol, ierr )
if ( ierr /= 0 ) then
write ( *, '(a)' ) ''
write ( *, '(a)' ) 'ELLIPTIC_INC_PIK - Fatal error!'
write ( *, '(a,i2)' ) ' RJ returned IERR = ', ierr
stop 1
end if
value = sp * value1 + n * sp ** 3 * value2 / 3.0D+00
elliptic_inc_pik = value
return
end
subroutine elliptic_inc_pik_values ( n_data, phi, n, k, pik )
!*****************************************************************************80
!
!! ELLIPTIC_INC_PIK_VALUES: values of incomplete elliptic integral Pi(PHI,N,K).
!
! Discussion:
!
! This is the incomplete elliptic integral of the third kind.
!
! Pi(PHI,N,K) = integral ( 0 <= T <= PHI )
! dT / (1 - N sin^2(T) ) sqrt ( 1 - K^2 * sin ( T )^2 )
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 23 June 2018
!
! Author:
!
! John Burkardt
!
! Reference:
!
! Milton Abramowitz, Irene Stegun,
! Handbook of Mathematical Functions,
! US Department of Commerce, 1964.
!
! Stephen Wolfram,
! The Mathematica Book,
! Fourth Edition,
! Wolfram Media / Cambridge University Press, 1999.
!
! Parameters:
!
! Input/output, integer ( kind = 4 ) N_DATA. The user sets N_DATA to 0
! before the first call. On each call, the routine increments N_DATA by 1,
! and returns the corresponding data when there is no more data, the
! output value of N_DATA will be 0 again.
!
! Output, real ( kind = 8 ) PHI, N, K, the arguments of the function.
!
! Output, real ( kind = 8 ) PIK, the value of the function.
!
implicit none
integer ( kind = 4 ), parameter :: n_max = 20
real ( kind = 8 ) k
real ( kind = 8 ) n
integer ( kind = 4 ) n_data
real ( kind = 8 ) phi
real ( kind = 8 ) pik
real ( kind = 8 ), save, dimension ( n_max ) :: k_vec = (/ &
1.959036804709882D+00, &
-1.123741823223131D+00, &
-2.317629084640271D+00, &
-0.1202582658444815D+00, &
1.008702896970963D+00, &
-103.3677494756118D+00, &
4.853800240677973D+00, &
-1.016577251056124D+00, &
-1.94341484065839D+00, &
-0.8876593284500023D+00, &
0.8160487832898813D+00, &
0.2994546721661018D+00, &
-0.7044232294525243D+00, &
-0.9266523277404759D+00, &
-0.6962608926846425D+00, &
-0.4453932031991797D+00, &
-0.9104582513322106D+00, &
0.6187501419936026D+00, &
0.8672305032589989D+00, &
-0.1996772638241632D+00 /)
real ( kind = 8 ), save, dimension ( n_max ) :: n_vec = (/ &
8.064681366127422D+00, &
-0.2840588974558835D+00, &
-5.034023488967104D+00, &
-1.244606253942751D+00, &
1.465981775919188D+00, &
95338.12857321106D+00, &
-44.43130633436311D+00, &
-0.8029374966926196D+00, &
5.218883222649502D+00, &
2.345821782626782D+00, &
0.157358332363011D+00, &
1.926593468907062D+00, &
6.113982855261652D+00, &
1.805710621498681D+00, &
-0.4072847419780592D+00, &
-0.9416404038595624D+00, &
0.7009655305226739D+00, &
-1.019830985340273D+00, &
-0.4510798219577842D+00, &
0.6028821390092596D+00 /)
real ( kind = 8 ), save, dimension ( n_max ) :: phi_vec = (/ &
0.3430906586047127D+00, &
0.8823091382756705D+00, &
0.4046022501376546D+00, &
0.9958310121985398D+00, &
0.630370432896175D+00, &
0.002887706662908567D+00, &
0.1485105463502483D+00, &
1.320800086884777D+00, &
0.4088829927466769D+00, &
0.552337007372852D+00, &
1.087095515757691D+00, &
0.7128175949111615D+00, &
0.2968093345769761D+00, &
0.2910907344062498D+00, &
0.9695030752034163D+00, &
1.122288759723523D+00, &
1.295911610809573D+00, &
1.116491437736542D+00, &
1.170719322533712D+00, &
1.199360682338851D+00 /)
real ( kind = 8 ), save, dimension ( n_max ) :: pik_vec = (/ &
0.7982975462595892D+00, &
1.024022134726036D+00, &
0.40158120852642D+00, &
0.7772649487439858D+00, &
0.8737159913132074D+00, &
0.004733334297691273D+00, &
0.1280656893638068D+00, &
1.594376037512564D+00, &
0.8521145133671923D+00, &
0.8154325229803082D+00, &
1.31594514075427D+00, &
1.25394623148424D+00, &
0.3796503567258643D+00, &
0.3111034454739552D+00, &
0.9442477901112342D+00, &
0.9153111661980959D+00, &
2.842080644328393D+00, &
0.9263253777034376D+00, &
1.212396018757624D+00, &
1.628083572710471D+00 /)
if ( n_data < 0 ) then
n_data = 0
end if
n_data = n_data + 1
if ( n_max < n_data ) then
n_data = 0
k = 0.0D+00
n = 0.0D+00
phi = 0.0D+00
pik = 0.0D+00
else
k = k_vec(n_data)
n = n_vec(n_data)
phi = phi_vec(n_data)
pik = pik_vec(n_data)
end if
return
end
function elliptic_inc_pim ( phi, n, m )
!*****************************************************************************80
!
!! ELLIPTIC_INC_PIM evaluates the incomplete elliptic integral Pi(PHI,N,M).
!
! Discussion:
!
! The value is computed using Carlson elliptic integrals:
!
! Pi(PHI,N,M) = integral ( 0 <= T <= PHI )
! dT / (1 - N sin^2(T) ) sqrt ( 1 - m * sin ( T )^2 )
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 24 June 2018
!
! Author:
!
! John Burkardt
!
! Parameters:
!
! Input, real ( kind = 8 ) PHI, N, M, the arguments.
!
! Output, real ( kind = 8 ) ELLIPTIC_INC_PIM, the function value.
!
implicit none
real ( kind = 8 ) cp
real ( kind = 8 ) elliptic_inc_pim
real ( kind = 8 ) errtol
integer ( kind = 4 ) ierr
real ( kind = 8 ) m
real ( kind = 8 ) n
real ( kind = 8 ) p
real ( kind = 8 ) phi
real ( kind = 8 ), parameter :: r8_pi = 3.141592653589793D+00
real ( kind = 8 ) rf
real ( kind = 8 ) rj
real ( kind = 8 ) sp
real ( kind = 8 ) value
real ( kind = 8 ) value1
real ( kind = 8 ) value2
real ( kind = 8 ) x
real ( kind = 8 ) y
real ( kind = 8 ) z
cp = cos ( phi )
sp = sin ( phi )
x = cp * cp
y = 1.0D+00 - m * sp ** 2
z = 1.0D+00
p = 1.0D+00 - n * sp ** 2
errtol = 1.0D-03
value1 = rf ( x, y, z, errtol, ierr )
if ( ierr /= 0 ) then
write ( *, '(a)' ) ''
write ( *, '(a)' ) 'ELLIPTIC_INC_PIM - Fatal error!'
write ( *, '(a,i2)' ) ' RF returned IERR = ', ierr
stop 1
end if
value2 = rj ( x, y, z, p, errtol, ierr )
if ( ierr /= 0 ) then
write ( *, '(a)' ) ''
write ( *, '(a)' ) 'ELLIPTIC_INC_PIM - Fatal error!'
write ( *, '(a,i2)' ) ' RJ returned IERR = ', ierr
stop 1
end if
value = sp * value1 + n * sp ** 3 * value2 / 3.0D+00
elliptic_inc_pim = value
return
end
subroutine elliptic_inc_pim_values ( n_data, phi, n, m, pim )
!*****************************************************************************80
!
!! ELLIPTIC_INC_PIM_VALUES: values of incomplete elliptic integral Pi(PHI,N,M).
!
! Discussion:
!
! This is the incomplete elliptic integral of the third kind.
!
! Pi(PHI,N,M) = integral ( 0 <= T <= PHI )
! dT / (1 - N sin^2(T) ) sqrt ( 1 - M * sin ( T )^2 )
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 24 June 2018
!
! Author:
!
! John Burkardt
!
! Reference:
!
! Milton Abramowitz, Irene Stegun,
! Handbook of Mathematical Functions,
! US Department of Commerce, 1964.
!
! Stephen Wolfram,
! The Mathematica Book,
! Fourth Edition,
! Wolfram Media / Cambridge University Press, 1999.
!
! Parameters:
!
! Input/output, integer ( kind = 4 ) N_DATA. The user sets N_DATA to 0
! before the first call. On each call, the routine increments N_DATA by 1,
! and returns the corresponding data when there is no more data, the
! output value of N_DATA will be 0 again.
!
! Output, real ( kind = 8 ) PHI, N, M, the arguments of the function.
!
! Output, real ( kind = 8 ) PIM, the value of the function.
!
implicit none
integer ( kind = 4 ), parameter :: n_max = 20
real ( kind = 8 ) m
real ( kind = 8 ) n
integer ( kind = 4 ) n_data
real ( kind = 8 ) phi
real ( kind = 8 ) pim
real ( kind = 8 ), save, dimension ( n_max ) :: m_vec = (/ &
7.330122710928245D+00, &
0.1108806690614566D+00, &
0.2828355944410993D+00, &
0.6382999794812498D+00, &
2.294718938593894D+00, &
42062.55329826538D+00, &
39.2394337789563D+00, &
0.008002151065098688D+00, &
0.7190579590867517D+00, &
0.9703767630929055D+00, &
1.098881295982823D+00, &
1.398066725917478D+00, &
4.641021931654496D+00, &
4.455969064311461D+00, &
0.3131448239736511D+00, &
0.3686443684703166D+00, &
0.06678210908100803D+00, &
0.9635538974026796D+00, &
1.060208762696207D+00, &
0.4687160847955397D+00 /)
real ( kind = 8 ), save, dimension ( n_max ) :: n_vec = (/ &
8.064681366127422D+00, &
-0.2840588974558835D+00, &
-5.034023488967104D+00, &
-1.244606253942751D+00, &
1.465981775919188D+00, &
95338.12857321106D+00, &
-44.43130633436311D+00, &
-0.8029374966926196D+00, &
5.218883222649502D+00, &
2.345821782626782D+00, &
0.157358332363011D+00, &
1.926593468907062D+00, &
6.113982855261652D+00, &
1.805710621498681D+00, &
-0.4072847419780592D+00, &
-0.9416404038595624D+00, &
0.7009655305226739D+00, &
-1.019830985340273D+00, &
-0.4510798219577842D+00, &
0.6028821390092596D+00 /)
real ( kind = 8 ), save, dimension ( n_max ) :: phi_vec = (/ &
0.3430906586047127D+00, &
0.8823091382756705D+00, &
0.4046022501376546D+00, &
0.9958310121985398D+00, &
0.630370432896175D+00, &
0.002887706662908567D+00, &
0.1485105463502483D+00, &
1.320800086884777D+00, &
0.4088829927466769D+00, &
0.552337007372852D+00, &
1.087095515757691D+00, &
0.7128175949111615D+00, &
0.2968093345769761D+00, &
0.2910907344062498D+00, &
0.9695030752034163D+00, &
1.122288759723523D+00, &
1.295911610809573D+00, &
1.116491437736542D+00, &
1.170719322533712D+00, &
1.199360682338851D+00 /)
real ( kind = 8 ), save, dimension ( n_max ) :: pim_vec = (/ &
1.0469349800785D+00, &
0.842114448140669D+00, &
0.3321642201520043D+00, &
0.8483033529960849D+00, &
1.055753817656772D+00, &
0.005108896144265593D+00, &
0.1426848042785896D+00, &
1.031350958206424D+00, &
0.7131013701418496D+00, &
0.8268044665355507D+00, &
1.57632867896015D+00, &
1.542817120857211D+00, &
0.4144629799126912D+00, &
0.3313231611366746D+00, &
0.9195822851915201D+00, &
0.9422320754002217D+00, &
2.036599002815859D+00, &
1.076799231499882D+00, &
1.416084462957852D+00, &
1.824124922310891D+00 /)
if ( n_data < 0 ) then
n_data = 0
end if
n_data = n_data + 1
if ( n_max < n_data ) then
n_data = 0
m = 0.0D+00
n = 0.0D+00
phi = 0.0D+00
pim = 0.0D+00
else
m = m_vec(n_data)
n = n_vec(n_data)
phi = phi_vec(n_data)
pim = pim_vec(n_data)
end if
return
end
function elliptic_pia ( n, a )
!*****************************************************************************80
!
!! ELLIPTIC_PIA evaluates the complete elliptic integral Pi(N,A).
!
! Discussion:
!
! This is one form of what is sometimes called the complete elliptic
! integral of the third kind.
!
! The function is defined by the formula:
!
! Pi(N,A) = integral ( 0 <= T <= PI/2 )
! dT / (1 - N sin^2(T) ) sqrt ( 1 - sin^2(A) * sin ( T )^2 )
!
! In MATLAB, the function can be evaluated by:
!
! ellipticPi(n,(sin(a*pi/180)^2)
!
! The value is computed using Carlson elliptic integrals:
!
! k = sin ( a * pi / 180 )
! Pi(n,k) = RF ( 0, 1 - k^2, 1 ) + 1/3 n RJ ( 0, 1 - k^2, 1, 1 - n )
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 30 May 2018
!
! Author:
!
! John Burkardt
!
! Parameters:
!
! Input, real ( kind = 8 ) N, A, the arguments.
!
! Output, real ( kind = 8 ) ELLIPTIC_PIA, the function value.
!
implicit none
real ( kind = 8 ) a
real ( kind = 8 ) elliptic_pia
real ( kind = 8 ) errtol
integer ( kind = 4 ) ierr
real ( kind = 8 ) k
real ( kind = 8 ) n
real ( kind = 8 ) p
real ( kind = 8 ), parameter :: r8_pi = 3.141592653589793D+00
real ( kind = 8 ) rf
real ( kind = 8 ) rj
real ( kind = 8 ) value
real ( kind = 8 ) x
real ( kind = 8 ) y
real ( kind = 8 ) z
k = sin ( a * r8_pi / 180.0D+00 )
x = 0.0D+00
y = ( 1.0D+00 - k ) * ( 1.0D+00 + k )
z = 1.0D+00
p = 1.0D+00 - n
errtol = 1.0D-03
value = rf ( x, y, z, errtol, ierr ) &
+ n * rj ( x, y, z, p, errtol, ierr ) / 3.0D+00
elliptic_pia = value
return
end
subroutine elliptic_pia_values ( n_data, n, a, pia )
!*****************************************************************************80
!
!! ELLIPTIC_PIA_VALUES returns values of the complete elliptic integral Pi(N,A).
!
! Discussion:
!
! This is one form of what is sometimes called the complete elliptic
! integral of the third kind.
!
! The function is defined by the formula:
!
! Pi(N,A) = integral ( 0 <= T <= PI/2 )
! dT / (1 - N sin^2(T) ) sqrt ( 1 - sin^2(A) * sin ( T )^2 )
!
! In MATLAB, the function can be evaluated by:
!
! ellipticPi(n,(sin(A*pi/180))^2)
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 30 May 2018
!
! Author:
!
! John Burkardt
!
! Reference:
!
! Milton Abramowitz, Irene Stegun,
! Handbook of Mathematical Functions,
! National Bureau of Standards, 1964,
! ISBN: 0-486-61272-4,
! LC: QA47.A34.
!
! Stephen Wolfram,
! The Mathematica Book,
! Fourth Edition,
! Cambridge University Press, 1999,
! ISBN: 0-521-64314-7,
! LC: QA76.95.W65.
!
! Parameters:
!
! Input/output, integer ( kind = 4 ) N_DATA. The user sets N_DATA to 0
! before the first call. On each call, the routine increments N_DATA by 1,
! and returns the corresponding data; when there is no more data, the
! output value of N_DATA will be 0 again.
!
! Output, real ( kind = 8 ) N, A, the arguments of the function.
!
! Output, real ( kind = 8 ) PIA, the value of the function.
!
implicit none
integer ( kind = 4 ), parameter :: n_max = 20
real ( kind = 8 ) a
real ( kind = 8 ), save, dimension ( n_max ) :: a_vec = (/ &
30.00000000000000D+00, &
45.00000000000000D+00, &
60.00000000000000D+00, &
77.07903361841643D+00, &
30.00000000000000D+00, &
45.00000000000000D+00, &
60.00000000000000D+00, &
77.07903361841643D+00, &
30.00000000000000D+00, &
45.00000000000000D+00, &
60.00000000000000D+00, &
77.07903361841643D+00, &
30.00000000000000D+00, &
45.00000000000000D+00, &
60.00000000000000D+00, &
77.07903361841643D+00, &
30.00000000000000D+00, &
45.00000000000000D+00, &
60.00000000000000D+00, &
77.07903361841643D+00 /)
real ( kind = 8 ) n
integer ( kind = 4 ) n_data
real ( kind = 8 ), save, dimension ( n_max ) :: n_vec = (/ &
-10.0D+00, &
-10.0D+00, &
-10.0D+00, &
-10.0D+00, &
-3.0D+00, &
-3.0D+00, &
-3.0D+00, &
-3.0D+00, &
-1.0D+00, &
-1.0D+00, &
-1.0D+00, &
-1.0D+00, &
0.0D+00, &
0.0D+00, &
0.0D+00, &
0.0D+00, &
0.5D+00, &
0.5D+00, &
0.5D+00, &
0.5D+00 /)
real ( kind = 8 ) pia
real ( kind = 8 ), save, dimension ( n_max ) :: pia_vec = (/ &
0.4892245275965397D+00, &
0.5106765677902629D+00, &
0.5460409271920561D+00, &
0.6237325893535237D+00, &
0.823045542660675D+00, &
0.8760028274011437D+00, &
0.9660073560143946D+00, &
1.171952391481798D+00, &
1.177446843000566D+00, &
1.273127366749682D+00, &
1.440034318657551D+00, &
1.836472172302591D+00, &
1.685750354812596D+00, &
1.854074677301372D+00, &
2.156515647499643D+00, &
2.908337248444552D+00, &
2.413671504201195D+00, &
2.701287762095351D+00, &
3.234773471249465D+00, &
4.633308147279891D+00 /)
if ( n_data < 0 ) then
n_data = 0
end if
n_data = n_data + 1
if ( n_max < n_data ) then
n_data = 0
a = 0.0D+00
n = 0.0D+00
pia = 0.0D+00
else
a = a_vec(n_data)
n = n_vec(n_data)
pia = pia_vec(n_data)
end if
return
end
function elliptic_pik ( n, k )
!*****************************************************************************80
!
!! ELLIPTIC_PIK evaluates the complete elliptic integral Pi(N,K).
!
! Discussion:
!
! This is one form of what is sometimes called the complete elliptic
! integral of the third kind.
!
! The function is defined by the formula:
!
! Pi(N,K) = integral ( 0 <= T <= PI/2 )
! dT / (1 - N sin^2(T) ) sqrt ( 1 - K^2 * sin ( T )^2 )
!
! In MATLAB, the function can be evaluated by:
!
! ellipticPi(n,k^2)
!
! The value is computed using Carlson elliptic integrals:
!
! Pi(n,k) = RF ( 0, 1 - k^2, 1 ) + 1/3 n RJ ( 0, 1 - k^2, 1, 1 - n )
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 30 May 2018
!
! Author:
!
! John Burkardt
!
! Parameters:
!
! Input, real ( kind = 8 ) N, K, the arguments.
!
! Output, real ( kind = 8 ) ELLIPTIC_PIK, the function value.
!
implicit none
real ( kind = 8 ) elliptic_pik
real ( kind = 8 ) errtol
integer ( kind = 4 ) ierr
real ( kind = 8 ) k
real ( kind = 8 ) n
real ( kind = 8 ) p
real ( kind = 8 ) rf
real ( kind = 8 ) rj
real ( kind = 8 ) value
real ( kind = 8 ) x
real ( kind = 8 ) y
real ( kind = 8 ) z
x = 0.0D+00
y = ( 1.0D+00 - k ) * ( 1.0D+00 + k )
z = 1.0D+00
p = 1.0D+00 - n
errtol = 1.0D-03
value = rf ( x, y, z, errtol, ierr ) &
+ n * rj ( x, y, z, p, errtol, ierr ) / 3.0D+00
elliptic_pik = value
return
end
subroutine elliptic_pik_values ( n_data, n, k, pik )
!*****************************************************************************80
!
!! ELLIPTIC_PIK_VALUES returns values of the complete elliptic integral Pi(N,K).
!
! Discussion:
!
! This is one form of what is sometimes called the complete elliptic
! integral of the third kind.
!
! The function is defined by the formula:
!
! Pi(N,K) = integral ( 0 <= T <= PI/2 )
! dT / (1 - N sin^2(T) ) sqrt ( 1 - K^2 * sin ( T )^2 )
!
! In MATLAB, the function can be evaluated by:
!
! ellipticPi(n,k^2)
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 30 May 2018
!
! Author:
!
! John Burkardt
!
! Reference:
!
! Milton Abramowitz, Irene Stegun,
! Handbook of Mathematical Functions,
! National Bureau of Standards, 1964,
! ISBN: 0-486-61272-4,
! LC: QA47.A34.
!
! Stephen Wolfram,
! The Mathematica Book,
! Fourth Edition,
! Cambridge University Press, 1999,
! ISBN: 0-521-64314-7,
! LC: QA76.95.W65.
!
! Parameters:
!
! Input/output, integer ( kind = 4 ) N_DATA. The user sets N_DATA to 0
! before the first call. On each call, the routine increments N_DATA by 1,
! and returns the corresponding data; when there is no more data, the
! output value of N_DATA will be 0 again.
!
! Output, real ( kind = 8 ) N, K, the arguments of the function.
!
! Output, real ( kind = 8 ) PIK, the value of the function.
!
implicit none
integer ( kind = 4 ), parameter :: n_max = 20
real ( kind = 8 ) k
real ( kind = 8 ), save, dimension ( n_max ) :: k_vec = (/ &
0.5000000000000000D+00, &
0.7071067811865476D+00, &
0.8660254037844386D+00, &
0.9746794344808963D+00, &
0.5000000000000000D+00, &
0.7071067811865476D+00, &
0.8660254037844386D+00, &
0.9746794344808963D+00, &
0.5000000000000000D+00, &
0.7071067811865476D+00, &
0.8660254037844386D+00, &
0.9746794344808963D+00, &
0.5000000000000000D+00, &
0.7071067811865476D+00, &
0.8660254037844386D+00, &
0.9746794344808963D+00, &
0.5000000000000000D+00, &
0.7071067811865476D+00, &
0.8660254037844386D+00, &
0.9746794344808963D+00 /)
real ( kind = 8 ) n
integer ( kind = 4 ) n_data
real ( kind = 8 ), save, dimension ( n_max ) :: n_vec = (/ &
-10.0D+00, &
-10.0D+00, &
-10.0D+00, &
-10.0D+00, &
-3.0D+00, &
-3.0D+00, &
-3.0D+00, &
-3.0D+00, &
-1.0D+00, &
-1.0D+00, &
-1.0D+00, &
-1.0D+00, &
0.0D+00, &
0.0D+00, &
0.0D+00, &
0.0D+00, &
0.5D+00, &
0.5D+00, &
0.5D+00, &
0.5D+00 /)
real ( kind = 8 ) pik
real ( kind = 8 ), save, dimension ( n_max ) :: pik_vec = (/ &
0.4892245275965397D+00, &
0.5106765677902629D+00, &
0.5460409271920561D+00, &
0.6237325893535237D+00, &
0.823045542660675D+00, &
0.8760028274011437D+00, &
0.9660073560143946D+00, &
1.171952391481798D+00, &
1.177446843000566D+00, &
1.273127366749682D+00, &
1.440034318657551D+00, &
1.836472172302591D+00, &
1.685750354812596D+00, &
1.854074677301372D+00, &
2.156515647499643D+00, &
2.908337248444552D+00, &
2.413671504201195D+00, &
2.701287762095351D+00, &
3.234773471249465D+00, &
4.633308147279891D+00 /)
if ( n_data < 0 ) then
n_data = 0
end if
n_data = n_data + 1
if ( n_max < n_data ) then
n_data = 0
k = 0.0D+00
n = 0.0D+00
pik = 0.0D+00
else
k = k_vec(n_data)
n = n_vec(n_data)
pik = pik_vec(n_data)
end if
return
end
function elliptic_pim ( n, m )
!*****************************************************************************80
!
!! ELLIPTIC_PIM evaluates the complete elliptic integral Pi(N,M).
!
! Discussion:
!
! This is one form of what is sometimes called the complete elliptic
! integral of the third kind.
!
! The function is defined by the formula:
!
! Pi(N,M) = integral ( 0 <= T <= PI/2 )
! dT / (1 - N sin^2(T) ) sqrt ( 1 - M * sin ( T )^2 )
!
! In MATLAB, the function can be evaluated by:
!
! ellipticPi(n,m)
!
! The value is computed using Carlson elliptic integrals:
!
! Pi(n,m) = RF ( 0, 1 - m, 1 ) + 1/3 n RJ ( 0, 1 - m, 1, 1 - n )
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 30 May 2018
!
! Author:
!
! John Burkardt
!
! Parameters:
!
! Input, real ( kind = 8 ) N, M, the arguments.
!
! Output, real ( kind = 8 ) ELLIPTIC_PIM, the function value.
!
implicit none
real ( kind = 8 ) elliptic_pim
real ( kind = 8 ) errtol
integer ( kind = 4 ) ierr
real ( kind = 8 ) m
real ( kind = 8 ) n
real ( kind = 8 ) p
real ( kind = 8 ) rf
real ( kind = 8 ) rj
real ( kind = 8 ) value
real ( kind = 8 ) x
real ( kind = 8 ) y
real ( kind = 8 ) z
x = 0.0D+00
y = 1.0D+00 - m
z = 1.0D+00
p = 1.0D+00 - n
errtol = 1.0D-03
value = rf ( x, y, z, errtol, ierr ) &
+ n * rj ( x, y, z, p, errtol, ierr ) / 3.0D+00
elliptic_pim = value
return
end
subroutine elliptic_pim_values ( n_data, n, m, pim )
!*****************************************************************************80
!
!! ELLIPTIC_PIM_VALUES returns values of the complete elliptic integral Pi(N,M).
!
! Discussion:
!
! This is one form of what is sometimes called the complete elliptic
! integral of the third kind.
!
! The function is defined by the formula:
!
! Pi(N,M) = integral ( 0 <= T <= PI/2 )
! dT / (1 - N sin^2(T) ) sqrt ( 1 - M * sin ( T )^2 )
!
! In MATLAB, the function can be evaluated by:
!
! ellipticPi(n,m)
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 30 May 2018
!
! Author:
!
! John Burkardt
!
! Reference:
!
! Milton Abramowitz, Irene Stegun,
! Handbook of Mathematical Functions,
! National Bureau of Standards, 1964,
! ISBN: 0-486-61272-4,
! LC: QA47.A34.
!
! Stephen Wolfram,
! The Mathematica Book,
! Fourth Edition,
! Cambridge University Press, 1999,
! ISBN: 0-521-64314-7,
! LC: QA76.95.W65.
!
! Parameters:
!
! Input/output, integer ( kind = 4 ) N_DATA. The user sets N_DATA to 0
! before the first call. On each call, the routine increments N_DATA by 1,
! and returns the corresponding data; when there is no more data, the
! output value of N_DATA will be 0 again.
!
! Output, real ( kind = 8 ) N, M, the arguments of the function.
!
! Output, real ( kind = 8 ) PIM, the value of the function.
!
implicit none
integer ( kind = 4 ), parameter :: n_max = 20
real ( kind = 8 ) m
real ( kind = 8 ), save, dimension ( n_max ) :: m_vec = (/ &
0.25D+00, &
0.50D+00, &
0.75D+00, &
0.95D+00, &
0.25D+00, &
0.50D+00, &
0.75D+00, &
0.95D+00, &
0.25D+00, &
0.50D+00, &
0.75D+00, &
0.95D+00, &
0.25D+00, &
0.50D+00, &
0.75D+00, &
0.95D+00, &
0.25D+00, &
0.50D+00, &
0.75D+00, &
0.95D+00 /)
real ( kind = 8 ) n
integer ( kind = 4 ) n_data
real ( kind = 8 ), save, dimension ( n_max ) :: n_vec = (/ &
-10.0D+00, &
-10.0D+00, &
-10.0D+00, &
-10.0D+00, &
-3.0D+00, &
-3.0D+00, &
-3.0D+00, &
-3.0D+00, &
-1.0D+00, &
-1.0D+00, &
-1.0D+00, &
-1.0D+00, &
0.0D+00, &
0.0D+00, &
0.0D+00, &
0.0D+00, &
0.5D+00, &
0.5D+00, &
0.5D+00, &
0.5D+00 /)
real ( kind = 8 ) pim
real ( kind = 8 ), save, dimension ( n_max ) :: pim_vec = (/ &
0.4892245275965397D+00, &
0.5106765677902629D+00, &
0.5460409271920561D+00, &
0.6237325893535237D+00, &
0.823045542660675D+00, &
0.8760028274011437D+00, &
0.9660073560143946D+00, &
1.171952391481798D+00, &
1.177446843000566D+00, &
1.273127366749682D+00, &
1.440034318657551D+00, &
1.836472172302591D+00, &
1.685750354812596D+00, &
1.854074677301372D+00, &
2.156515647499643D+00, &
2.908337248444552D+00, &
2.413671504201195D+00, &
2.701287762095351D+00, &
3.234773471249465D+00, &
4.633308147279891D+00 /)
if ( n_data < 0 ) then
n_data = 0
end if
n_data = n_data + 1
if ( n_max < n_data ) then
n_data = 0
m = 0.0D+00
n = 0.0D+00
pim = 0.0D+00
else
m = m_vec(n_data)
n = n_vec(n_data)
pim = pim_vec(n_data)
end if
return
end
function jacobi_cn ( u, m )
!*****************************************************************************80
!
!! JACOBI_CN evaluates the Jacobi elliptic function CN(U,M).
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 25 June 2018
!
! Author:
!
! Original ALGOL version by Roland Bulirsch.
! FORTRAN90 version by John Burkardt
!
! Reference:
!
! Roland Bulirsch,
! Numerical calculation of elliptic integrals and elliptic functions,
! Numerische Mathematik,
! Volume 7, Number 1, 1965, pages 78-90.
!
! Parameters:
!
! Input, real ( kind = 8 ) U, M, the arguments.
!
! Output, real ( kind = 8 ) JACOBI_CN, the function value.
!
implicit none
real ( kind = 8 ) cn
real ( kind = 8 ) dn
real ( kind = 8 ) jacobi_cn
real ( kind = 8 ) m
real ( kind = 8 ) sn
real ( kind = 8 ) u
call sncndn ( u, m, sn, cn, dn )
jacobi_cn = cn
return
end
subroutine jacobi_cn_values ( n_data, u, m, cn )
!*****************************************************************************80
!
!! JACOBI_CN_VALUES returns some values of the Jacobi elliptic function CN(U,M).
!
! Discussion:
!
! In Mathematica, the function can be evaluated by:
!
! JacobiCN[ u, m ]
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 25 June 2018
!
! Author:
!
! John Burkardt
!
! Reference:
!
! Milton Abramowitz, Irene Stegun,
! Handbook of Mathematical Functions,
! National Bureau of Standards, 1964,
! ISBN: 0-486-61272-4,
! LC: QA47.A34.
!
! Stephen Wolfram,
! The Mathematica Book,
! Fourth Edition,
! Cambridge University Press, 1999,
! ISBN: 0-521-64314-7,
! LC: QA76.95.W65.
!
! Parameters:
!
! Input/output, integer ( kind = 4 ) N_DATA. The user sets N_DATA to 0
! before the first call. On each call, the routine increments N_DATA by 1,
! and returns the corresponding data; when there is no more data, the
! output value of N_DATA will be 0 again.
!
! Output, real ( kind = 8 ) U, the argument of the function.
!
! Output, real ( kind = 8 ) M, the parameter of the function.
!
! Output, real ( kind = 8 ) CN, the value of the function.
!
implicit none
integer ( kind = 4 ), parameter :: n_max = 20
real ( kind = 8 ) m
real ( kind = 8 ), save, dimension ( n_max ) :: m_vec = (/ &
0.0D+00, &
0.0D+00, &
0.0D+00, &
0.0D+00, &
0.0D+00, &
0.5D+00, &
0.5D+00, &
0.5D+00, &
0.5D+00, &
0.5D+00, &
1.0D+00, &
1.0D+00, &
1.0D+00, &
1.0D+00, &
1.0D+00, &
1.0D+00, &
1.0D+00, &
1.0D+00, &
1.0D+00, &
1.0D+00 /)
real ( kind = 8 ) cn
real ( kind = 8 ), save, dimension ( n_max ) :: cn_vec = (/ &
0.9950041652780258D+00, &
0.9800665778412416D+00, &
0.8775825618903727D+00, &
0.5403023058681397D+00, &
-0.4161468365471424D+00, &
0.9950124626090582D+00, &
0.9801976276784098D+00, &
0.8822663948904403D+00, &
0.5959765676721407D+00, &
-0.1031836155277618D+00, &
0.9950207489532265D+00, &
0.9803279976447253D+00, &
0.8868188839700739D+00, &
0.6480542736638854D+00, &
0.2658022288340797D+00, &
0.3661899347368653D-01, &
0.9803279976447253D+00, &
0.8868188839700739D+00, &
0.6480542736638854D+00, &
0.2658022288340797D+00 /)
integer ( kind = 4 ) n_data
real ( kind = 8 ) u
real ( kind = 8 ), save, dimension ( n_max ) :: u_vec = (/ &
0.1D+00, &
0.2D+00, &
0.5D+00, &
1.0D+00, &
2.0D+00, &
0.1D+00, &
0.2D+00, &
0.5D+00, &
1.0D+00, &
2.0D+00, &
0.1D+00, &
0.2D+00, &
0.5D+00, &
1.0D+00, &
2.0D+00, &
4.0D+00, &
-0.2D+00, &
-0.5D+00, &
-1.0D+00, &
-2.0D+00 /)
if ( n_data < 0 ) then
n_data = 0
end if
n_data = n_data + 1
if ( n_max < n_data ) then
n_data = 0
m = 0.0D+00
u = 0.0D+00
cn = 0.0D+00
else
m = m_vec(n_data)
u = u_vec(n_data)
cn = cn_vec(n_data)
end if
return
end
function jacobi_dn ( u, m )
!*****************************************************************************80
!
!! JACOBI_DN evaluates the Jacobi elliptic function DN(U,M).
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 25 June 2018
!
! Author:
!
! Original ALGOL version by Roland Bulirsch.
! FORTRAN90 version by John Burkardt
!
! Reference:
!
! Roland Bulirsch,
! Numerical calculation of elliptic integrals and elliptic functions,
! Numerische Mathematik,
! Volume 7, Number 1, 1965, pages 78-90.
!
! Parameters:
!
! Input, real ( kind = 8 ) U, M, the arguments.
!
! Output, real ( kind = 8 ) JACOBI_DN, the function value.
!
implicit none
real ( kind = 8 ) cn
real ( kind = 8 ) dn
real ( kind = 8 ) jacobi_dn
real ( kind = 8 ) m
real ( kind = 8 ) sn
real ( kind = 8 ) u
call sncndn ( u, m, sn, cn, dn )
jacobi_dn = dn
return
end
subroutine jacobi_dn_values ( n_data, u, m, dn )
!*****************************************************************************80
!
!! JACOBI_DN_VALUES returns some values of the Jacobi elliptic function DN(U,M).
!
! Discussion:
!
! In Mathematica, the function can be evaluated by:
!
! JacobiDN[ u, m ]
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 25 June 2018
!
! Author:
!
! John Burkardt
!
! Reference:
!
! Milton Abramowitz, Irene Stegun,
! Handbook of Mathematical Functions,
! National Bureau of Standards, 1964,
! ISBN: 0-486-61272-4,
! LC: QA47.A34.
!
! Stephen Wolfram,
! The Mathematica Book,
! Fourth Edition,
! Cambridge University Press, 1999,
! ISBN: 0-521-64314-7,
! LC: QA76.95.W65.
!
! Parameters:
!
! Input/output, integer ( kind = 4 ) N_DATA. The user sets N_DATA to 0
! before the first call. On each call, the routine increments N_DATA by 1,
! and returns the corresponding data; when there is no more data, the
! output value of N_DATA will be 0 again.
!
! Output, real ( kind = 8 ) U, the argument of the function.
!
! Output, real ( kind = 8 ) M, the parameter of the function.
!
! Output, real ( kind = 8 ) DN, the value of the function.
!
implicit none
integer ( kind = 4 ), parameter :: n_max = 20
real ( kind = 8 ) m
real ( kind = 8 ), save, dimension ( n_max ) :: m_vec = (/ &
0.0D+00, &
0.0D+00, &
0.0D+00, &
0.0D+00, &
0.0D+00, &
0.5D+00, &
0.5D+00, &
0.5D+00, &
0.5D+00, &
0.5D+00, &
1.0D+00, &
1.0D+00, &
1.0D+00, &
1.0D+00, &
1.0D+00, &
1.0D+00, &
1.0D+00, &
1.0D+00, &
1.0D+00, &
1.0D+00 /)
real ( kind = 8 ) dn
real ( kind = 8 ), save, dimension ( n_max ) :: dn_vec = (/ &
0.1000000000000000D+01, &
0.1000000000000000D+01, &
0.1000000000000000D+01, &
0.1000000000000000D+01, &
0.1000000000000000D+01, &
0.9975093485144243D+00, &
0.9901483195224800D+00, &
0.9429724257773857D+00, &
0.8231610016315963D+00, &
0.7108610477840873D+00, &
0.9950207489532265D+00, &
0.9803279976447253D+00, &
0.8868188839700739D+00, &
0.6480542736638854D+00, &
0.2658022288340797D+00, &
0.3661899347368653D-01, &
0.9803279976447253D+00, &
0.8868188839700739D+00, &
0.6480542736638854D+00, &
0.2658022288340797D+00 /)
integer ( kind = 4 ) n_data
real ( kind = 8 ) u
real ( kind = 8 ), save, dimension ( n_max ) :: u_vec = (/ &
0.1D+00, &
0.2D+00, &
0.5D+00, &
1.0D+00, &
2.0D+00, &
0.1D+00, &
0.2D+00, &
0.5D+00, &
1.0D+00, &
2.0D+00, &
0.1D+00, &
0.2D+00, &
0.5D+00, &
1.0D+00, &
2.0D+00, &
4.0D+00, &
-0.2D+00, &
-0.5D+00, &
-1.0D+00, &
-2.0D+00 /)
if ( n_data < 0 ) then
n_data = 0
end if
n_data = n_data + 1
if ( n_max < n_data ) then
n_data = 0
m = 0.0D+00
u = 0.0D+00
dn = 0.0D+00
else
m = m_vec(n_data)
u = u_vec(n_data)
dn = dn_vec(n_data)
end if
return
end
function jacobi_sn ( u, m )
!*****************************************************************************80
!
!! JACOBI_SN evaluates the Jacobi elliptic function SN(U,M).
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 25 June 2018
!
! Author:
!
! Original ALGOL version by Roland Bulirsch.
! FORTRAN90 version by John Burkardt
!
! Reference:
!
! Roland Bulirsch,
! Numerical calculation of elliptic integrals and elliptic functions,
! Numerische Mathematik,
! Volume 7, Number 1, 1965, pages 78-90.
!
! Parameters:
!
! Input, real ( kind = 8 ) U, M, the arguments.
!
! Output, real ( kind = 8 ) JACOBI_SN, the function value.
!
implicit none
real ( kind = 8 ) cn
real ( kind = 8 ) dn
real ( kind = 8 ) jacobi_sn
real ( kind = 8 ) m
real ( kind = 8 ) sn
real ( kind = 8 ) u
call sncndn ( u, m, sn, cn, dn )
jacobi_sn = sn
return
end
subroutine jacobi_sn_values ( n_data, u, m, sn )
!*****************************************************************************80
!
!! JACOBI_SN_VALUES returns some values of the Jacobi elliptic function SN(U,M).
!
! Discussion:
!
! In Mathematica, the function can be evaluated by:
!
! JacobiSN[ u, m ]
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 25 June 2018
!
! Author:
!
! John Burkardt
!
! Reference:
!
! Milton Abramowitz, Irene Stegun,
! Handbook of Mathematical Functions,
! National Bureau of Standards, 1964,
! ISBN: 0-486-61272-4,
! LC: QA47.A34.
!
! Stephen Wolfram,
! The Mathematica Book,
! Fourth Edition,
! Cambridge University Press, 1999,
! ISBN: 0-521-64314-7,
! LC: QA76.95.W65.
!
! Parameters:
!
! Input/output, integer ( kind = 4 ) N_DATA. The user sets N_DATA to 0
! before the first call. On each call, the routine increments N_DATA by 1,
! and returns the corresponding data; when there is no more data, the
! output value of N_DATA will be 0 again.
!
! Output, real ( kind = 8 ) U, the argument of the function.
!
! Output, real ( kind = 8 ) M, the parameter of the function.
!
! Output, real ( kind = 8 ) SN, the value of the function.
!
implicit none
integer ( kind = 4 ), parameter :: n_max = 20
real ( kind = 8 ) m
real ( kind = 8 ), save, dimension ( n_max ) :: m_vec = (/ &
0.0D+00, &
0.0D+00, &
0.0D+00, &
0.0D+00, &
0.0D+00, &
0.5D+00, &
0.5D+00, &
0.5D+00, &
0.5D+00, &
0.5D+00, &
1.0D+00, &
1.0D+00, &
1.0D+00, &
1.0D+00, &
1.0D+00, &
1.0D+00, &
1.0D+00, &
1.0D+00, &
1.0D+00, &
1.0D+00 /)
real ( kind = 8 ) sn
real ( kind = 8 ), save, dimension ( n_max ) :: sn_vec = (/ &
0.9983341664682815D-01, &
0.1986693307950612D+00, &
0.4794255386042030D+00, &
0.8414709848078965D+00, &
0.9092974268256817D+00, &
0.9975068547462484D-01, &
0.1980217429819704D+00, &
0.4707504736556573D+00, &
0.8030018248956439D+00, &
0.9946623253580177D+00, &
0.9966799462495582D-01, &
0.1973753202249040D+00, &
0.4621171572600098D+00, &
0.7615941559557649D+00, &
0.9640275800758169D+00, &
0.9993292997390670D+00, &
-0.1973753202249040D+00, &
-0.4621171572600098D+00, &
-0.7615941559557649D+00, &
-0.9640275800758169D+00 /)
integer ( kind = 4 ) n_data
real ( kind = 8 ) u
real ( kind = 8 ), save, dimension ( n_max ) :: u_vec = (/ &
0.1D+00, &
0.2D+00, &
0.5D+00, &
1.0D+00, &
2.0D+00, &
0.1D+00, &
0.2D+00, &
0.5D+00, &
1.0D+00, &
2.0D+00, &
0.1D+00, &
0.2D+00, &
0.5D+00, &
1.0D+00, &
2.0D+00, &
4.0D+00, &
-0.2D+00, &
-0.5D+00, &
-1.0D+00, &
-2.0D+00 /)
if ( n_data < 0 ) then
n_data = 0
end if
n_data = n_data + 1
if ( n_max < n_data ) then
n_data = 0
m = 0.0D+00
u = 0.0D+00
sn = 0.0D+00
else
m = m_vec(n_data)
u = u_vec(n_data)
sn = sn_vec(n_data)
end if
return
end
function rc ( x, y, errtol, ierr )
!*****************************************************************************80
!
!! RC computes the elementary integral RC(X,Y).
!
! Discussion:
!
! This function computes the elementary integral
!
! RC(X,Y) = Integral ( 0 <= T < oo )
!
! -1/2 -1
! (1/2)(T+X) (T+Y) DT,
!
! where X is nonnegative and Y is positive. The duplication
! theorem is iterated until the variables are nearly equal,
! and the function is then expanded in Taylor series to fifth
! order.
!
! Logarithmic, inverse circular, and inverse hyperbolic
! functions can be expressed in terms of RC.
!
! Check by addition theorem:
!
! RC(X,X+Z) + RC(Y,Y+Z) = RC(0,Z),
! where X, Y, and Z are positive and X * Y = Z * Z.
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 30 May 2018
!
! Author:
!
! Original FORTRAN77 version by Bille Carlson, Elaine Notis.
! This FORTRAN90 version by John Burkardt.
!
! Reference:
!
! Bille Carlson,
! Computing Elliptic Integrals by Duplication,
! Numerische Mathematik,
! Volume 33, 1979, pages 1-16.
!
! Bille Carlson, Elaine Notis,
! Algorithm 577, Algorithms for Incomplete Elliptic Integrals,
! ACM Transactions on Mathematical Software,
! Volume 7, Number 3, pages 398-403, September 1981.
!
! Parameters:
!
! Input, real ( kind = 8 ) X, Y, the arguments in the integral.
!
! Input, real ( kind = 8 ) ERRTOL, the error tolerance.
! Relative error due to truncation is less than
! 16 * ERRTOL ^ 6 / (1 - 2 * ERRTOL).
! Sample choices:
! ERRTOL Relative truncation error less than
! 1.D-3 2.D-17
! 3.D-3 2.D-14
! 1.D-2 2.D-11
! 3.D-2 2.D-8
! 1.D-1 2.D-5
!
! Output, integer ( kind = 4 ) IERR, the error flag.
! 0, no error occurred.
! 1, abnormal termination.
!
implicit none
real ( kind = 8 ) c1
real ( kind = 8 ) c2
real ( kind = 8 ) errtol
integer ( kind = 4 ) ierr
real ( kind = 8 ) lamda
real ( kind = 8 ) lolim
real ( kind = 8 ) mu
real ( kind = 8 ) rc
real ( kind = 8 ) s
real ( kind = 8 ) sn
real ( kind = 8 ) uplim
real ( kind = 8 ) x
real ( kind = 8 ) xn
real ( kind = 8 ) y
real ( kind = 8 ) yn
!
! LOLIM AND UPLIM DETERMINE THE RANGE OF VALID ARGUMENTS.
! LOLIM IS NOT LESS THAN THE MACHINE MINIMUM MULTIPLIED BY 5.
! UPLIM IS NOT GREATER THAN THE MACHINE MAXIMUM DIVIDED BY 5.
!
save lolim
save uplim
data lolim /3.D-78/
data uplim /1.D+75/
if ( &
x < 0.0d0 .or. &
y <= 0.0d0 .or. &
( x + y ) < lolim .or. &
uplim < x .or. &
uplim < y ) then
write ( *, '(a)' ) ''
write ( *, '(a)' ) 'RC - Error!'
write ( *, '(a)' ) ' Invalid input arguments.'
write ( *, '(a,d23.16)' ) ' X = ', x
write ( *, '(a,d23.16)' ) ' Y = ', y
write ( *, '(a)' ) ''
ierr = 1
rc = 0.0D+00
return
end if
ierr = 0
xn = x
yn = y
do
mu = ( xn + yn + yn ) / 3.0d0
sn = ( yn + mu ) / mu - 2.0d0
if ( abs ( sn ) < errtol ) then
c1 = 1.0d0 / 7.0d0
c2 = 9.0d0 / 22.0d0
s = sn * sn * ( 0.3d0 &
+ sn * ( c1 + sn * ( 0.375d0 + sn * c2 ) ) )
rc = ( 1.0d0 + s ) / sqrt ( mu )
return
end if
lamda = 2.0d0 * sqrt ( xn ) * sqrt ( yn ) + yn
xn = ( xn + lamda ) * 0.25d0
yn = ( yn + lamda ) * 0.25d0
end do
end
function rd ( x, y, z, errtol, ierr )
!*****************************************************************************80
!
!! RD computes an incomplete elliptic integral of the second kind, RD(X,Y,Z).
!
! Discussion:
!
! This function computes an incomplete elliptic integral of the second kind.
!
! RD(X,Y,Z) = Integral ( 0 <= T < oo )
!
! -1/2 -1/2 -3/2
! (3/2)(T+X) (T+Y) (T+Z) DT,
!
! where X and Y are nonnegative, X + Y is positive, and Z is positive.
!
! If X or Y is zero, the integral is complete.
!
! The duplication theorem is iterated until the variables are
! nearly equal, and the function is then expanded in Taylor
! series to fifth order.
!
! Check:
!
! RD(X,Y,Z) + RD(Y,Z,X) + RD(Z,X,Y) = 3 / sqrt ( X * Y * Z ),
! where X, Y, and Z are positive.
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 30 May 2018
!
! Author:
!
! Original FORTRAN77 version by Bille Carlson, Elaine Notis.
! This FORTRAN90 version by John Burkardt.
!
! Reference:
!
! Bille Carlson,
! Computing Elliptic Integrals by Duplication,
! Numerische Mathematik,
! Volume 33, 1979, pages 1-16.
!
! Bille Carlson, Elaine Notis,
! Algorithm 577, Algorithms for Incomplete Elliptic Integrals,
! ACM Transactions on Mathematical Software,
! Volume 7, Number 3, pages 398-403, September 1981.
!
! Parameters:
!
! Input, real ( kind = 8 ) X, Y, Z, the arguments in the integral.
!
! Input, real ( kind = 8 ) ERRTOL, the error tolerance.
! The relative error due to truncation is less than
! 3 * ERRTOL ^ 6 / (1-ERRTOL) ^ 3/2.
! Sample choices:
! ERRTOL Relative truncation error less than
! 1.D-3 4.D-18
! 3.D-3 3.D-15
! 1.D-2 4.D-12
! 3.D-2 3.D-9
! 1.D-1 4.D-6
!
! Output, integer ( kind = 4 ) IERR, the error flag.
! 0, no error occurred.
! 1, abnormal termination.
!
implicit none
real ( kind = 8 ) c1
real ( kind = 8 ) c2
real ( kind = 8 ) c3
real ( kind = 8 ) c4
real ( kind = 8 ) ea
real ( kind = 8 ) eb
real ( kind = 8 ) ec
real ( kind = 8 ) ed
real ( kind = 8 ) ef
real ( kind = 8 ) epslon
real ( kind = 8 ) errtol
integer ( kind = 4 ) ierr
real ( kind = 8 ) lamda
real ( kind = 8 ) lolim
real ( kind = 8 ) mu
real ( kind = 8 ) power4
real ( kind = 8 ) rd
real ( kind = 8 ) sigma
real ( kind = 8 ) s1
real ( kind = 8 ) s2
real ( kind = 8 ) uplim
real ( kind = 8 ) x
real ( kind = 8 ) xn
real ( kind = 8 ) xndev
real ( kind = 8 ) xnroot
real ( kind = 8 ) y
real ( kind = 8 ) yn
real ( kind = 8 ) yndev
real ( kind = 8 ) ynroot
real ( kind = 8 ) z
real ( kind = 8 ) zn
real ( kind = 8 ) zndev
real ( kind = 8 ) znroot
!
! LOLIM AND UPLIM DETERMINE THE RANGE OF VALID ARGUMENTS.
! LOLIM IS NOT LESS THAN 2 / (MACHINE MAXIMUM) ^ (2/3).
! UPLIM IS NOT GREATER THAN (0.1 * ERRTOL / MACHINE
! MINIMUM) ^ (2/3), WHERE ERRTOL IS DESCRIBED BELOW.
! IN THE FOLLOWING TABLE IT IS ASSUMED THAT ERRTOL WILL
! NEVER BE CHOSEN SMALLER THAN 1.D-5.
!
save lolim
save uplim
data lolim /6.D-51/
data uplim /1.D+48/
if ( &
x < 0.0D+00 .or. &
y < 0.0D+00 .or. &
x + y < lolim .or. &
z < lolim .or. &
uplim < x .or. &
uplim < y .or. &
uplim < z ) then
write ( *, '(a)' ) ''
write ( *, '(a)' ) 'RD - Error!'
write ( *, '(a)' ) ' Invalid input arguments.'
write ( *, '(a,d23.16)' ) ' X = ', x
write ( *, '(a,d23.16)' ) ' Y = ', y
write ( *, '(a,d23.16)' ) ' Z = ', z
write ( *, '(a)' ) ''
ierr = 1
rd = 0.0D+00
return
end if
ierr = 0
xn = x
yn = y
zn = z
sigma = 0.0d0
power4 = 1.0d0
do
mu = ( xn + yn + 3.0d0 * zn ) * 0.2d0
xndev = ( mu - xn ) / mu
yndev = ( mu - yn ) / mu
zndev = ( mu - zn ) / mu
epslon = max ( abs ( xndev ), abs ( yndev ), abs ( zndev ) )
if ( epslon < errtol ) then
c1 = 3.0d0 / 14.0d0
c2 = 1.0d0 / 6.0d0
c3 = 9.0d0 / 22.0d0
c4 = 3.0d0 / 26.0d0
ea = xndev * yndev
eb = zndev * zndev
ec = ea - eb
ed = ea - 6.0d0 * eb
ef = ed + ec + ec
s1 = ed * ( - c1 + 0.25d0 * c3 * ed - 1.5d0 * c4 * zndev * ef )
s2 = zndev * ( c2 * ef + zndev * ( - c3 * ec + zndev * c4 * ea ) )
rd = 3.0d0 * sigma + power4 * ( 1.0d0 + s1 + s2 ) / ( mu * sqrt ( mu ) )
return
end if
xnroot = sqrt ( xn )
ynroot = sqrt ( yn )
znroot = sqrt ( zn )
lamda = xnroot * ( ynroot + znroot ) + ynroot * znroot
sigma = sigma + power4 / ( znroot * ( zn + lamda ) )
power4 = power4 * 0.25d0
xn = ( xn + lamda ) * 0.25d0
yn = ( yn + lamda ) * 0.25d0
zn = ( zn + lamda ) * 0.25d0
end do
end
function rf ( x, y, z, errtol, ierr )
!*****************************************************************************80
!
!! RF computes an incomplete elliptic integral of the first kind, RF(X,Y,Z).
!
! Discussion:
!
! This function computes the incomplete elliptic integral of the first kind.
!
! RF(X,Y,Z) = Integral ( 0 <= T < oo )
!
! -1/2 -1/2 -1/2
! (1/2)(T+X) (T+Y) (T+Z) DT,
!
! where X, Y, and Z are nonnegative and at most one of them is zero.
!
! If X or Y or Z is zero, the integral is complete.
!
! The duplication theorem is iterated until the variables are
! nearly equal, and the function is then expanded in Taylor
! series to fifth order.
!
! Check by addition theorem:
!
! RF(X,X+Z,X+W) + RF(Y,Y+Z,Y+W) = RF(0,Z,W),
! where X, Y, Z, W are positive and X * Y = Z * W.
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 30 May 2018
!
! Author:
!
! Original FORTRAN77 version by Bille Carlson, Elaine Notis.
! This FORTRAN90 version by John Burkardt.
!
! Reference:
!
! Bille Carlson,
! Computing Elliptic Integrals by Duplication,
! Numerische Mathematik,
! Volume 33, 1979, pages 1-16.
!
! Bille Carlson, Elaine Notis,
! Algorithm 577, Algorithms for Incomplete Elliptic Integrals,
! ACM Transactions on Mathematical Software,
! Volume 7, Number 3, pages 398-403, September 1981.
!
! Parameters:
!
! Input, real ( kind = 8 ) X, Y, Z, the arguments in the integral.
!
! Input, real ( kind = 8 ) ERRTOL, the error tolerance.
! Relative error due to truncation is less than
! ERRTOL ^ 6 / (4 * (1 - ERRTOL)).
! Sample choices:
! ERRTOL Relative truncation error less than
! 1.D-3 3.D-19
! 3.D-3 2.D-16
! 1.D-2 3.D-13
! 3.D-2 2.D-10
! 1.D-1 3.D-7
!
! Output, integer ( kind = 4 ) IERR, the error flag.
! 0, no error occurred.
! 1, abnormal termination.
!
implicit none
real ( kind = 8 ) c1
real ( kind = 8 ) c2
real ( kind = 8 ) c3
real ( kind = 8 ) e2
real ( kind = 8 ) e3
real ( kind = 8 ) epslon
real ( kind = 8 ) errtol
integer ( kind = 4 ) ierr
real ( kind = 8 ) lamda
real ( kind = 8 ) lolim
real ( kind = 8 ) mu
real ( kind = 8 ) rf
real ( kind = 8 ) s
real ( kind = 8 ) uplim
real ( kind = 8 ) x
real ( kind = 8 ) xn
real ( kind = 8 ) xndev
real ( kind = 8 ) xnroot
real ( kind = 8 ) y
real ( kind = 8 ) yn
real ( kind = 8 ) yndev
real ( kind = 8 ) ynroot
real ( kind = 8 ) z
real ( kind = 8 ) zn
real ( kind = 8 ) zndev
real ( kind = 8 ) znroot
!
! LOLIM AND UPLIM DETERMINE THE RANGE OF VALID ARGUMENTS.
! LOLIM IS NOT LESS THAN THE MACHINE MINIMUM MULTIPLIED BY 5.
! UPLIM IS NOT GREATER THAN THE MACHINE MAXIMUM DIVIDED BY 5.
!
save lolim
save uplim
data lolim /3.D-78/
data uplim /1.D+75/
if ( &
x < 0.0D+00 .or. &
y < 0.0D+00 .or. &
z < 0.0D+00 .or. &
x + y < lolim .or. &
x + z < lolim .or. &
y + z < lolim .or. &
uplim <= x .or. &
uplim <= y .or. &
uplim <= z ) then
write ( *, '(a)' ) ''
write ( *, '(a)' ) 'RF - Error!'
write ( *, '(a)' ) ' Invalid input arguments.'
write ( *, '(a,d23.16)' ) ' X = ', x
write ( *, '(a,d23.16)' ) ' Y = ', y
write ( *, '(a,d23.16)' ) ' Z = ', z
write ( *, '(a)' ) ''
ierr = 1
rf = 0.0D+00
return
end if
ierr = 0
xn = x
yn = y
zn = z
do
mu = ( xn + yn + zn ) / 3.0d0
xndev = 2.0d0 - ( mu + xn ) / mu
yndev = 2.0d0 - ( mu + yn ) / mu
zndev = 2.0d0 - ( mu + zn ) / mu
epslon = max ( abs ( xndev ), abs ( yndev ), abs ( zndev ) )
if ( epslon < errtol ) then
c1 = 1.0d0 / 24.0d0
c2 = 3.0d0 / 44.0d0
c3 = 1.0d0 / 14.0d0
e2 = xndev * yndev - zndev * zndev
e3 = xndev * yndev * zndev
s = 1.0d0 + ( c1 * e2 - 0.1d0 - c2 * e3 ) * e2 + c3 * e3
rf = s / sqrt ( mu )
return
end if
xnroot = sqrt ( xn )
ynroot = sqrt ( yn )
znroot = sqrt ( zn )
lamda = xnroot * ( ynroot + znroot ) + ynroot * znroot
xn = ( xn + lamda ) * 0.25d0
yn = ( yn + lamda ) * 0.25d0
zn = ( zn + lamda ) * 0.25d0
end do
end
function rj ( x, y, z, p, errtol, ierr )
!*****************************************************************************80
!
!! RJ computes an incomplete elliptic integral of the third kind, RJ(X,Y,Z,P).
!
! Discussion:
!
! This function computes an incomplete elliptic integral of the third kind.
!
! RJ(X,Y,Z,P) = Integral ( 0 <= T < oo )
!
! -1/2 -1/2 -1/2 -1
! (3/2)(T+X) (T+Y) (T+Z) (T+P) DT,
!
! where X, Y, and Z are nonnegative, at most one of them is
! zero, and P is positive.
!
! If X or Y or Z is zero, then the integral is complete.
!
! The duplication theorem is iterated until the variables are nearly equal,
! and the function is then expanded in Taylor series to fifth order.
!
! Check by addition theorem:
!
! RJ(X,X+Z,X+W,X+P)
! + RJ(Y,Y+Z,Y+W,Y+P) + (A-B) * RJ(A,B,B,A) + 3 / sqrt ( A)
! = RJ(0,Z,W,P), where X,Y,Z,W,P are positive and X * Y
! = Z * W, A = P * P * (X+Y+Z+W), B = P * (P+X) * (P+Y),
! and B - A = P * (P-Z) * (P-W).
!
! The sum of the third and fourth terms on the left side is 3 * RC(A,B).
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 30 May 2018
!
! Author:
!
! Original FORTRAN77 version by Bille Carlson, Elaine Notis.
! This FORTRAN90 version by John Burkardt.
!
! Reference:
!
! Bille Carlson,
! Computing Elliptic Integrals by Duplication,
! Numerische Mathematik,
! Volume 33, 1979, pages 1-16.
!
! Bille Carlson, Elaine Notis,
! Algorithm 577, Algorithms for Incomplete Elliptic Integrals,
! ACM Transactions on Mathematical Software,
! Volume 7, Number 3, pages 398-403, September 1981.
!
! Parameters:
!
! Input, real ( kind = 8 ) X, Y, Z, P, the arguments in the integral.
!
! Input, real ( kind = 8 ) ERRTOL, the error tolerance.
! Relative error due to truncation of the series for rj
! is less than 3 * ERRTOL ^ 6 / (1 - ERRTOL) ^ 3/2.
! An error tolerance (ETOLRC) will be passed to the subroutine
! for RC to make the truncation error for RC less than for RJ.
! Sample choices:
! ERRTOL Relative truncation error less than
! 1.D-3 4.D-18
! 3.D-3 3.D-15
! 1.D-2 4.D-12
! 3.D-2 3.D-9
! 1.D-1 4.D-6
!
! Output, integer ( kind = 4 ) IERR, the error flag.
! 0, no error occurred.
! 1, abnormal termination.
!
implicit none
real ( kind = 8 ) alfa
real ( kind = 8 ) beta
real ( kind = 8 ) c1
real ( kind = 8 ) c2
real ( kind = 8 ) c3
real ( kind = 8 ) c4
real ( kind = 8 ) ea
real ( kind = 8 ) eb
real ( kind = 8 ) ec
real ( kind = 8 ) e2
real ( kind = 8 ) e3
real ( kind = 8 ) epslon
real ( kind = 8 ) errtol
real ( kind = 8 ) etolrc
integer ( kind = 4 ) ierr
real ( kind = 8 ) lamda
real ( kind = 8 ) lolim
real ( kind = 8 ) mu
real ( kind = 8 ) p
real ( kind = 8 ) pn
real ( kind = 8 ) pndev
real ( kind = 8 ) power4
real ( kind = 8 ) rc
real ( kind = 8 ) rj
real ( kind = 8 ) sigma
real ( kind = 8 ) s1
real ( kind = 8 ) s2
real ( kind = 8 ) s3
real ( kind = 8 ) uplim
real ( kind = 8 ) x
real ( kind = 8 ) xn
real ( kind = 8 ) xndev
real ( kind = 8 ) xnroot
real ( kind = 8 ) y
real ( kind = 8 ) yn
real ( kind = 8 ) yndev
real ( kind = 8 ) ynroot
real ( kind = 8 ) z
real ( kind = 8 ) zn
real ( kind = 8 ) zndev
real ( kind = 8 ) znroot
!
! LOLIM AND UPLIM DETERMINE THE RANGE OF VALID ARGUMENTS.
! LOLIM IS NOT LESS THAN THE CUBE ROOT OF THE VALUE
! OF LOLIM USED IN THE SUBROUTINE FOR RC.
! UPLIM IS NOT GREATER THAN 0.3 TIMES THE CUBE ROOT OF
! THE VALUE OF UPLIM USED IN THE SUBROUTINE FOR RC.
!
save lolim
save uplim
data lolim /2.D-26/
data uplim /3.D+24/
if ( &
x < 0.0D+00 .or. &
y < 0.0D+00 .or. &
z < 0.0D+00 .or. &
x + y < lolim .or. &
x + z < lolim .or. &
y + z < lolim .or. &
p < lolim .or. &
uplim < x .or. &
uplim < y .or. &
uplim < z .or. &
uplim < p ) then
write ( *, '(a)' ) ''
write ( *, '(a)' ) 'RJ - Error!'
write ( *, '(a)' ) ' Invalid input arguments.'
write ( *, '(a,d23.16)' ) ' X = ', x
write ( *, '(a,d23.16)' ) ' Y = ', y
write ( *, '(a,d23.16)' ) ' Z = ', z
write ( *, '(a,d23.16)' ) ' P = ', p
write ( *, '(a)' ) ''
ierr = 1
rj = 0.0D+00
return
end if
ierr = 0
xn = x
yn = y
zn = z
pn = p
sigma = 0.0d0
power4 = 1.0d0
etolrc = 0.5d0 * errtol
do
mu = ( xn + yn + zn + pn + pn ) * 0.2d0
xndev = ( mu - xn ) / mu
yndev = ( mu - yn ) / mu
zndev = ( mu - zn ) / mu
pndev = ( mu - pn ) / mu
epslon = max ( abs ( xndev ), abs ( yndev ), abs ( zndev ), abs ( pndev ) )
if ( epslon < errtol ) then
c1 = 3.0d0 / 14.0d0
c2 = 1.0d0 / 3.0d0
c3 = 3.0d0 / 22.0d0
c4 = 3.0d0 / 26.0d0
ea = xndev * ( yndev + zndev ) + yndev * zndev
eb = xndev * yndev * zndev
ec = pndev * pndev
e2 = ea - 3.0d0 * ec
e3 = eb + 2.0d0 * pndev * ( ea - ec )
s1 = 1.0d0 + e2 * ( - c1 + 0.75d0 * c3 * e2 - 1.5d0 * c4 * e3 )
s2 = eb * ( 0.5d0 * c2 + pndev * ( - c3 - c3 + pndev * c4 ) )
s3 = pndev * ea * ( c2 - pndev * c3 ) - c2 * pndev * ec
rj = 3.0d0 * sigma + power4 * ( s1 + s2 + s3 ) / ( mu * sqrt ( mu ) )
return
end if
xnroot = sqrt ( xn )
ynroot = sqrt ( yn )
znroot = sqrt ( zn )
lamda = xnroot * ( ynroot + znroot ) + ynroot * znroot
alfa = pn * ( xnroot + ynroot + znroot ) &
+ xnroot * ynroot * znroot
alfa = alfa * alfa
beta = pn * ( pn + lamda ) * ( pn + lamda )
sigma = sigma + power4 * rc ( alfa, beta, etolrc, ierr )
if ( ierr /= 0 ) then
rj = 0.0D+00
return
end if
power4 = power4 * 0.25d0
xn = ( xn + lamda ) * 0.25d0
yn = ( yn + lamda ) * 0.25d0
zn = ( zn + lamda ) * 0.25d0
pn = ( pn + lamda ) * 0.25d0
end do
end
subroutine sncndn ( u, m, sn, cn, dn )
!*****************************************************************************80
!
!! SNCNDN evaluates Jacobi elliptic functions.
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 24 June 2018
!
! Author:
!
! Original ALGOL version by Roland Bulirsch.
! FORTRAN90 version by John Burkardt
!
! Reference:
!
! Roland Bulirsch,
! Numerical calculation of elliptic integrals and elliptic functions,
! Numerische Mathematik,
! Volume 7, Number 1, 1965, pages 78-90.
!
! Parameters:
!
! Input, real ( kind = 8 ) U, M, the arguments.
!
! Output, real ( kind = 8 ) SN, CN, DN, the value of the Jacobi
! elliptic functions sn(u,m), cn(u,m), and dn(u,m).
!
implicit none
real ( kind = 8 ) a
real ( kind = 8 ) b
real ( kind = 8 ) c
real ( kind = 8 ) ca
real ( kind = 8 ) cn
real ( kind = 8 ) d
real ( kind = 8 ) dn
real ( kind = 8 ) m_array(25)
real ( kind = 8 ) n_array(25)
integer ( kind = 4 ) i
integer ( kind = 4 ) l
real ( kind = 8 ) m
real ( kind = 8 ) m_comp
real ( kind = 8 ) sn
real ( kind = 8 ) u
real ( kind = 8 ) u_copy
m_comp = 1.0D+00 - m
u_copy = u
if ( m_comp == 0.0D+00 ) then
cn = 1.0D+00 / cosh ( u_copy )
dn = cn
sn = tanh ( u_copy )
return
end if
if ( 1.0D+00 < m ) then
d = 1.0D+00 - m_comp
m_comp = - m_comp / d
d = sqrt ( d )
u_copy = d * u_copy
end if
ca = sqrt ( epsilon ( ca ) )
a = 1.0D+00
dn = 1.0D+00
l = 25
do i = 1, 25
m_array(i) = a
m_comp = sqrt ( m_comp )
n_array(i) = m_comp
c = 0.5D+00 * ( a + m_comp )
if ( abs ( a - m_comp ) <= ca * a ) then
l = i
exit
end if
m_comp = a * m_comp
a = c
end do
u_copy = c * u_copy
sn = sin ( u_copy )
cn = cos ( u_copy )
if ( sn /= 0.0D+00 ) then
a = cn / sn
c = a * c
do i = l, 1, -1
b = m_array(i)
a = c * a
c = dn * c
dn = ( n_array(i) + a ) / ( b + a )
a = c / b
end do
a = 1.0D+00 / sqrt ( c * c + 1.0D+00 )
if ( sn < 0.0D+00 ) then
sn = - a
else
sn = a
end if
cn = c * sn
end if
if ( 1.0D+00 < m ) then
a = dn
dn = cn
cn = a
sn = sn / d
end if
return
end
subroutine timestamp ( )
!*****************************************************************************80
!
!! TIMESTAMP prints the current YMDHMS date as a time stamp.
!
! Example:
!
! 31 May 2001 9:45:54.872 AM
!
! Licensing:
!
! This code is distributed under the GNU LGPL license.
!
! Modified:
!
! 18 May 2013
!
! Author:
!
! John Burkardt
!
! Parameters:
!
! None
!
implicit none
character ( len = 8 ) ampm
integer ( kind = 4 ) d
integer ( kind = 4 ) h
integer ( kind = 4 ) m
integer ( kind = 4 ) mm
character ( len = 9 ), parameter, dimension(12) :: month = (/ &
'January ', 'February ', 'March ', 'April ', &
'May ', 'June ', 'July ', 'August ', &
'September', 'October ', 'November ', 'December ' /)
integer ( kind = 4 ) n
integer ( kind = 4 ) s
integer ( kind = 4 ) values(8)
integer ( kind = 4 ) y
call date_and_time ( values = values )
y = values(1)
m = values(2)
d = values(3)
h = values(5)
n = values(6)
s = values(7)
mm = values(8)
if ( h < 12 ) then
ampm = 'AM'
else if ( h == 12 ) then
if ( n == 0 .and. s == 0 ) then
ampm = 'Noon'
else
ampm = 'PM'
end if
else
h = h - 12
if ( h < 12 ) then
ampm = 'PM'
else if ( h == 12 ) then
if ( n == 0 .and. s == 0 ) then
ampm = 'Midnight'
else
ampm = 'AM'
end if
end if
end if
write ( *, '(i2.2,1x,a,1x,i4,2x,i2,a1,i2.2,a1,i2.2,a1,i3.3,1x,a)' ) &
d, trim ( month(m) ), y, h, ':', n, ':', s, '.', mm, trim ( ampm )
return
end
|
{"hexsha": "b028667d4b1406f5438f25072ac07aec5faff95a", "size": 127307, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "utils/libraries/elliptic_integral.f90", "max_stars_repo_name": "Cirdans-Home/psfun", "max_stars_repo_head_hexsha": "1583d2715b0cadf6cd673b3f522b9699746cef3f", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-07-02T02:16:55.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-23T11:45:58.000Z", "max_issues_repo_path": "utils/libraries/elliptic_integral.f90", "max_issues_repo_name": "Cirdans-Home/psfun", "max_issues_repo_head_hexsha": "1583d2715b0cadf6cd673b3f522b9699746cef3f", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/libraries/elliptic_integral.f90", "max_forks_repo_name": "Cirdans-Home/psfun", "max_forks_repo_head_hexsha": "1583d2715b0cadf6cd673b3f522b9699746cef3f", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.0337927129, "max_line_length": 80, "alphanum_fraction": 0.5534024052, "num_tokens": 48789}
|
from config.config import models_dir
from core import utils
import re
import numpy as np
from functools import lru_cache
from core.representations import BagOfEntities
class Encoder:
"""Base class for making objects that encode one form of data into
another form, e.g., text to tokens or text to vectors.
"""
def __init__(self, fn=None):
self._encoder_fn = fn
self._input_validation_fn = None
self._name = 'Encoder'
def set_encoding_fn(self, fn):
self._encoder_fn = fn
def set_input_validation_fn(self, fn):
self._input_validation_fn = fn
def encode(self, item):
self._raise_exception_if_incompatible(item)
self._raise_exception_if_no_encoding_fn()
return self._encoder_fn(item)
def encode_many(self, items):
return [self.encode(item) for item in items]
def can_encode(self, data):
is_valid = self._input_validation_fn
return False if callable(is_valid) and not is_valid(data) else True
def _raise_invalid_encoder_fn_exception(self):
msg = f'{self._name} does not have valid encoding function.'
raise Exception(msg)
def _raise_invalid_input_data_exception(self, item):
msg = f'Invalid input data for {self._name}: {type(item)}'
raise Exception(msg)
def _raise_exception_if_no_encoding_fn(self):
if not callable(self._encoder_fn):
self._raise_invalid_encoder_fn_exception()
def _raise_exception_if_incompatible(self, item):
if not self.can_encode(item):
self._raise_invalid_input_data_exception(item)
class BagOfEntitiesEncoder(Encoder):
def __init__(self):
super().__init__()
self._name = 'BagOfEntitiesEncoder'
self.set_encoding_fn(self._get_entities)
self.set_input_validation_fn(lambda x: isinstance(x, str))
self._vocab_file = None
self._vocab = None
self._lookup_table = None
self._no_casing = True
self._maxlen = 3
self._separator = ' '
self._sent_tokenizer = utils.get_sentences
self._non_overlapping = True
def set_maxlen(self, n):
self._maxlen = n
def set_separator(self, sep):
self._separator = sep
def _load_vocab(self):
with open(self._vocab_file) as fp:
self._vocab = fp.read().strip().splitlines()
self._lookup_table = set(self._vocab)
self._load_blacklist()
def _load_vocab_if_unloaded(self):
if not isinstance(self._vocab, list):
self._load_vocab()
@lru_cache(maxsize=50000)
def _get_entities(self, text):
self._load_vocab_if_unloaded()
entities = []
for sent in self._sent_tokenizer(text):
entities += self._get_entities_from_sentence(sent)
entities = set([e for e in entities if not self._in_blacklist(e)])
if self._non_overlapping:
entities = BagOfEntities(entities).non_overlapping()
return entities
def _get_entities_from_sentence(self, sentence):
candidates = self._get_candidate_entities(sentence)
return [c for c in candidates if c in self._lookup_table]
def _get_candidate_entities(self, sent):
candidates = set()
tokens = self._tokenize(sent)
for n in range(1, self._maxlen+1):
for n_gram in self._get_n_grams(n, tokens):
candidates.add(n_gram)
return candidates
def _get_n_grams(self, n, tokens):
if len(tokens) < n:
return []
sep = self._separator
n_grams = [sep.join(tokens[i:i+n]) for i in range(len(tokens))]
return n_grams
def _tokenize(self, text):
text = text.lower() if self._no_casing else text
pattern = r'([\w\-]+|\W+)'
matches = re.findall(pattern, text)
tokens = [m for m in matches if m.strip()]
return tokens
def _in_blacklist(self, entity):
if entity in self._blacklist:
return True
def _load_blacklist(self):
with open(f'{models_dir}/entities_blacklist.txt') as file:
self._blacklist = set(file.read().strip().splitlines())
@classmethod
def from_vocab_file(self, filepath):
encoder = BagOfEntitiesEncoder()
encoder._vocab_file = filepath
return encoder
class EmbeddingMatrix():
"""A wrapper on a collection of items and their embeddings. It
provides easy retrieval of embedding of any vector and retrieval of
items similar to a given item on the basis of the similarity of their
vectors.
It can be used to store such data as word, entity, or document
embeddings.
"""
def __init__(self, items, vectors):
self._items = items
self._vectors = vectors
self._lookup = self._create_lookup()
self._unit_vectors = self._create_unit_vectors()
@property
def dims(self):
vec = self._vectors[0]
return len(vec)
def __getitem__(self, item):
idx = self._lookup[item]
return self._vectors[idx]
def __contains__(self, item):
return item in self._lookup
def similar_to_item(self, item, n=10, dist='cosine'):
idx = self._lookup[item]
vector = self._unit_vectors[idx]
return self.similar_to_vector(vector, n)
def similar_to_vector(self, vector, n=10, dist='cosine'):
if dist == 'cosine':
dists = self._cosine_dists(vector, self._unit_vectors)
elif dist == 'euclidean':
dists = self._euclid_dists(vector, self._vectors)
elif dist == 'dot':
dists = self._dot_prods(vector, self._vectors)
idxs = np.argsort(dists)[:n]
return [self._items[i] for i in idxs]
def _euclid_dists(self, a, b):
d = a - b
return np.sum(d*d, axis=1)
def _cosine_dists(self, a, b):
return 1-np.dot(a, b.T)
def _dot_prods(self, a, b):
return -np.dot(a, b.T)
def _create_lookup(self):
return {w:i for i,w in enumerate(self._items)}
def _create_unit_vectors(self):
return utils.normalize_rows(self._vectors)
@classmethod
def from_txt_npy(self, txt_filepath, npy_filepath):
"""Create an `EmbeddingMatrix` from an items file containing the
a list of item descriptions (one per line) and a numpy file with
the vectors that have one-to-one correspondance with the items.
Args:
txt_filepath (str): Path to items file
npy_filepath (str): Path to numpy (vectors) file
Returns:
EmbeddingMatrix: Resulting embedding matrix object
"""
with open(txt_filepath) as file:
items = [l.strip() for l in file if l.strip()]
vectors = np.load(npy_filepath)
return EmbeddingMatrix(items, vectors)
@classmethod
def from_tsv(self, filepath):
"""Create an `EmbeddingMatrix` from a tsv file where the first
column contains the item descriptions and subsequent columns
contain the vector components. All columns should be separated
by single tabs.
Args:
filepath (str): Path to tsv (tab separated values) file
Returns:
EmbeddingMatrix: Resulting embedding matrix object
"""
pairs = self._parse_tsv_file(filepath)
items = [word for word, _ in pairs]
vectors = np.array([vector for _, vector in pairs])
return EmbeddingMatrix(items, vectors)
@classmethod
def _parse_tsv_file(self, filepath):
with open(filepath) as file:
lines = (l for l in file if l.strip())
pairs = [self._parse_tsv_line(l) for l in lines]
return pairs
@classmethod
def _parse_tsv_line(self, line):
[word, *vector] = line.strip().split('\t')
vector = [float(val) for val in vector]
return word, vector
class BagOfVectorsEncoder(Encoder):
def __init__(self, emb_matrix):
super().__init__()
self._emb_matrix = emb_matrix
self.set_encoding_fn(self._vectorize_items)
def _vectorize_items(self, bag_of_items):
items = [item for item in bag_of_items if item in self._emb_matrix]
vectors = [self._emb_matrix[item] for item in items]
vectors_as_tuples = [tuple(vec) for vec in vectors]
return set(vectors_as_tuples)
@classmethod
def from_txt_npy(self, txtfile, npyfile):
emb_matrix = EmbeddingMatrix.from_txt_npy(txtfile, npyfile)
return BagOfVectorsEncoder(emb_matrix)
class BagOfWordsEncoder(Encoder):
def __init__(self, fn=None):
super().__init__(fn)
self._name = 'BagOfTokensEncoder'
self._input_validation_fn = lambda x: isinstance(x, str)
class VectorSequenceEncoder():
pass
class TokenSequenceEncoder(Encoder):
def __init__(self, fn=None):
super().__init__(fn)
self._name = 'TokenSequenceEncoder'
self._input_validation_fn = lambda x: isinstance(x, str)
txt_file = models_dir + 'entities.txt'
npy_file = models_dir + 'entities.npy'
default_embedding_matrix = EmbeddingMatrix.from_txt_npy(txt_file, npy_file)
default_boe_encoder = BagOfEntitiesEncoder.from_vocab_file(txt_file)
default_bov_encoder = BagOfVectorsEncoder(default_embedding_matrix)
|
{"hexsha": "fdea54bd4d465a9aaf0fe1e3c53395cc8328fd30", "size": 8254, "ext": "py", "lang": "Python", "max_stars_repo_path": "core/encoders.py", "max_stars_repo_name": "vsvarunsharma10/pqai", "max_stars_repo_head_hexsha": "3ef1351fbc39671916517917de9074a62b092eef", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2021-06-23T04:17:06.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T16:03:49.000Z", "max_issues_repo_path": "core/encoders.py", "max_issues_repo_name": "vsvarunsharma10/pqai", "max_issues_repo_head_hexsha": "3ef1351fbc39671916517917de9074a62b092eef", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 15, "max_issues_repo_issues_event_min_datetime": "2021-06-22T10:14:15.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:58:37.000Z", "max_forks_repo_path": "core/encoders.py", "max_forks_repo_name": "vsvarunsharma10/pqai", "max_forks_repo_head_hexsha": "3ef1351fbc39671916517917de9074a62b092eef", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-06-27T18:37:53.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-15T04:41:21.000Z", "avg_line_length": 28.2671232877, "max_line_length": 75, "alphanum_fraction": 0.738672159, "include": true, "reason": "import numpy", "num_tokens": 2192}
|
from micropsi_core.nodenet.stepoperators import Propagate, Calculate
import numpy as np
from micropsi_core.nodenet.theano_engine.theano_node import *
from micropsi_core.nodenet.theano_engine.theano_definitions import *
class TheanoPropagate(Propagate):
"""
theano implementation of the Propagate operator.
Propagates activation from a across w back to a (a is the gate vector and becomes the slot vector)
every entry in the target vector is the sum of the products of the corresponding input vector
and the weight values, i.e. the dot product of weight matrix and activation vector
"""
def execute(self, nodenet, nodes, netapi):
# propagate cross-partition to the a_in vectors
for partition in nodenet.partitions.values():
for inlinks in partition.inlinks.values():
inlinks[3]() # call the theano_function at [3]
# then propagate internally in all partitions
for partition in nodenet.partitions.values():
partition.propagate()
class TheanoCalculate(Calculate):
"""
theano implementation of the Calculate operator.
implements node and gate functions as a theano graph.
"""
def __init__(self, nodenet):
self.calculate = None
self.worldadapter = None
self.nodenet = nodenet
def read_sensors_and_actuator_feedback(self):
self.nodenet.set_sensors_and_actuator_feedback_values()
def write_actuators(self):
self.nodenet.set_actuator_values()
def count_success_and_failure(self, nodenet):
nays = 0
yays = 0
for partition in nodenet.partitions.values():
if partition.has_pipes:
nays += len(np.where((partition.n_function_selector.get_value(borrow=True) == NFPG_PIPE_SUR) & (partition.a.get_value(borrow=True) <= -1))[0])
yays += len(np.where((partition.n_function_selector.get_value(borrow=True) == NFPG_PIPE_SUR) & (partition.a.get_value(borrow=True) >= 1))[0])
nodenet.set_modulator('base_number_of_expected_events', yays)
nodenet.set_modulator('base_number_of_unexpected_events', nays)
def execute(self, nodenet, nodes, netapi):
self.worldadapter = nodenet.worldadapter_instance
self.write_actuators()
self.read_sensors_and_actuator_feedback()
for partition in nodenet.partitions.values():
partition.calculate()
if nodenet.use_modulators:
self.count_success_and_failure(nodenet)
|
{"hexsha": "56adbb5e461aa7b69283b8309d7c7e35d48394f2", "size": 2570, "ext": "py", "lang": "Python", "max_stars_repo_path": "micropsi_core/nodenet/theano_engine/theano_stepoperators.py", "max_stars_repo_name": "joschabach/micropsi2", "max_stars_repo_head_hexsha": "74a2642d20da9da1d64acc5e4c11aeabee192a27", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 119, "max_stars_repo_stars_event_min_datetime": "2015-01-23T11:24:58.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-13T08:00:50.000Z", "max_issues_repo_path": "micropsi_core/nodenet/theano_engine/theano_stepoperators.py", "max_issues_repo_name": "Chediak/micropsi2", "max_issues_repo_head_hexsha": "74a2642d20da9da1d64acc5e4c11aeabee192a27", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 9, "max_issues_repo_issues_event_min_datetime": "2015-02-18T20:44:58.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-17T14:38:05.000Z", "max_forks_repo_path": "micropsi_core/nodenet/theano_engine/theano_stepoperators.py", "max_forks_repo_name": "Chediak/micropsi2", "max_forks_repo_head_hexsha": "74a2642d20da9da1d64acc5e4c11aeabee192a27", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 34, "max_forks_repo_forks_event_min_datetime": "2015-04-01T20:48:49.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-13T08:02:00.000Z", "avg_line_length": 37.7941176471, "max_line_length": 158, "alphanum_fraction": 0.6848249027, "include": true, "reason": "import numpy", "num_tokens": 556}
|
import csv
import logging
import os
import cv2
import numpy as np
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# raw image properties
SUB_IMAGE_SIZE = (512, 512)
BEE_OBJECT_SIZES = {1: (20, 35), # bee class is labeled 1
2: (20, 20)} # butt class is labeled 2
# pre processing params
SCALE_FACTOR = 2 # downscale images, labels by half
def get_object_drawing_functions():
return {1: draw_bee_body,
2: draw_bee_butt}
def draw_bee_butt(out_image, x, y, a, color):
r = 30 // SCALE_FACTOR
cv2.circle(out_image, (int(x), int(y)), r, color, thickness=2)
draw_center(out_image, x, y, color)
def draw_center(out_image, x, y, color):
d = 4 // SCALE_FACTOR
cv2.rectangle(out_image, (int(x) - d, int(y) - d), (int(x) + d, int(y) + d), color, thickness=-1)
def draw_bee_body(out_image, x, y, a, color):
d = 60. / SCALE_FACTOR
dx = np.sin(a) * d
dy = np.cos(a) * d
x1, y1, x2, y2 = int(x - dx), int(y + dy), int(x + dx), int(y - dy)
cv2.line(out_image, (x1, y1), (x2, y2), color, thickness=2)
draw_center(out_image, x, y, color)
def generate_training(frames_root_dir, labels_root_dir, filenames):
for name in filenames:
label_filepath = os.path.join(labels_root_dir, name + '.txt')
image_filepath = os.path.join(frames_root_dir, name + '.png')
if not os.path.exists(label_filepath) or not os.path.exists(image_filepath):
logger.info('Skipping {}.'.format(name))
continue
image = cv2.imread(image_filepath, cv2.IMREAD_GRAYSCALE)
frame_label = read_label_file(label_filepath)
all_unique_offsets = np.unique([[x[0], x[1]] for x in frame_label], axis=0)
sub_label_size = (SUB_IMAGE_SIZE[0] // SCALE_FACTOR,
SUB_IMAGE_SIZE[1] // SCALE_FACTOR)
for offset_x, offset_y in all_unique_offsets:
label_image = np.zeros(sub_label_size, dtype=np.uint8)
sub_labels = [x for x in frame_label if x[0] == offset_x and x[1] == offset_y]
for _, _, x, y, bee_type, angle in sub_labels:
bee_object_size = BEE_OBJECT_SIZES[bee_type]
x = x // SCALE_FACTOR
y = y // SCALE_FACTOR
r1 = bee_object_size[0] // SCALE_FACTOR
r2 = bee_object_size[1] // SCALE_FACTOR
ellipse_around_point(label_image, y, x, angle, r1=r1, r2=r2, value=bee_type)
sub_image = image[offset_y:offset_y + SUB_IMAGE_SIZE[0],
offset_x:offset_x + SUB_IMAGE_SIZE[1]]
fx, fy = (1 / float(SCALE_FACTOR),) * 2
sub_image = cv2.resize(sub_image, None, fx=fx, fy=fy, interpolation=cv2.INTER_LINEAR)
yield sub_image, label_image
def generate_predict(images_root_dir, filenames, regions_of_interest):
for name, roi in zip(filenames, regions_of_interest):
image_filepath = os.path.join(images_root_dir, name + '.png')
image = cv2.imread(image_filepath, cv2.IMREAD_GRAYSCALE)
image = image[roi[2]:roi[3], roi[0]:roi[1]]
fx, fy = (1 / float(SCALE_FACTOR),) * 2
image = cv2.resize(image, None, fx=fx, fy=fy, interpolation=cv2.INTER_LINEAR)
yield image
def ellipse_around_point(image, xc, yc, angle, r1, r2, value):
image_size = image.shape
ind0 = np.arange(-xc, image_size[0] - xc)[:, np.newaxis] * np.ones((1, image_size[1]))
ind1 = np.arange(-yc, image_size[1] - yc)[np.newaxis, :] * np.ones((image_size[0], 1))
ind = np.concatenate([ind0[np.newaxis], ind1[np.newaxis]], axis=0)
sin_a = np.sin(angle)
cos_a = np.cos(angle)
image[((ind[0, :, :] * sin_a + ind[1, :, :] * cos_a) ** 2 / r1 ** 2 + (
ind[1, :, :] * sin_a - ind[0, :, :] * cos_a) ** 2 / r2 ** 2) <= 1] = value
return image
def read_label_file(label_filename):
with open(label_filename, 'r') as csvfile:
csv_reader = csv.reader(csvfile, delimiter='\t')
def parse_row(row):
offset_x, offset_y = int(row[0]), int(row[1])
bee_type = int(row[2])
x, y = int(row[3]), int(row[4])
angle = float(row[5])
return offset_x, offset_y, x, y, bee_type, angle
return list(map(parse_row, csv_reader))
def read_label_file_globalcoords(label_filename):
rows = read_label_file(label_filename)
unique_offsets = np.unique([[x[0], x[1]] for x in rows], axis=0)
roi = [np.min(unique_offsets[:, 0]), np.max(unique_offsets[:, 0]) + SUB_IMAGE_SIZE[0],
np.min(unique_offsets[:, 1]), np.max(unique_offsets[:, 1]) + SUB_IMAGE_SIZE[1]]
labels_global_coordinates = [[offset_x + x - roi[0], offset_y + y - roi[2], bee_type, angle]
for offset_x, offset_y, x, y, bee_type, angle in rows]
labels_global_coordinates = [[x // SCALE_FACTOR, y // SCALE_FACTOR, bee_type, angle]
for x, y, bee_type, angle in labels_global_coordinates]
return labels_global_coordinates, roi
|
{"hexsha": "4f7b5679bebe6ec67085cfe20dd1b197a5979086", "size": 5099, "ext": "py", "lang": "Python", "max_stars_repo_path": "segmentation/bee_dataset.py", "max_stars_repo_name": "mlubega/DenseObjectDetection", "max_stars_repo_head_hexsha": "004ebf3d76bd66fcaa7f13ce3acafbf336927ed5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 28, "max_stars_repo_stars_event_min_datetime": "2019-02-01T08:08:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-26T14:24:51.000Z", "max_issues_repo_path": "segmentation/bee_dataset.py", "max_issues_repo_name": "mlubega/DenseObjectDetection", "max_issues_repo_head_hexsha": "004ebf3d76bd66fcaa7f13ce3acafbf336927ed5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2019-04-10T12:45:00.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-03T14:43:02.000Z", "max_forks_repo_path": "segmentation/bee_dataset.py", "max_forks_repo_name": "mlubega/DenseObjectDetection", "max_forks_repo_head_hexsha": "004ebf3d76bd66fcaa7f13ce3acafbf336927ed5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2019-02-22T12:30:27.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-08T12:48:10.000Z", "avg_line_length": 33.3267973856, "max_line_length": 101, "alphanum_fraction": 0.6126691508, "include": true, "reason": "import numpy", "num_tokens": 1406}
|
(** Generated by coq-of-ocaml *)
Require Import OCaml.OCaml.
Local Set Primitive Projections.
Local Open Scope string_scope.
Local Open Scope Z_scope.
Local Open Scope type_scope.
Import ListNotations.
Unset Positivity Checking.
Unset Guard Checking.
Inductive nat : Set :=
| O : nat
| S : nat -> nat.
Inductive natural : Set :=
| Succ : natural -> natural
| Zero : natural.
Inductive lst : Set :=
| Cons : natural -> lst -> lst
| Nil : lst.
Fixpoint double (n : natural) {struct n} : natural :=
match n with
| Succ n => Succ (Succ (double n))
| Zero => Zero
end.
Fixpoint append (append_arg0 : lst) (append_arg1 : lst) {struct append_arg0}
: lst :=
match append_arg0 with
| Cons x y => Cons x (append y append_arg1)
| Nil => append_arg1
end.
Fixpoint len (n : lst) {struct n} : natural :=
match n with
| Cons _ y => Succ (len y)
| Nil => Zero
end.
Definition synth (lf2 : lst) : natural := len (Cons (double Zero) lf2).
|
{"author": "yalhessi", "repo": "lemmaranker", "sha": "53bc2ad63ad7faba0d7fc9af4e1e34216173574a", "save_path": "github-repos/coq/yalhessi-lemmaranker", "path": "github-repos/coq/yalhessi-lemmaranker/lemmaranker-53bc2ad63ad7faba0d7fc9af4e1e34216173574a/benchmark/clam/_lfind_clam_lf_goal4_theorem0_39_lem/goal4conj72_coqofml_4UIAt9.v"}
|
\section{Background} \label{sec:background}
In this section, we describe the different systems and former research necessary to
a good understanding of this thesis.
First, we introduce Spark.
Second, we discuss possible \textsc{WCOJ} join algorithms and argue why we
choose to use Leapfrog Triejoin as basis for our thesis.
Third, we describe distributed worst-case optimal join algorithm in a Spark-like
system, called Myria.
There implementation uses a communication optimal, physical partitioning scheme named
Shares.
We also analyse its scaling behaviour for graph-pattern matching.
This analysis is a contribution of this thesis, although it is presented in the background section.
Fourth, we explain the compressed sparse row data structure.
Finally, we present a study of real-world graph sizes used in literature assembled by us to
show that most graphs used today fit into main-memory.
\subsection{Spark}\label{subsec:spark}
Spark probably is the most widely used and industry accepted cluster computing framework.
It improves over former computing frameworks, e.g. MapReduce~\cite{mapreduce}, Hadoop~\cite{hadoop} or Haloop~\cite{haloop},
by allowing to cache results in memory between multiple queries, using so-called resilient
distributed datasets~\cite{rdd}; often abbreviated to RDD.
Furthermore, it offers SQL as an alternative high-level query interface due to its extensible query optimizer framework, Catalyst.
This section introduces Spark and is organized in four subsections.
\Cref{subsubsec:resilient-distributed-datasets} describes the core data structure of Spark: the RDDs.
In~\cref{subsubsec:spark-architecture}, we explain the different components and processes in a Spark cluster.
The query optimizer of Spark, Catalyst, is explained in~\cref{subsubsec:catalyst}.
It is the component we integrate our \textsc{WCOJ} with;
therefore, it is the module of Spark that is most relevant to this thesis.
Finally, in~\cref{subsubsec:broadcast-variables} we highlight important details about \textit{Broadcast variables} which are used
to implement our parallel worst-case optimal join.
\subsubsection{Resilient distributed datasets} \label{subsubsec:resilient-distributed-datasets}
RDDs form the core of Spark.
However, for this thesis, it is not necessary to understand them in great detail.
In the next paragraph, we give a short introduction to the relevant aspects of RDDs.
For the interested reader, a more in-depth description is given in the original paper~\cite{rdd}.
Resilient distributed datasets describe a distributed collection of data items of a single type.
In contrast, to other distributed share memory solutions, RDDs do not use fine-grained
operations to manipulate single data items but coarse-grained operations which are applied
to all data items, e.g. \textit{map} to apply a function to each data item.
These operations are called \textit{transformations}.
An RDD is built starting from a persistent data source and multiple transformations to
apply to this data source.
One can represent the transformations applied to the input data source as a directed acyclic graph, the so-called \textit{lineage graph}.
This graph fully describes the dataset without materializing it because the transformations are deterministic.
Hence, the dataset can be computed and recomputed on demand, e.g. when the user asks for the count
of all items in the set.
Operations which require that the data in the RDD is computed are called \textit{actions}.
RDDs are distributed by organizing their data items into partitions.
The partitioning can be chosen by the user or the Spark query optimizer such that it allows to run transformations on all partitions
in parallel.
For example, one might choose a round-robin partitioning to generate splits of equal size when reading data items from disk or one
groups items by hashing a specific key to support parallelizable aggregation on that key per partition.
The process of repartitioning an RDD is called a \textit{shuffle}.
It is an expensive operation because it typically involves writing and reading the whole RDD to disk.
Describing datasets as RDDs comes with two main benefits.
First, it is resilient because if the dataset or some partitions of it get lost, it is possible to recompute them from persistent storage
using lineage graph information.
Second, it allows Spark to compute RDDs in parallel.
Spark can parallelize the computation of RDD in two ways.
First, by data-parallelism, since different partitions of an RDD can
be computed independently from each other.
Second, by task parallelism, because some parts of the DAG can be computed without dependence of the others.
Indeed, it is possible to compute all parts of an RDD in parallel which are not related in a topological sort of the graph.
\subsubsection{Spark architecture} \label{subsubsec:spark-architecture}
Spark allows the user to run a program on a single machine or hundredsco of machines organized in a cluster.
In this section, we explain the architecture that allows this flexibility.
\Cref{fig:spark-cluster} shows the schematics of a Spark cluster setup.
\begin{figure}
\includegraphics[width=\textwidth]{figures/spark-cluster.png}
\caption{
Schematics of a Spark cluster with two workers, each of them with one executor and two threads per executor.
Source: Apache Spark Documentation, https://spark.apache.org/docs/latest/cluster-overview.html
}
\label{fig:spark-cluster}
\end{figure}
In Spark, each physical machine is called a \textit{worker}.
On each worker, Spark starts one or multiple Spark processes in their own JVM instance; each of them is called \textit{executor}.
Nowadays, many Spark deployments use a single executor per worker\footnote{This is the setup Databricks uses; Databricks is the leading
maintainer of the Spark platform and offers professional deployment to many customers.}.
Each executor runs multiple threads (often one per core on its worker) to execute multiple tasks in parallel.
In total, a Spark cluster can run \textit{\# workers} $\times$ \textit{\# executors per worker} $\times$ \textit{\# threads per executor} tasks
in parallel.
Spark uses two kinds of processes to execute an application: a \textit{driver program} and multiple \textit{executors}.
When started, the driver program acquires resources from the \textit{cluster manager} for its executor processes.
These executors stay alive during the whole Spark application.
Then, the driver program continues executing the Spark application.
When it encounters parallelizable tasks, it schedules them on the available executors.
All tasks scheduled on the same executor share a cache for in-memory data structures like \textit{Broadcast variables} or persisted RDD
partitions.
This is important in the context of this thesis because it means that we cache the input graph once per executor,
which in many Spark deployments is once per worker or physical machine.
This would not be possible if different tasks in the same JVM would not share the same cache.
Spark allows the user to choose a cluster manager to manage resources in the cluster.
It comes with good integration for Hadoop YARN~\cite{yarn}, Apache Mesos~\cite{mesos} and Kubernetes~\cite{kubernetes}, as well as,
a standalone mode where Spark provides its own cluster manager functionality.
Finally, one can run Spark on a single machine in \textit{local mode}.
In local mode, the driver program and a single executor share a single JVM.
The executor uses the cores assigned to Spark to run multiple worker threads.
\subsubsection{Catalyst} \label{subsubsec:catalyst}
Catalyst~\cite{spark-sql} is Spark's query optimizer.
It can process queries given as a SQL string or described using the DataFrame API.
From a given query it constructs an executable \textit{physical plan}.
The query compilation process is organized in multiple stages.
Its inputs and stages are shown in~\cref{fig:catalyst-stages}.
Below we explain these in order.
We use the triangle given by the datalog rule $COUNT(triangle(A, B, C)) \leftarrow R(A, B), S(B, C), T(A, C), A < B < C $ as
a running example.
\begin{figure}
\includegraphics[width=\textwidth]{figures/catalyst-stages.png}
\caption{
Input and stages of the Catalyst optimizer.
Source: Databricks Blog, https://databricks.com/blog/2015/04/13/deep-dive-into-spark-sqls-catalyst-optimizer.html
}
\label{fig:catalyst-stages}
\end{figure}
The input of Catalyst is a query in the form of a DataFrame or SQL string.
From this, the optimizer builds a \textit{unresolved logical plan}.
This plan can include unresolved attributes, e.g. attribute names which are not matched to a specific
data source yet or which have no known type.
To resolve these attributes Catalyst uses a \textit{Catalog} of possible bindings which describe the
available data sources.
This phase is referred to as \textit{Analysis} and results in a \textit{logical plan}.
The logical plan represents \textit{what} should be done for the query but not exactly \textit{how},
e.g. it might contain a Join operator but not a Sort-merge join.
We show the logical plan for the triangle query in~\cref{fig:triangle-logical-plan}.
As we see, the query is represented as a tree where the vertices are operators and the edge indicate dataflow from
one operator to another.
The leaves of the tree are three aliases of the edge relationship.
Two of these source relationships are the input the join between \textit{R} and \textit{S} via \textit{B}.
The result of this join and the leaf relationship \textit{T} are input to the second join.
The tuples produced by this join are filtered to fulfil $A < B < C$.
Finally, at the root of the tree, there is an aggregation to count all results and report the sum.
\begin{figure}
\centering
\subfloat[Logical plan\label{fig:triangle-logical-plan}]{\includesvg[width=0.4\textwidth]{svg/triangle-logical-plan}}
\subfloat[Physical Plan\label{fig:triangle-physical-plan}]{\includesvg[width=0.6\textwidth]{svg/triangle-physical-plan}}
\caption{Logical and physical plan for the triangle count query as generated by Catalyst.}
\end{figure}
The \textit{logical optimization phase} applies batches of rewriting rules until a fixpoint is reached.
A simple example of a logical optimization would be rewriting $2 + 2$ into $4$.
In the running example of the triangle query, this phase pushes the filters into the two joins.
This optimization is called Filter Pushdown.
It is efficient because it applies filters earlier within the pipeline reducing the number tuples to process by later operators.
From the \textit{optimized logical plan} the optimizer generates one or multiple \textit{physical plans} by
applying so called \textit{Strategies}.
They translate a logical operator in one or multiple \textit{physical operators}.
\textit{Strategies} are also allowed to return multiple physical plans for a single \textit{logical plan}.
In this case, the optimizer selects the best one according to a \textit{cost model}.
The physical plan for the triangle query is shown in~\cref{fig:triangle-physical-plan}.
We see multiple examples of translation of a logical operator, which describes what to do, to its physical pendant that also
describes how to do it: the \textit{TableScan} becomes a \textit{CSVRead} and the \textit{Joins} are implemented as
\textit{BroadcastHashJoins}.
Furthermore, we see the introduction of exchanges.
\textit{BroadcastExchanges} precede the \textit{BroadcastHashJoins}.
They build a hashtable from their input operators and make them available as a broadcast variable to all executors of the cluster;
we explain broadcast variables in depth in~\cref{subsubsec:broadcast-variables}.
When an executor is tasked to execute the hash join operator, it acquires the broadcasted hashtable and executes a local hash join
of its assigned partitions.
Another exchange operator is introduced for the aggregation.
It is broken up into a partial aggregation directly after the last join, an exchange reorganizing all partial counts into a single
partition and a second aggregation over that partition to calculate the total count.
The last is a good example of Catalyst introducing a shuffle.
To summarize, the translation to a physical plan translates logical operators into concrete implementations of these and adds exchanges
to organize the data such that it can be processed independently in partitions.
After generating and choosing a physical plan, Catalyst enters the \textit{code generation} phase in which it generates
specific Java for some of the physical operators.
This code is fused together in a single loop, using a technique called data-centric query
compilation~\cite{neumann2011efficiently}.
This code executes often orders of magnitudes faster than interpreted versions of the same operator~\cite{spark-sql} because
it is specialized towards this particular query, e.g. if a join operates only on integers, code
generation can prune all code paths dealing with strings.
Indeed, the code generation phase is part of another Spark project called
\textit{Tungsten}~\cite{tungsten-project,tungsten-code-generation}.
In this thesis, we do not build any code generated physical operators.
Hence, we do not treat this topic in depth.
It is enough to know that all freshly generated Java code is wrapped into a single physical operator.
Therefore, it integrates seamlessly with interpreted operators.
Finally, Catalyst arrives at an optimized physical plan which implements the query.
The execution of this plan is called
\textit{structured query execution}~\cite{spark-internals-structured-query-execution}.
It translates the plan into RDD operations implemented by Spark's core.
Hence, the result of Catalysts query compilation is an RDD representing the query.
One should note that structured query execution does not materialize the query: the result is an RDD which is a non-materialized
representation of the operations necessary to generate the result.
In this thesis, we are not concerned with the internals of RDDs.
We do not need to introduce any new RDD operations or even touch Spark's core functionality.
Thanks to the extensibility of Catalyst, we can integrate worst-case optimal joins by adding one logical operator, multiple
physical operators and a Strategy to translate between them.
\subsubsection{Broadcast variables} \label{subsubsec:broadcast-variables}
\textit{Broadcast variables} are readonly variables which are accessible by all tasks.
They are initialized once by the driver program and should not be changed after initialization.
The process of broadcasting them is handled by Spark.
It is guaranteed that each broadcast variable is sent only once to each executor and allows it to be spilt to disk if it is not
possible to keep the whole value in memory.
Furthermore, `Spark attempts to distribute broadcast variables using efficient broadcast algorithms to reduce communication
costs'~\cite{rdd-programming-guide}; currently Spark uses a BitTorrent-like communication protocol\footnote{See Spark sources:
\texttt{org.apache.spark.broadcast.TorrentBroadcast}}.
Once sent, they are cached once per executor~(see also~\cref{subsubsec:spark-architecture}) and shared by all tasks on this executor.
They are cached in deserialized form in memory but can be spilt to disk if they are too big.
In this thesis, we use broadcast variables to cache the edge relationship of the graph on all workers.
\subsection{Worst-case optimal join algorithm}\label{subsec:worst-case-optimal-join-algorithm}
The development of worst-case optimal joins started in 2008 with the discovery that the output size of a relational query
is bound by the fractional edge number of its underlying hypergraph~\cite{agm}.
In short, this bound proves that traditional, binary join plans perform asymptotically worse than theoretically possible
for the worst-case database instances, e.g. heavily skewed instances.
For example, the worst-case runtime of binary joins on the triangle query is in $\mathcal{O} (N^2)$, while the AGM bound
shows the possibility to solve it in $\mathcal{O} (N^{3/2})$.
The AGM bound has been treated widely in literature~\cite{skew-strikes-back,andreas,agm}.
A particular good explanation is given by Hung Ngo et al in~\cite{skew-strikes-back}.
We refer the reader to these papers for further information.
In the next paragraph, we discuss different algorithms whose worst-case complexity matches the AGM bound which are called worst-case
optimal joins.
In 2012, Ngo, Porat, Re and Rudra published the first join algorithm matching the AGM bound, called \textit{NPRR} join~\cite{nprr}.
In the same year, Veldhuizen proved that the algorithm Leapfrog Triejoin used in LogicBlox,
a database system developed by his company, is also worst-case optimal with regards to the fractional edge number bound.
We often abbreviate Leapfrog Triejoin to \textsc{LFJT}.
Both algorithms have been shown to be instances of a single algorithm, the \textit{Generic Join}, in 2013 by Ngo et al.~\cite{skew-strikes-back}.
Three worst-case optimal join algorithms are known in the literature.
We choose Leapfrog Triejoin as the basis for our work.
The argumentation for this decision is given below.
First, we identify the main criteria for this choice.
Then, we use them to compare the different algorithms.
The most important argument for our decision is the degree to which the algorithm has been shown
to be of practical use.
In particular, the number of systems it is used in and openly available data on its performance.
If an algorithm is used in academia as well as in industry, we deem this as an advantage.
This criterion carries a lot of weight because the first literature on worst-case optimal joins
has been rather theoretical but in our work, we take a more practical and system-oriented perspective.
The practical character of our work also motivates the second dimension which we compare the algorithms in, namely
ease of implementation.
If two of the three algorithms both have well-proven performance, we would like to choose the algorithm
that takes less time to implement and is easier to adapt and experiment with.
That is, to be able to spend more time on evaluation and optimizations for the graph use-case, instead of,
time spent on replicating existing work.
The Leapfrog Triejoin is used in two commercial database solutions:
LogicBlox~\cite{logicBlox} and RelationalAI\footnote{https://www.relational.ai/}.
Its performance has been reported on in two publications~\cite{myria-detailed,olddog}.
In particular, it beats various general and graph specific databases for graph pattern matching, i.e.
PostgresSQL, MonetDB, neo4j, graphLab and Virtuoso~\cite{olddog}.
The broadest study of its performance uses 15 different datasets and 7 queries~\cite{olddog}.
We conclude that the performance of \textsc{LFTJ} is well established by peer-reviewed publications
as well as industrial usage.
The \textit{NPRR} algorithm has been well analyzed from the theoretical point of view.
However, we are not able to find any openly available sources with performance measurements.
This disqualifies \textsc{NPRR} as the basis for our thesis.
The \textit{Generic Join} is used in at least three academic graph processing engines,
namely GraphFlow~\cite{graphflow}, EmptyHeaded~\cite{emptyheaded} and a unnamed implementation in
Timely Dataflow~\cite{ammar2018distributed}.
All three show good performance.
However, we are not aware of any commercial systems using \textsc{GJ}.
The comparision of Leapfrog Triejoin, \textsc{NPRR} and \textit{Generic Join} by proven performance
rules out \textsc{NPRR} and puts \textsc{LFTJ} and \textsc{GJ} on a similar level.
Next, we compare these two algorithm in ease of implementation.
The description of the Leapfrog Triejoin implementation in its original paper~\cite{lftj} is excellent.
Furthermore, multiple open source implementation exists~\cite{leapfrog-triejoin-schroeder,myria-detailed}.
In particular, Christian Schroeder's implementation for a course at Oxford is helpful because it is standalone
and
does not require us to understand a whole system\footnote{https://github.com/schroederdewitt/leapfrog-triejoin}.
\textit{Generic Join} is described as a generalization of \textsc{NPRR} and Leapfrog Triejoin in its original
paper~\cite{skew-strikes-back}.
Although, well written and algorithmically clear, this explanation is much less practical than the one given for \textsc{LFTJ} which
is backed by an executable implementation.
To conclude, we choose Leapfrog Triejoin as the basis for our work based on its openly available records of performance, use in
academia as well as industrial systems and good description for direct implementation.
Furthermore, the Database research group of CWI (where this thesis was performed)
has an ongoing collaboration with the inventors of \textsc{LFTJ} which gives us access to valuable
expertise if necessary.
\subsubsection{Leapfrog Triejoin} \label{subsubsec:leapfrog-triejoin}
In this section, we describe the Leapfrog Triejoin algorithm.
In the next paragraph, we give the high-level idea behind the algorithm and some of its requirements.
Then we discuss the kind of queries that can be answered with it.
The main part of the section discusses the conceptual algorithm itself.
We finish with a short discussion of two implementation problems, namely the data structure to represent the input relationships and
the problem of choosing a good variable ordering.
The Leapfrog Triejoin is a variable-oriented join.
Given an input query, it requires a variable ordering.
For example, in the triangle query $triangles(a, b, c) \leftarrow R(a, b), S(b, c), T(a, c)$,
the variable ordering could be $a, b, c$.
Furthermore, the Leapfrog Triejoin requires its input relationships to be sorted by lexicographic, ascending order over the given
variable ordering, e.g. $R$ needs to be sorted by primarily by $a$ and secondary by $b$ given the variable ordering $a, b, c$.
The algorithm is variable-oriented because it fixes one possible binding for $a$, one for $b$ given $a$ and finally one for $c$ given $a$
and $b$.
This allows it to enumerate the result of the join query without intermediary results.
The process can be thought of as a backtracking, depth-first search for possible bindings.
The algorithms implemented in this thesis can process joins of the full conjunctive fragment of first-order logic or conjunctive
equijoin in relational algebra terms.
Possible extentsions to disjunctions, ranges (non-equi joins), negation, projection, functions and scalar operations on join variables are
explained in the original Leapfrog Triejoin paper~\cite{lftj}.
However, they are not relevant to the core of this work because many interesting graph patterns can be answered using the full conjunctive
fragment, e.g. cliques or cycles.
The Leapfrog Triejoin algorithm uses three components which are composed in a layered fashion.
The concrete composition used for the triangle query is shown in~\cref{fig:lftj-layers}.
In this figure, we see three layers, each of them made of one or more instances of a component.
The components are the \textit{TrieIterator}, \textit{LeapfrogJoin} and \textit{LeapfrogTriejoin}.
In the next paragraphs, we explain each layer in order, starting with the lowest layer.
\begin{figure}
\includesvg[width=\textwidth]{svg/lftj-layers}
\caption{
The three layers of the Leapfrog Triejoin algorithm.
The configuration for a triangle query is shown: three \textit{TrieIterators} one
per input relationship, three \textit{LeapfrogJoins} one per variable and
one \textit{LeapfrogTriejoin} component are neccessary.
The arrows indicate that a component uses another.
The \textit{LeapfrogTriejoin} uses all other components but only the vertical part of the \textit{TrieIterators} (dashed arrows).
The \textit{LeapfrogJoins} uses the linear part of two \textit{TrieIterators} each.
}
\label{fig:lftj-layers}
\end{figure}
The lowest layer is made of one \textit{TrieIterator} per input relationship.
In our example, we have three instances one for R, S and T each.
The \textit{TrieIterator} interface represents the input relationship as a trie with all values for
the first attribute on the first level, the values for the second attribute on the second level and so
on; an example for this is shown in~\cref{fig:trie-example}.
\begin{figure}
\centering
\includesvg[height=5cm]{svg/trie}
\caption{A 3-ary relationship as table (left) and trie (right), to position the iterator at the tuple (1, 1, 5) one
calls \textit{open} twice, \textit{key} returns now 5, after a call to \textit{next}, \textit{key} returns 6 and \textit{up}
would lead to \textit{key} returning 1.}
\label{fig:trie-example}
\end{figure}
The trie contains one level per attribute of the relationship;
in the case of the triangle query, there are two levels: one for $a$ and one for $b$.
Each level is made of all possible values for its attribute.
All tuples of the relationship can be enumerated by a depth-first traversal of the trie.
The \textit{TrieIterator} component offers six methods shown in~\cref{table:lftj-interfaces}.
The \textit{open} and \textit{up} methods control the level the iterator is positioned at;
\textit{open} moves it one level down and \textit{up} moves it one level up.
Additionally, \textit{open} places the iterator at the first value for the next level and the \textit{up}
method returns to the value of the upper level that was current when the deeper level was opened.
We call these two methods the vertical component of the \textit{TrieIterator} interface.
\begin{table}
\centering
\begin{tabular}{@{}ll@{}}
\toprule
Method & required complexity \\
\midrule
\textit{\textbf{TrieIterator}} & \\
int key() & $\mathcal{O}(1)$ \\
bool atEnd() & $\mathcal{O}(1)$ \\
void up() & $\mathcal{O}(\log_N)$ \\
void open() & $\mathcal{O}(\log_N)$ \\
void next() & $\mathcal{O}(\log_N)$ \\
void seek(key) & $\mathcal{O}(\log_N)$ \\\midrule
\textit{\textbf{LeapfrogJoin}} & \\
int key() & $\mathcal{O}(1)$ \\
bool atEnd() & $\mathcal{O}(1)$ \\
void init() & $\mathcal{O}(\log_N)$ \\
void next() & $\mathcal{O}(\log_N)$ \\
void seek(key) & $\mathcal{O}(\log_N)$ \\
\bottomrule
\end{tabular}
\caption{
The interfaces of \textit{TrieIterator} and \textit{LeapfrogJoin} with required complexity.
$N$ is the size of relationship represented by the iterator.
}
\label{table:lftj-interfaces}
\end{table}
The other four methods are called linear component.
All of them operate on the current level of the \textit{TrieIterator}.
The \textit{key} function returns the current key (a single integer).
The \textit{next} method moves the iterator to the next key on the same level.
The \textit{seek(key)} operation finds the least upper bound for its parameter \textit{key}.
Finally, the \textit{atEnd} method returns \textit{true} when the iterator is placed behind the last value of the current level.
The middle layer of the Leapfrog Triejoin is made of one \textit{LeapfrogJoin} per variable in the join.
This join generates possible bindings for its variable by intersecting the possible values for all input relationship containing the
variable.
Therefore, it operates on the linear component of all \textit{TrieIterators} of relationships with this variable.
\Cref{fig:lftj-layers} for the triangle query shows three \textit{LeapfrogJoin} instances (for $a, b$ and $c$);
each of them uses two \textit{TrieIterators}.
The \textit{LeapfrogJoin} interface has five methods shown in~\cref{table:lftj-interfaces}, with their required asymptotical
performance.
In the following paragraphs, we explain each of them.
In short, the join offers an iterator interface over the intersection of its input iterators.
This intersection is found by repeatedly seeking the value of the largest input iterator in the smallest input iterator.
This process resembles a frog taking a leap which gives the join its name.
When all iterators point to the same value leapfrogging stops and the value is emitted as part of the intersection.
The \textit{init} operation sorts the input iterator by their current key and finds the first value of the intersection.
To find the first value it uses the private method \textit{leapfrogSearch} which is the work-horse of the whole join.
The algorithm of this method is shown in~\cref{alg:leapfrogSearch}.
This method loops the process of calling the \textit{seek} method of its smallest input iterator with the key of the largest input
iterator until the smallest and the largest (and therefore all iterators) point to the same value.
\begin{algorithm}
\KwData{\\
iters \textit{sorted array of TrieIterators} \\
p \textit{index of the smallest iterator}
}
\KwResult{\textit{Either} atEnd \textit{is true or} key \textit{is set to next key of intersection}}
maxKey $\gets$ iters[p \% iters.length].key()\;
\While{\upshape iters[p].key() $\neq$ maxKey}{
iters[p].seek(maxKey)\;
\eIf{\upshape iters[p].atEnd()} {
atEnd $\leftarrow$ true\;
\Return\;
} {
maxKey $\leftarrow$ iters[p].key()\;
p $\leftarrow$ (p + 1) \% iters.length\;
}
}
key $\leftarrow$ iters[p].key()
\caption{leapfrogSearch()}
\label{alg:leapfrogSearch}
\end{algorithm}
The \textit{leapfrogNext} method moves the join to its next value.
Internally, it uses the \textit{next} function of its smallest iterator and then \textit{leapfrogSearch}.
The operation \textit{leapFrogSeek(key)} first uses the \textit{seek} method of the smallest input iterator to forward it to \textit{key};
then it uses \textit{leapfrogSearch} to either verify that this key is available in all iterators (hence in the intersection) or
to find the upper bound of this key.
Finally, the functions \textit{key} and \textit{atEnd} return the current key or if the intersection is complete respectively.
The last layer of the whole algorithm is a single \textit{LeapfrogTriejoin} instance.
It interacts with both lower layers to enumerate all possible bindings for the join.
For this, it acquires one binding for the first variable from the corresponding \textit{LeapfrogJoin}.
Then it moves the \textit{TrieIterators} containing this variable to the next level and
finds a binding for the second variable using the next \textit{LeapfrogJoin}.
This process continues until all variables are bound and a tuple representing this binding is emitted
by the join operator.
Then it finds the next possible binding by backtracking.
\Cref{alg:leapfrogTrieJoin-state-machine} shows the backtracking depth-first traversal.
This traversal needs to stop each time when a complete tuple has been found to support the iterator interface of the join.
Therefore, it is implemented as a state-machine which stops each time the deepest level is reached and all variables are bound
(loop condition in line 33). % LINE
The next action of the state machine is determined by the outcome of the current action.
Hence, we can characterize the state machine by describing each possible action and its possible outcomes.
There are three possible actions: \textit{next}, \textit{down} and \textit{up}.
We summarize the possible actions, conditions for the next action and if the main loop of the state machine yields the next tuple
in~\cref{table:lftj-state-machine} and describe each action below.
\begin{algorithm}
\KwData{
depth \textit{the index of the variable to find a binding for, ranges from -1 to \#variables - 1}\\
bindings \textit{array holding the current variable bindings or $-1$ for no binding} \\
MAX\_DEPTH \textit{the number of variables - 1} \\
action \textit{state of the state machine}
}
\Repeat{\upshape $\neg$ (depth = MAX\_DEPTH $\and$ bindings[MAX\_DEPTH] $\neq$ -1) $\lor$ atEnd} {
\Switch{\upshape action} {
\Case{\upshape NEXT} {
leapfrogJoins[depth].leapfrogNext() \; \label{line:lftj-leapfrog-next}
\eIf{\upshape leapfrogJoins[depth].atEnd()} {
action $\leftarrow$ UP \; \label{line:lftj-next-up}
} {
bindings(depth) $\leftarrow$ leapfrogJoins[depth].key() \; \label{line:lftj-next-down-or-next}
\eIf{\upshape depth == MAX\_DEPTH} {
action $\leftarrow$ NEXT
} {
action $\leftarrow$ DOWN
}
}
}
\Case{\upshape DOWN} {
depth $\leftarrow$ depth + 1 \;
trieJoinOpen() \; \label{line:lftj-trieJoinOpen}
\eIf{\upshape leapfrogJoins[depth].atEnd()} {
action $\leftarrow$ UP \; \label{line:lftj-down-up}
} {
bindings(depth) $\leftarrow$ leapfrogJoins[depth].key() \;
\eIf{\upshape depth = MAX\_DEPTH} {
action $\leftarrow$ NEXT \label{line:lftj-down-next}
} {
action $\leftarrow$ DOWN
}
}
}
\Case{\upshape UP} {
\eIf{\upshape depth = 0} { \label{line:lftj-atEnd}
atEnd $\leftarrow$ true \;
} {
depth $\leftarrow$ depth - 1 \;
trieJoinUp() \; \label{line:lftj-trieJoinUp}
\eIf{\upshape leapfrogJoins[depth].atEnd()} { \label{line:lftj-up-up-next}
action $\leftarrow$ UP \;
} {
action $\leftarrow$ NEXT \;
}
}
}
}
}
\caption{LeapfrogTrieJoin state machine.
\textit{trieJoinUp} and \textit{trieJoinOpen} move all \textit{TrieIterators} that involve
the current variable a level up respectively down.
}
\label{alg:leapfrogTrieJoin-state-machine}
\end{algorithm}
The \textit{next} action moves the \textit{LeapfrogJoin} at the current depth to the next possible binding for its variable
(line~\ref{line:lftj-leapfrog-next}).
If the \textit{LeapfrogJoin} reached its end, we continue with the \textit{up} action (line~\ref{line:lftj-next-up}),
otherwise we set the binding and continue by another \textit{next} action, if we are at the deepest level or by moving
to the next deeper level by the \textit{down} action (line~\ref{line:lftj-next-down-or-next}ff).
The \textit{down} action moves to the next variable in the global variable ordering by opening all related \textit{TrieIterators}
and initializing the corresponding \textit{LeapfrogJoin} (line~\ref{line:lftj-trieJoinOpen} call to \textit{trieJoinOpen}).
A \textit{down} can be followed by an \textit{up} if the \textit{LeapfrogJoin} is \textit{atEnd} (line~\ref{line:lftj-down-up}),
by a \textit{next} action if the trie join is at its lowest level (line~\ref{line:lftj-down-next}), or by another \textit{down} action to
reach the deepest level.
The \textit{up} action can signal the completion of the join if all bindings for the first variable in the global ordering have
been explored, or in other words, the first \textit{LeapfrogJoin} is \textit{atEnd} (condition
\textit{depth == 0 $\wedge$ action == UP} line~\ref{line:lftj-atEnd}).
Otherwise, all \textit{TrieIterators} corresponding to the current variable are moved upwards by calling \textit{triejoinUp}
(line~\ref{line:lftj-trieJoinUp}) which also updates \textit{depth} and \textit{bindings}.
Then, this action is followed by another \textit{up} or a \textit{next} depending on \textit{atEnd} of the current \textit{LeapfrogJoin}
(lines~\ref{line:lftj-up-up-next}).
\begin{table}[]
\centering
\begin{tabular}{@{}llll@{}}
\toprule
Action & Condition & Next action & Yields \\ \midrule
\multirow{3}{*}{NEXT} & \textit{lf.atEnd} & UP & no \\
& \textit{$\neg$lf.atEnd} $\wedge$ \textit{reachedMaxDepth} & NEXT & yes \\
& \textit{$\neg$lf.atEnd} $\wedge$ \textit{$\neg$reachedMaxDepth} & DOWN & no \\
& & &\\
\multirow{3}{*}{DOWN} & \textit{lf.atEnd} & UP & no \\
& \textit{$\neg$lf.atEnd} $\wedge$ \textit{reachedMaxDepth} & NEXT & yes \\
& \textit{$\neg$lf.atEnd} $\wedge$ \textit{$\neg$reachedMaxDepth} & DOWN & no \\
& & &\\
\multirow{3}{*}{UP} & \textit{depth = 0}, means highest \textit{lf.atEnd} is true & -- (done) & yes \\
& \textit{lf.atEnd} & UP & no \\
& \textit{$\neg$lf.atEnd} & NEXT & no \\ \bottomrule
\end{tabular}
\caption{Summary of actions, conditions for the following action and if a complete tuple has been found.
\textit{reachedMaxDepth} is true if we currently find bindings for the last variable in the global order.
\textit{lf} abbreviates the \textit{LeapfrogJoin} of the current variable.
The columns \textit{Yields} details if the main loop of the state machine yields before computing the next action,
this is the case, when all variables have been bound.
}
\label{table:lftj-state-machine}
\end{table}
\paragraph{TrieIterator implementation, backing data structure}
While we can implement the \textit{LeapfrogJoin} and \textit{LeapfrogTriejoin} component of the Leapfrog Triejoin from the
the algorithmic description given above, we are missing some details for a concrete implementation of the
\textit{TrieIterator} interface.
Mainly, we need to decide for a data structure to back the \textit{TrieIterator}.
We choose to use sorted arrays as described in~\cite{myria-detailed}.
One array is used per column of the input relationship and binary search on these arrays allows us to implement the \textit{TrieIterator}
interface with the required asymptotic complexities (see~\cref{table:lftj-interfaces}).
%We briefly compare a B-tree v.s. array-based implementation of
%the LFTJ API. The main API function is seek(v), which fetches the next value v’ of the current attribute Ai s.t. v’ > v: in a B-tree this can be computed in amortized time O(1), while our implementation uses a binary search on the remaining part of the array at a cost per operation of O(log n). Thus, TJ is at most a factor log n slower than LFTJ, and, in particular, it is also worst-case optimal (up to log n). In practice, the dominating cost of TJ is given by the sorting phase (which, as explained, is unavoidable), hence our choice to use a sorted array instead of a B-tree, because sorting is cheaper than computing a B-tree.
\paragraph{Variable ordering}
Finding a good variable ordering for the \textsc{LFTJ} is an interesting research problem in itself.
We are aware of two existing approaches.
The first is to create and maintain representative samples for each input relationship and determine the best order based on runs over
these samples.
This has been implemented in LogicBlox, the first system to use Leapfrog Triejoins~\cite{logicBlox}.
To the best of our knowledge, the exact method of creating the representative samples has not been published.
The second approach is described in great detail by Mhedhbi and Salihoglu in~\cite{mhedhbi2019}.
Its has been implemented in their research graph database Graphflow~\cite{graphflow}.
They define a novel cost-metric for \textsc{WCOJ}s which estimates the costs incurred by constructing the intersections of adjacency
lists.
The metric takes three factors into account.
First, the size of the adjacency lists.
Second, the number of intermediate matches.
The concept of intermediate matches is best understood by a simple example;
we see the tailed-triangle query in~\cref{fig:tailed-triangle}.
Two very different vertex ordering categories exist for this query.
The ones that start on $v_4$ and find all 2-paths of the graph;
and vertex orderings that start with $v_1, v_2, v_3$ in any order which closes the triangle first.
Clearly, there are more 2-paths in any graph than triangles.
Hence, the second category produces far less intermediate matches.
\begin{figure}
\centering
\includesvg[width=0.2\textwidth]{svg/tailed-triangle}
\caption{The tailed triangle; an example for the cost of intermediate matches.}
\label{fig:tailed-triangle}
\end{figure}
Finally, they implement an intersection cache in their system which takes advantage of the fact that some queries can reuse already
constructed intersections.
So, the last factor taken into account by their cost metrics is the usage of this intersection cache.
They use the described cost metric, a dynamic programming approach to enumerate possible plans and a catalogue of sampled subgraph
instances containing the sizes of adjacency lists to intersect and produced intermediate results to estimate the costs for all
variable orderings.
Moreover, they implement the ability to change the query ordering adaptively during query execution based on the real adjacency list sizes
and intermediate results.
They show that adaptive planning can improve the performance of many plans.
Furthermore, it makes the query optimizer more robust against choosing bad orderings.
The work of Mhedhbi et al. is the most comprehensive study on query vertex orderings for \textsc{WCOJs} currently available;
they introduce a cost metric, a query optimizer to use this metric and prove that it is possible and beneficial to compute parts of
the results using a different variable order.
In our work, we do not implement an automatic process to choose the best variable order.
The order we choose is based on experiments with different orders and intuition of the author.
Integrating the approach of LogixBlox would be possible but require the implementer to find a good sampling strategy because no details
are openly available.
The approach of Mhedhbi and Salihoglu is much better documented but also more complex.
It consists of four contributions which build upon each other but could be useful on their own.
The cost metric described in their paper applies to our system as well and could be used.
They use this metric for cost estimation in connection with a non-trivial subgraph catalogue.
The main challenge in integrating this way of cost estimation with our system is to elegantly integrate catalogue creation in Spark.
Their solution for adaptive variable orderings is helpful because it proves that this technique is beneficial;
they also publish performance measurements, so the impact can be evaluated.
However, their system employs a \textit{Generic Join} while we use a Leapfrog Triejoin.
The integration of adaptive variable orderings into Leapfrog Triejoin is not trivial and it is likely that their implementation is not
directly applicable.
Finally, they introduce an intersection cache to make use of repeatedly used intersections.
This can be directly applied to our system, e.g. using the decorator pattern around \textit{LeapfrogJoins}.
We note that they only cache the last, full n-way-intersection of multiple adjacency lists.
It would be interesting to research if the system would benefit from caching partial n-way intersections as well because
we noticed that for some queries, e.g. 5-clique, the intersection between the first two lists can be reused more often than the full
intersection.
This opens the interesting question in which order we should intersect the lists.
We conclude that two concepts to choose a good variable ordering exist which are both (partially) applicable to our system.
The LogixBlox approach is simpler and directly integratable but not well documented.
The solution used in GraphFlow is far more complex and developed for another \textsc{WCOJ}.
However, the paper describes it in great detail and parts of it could be integrated directly, while others need some engineering effort or
need to be redesigned completely.
% Additional ideas.
%===================
% Queries: olddog, Semih's paper,
% Mostly for graphs
% background over "typical" graph pattern matching queries needed
% olddog good introduction about binary joins vs wcoj for graph pattern matching
% background about graph pattern matching needed
% WCOJ against graph engines (oldog)
% Comparision with other systems: oldog, graphlab, virtuoso, monetdb, pssql, neo4j
% comparision, intermediate results
% intuitive understanding of why better?
% background on binary join operators?
% Codegeneration studied by RelationalAi
% Compression studied by Richard
% OLD
%====
%We implement the Leapfrog Triejoin~\cite{leapfrog} as our general sequential version of a WCOJ.
%However, instead of using B-Trees as a backing data structure, we use sorted arrays and a binary
%search, which has been described in~\cite{myria-detailed} and is called
%Tributary join in their paper.
%Our Leapfrog Triejoin is implemented in three components which we explain in order below: \textit{LeapfrogJoin}, \textit{ArrayTrieIterable} and
%\textit{LeapfrogTriejoin}.
%
%The Leapfrog join is a variant of the sort-merge join for unary relationships, originally described in~\cite{leapfrog1,leapfrog2}.
%To join $k$ unary relations $A_1(x)$, $A_2(x)$, \dots, $A_k(x)$ it takes one iterator per input relations and offers an iterator
%interface that yields the intersection of all relations.
%It requires that it's input iterators offer a \textit{key} method in $\mathcal{O}(1)$, a \textit{next} method and
%a \textit{leastUpperBound(key: Int)} both in $\mathcal{O} (\log n)$ ($n$ defined as the size of the input relationship).
%\textit{leastUpperBound} moves the iterator to the first position of the sought \textit{key} or the first position of the
%next higher value.
%An idiotmatic implementation of a Leapfrog join is shown in~\cref{lst:leapfrog-join}, for the optimized implementation see
%\texttt{leapfrogTriejoin.LeapfrogJoin} in our repository. % CODEREF
%\begin{listing}[H]
% \inputminted[linenos=true]{scala}{code/LeapfrogJoin.scala}
% \caption{Leapfrog join.}
% \label{lst:leapfrog-join}
%\end{listing}
%To support j-arity relations, $A(a_1, a_2, \dots, a_j)$ we add two methods to the iterator interface that represents the input
%relationships: \textit{up} and \textit{open}; both are required to work in $\mathcal{O} (\log n)$.
%We call this new iterator Trieiterator because it represents the relationship as a trie, see~\cref{fig:trie-example}.
%The implementation of a Trieiterator backed by a columnwise representation of the relation using one array
%per column is straight forward, we outline the basic ideas here and refer the interested reader to
%\texttt{leapfrogTriejoin.ArrayTrieIterable.TrieIteratorImpl} in our repository for further details. % CODEREF
%It helps to think about the Trieiterator as consisting out of a linear component, containing the functions
%\textit{key}, \textit{next} and \textit{leastUpperBound}, and horizontal component, made off the functions \textit{up} and \textit{open},
%to move the linear component from one trie level to another.
%
%First, we explain the horizontal component.
%They keep track of the current \textit{level} of the Trieiterator and the \textit{startPosition} and \textit{endPosition}
%for in the column, e.g. in~\cref{fig:trie-example} when the current \textit{level} is 1 (or x), the key equals 4, the
%\textit{startPosition} is 2 and
%the \textit{endPosition} is 5 because the value 4 occurs 3 times.
%With these bookkeeping variables, updated by \textit{up} and \textit{open}, one can implement the linear part by
%a binary search over the current column (given by \textit{level}) which is limited to \textit{startPosition} and \textit{endPosition}.
%
%\begin{figure}
% \centering
% \includesvg[height=5cm]{trie}
% \caption{A 3-ary relationship as table (left) and trie (right), to position the iterator at the tuple (1, 1, 5) one
% calls \textit{open} twice, \textit{key} returns now 5, after a call to \textit{next}, \textit{key} returns 6 and \textit{up}
% would lead to \textit{key} returning 1.}
% \label{fig:trie-example}
%\end{figure}
%The Leapfrog Triejoin combines TrieIterators and Leapfrog joins to join $k$ relationships of arbitrary arity.
%Its input is one Trieiterator per relationship, with these it builds one Leapfrog join per attribute which
%receives references to all Trieiterator of relationships containing the attribute, e.g. for the triangle query
%\textit{R(a, b), S(b, c), T(a, c)} the Leapfrog Triejoin receives three Trieiterator, for \textit{R}, \textit{S} and
%\textit{T},
%and builds three Leapfrog joins, for $a$, $b$, $c$, which receive references to two Trieiterators each.
%To generate the join result the Leapfrog Triejoin operates the horizontal components of the Trieiterators directly and
%uses the Leapfrog joins to operate the linear component.
%We show an idiomatic implementation of a Leapfrog Triejoin in~\cref{lst:leapfrog-triejoin} and~\ref{lst:leapfrog-triejoin-helpers}, a
%performance oriented
%implementation can be found in our repository in \texttt{leapfrogTriejoin.LeapfrogTriejoin}.
%These listings contain two important functions: the initialization function from line~\ref{line:lftjInitStart} to
%line~\ref{line:lftjInitEnd} % LINE
%and the \textit{moveToNextTuple} function at line~\ref{line:moveToNextTuple}. % LINE
%We go through these functions in order.
%The initializer gets two arguments: a mapping from variables to \textit{TrieIterators} (each \textit{TrieIterator} belongs to
%the list of attributes of its relationship) and the global variable ordering as a sequence of \textit{Strings}.
%First, it creates one \textit{LeapfrogJoin} per variable (line 5) which receives references % LINE
%to each \textit{TrieIterator} operating on a relationship with this attribute.
%Then it builds a mapping from variables to all \textit{TrieIterators} acting on a relationship with an attribute of the same name (line
%8). % LINE
%Finally, it initializes \textit{maxDepth}, \textit{action}, \textit{depth}, \textit{bindings} and \textit{atEnd} (line 10 to line 14). %
%LINES
%\textit{depth} and \textit{bindings} are an internal variable storing the index of the variable to bind currently and the
%current bindings for all variables up to \textit{depth}.
%\textit{atEnd} signals that the join has been completed to the client.
%\begin{listing}[H]
% \inputminted[mathescape, linenos=true]{scala}{code/LeapfrogTriejoinIdiomatic.scala}
% \caption{Shows the main methods of \textit{LeapfrogTriejoin}, the initializer and \textit{moveToNextTuple} functionality
% helper methods are detailed in~\cref{lst:leapfrog-triejoin-helpers}.}
% \label{lst:leapfrog-triejoin}
%\end{listing}
%
%\begin{listing}[H]
% \inputminted[linenos=true]{scala}{code/LeapfrogTriejoinHelpers.scala}
% \caption{\textit{LeapfrogTriejoin} helpers.}
% \label{lst:leapfrog-triejoin-helpers}
%\end{listing}
\subsection{Distributed worst-case optimal join in Myria} \label{subsec:myria}
In 2014, a Leapfrog Triejoin variant, dubbed Tributary Join, was used as a distributed join algorithm on a shared-nothing architecture called Myria~\cite{myria-detailed}.
They use Tributary Join as a local, serial worst-case optimal join algorithm, combined with the Hypercube shuffle algorithm, also called
\textit{Shares}, to partition the data between their machines~\cite{shares}.
The combination of a shuffle algorithm with a \textsc{WCOJ} allows them to distribute an unchanged serial worst-case optimal join
version by running it only on a subset of the data on each worker.
This approach is directly applicable to Spark.
We could implement a hypercube shuffle for Spark and then choose any \textsc{WCOJ} to run on each partition.
However, it is not obvious how well this approach scales because Shares replicates many of its input tuples~\cite{myria-detailed}.
The experiments on Myria indicate that the combination of Hypercube shuffles and Tributary Join does not scale well.
They report a speedup of 8 on 64 workers compared to the time it takes on 2 nodes, which, although unlikely to be optimal, is not
investigated in great detail.
Therefore, we decided to analyse the expected scaling behaviour of Shares for graph pattern matching.
Our main concern is that the number of duplicated tuples in the system increases with the query size (number of vertices) and
with the number of workers added to the system.
We provide a theoretical analysis of the number of duplicated tuples for different query sizes and available workers in a later section
of this thesis (\ref{subsubsec:shares}).
As a result of this investigation, we decided to not physically partition our data but to duplicate it to all workers and seek for
alternative
strategy to parallelize a worst-case optimal join.
To conclude, the implementation in Myria and in particular the Shares algorithm is the starting point of this thesis.
We see it as our baseline for a distributed \textsc{WCOJ} implementation.
In the coming section, we explain Shares in detail.
We note that an implementation of a distributed worst-case optimal join on Timely Dataflow exists.
However, it is not applicable to Spark.
Therefore, we treat it in the related work section (\ref{subsec:wcoj-timely-data-flow}) of this thesis.
\subsubsection{Shares}\label{subsubsec:shares}
Shares partitions the input relationships for a multi-way join over worker nodes, such that, all tuples, which could be joined,
end up on the same worker in a single shuffle round.
Hence, it allows running any multi-way join algorithm locally after one shuffle round.
The output of the join is the union of all local results.
The idea is to organize all workers in a logical hypercube, such that each worker can be addressed by its hypercube coordinate.
Then it is straightforward to find a mapping from the attribute values of a tuple to these coordinates so that joinable tuples
arrive at the same worker after one shuffle.
We first explain how to organize the workers in a hypercube and then how to map tuples to these workers.
Next, we treat the problem of choosing a good hypercube configuration.
Followed, by a summary about the optimality of Shares.
Finally, we provide an analysis of the scaling of Shares for graph pattern matching.
A hypercube is characterized by the number of dimensions and the size of each dimension.
\Cref{fig:hypercube} shows a hypercube with three dimensions labelled $a, b$ and $c$.
They have the size of 3, 2 and 2 for $a, b$ and $c$ respectively.
It is a possible configuration for the triangle query $triangle(a, b, c) \leftarrow R(a, b), S(b, c), T(a, c)$ with
12 workers.
\begin{figure}
\centering
\subfloat{\includegraphics[width=0.4\textwidth,valign=c]{figures/hypercube-example-table.png}}
\hspace{0.04\textwidth}
\subfloat{\includegraphics[width=0.55\textwidth,valign=c]{figures/hypercube-example.png}}
\caption{
Left: Three aliases of an edge relationship with one triangle.
The participating tuples are marked in red, green and blue.
Their hypercube coordinates are shown below.
Right: Example of a Shares hypercube configuration for the triangle query for 12 workers with three attributes/dimensions of the sizes
3, 2, 2.
The tuples marked in red, green and blue end up on the workers with red, green and blue rhombs respectively.
}
\label{fig:hypercube}
\end{figure}
Given an input query, Shares builds a hypercube with one dimension per variable in the input.
It then chooses the size of each dimension, such that the product is smaller than the number of workers.
We call $v$ the numbers of variables in the query and $p_0, \dots, p_v$ the sizes of each dimension.
This allows us to address each worker with a coordinate of the form $(0..p_0, \dots, 0..p_v)$.
If the product of all dimension sizes is smaller than the number of workers, additional workers are not used.
The process of finding the best sizes for the dimensions depends on the input query and the input relationships.
We discuss it in a later paragraph of this section.
With this topology in mind, it is straightforward to find a partitioning for all tuples from all relationships such that tuples that
could join are sent to the same worker.
We choose a hash function for each join variable $a$ which maps its values in the range of $[0..p_a]$.
Then each worker determines where to send the tuples it holds by hashing its values.
This results in a coordinate in the hypercube which is fixed for all join variables, which occur in the tuple, and unbounded for join
variables which do not occur in the tuple.
Then the tuple is sent to all workers with a matching coordinate.
\Cref{fig:hypercube} shows how tuples forming a triangle from three relationships are mapped to the workers.
The blue, green and red tuple in the relationships form a triangle.
The green and the red tuple are sent to 2 workers each and the blue tuple to three workers (marked with small rhombs).
They are sent to all workers along the axis where the coordinate is not determined by a value in the tuple.
We see that they all end up together on the worker with the coordinate (2, 0, 0).
This is where the triangle occurs in the output of the join.
\paragraph{Finding the best hypercube configuration}
The problem of finding the best hypercube configuration is to choose the sizes of its dimensions such that (1) the product of
all sizes is smaller than the number of available workers and (2) such that the number of tuples to a single worker is minimized.
(2) is backed by the assumption that the number of tuples is a good indicator for the amount of work;
this assumption is made in all papers discussing the problem~\cite{myria-detailed,shares-proof,shares}.
Therefore, we want to minimize the number of tuples on each single worker because the slowest worker determines the run-time.
Next, we discuss existing solutions and decide for one of them.
The original Shares paper proposes a non-convex solution which is hard to compute in practice~\cite{shares}.
Later, Beame et al. define a linear optimization problem which is solvable but leads to fractional hypercube
sizes~\cite{shares-proof}.
Hence, it is not possible to use their solution directly.
Rounding down would be an obvious solution but as discussed in~\cite{myria-detailed}, it can lead to highly suboptimal solutions, in
particular with low numbers of workers.
Hence, the paper further considers to use a higher number of \textit{virtual} workers and assign these to \textit{physical} workers
by a one-to-many mapping.
Anyhow, a higher number of workers lead to more replicated tuples.
Therefore, this solution does not scale well.
In the end, the paper that integrates Shares and the Tributary join in Myria suggests a practical solution.
They enumerate all integral hypercube configurations smaller or equal to the number of available workers.
For each configuration they estimate the number of assigned tuples, then they choose the configuration with the lowest estimated workload.
They use the following equation to estimate the workload, where $R$ are all relationships in the query, $var(r)$ gives the variables
occuring in the relationship $r$, $size(v)$ gives the hypercube size of the dimension for variable $v$.
\begin{equation} \label{eqn:estimated-workload}
workload = \sum_{r \in R}{|r| \times \frac{1}{\prod_{v \in var(r)}{size(v)}}}
\end{equation}
The term $\prod_{v \in var(r)}{size(v)}$ gives the numbers of workers that span the hyper-plain over which a relationship $r$ is
partitioned.
For example, in~\cref{fig:hypercube} the relationship $(a, b)$ is partitioned over the plain spanned by the dimensions $a$ and $b$ with
6 workers.
Each tuple in this relationship has a chance of $\frac{1}{6}$ to be assigned to any of these workers.
Hence, the workload caused by $(a, b)$ is $|(a, b)| \times \frac{1}{6}$.
The paper evaluates this strategy to assign hypercube configurations and finds that it is efficient and practical.
We choose to use the same solution for our work.
\paragraph{Shares is worst-case communication-optimal}
Shares, as described above, is shown to be worst-case optimal in its communication costs in MapReduce like systems for
n-ary joins using one shuffle round.
First, Beame et al. prove that the Shares scheme is optimal on databases without skew~\cite{shares-proof}. % skew in parallel
Later the same authors are able to give a proof that Shares is also an optimal algorithm for skewed databases if one knows the
\textit{heavy-hitter} tuples and splits the join into a skew free part and residual joins for the heavy hitters using different
hypercube configurations for each residual join~\cite{shares-skew-proof}. % worst-case
The implication of these proofs is that it is not possible to find a partitioning scheme for one shuffle round that replicates
less data than Shares.
This observation is central to our thesis because it is one argument to replicate the graph on all workers instead of using
a shuffle algorithm to partition it.
In the rest of this thesis, Shares refers to the original algorithm~\cite{shares} % Optimizing joins in MapReduce
and not the skew resilient variant \textit{SharesSkew}~\cite{shares-skew,shares-proof}.
This is mainly because even in the presence of skew the original Shares scheme offers good upper bounds, although it can not always match
the lowest bound possible~\cite{shares-skew}. % shares skew
But also because the skew resilient variant requires to know which tuples are \textit{heavy-hitters} (a definition of skew introducing
tuples).
Finally, while first experiments with SharesSkew exist~\cite{shares-skew}, we are not aware of an extensive study verifying it is possible
to integrate SharesSkew into a complete system.
Hence, we deem it out of scope for this thesis to attempt a full integration.
Some readers might ask if there are better multi-round algorithms which replicate fewer data.
Indeed, the authors of a Shares related paper raise the same question as future work~\cite{shares-skew-proof}.
They are able to answer this question for specific join queries in~\cite{shares-skew-proof,shares-skew}, e.g. chain-joins and cycle-joins.
Later, they present an algorithm which is multi-round optimal for all acyclic queries~\cite{gym} and one for all queries
over binary relationships~\cite{shares-binary}.
The papers about multi-round optimal partitioning schemes are rather theoretical.
To the best of our knowledge, only one paper provides practical experiments~\cite{shares-skew} but has no dedicated implementation section.
Also, they have not been shown optimal for general conjunctive join queries but only for special cases.
Two of the three papers~\cite[shares-skew-proof,gym] cannot handle clique joins which are an important class of joins in our thesis.
Additionally, they add additional complexity to the query optimizer, e.g. they require the input query to be represented as
generalized hypertree decomposition to calculate their intersection width~\cite{gym} or to find many different hypercube
configurations~\cite{shares-binary,shares-skew,shares-skew-proof} which is not trivial in
practice and computation-intensive as discussed in the last paragraph.
We leave it to future research to investigate the practical application of these algorithms to graph pattern matching.
The most interesting paper in this direction is~\cite{shares-binary}.
It develops a multi-round algorithm for n-ary joins on binary relationships like the edge relationship of a graph.
% Skew in parallel query processing -- proofs shares is optimal for databases without skew
% first mentioning of residual joins for skew -- connection with sharesskew given in related work of shares skew
% mentioned in "Algorithmic aspects of parallel" We show how we can extend the Hyper- cube algorithm from skew-free data to arbitrary
% data, and describe a worst-case optimal algorithm called SkewHC [7] "skew in parallel query processing.
% Worst-Case Optimal Algorithms for Parallel Query Processing
% ==========================================================
% Worst-Case Optimal Algorithms for Parallel Query Processing -- proves Shares worst-case optimal for any query by choosing different share
% configurations for skewed values.
% connection with SharesSkew?
% In [5] "Skew in parallel query processing", the authors showed that the HyperCube (HC) algorithm, first presented by Afrati and Ullman
%[2] "Optimizing joins in a map-reduce environment", can optimally compute any conjunctive query for a single round on data without skew.
%The work in [5] also presents one-round algorithms and lower bounds for skewed data but the upper and lower bounds do not necessarily
%coincide.
%Our setting and worst-case analysis can be viewed as the analogous version of the work
%of Ngo et al. [17] on worst-case optimal algorithms for multiway join processing. As we will show later, the worst-case instances for a given query q are different for the two settings in the case of one round, but coincide for all the families of queries we examine when we consider multiple rounds.
%Our
% SharesSkew
% ============
% SharesSkew: An Algorithm to Handle Skew for Joins in MapReduce
% More practical paper considering Shares and Skew, also via residual joins
% SharesSkew: An Algorithm to Handle Skew for Joins in MapReduce
% The only other work that investigates skew when comput-
%ing multiway joins in MapReduce is [5, 7]. In [5] "skew in parallel query processing", lower and
%upper bounds are given on the communication cost for algo-
%rithms that compute multiway joins in one round in share
%nothing architectures (it includes MapReduce but certain re-
%sults therein capture more general models as well). For the
%upper bound, the Shares algorithm is shown to either meet
%the lower bound (when there is no skew) or offer a good
%upper bound in the presence of skew. In both cases, the pa-
%rameters of the map function (i.e., the shares – see Section 3
%for details) are computed by a linear program which gives
%a solution to fractional edge packing of the hypergraph of
%the join. The main similarity of the algorithm we present in
%the present paper and the algorithm presented in [5] to han-
%dle skewed data is that, in both algorithms, the join to be
%computed is decomposed in a number of joins, called resid-
%ual joins. Each residual join is defined by a combination
%of heavy hitters and is applied on a different subset of the
%data. The combination of heavy hitters and the definition
%of a heavy hitter differ in the two papers, however.
% Uninteresting
% Communication steps for parallel query processing.
% n [4] "communication steps for parallel query processing" it is proven that with high
%probability the Shares algorithm 2 distributes tuples evenly
%on uniform databases (these are defined precisely in [4] to
%be databases which resemble the case of random data). This
%class of databases include databases where all relations have
%the same size and there is no skew.
% problem 1: Shares is only proved to be optimal with no skew, for skew it is optimal for some queries but not for others,
% using different HC configurations for different residual joins leads to shares being always optimal.
% but we are only talking about shares...
% problem 2: shares is proven to be one-round optimal. However, multi-round solutions exists.
% Excluded by being super expensive in Spark (disk-write-read)
% Excluded by multiple statements of them being not so great, need to find these again.
% Multiple rounds:
% GYM is an algorithm studying that.
% Shares skew labels it as future work, but shows optimality can only be reached using multiple round for some problems.
%
%(worst-case optimal algorithms for parallel query processing)
% The central remaining open question is to design worst-case optimal algorithms for
%multiple rounds for any conjunctive query.
% Already proves lower bounds for multi-rounds
% All papers either only on special queries or (GYM) acyclic queries
% but for GYM it's only formulated as a trade off and needs a bit of machinery as the intersection width
% "Intersection width is a new notion we introduce for queries and generalized hypertree decompos- itions (GHDs) of queries that
% captures how connected the adjacent components of the GHDs are."
% multi-round algorithms need heavy hitter knowledge
\paragraph{Analysis of Shares scalability}
Next, we analyse the scalability of Shares on growing graph patterns.
That is, self-joins over a single relationship which has two variables.
In this context, relationships of the join can be seen as the edges of the pattern and variables as vertices.
First, we fix the method to determine the best hypercube configuration $(p_1 \dots p_k)$, given a query.
For this, we use the method described above and used in~\cite{myria-detailed}.
Given the hypercube configuration and a query, we can estimate the workload of each worker by the formula~\ref{eqn:estimated-workload}.
Let $R$ be the set of all atoms in the join\footnote{An atom in a datalog join is the reference to a relationship,
e.g. $triangle(a, b, c) \leftarrow R(a, b), S(b, c), T(a, c)$ has three atoms named $R, S$ and $T$.
In this section, we differentiate atoms and relationships because multiple atoms can point the same underlying relationship which
becomes of particular importance.},
$size1(r)$ and $size2(r)$ be the size of the first respectively second hypercube dimension for the two variables in atom $r$.
Then, each worker receives $\sum_{r \in R}{ \frac{|r|}{size1(r) * size2(r)}}$ tuples under the assumption of uniform data distribution
and good hash functions.
Our argument is that the tuples of each atom $r$ are divided onto $size1(r) * size2(r)$ workers;
the workers that form the hypercube plain of its two variables.
In the special case of graph pattern matching where all atoms of the query are pointing to the same relationship,
we can optimize the hypercube shuffle such that a tuple is only sent once to a worker, although it might be assigned to it via
multiple atoms.
If we apply this optimization, we can predict the probability with which each tuple is assigned to a worker using the Poisson binomial
distribution.
The Poisson binomial distribution $\Pr(n, k, u_0, \dots, u_n)$ allows us to calculate the likelihood that $k$ out of $n$ independent,
binary and differently distributed trials succeed, under the condition that the $i$ the trial succeeds with a probability of $u_i$.
We use $n = |R|$, $k = 0$ and $u_i=1/(size1(r_i) * size2(r_i))$ to calculate the probability that a tuple is not assigned to an
arbitrary, fixed worker.
This allows us to predict the number of tuples assigned to each worker by $|E| * (1 - \Pr(|R|, 0, u_0, \dots, u_{|R|})$ with $E$ being
the edge relationship.
\Cref{table:shares-workload-estimate} shows the expected percentage of tuples from the edge relationship assigned to each worker for graph
patterns of different sizes calculated using Poisson binomial distribution and optimal shares assignments according to the method used
in~\cite{myria-detailed}.
As we can see in this table, the number of tuples assigned to each worker grows over linear in the size of the graph pattern.
Furthermore, doubling the number of workers is inefficient to counter this growth.
In particular, already a small clique queries of four vertices replicate over half of the tuples on all 64 workers.
5-clique queries require nearly a full broadcast with each worker holding 82\% or 90\% of all tuples with 128 respectively 64 workers.
The diamond query used in practice by the Twitter recommendation engine has to replicate far more than half of the tuples to all workers.
\begin{table}[t]
\centering
\begin{tabular}{lrr}
\toprule
Pattern & Edges & workload [64]/[128] \\ \midrule
Triangle & 3 & 0.18 / 0.12 \\
4-clique & 6 & 0.59 / 0.44 \\
5-clique & 10 & 0.90 / 0.82 \\
House & 5 & 0.42 / 0.32 \\
Diamond & 8 & 0.76 / 0.67 \\
\bottomrule
\end{tabular}
\caption{Workload on 64 and 128 workers in percentage of tuples of the edge table assigned to each worker estimated by using
Poison binominial distribution to estimate the workload and the method from~\cite{myria-detailed}
to determine the optimal shares configuration.
}
\label{table:shares-workload-estimate}
% See hc-workload-1.csv computed with a28fc458f4f8959a5af81a65f593ea22dcb8dd44
\end{table}
This has two reasons.
First, doubling the number of workers does not allow us to double the dimensions of the hypercube because a hypercube always needs
product of all dimension sizes to be built.
Second, the number of replicated tuples increases with a growing hypercube because each tuple is replicated to more workers;
namely $\prod_{r \in R / r} size1(r) * size2(r)$ workers.
This is because each tuple binds only two out of all variables.
Hence, it is replicated over many dimensions.
In light of the numbers presented in \cref{table:shares-workload-estimate} and in line~\cite{ammar2018distributed},
we conclude that the communication costs for Shares converge towards a full broadcast for bigger graph patterns and
scaling becomes increasingly inefficient.
By this observation and the fact that hypercube shuffling is an optimal scheme (see the last paragraph),
we decide against using any partitioning scheme in our work but replicate the edge relationship on all
workers.
\subsection{Compressed sparse row representation}\label{subsec:csr-background}
Compressed sparse row representation (short CSR) is a well known, compact representation for static graphs~\cite{csr,csr-first}.
To ease its explanation, we assume that the graph's vertices are identified by the numbers from 0 to $|V| - 1$.
However, our implementation allows the use of arbitrary vertice identifiers in $\mathcal{N}$ by storing the translation in an additional
array of size \textit{|V|}.
CSR uses two arrays to represent the edge relationship of the graph: one of size \textit{|E|} which is a projection of the edge relationship
onto the \textit{dst} attribute (called \textit{AdjacencyLists}) and a second of size \texttt{|V + 1|} which stores indices into the first
array (called \textit{Indices}).
To find all destinations directly reachable from a source \textit{src $\in$ V}, one accesses the second array at \textit{src} for the
correct index into the first array for a list of destinations.
\Cref{fig:csr-example} shows an example for a table and its \textsc{CSR}.
First, we note that vertices are not represented by their original ID but as numbers from
0 to |V| which gives the offset into the \textit{Indices} array where to find the offset into
the \textit{AdjacencyLists} array for their neighbours, e.g. vertex $2$ is represented by $1$ which
points to the offset $1$ in the \textit{Indices} array which in turn points to the adjacency
list of $2$ in \textit{AdjacencyLists}.
Additionally, we point out that the 3rd entry in the \textit{Indices} array is the same as the second.
This is because vertex $3$ has no outgoing edges.
Hence, it has no adjacency list to point to.
Therefore, it points to the same offset as the entry for $2$.
\begin{figure}
\centering
\subfloat{
\begin{tabular}{rr}
\toprule
a & b \\\midrule
1 & 2 \\
2 & 3 \\
2 & 4 \\
2 & 5 \\
2 & 6\\
4 & 2 \\
5 & 2 \\
5 & 6 \\
6 & 5 \\\bottomrule
\end{tabular}
}
\hspace{1.5cm}
\subfloat{
\includesvg[width=0.5\textwidth]{svg/csr-example}
}
\caption{Example of a table and its compressed sparse row representation.
The \textit{Indices} array gives offsets into the \textit{AdjacencyLists} array.
The vertices are represented as the indexes into the indices array, e.g. vertex id
$2$ is represented as $1$.
}
\label{fig:csr-example}
\end{figure}
The CSR format has two beneficial properties in the context of this thesis.
First, it allows locating all destinations for a source vertice by one array lookup;
hence, in constant time.
Second, the representation is only, roughly, half as big than a simple columnar representation.
A uncompressed columnar representation needs $2 \times |E|$ while CSR uses only $|V| + 1 + |E|$, note that for most real-world graph |V|
<< |E| holds (see~\cref{subsec:graph-analysis}).
\subsection{Sizes of public real-world graph datasets} \label{subsec:graph-analysis}
In this section, we present a short analysis of the sizes of real-world graph datasets.
For this, we collect data about all graphs from the SNAP and Laboratory of Web Algorithms dataset collection~\cite{snapnets,
law}.
The graphs in the Snap dataset are a bit older;
they have been collected between 2000 and 2010.
All Laboratory of Web Algorithms graphs have been collected between 2007 and 2018.
Both dataset collections are heavily used and cited in academia~\cite{ammar2018distributed,olddog,myria-detailed,fractal,longbin}.
Two of these papers are from 2019.
For our size calculation we assume that the graph is stored in compressed sparse row representation (see \cref{subsec:csr-background}) using
integers for the vertice ids.
Then, we determine the storage size in bits by the formulae $32 \times |V| + 32 \times |E|$ with 32 the size of an integer in bits, V the
set of all vertices in the graph and E the set of all edges in the graph.
\Cref{fig:graph-sizes} shows a histogram of sizes for all 157 graphs from the two datasets.
104 of these graphs are smaller than 1 GB and only 8 graphs are bigger than 100 GB.
The biggest graph is the friendship graph of Facebook from the year 2017 with 552.2 GB.
\begin{figure}
\centering
\includesvg[width=0.7\textwidth]{svg/graph-sizes}
\caption{
Sizes of all graphs from the SNAP and Laboratory of Web Algorithms dataset collection in gigabytes.
The histogram shows graphs up to 100 GB in buckets of 5 GB and in buckets of 50 GB after.
In total, we see data collected about 157 graphs.
}
\label{fig:graph-sizes}
\end{figure}
We conclude that even the biggest graph can be fitted in the main memory of many cluster machines today.
The vast majority could be fitted in the main memory of a simple desktop machine or laptop.
This supports our argument to replicate graph data over all machines.
% Parallelism in Spark
% ====================
%We give examples for both kind of parallelism in the triangle count query
%(\cref{fig:lineage-triangle}).
%Lets assume that the CSV file is partitioned in 10 equal parts and each part is read
%by one out of 10 workers.
%Then the resulting RDD has 10 partitions.
%The following filter can be applied to all 10 partitions in parallel.
%This computation is also task parallel because all three filters can be applied to the
%input set directly after reading it from disk.
%If we go one step further into the example of the triangle query and look at the first
%join, we see limitations to Spark's parallelism.
%Let's assume that we want to use a Hashjoin implementation.
%In this case, we have to build a hash table of either side of the join.
%Hence, the computation of the join needs to wait until this hash table has been build.
%This is clearly not task parallel and it's also not data parallel on the build site
%because we need the data from all partitions to construct a full hash table.
%The result is that we see an exchange operator in the DAG of \cref{fig:triangle-lineage}.
%This operator allows to reorganize the partitions of a RDD.
%In the case of a hash join, it would reorganize items from all partitions into a hash
%table and make copies of this hash table available to the tasks that compute the
%partitions of the join.
%In the last paragraphs, we covered that Spark uses data parallelism arising from the partitioning of the RDD's
%and task parallelism arising from the lineage-graph representation of the RDD's.
%Synchronization happens via exchange operators which allow to reorganize the paritioning of the RDD's.
%In the following, we explain how Spark exploits parallelism in its execution model.
%Spark uses a scheduler to assign \textit{tasks} to \textit{slots}.
%\textit{Tasks} are the smallest unit of work in Spark.
%They are created by dividing the RDD lineage graph into pipelinable \textit{stages}.
%Normally, a stage consists out of all transformations between two exchange operators.
%Each stage consists out of as many tasks as it has partitions.
%The stages of the triangle query are shown in \cref{fig:triangle-lineage}.
%We have four stages.
%Two to build the hash table for our hash join which start with reading the CSV from disk and end with the exchange operator before
%the join.
%The longest stage also reads the CSV from disk, includes the two streaming sites of the hash joins and finally aggregates all
%results per partition for the count.
%It ends with an exchange to aggregate the counts of all partitions; this aggregation is the last out of for \textit{stages}.
%
%These four stages lead to 31 tasks if we assume that each stage starts with reading the CSV into 10 partitions.
%This is because the first 3 stages have 10 tasks each and the last stage accumulating all counts after the last task is only as single
%task of summing up all partitions of its parent.
|
{"hexsha": "86b7dc4a48bb4ed0441d98463eeb16edd15e27ef", "size": 81455, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "background.tex", "max_stars_repo_name": "PerFuchs/master-thesis", "max_stars_repo_head_hexsha": "85386c266fecf72348114bcbafeeb896a9e74601", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-11-02T20:23:03.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-02T20:23:03.000Z", "max_issues_repo_path": "background.tex", "max_issues_repo_name": "PerFuchs/master-thesis", "max_issues_repo_head_hexsha": "85386c266fecf72348114bcbafeeb896a9e74601", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "background.tex", "max_forks_repo_name": "PerFuchs/master-thesis", "max_forks_repo_head_hexsha": "85386c266fecf72348114bcbafeeb896a9e74601", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 63.9364207221, "max_line_length": 634, "alphanum_fraction": 0.7701061936, "num_tokens": 19491}
|
import logging
import cv2
import numpy as np
COLOR_RED = (0, 0, 255)
COLOR_GREEN = (0, 255, 0)
COLOR_BLUE = (255, 0, 0)
COLOR_BLACK = (0, 0, 0)
COLOR_DARK_GREEN = (34, 139, 34)
COLOR_YELLOW = (0, 255, 255)
def draw(image, pred_boxes_scores, gt_boxes, pred_landmarks, gt_landmarks):
pred_boxes = pred_boxes_scores[:, :4]
scores = pred_boxes_scores[:, 4]
# logger.debug("score:%r", scores)
scores = ['{:.3f}'.format(s) for s in scores]
draw_boxes(image, pred_boxes, COLOR_RED, scores)
draw_boxes(image, gt_boxes, COLOR_GREEN)
for landmarks in pred_landmarks: # 一张图里可能有多个人脸
draw_points(image, landmarks, COLOR_RED)
for landmarks in gt_landmarks:
draw_points(image, landmarks, COLOR_GREEN)
return image
logger = logging.getLogger(__name__)
def draw_boxes(image, boxes, color, texts=None):
if texts:
for box, text in zip(boxes, texts):
draw_box(image, box, color, text)
else:
for box in boxes:
draw_box(image, box, color)
def draw_box(image, box, color, text=None):
box = box.astype(np.int32)
# logger.debug("画框box: %r", box)
cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]), color=color, thickness=1)
if text: cv2.putText(image, text, (box[0], box[1]), color=COLOR_RED, fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1)
def draw_points(image, points, color=COLOR_RED):
for p in points:
draw_point(image, p, color)
def draw_point(image, point, color):
if type(point)==np.ndarray:
# logger.debug("画点point: %r", point)
point = tuple(np.array(point,np.int).tolist())
cv2.circle(image, point, 1, color, 4)
|
{"hexsha": "9dadd592f674d9b58fcbb9d5fb225127291e5a04", "size": 1668, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils/image_utils.py", "max_stars_repo_name": "piginzoo/Pytorch_Retinaface", "max_stars_repo_head_hexsha": "3bb028e078a36f5cf90f67dc1de313d2472ee464", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-16T12:35:10.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-16T12:35:10.000Z", "max_issues_repo_path": "utils/image_utils.py", "max_issues_repo_name": "piginzoo/Pytorch_Retinaface", "max_issues_repo_head_hexsha": "3bb028e078a36f5cf90f67dc1de313d2472ee464", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils/image_utils.py", "max_forks_repo_name": "piginzoo/Pytorch_Retinaface", "max_forks_repo_head_hexsha": "3bb028e078a36f5cf90f67dc1de313d2472ee464", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.2631578947, "max_line_length": 120, "alphanum_fraction": 0.6594724221, "include": true, "reason": "import numpy", "num_tokens": 489}
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Author: Dirk Eilander (contact: dirk.eilancer@vu.nl)
# Created: Nov-2017
from .dd_ops import LDD, NextXY
import rasterio
import numpy as np
def read_dd_rasterio(fn, ddtype='ldd', **ddkwargs):
with rasterio.open(fn, 'r') as src:
if ddtype == 'ldd':
dd_r = LDD(src.read(1), src.transform, nodata=src.nodata, **ddkwargs)
elif ddtype == 'nextxy':
dd_r = NextXY(src.read(), src.transform, nodata=src.nodata, **ddkwargs)
return dd_r
def read_dd_pcraster(fn, transform, nodata=255):
import pcraster as pcr
lddmap = pcr.readmap(str(fn))
ldd_data = pcr.pcr2numpy(lddmap, nodata)
ldd = LDD(ldd_data, transform, nodata=nodata)
return ldd
def read_dd_cmfbin(fn, transform, height, width, nodata=-9999, **ddkwargs):
a = np.fromfile(file=str(fn), dtype='int32').reshape((2, int(height), int(width)))
nextxy = NextXY(a, transform, nodata=nodata, **ddkwargs)
return nextxy
def read_raster(fn):
return None
def write_raster_like(fn_out, fn_like, raster, **kwargs):
# copy input gtiff profile
with rasterio.open(fn_like, 'r') as src:
assert src.shape == raster.shape[-2:], "number of rows and cols should match"
profile = src.profile.copy()
count = raster.shape[0] if raster.ndim == 3 else 1
profile.update(count=count, dtype=str(raster.dtype))
profile.update(**kwargs)
# write output
with rasterio.open(fn_out, 'w', **profile) as dst:
dst.write(raster, 1)
|
{"hexsha": "749340b496e9a723712d91fd08c885cd7b19a6ec", "size": 1578, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/1-prepare/cmftools/nb/nb_io.py", "max_stars_repo_name": "DirkEilander/compound_hotspots", "max_stars_repo_head_hexsha": "f9d7960633be80e8e24d2f2563df367cc3f060c6", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-17T07:02:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-17T07:02:13.000Z", "max_issues_repo_path": "src/1-prepare/cmftools/nb/nb_io.py", "max_issues_repo_name": "DirkEilander/compound_hotspots", "max_issues_repo_head_hexsha": "f9d7960633be80e8e24d2f2563df367cc3f060c6", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/1-prepare/cmftools/nb/nb_io.py", "max_forks_repo_name": "DirkEilander/compound_hotspots", "max_forks_repo_head_hexsha": "f9d7960633be80e8e24d2f2563df367cc3f060c6", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-01-17T02:48:28.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-17T02:48:28.000Z", "avg_line_length": 35.0666666667, "max_line_length": 87, "alphanum_fraction": 0.63878327, "include": true, "reason": "import numpy", "num_tokens": 454}
|
import sys
import os
import cv2
from collections import namedtuple
Batch = namedtuple('Batch', ['data'])
import numpy as np
import mxnet as mx
input_path = sys.argv[1].rstrip(os.sep)
mod = mx.mod.Module.load('mnist_lenet', 35, context=mx.gpu(2))
mod.bind(
data_shapes=[('data', (1, 1, 28, 28))],
for_training=False)
filenames = os.listdir(input_path)
for filename in filenames:
filepath = os.sep.join([input_path, filename])
img = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
img = (img.astype(np.float)-128) * 0.00390625
img = img.reshape((1, 1)+img.shape)
mod.forward(Batch([mx.nd.array(img)]))
prob = mod.get_outputs()[0].asnumpy()
prob = np.squeeze(prob)
pred_label = np.argmax(prob)
print('Predicted digit for {} is {}'.format(filepath, pred_label))
|
{"hexsha": "767a98e17e6a6605329357f883f3e594e25ed98b", "size": 801, "ext": "py", "lang": "Python", "max_stars_repo_path": "chap8/mxnet/recognize_digit.py", "max_stars_repo_name": "wang420349864/dlcv_for_beginners", "max_stars_repo_head_hexsha": "080c7d3bbb4a68e4fb79e33231ccc666ada16dcc", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1424, "max_stars_repo_stars_event_min_datetime": "2017-01-04T12:08:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T02:57:24.000Z", "max_issues_repo_path": "chap8/mxnet/recognize_digit.py", "max_issues_repo_name": "wang420349864/dlcv_for_beginners", "max_issues_repo_head_hexsha": "080c7d3bbb4a68e4fb79e33231ccc666ada16dcc", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 39, "max_issues_repo_issues_event_min_datetime": "2017-03-16T08:48:28.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-03T11:30:23.000Z", "max_forks_repo_path": "chap8/mxnet/recognize_digit.py", "max_forks_repo_name": "wang420349864/dlcv_for_beginners", "max_forks_repo_head_hexsha": "080c7d3bbb4a68e4fb79e33231ccc666ada16dcc", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 703, "max_forks_repo_forks_event_min_datetime": "2017-02-22T19:35:45.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-21T01:31:27.000Z", "avg_line_length": 29.6666666667, "max_line_length": 70, "alphanum_fraction": 0.6828963795, "include": true, "reason": "import numpy", "num_tokens": 226}
|
#
# Date created: 2022-03-14
# Author: aradclif
#
#
############################################################################################
function sizeblock(N::Int)
block = Expr(:block)
for d = 1:N
ex = Expr(:(=), Symbol(:D_, d), Expr(:call, :size, :A, d))
push!(block.args, ex)
end
block
end
function sizeproductsblock(N::Int)
block = Expr(:block)
for k = 3:N
ex = Expr(:(=), Symbol(:D_, ntuple(identity, k - 1)...),
Expr(:call, :*, ntuple(d -> Symbol(:D_, d), k - 1)...))
push!(block.args, ex)
end
block
end
function sumprodliteral(N::Int)
e = Expr(:call, :+, Symbol(:i_, 1))
for k = 2:N
if k == 2
ex = Expr(:call, :-, Expr(:call, :*, Symbol(:D_, 1), Symbol(:i_, 2)), Symbol(:D_, 1))
push!(e.args, ex)
else
ex = Expr(:call, :-,
Expr(:call, :*, ntuple(d -> Symbol(:D_, d), k - 1)..., Symbol(:i_, k)),
Expr(:call, :*, ntuple(d -> Symbol(:D_, d), k - 1)...))
push!(e.args, ex)
end
end
e
end
function sumprodprecomputed(N::Int)
e = Expr(:call, :+, Symbol(:i_, 1))
for k = 2:N
if k == 2
ex = Expr(:call, :-, Expr(:call, :*, Symbol(:D_, 1), Symbol(:i_, 2)), Symbol(:D_, 1))
push!(e.args, ex)
else
ex = Expr(:call, :-,
Expr(:call, :*, Symbol(:D_, ntuple(identity, k - 1)...), Symbol(:i_, k)),
Symbol(:D_, ntuple(identity, k - 1)...))
push!(e.args, ex)
end
end
e
end
function sumprodprecomputed2(N::Int)
e = Expr(:call, :+, Symbol(:i_, 1))
for k = 2:N
if k == 2
ex = Expr(:call, :*, Symbol(:D_, 1), Symbol(:i_, 2))
push!(e.args, ex)
else
ex = Expr(:call, :*, Symbol(:D_, ntuple(identity, k - 1)...), Symbol(:i_, k))
push!(e.args, ex)
end
end
e
end
function sumprodconstant(N::Int)
Expr(:(=), :D_sp,
Expr(:call, :+, ntuple(d -> Expr(:call, :*, ntuple(i -> Symbol(:D_, i), d)...), N - 1)...))
# # # Equivalent, but perhaps more clear
# e = Expr(:(=), :D_sp)
# r = Expr(:call, :+)
# for k = 1:(N - 1)
# ex = Expr(:call, :*, ntuple(d -> Symbol(:D_, d), k)...)
# push!(r.args, ex)
# end
# push!(e.args, r)
# e
end
function sumprodconstant2(N::Int)
Expr(:(=), :D_sp, Expr(:call, :+, ntuple(d -> Symbol(:D_, ntuple(identity, d)...), N - 1)...))
end
prog = "a == b ? 5 : 10"
ex = Meta.parse(prog)
Meta.show_sexpr(ex)
# Expr(:?, Expr(:call, :(==), :a, :b), 5, 10)
prog2 = "if a == b
5
else
10
end"
ex2 = Meta.parse(prog2)
Meta.show_sexpr(ex2)
prog3 = "ifelse(a == b, 5, 10)"
ex3 = Meta.parse(prog3)
Meta.show_sexpr(ex3)
prog4 = "a = -a"
ex4 = Meta.parse(prog4)
Meta.show_sexpr(ex4)
function compareblock(N::Int, D)
block = Expr(:block)
params = D.parameters
a = Expr(:ref, :A, ntuple(d -> Symbol(:i_, d), N)...)
b = Expr(:ref, :B, ntuple(d -> params[d] == Static.One ? 1 : Symbol(:i_, d), N)...)
c = Expr(:ref, :C, ntuple(d -> params[d] == Static.One ? 1 : Symbol(:i_, d), N)...)
# y = Expr(:(=), :yes, Expr(:call, :(==), a, b))
# e = Expr(:(=), c, Expr(:if, y, sumprodliteral(N), c))
# push!(block.args, y)
e = Expr(:(=), c, Expr(:if, Expr(:call, :(==), a, b), sumprodliteral(N), c))
push!(block.args, e)
block
end
function compareblock2(N::Int, D)
block = Expr(:block)
params = D.parameters
a = Expr(:ref, :A, ntuple(d -> Symbol(:i_, d), N)...)
b = Expr(:ref, :B, ntuple(d -> params[d] == Static.One ? 1 : Symbol(:i_, d), N)...)
c = Expr(:ref, :C, ntuple(d -> params[d] == Static.One ? 1 : Symbol(:i_, d), N)...)
d = sumprodprecomputed2(N)
push!(d.args, :D_sp)
e = Expr(:(=), c, Expr(:if, Expr(:call, :(==), a, b), d, c))
push!(block.args, e)
block
end
# function compareblock3(N::Int, D)
# block = Expr(:block)
# params = D.parameters
# a = Expr(:ref, :A, ntuple(d -> Symbol(:i_, d), N)...)
# b = Expr(:ref, :B, ntuple(d -> params[d] == Static.One ? 1 : Symbol(:i_, d), N)...)
# c = Expr(:ref, :C, ntuple(d -> params[d] == Static.One ? 1 : Symbol(:i_, d), N)...)
# d = sumprodprecomputed2(N)
# push!(d.args, :D_sp)
# # e = Expr(:if, Expr(:call, :(==), a, b), Expr(:(=), c, d))
# j = Expr(:(=), :j, c)
# e = Expr(:(=), c, Expr(:if, Expr(:call, :(==), a, b), d, :j))
# push!(block.args, j)
# push!(block.args, e)
# block
# end
function compareblock4(N::Int, D)
block = Expr(:block)
params = D.parameters
a = Expr(:ref, :A, ntuple(d -> Symbol(:i_, d), N)...)
b = Expr(:ref, :B, ntuple(d -> params[d] == Static.One ? 1 : Symbol(:i_, d), N)...)
c = Expr(:ref, :C, ntuple(d -> Symbol(:i_, d), N)...)
e = Expr(:(=), c, Expr(:call, :(==), a, b))
push!(block.args, e)
block
end
function compareblock6(N::Int, D)
block = Expr(:block)
params = D.parameters
a = Expr(:ref, :A, ntuple(d -> Symbol(:i_, d), N)...)
b = Expr(:ref, :B, ntuple(d -> params[d] == Static.One ? 1 : Symbol(:i_, d), N)...)
c = Expr(:ref, :C, ntuple(d -> params[d] == Static.One ? 1 : Symbol(:i_, d), N)...)
d = sumprodprecomputed2(N)
push!(d.args, :D_sp)
e = Expr(:(=), c, Expr(:call, :ifelse, Expr(:call, :(==), a, b), d, c))
push!(block.args, e)
block
end
# p. 26
function outerloopgen(N::Int, D)
loops = Expr(:for)
block = Expr(:block)
params = D.parameters
for d = N:-1:1
if params[d] != Static.One
ex = Expr(:(=), Symbol(:i_, d), Expr(:call, :axes, :A, d))
push!(block.args, ex)
end
end
push!(loops.args, block)
loops
end
function innerloopgen(N::Int, D)
loops = Expr(:for)
block = Expr(:block)
params = D.parameters
for d = N:-1:1
if params[d] == Static.One
ex = Expr(:(=), Symbol(:i_, d), Expr(:call, :axes, :A, d))
push!(block.args, ex)
end
end
push!(loops.args, block)
loops
end
function innerpost(N::Int, D)
params = D.parameters
block = Expr(:block)
b = Expr(:ref, :B, ntuple(d -> params[d] == Static.One ? 1 : Symbol(:i_, d), N)...)
c = Expr(:ref, :C, ntuple(d -> params[d] == Static.One ? 1 : Symbol(:i_, d), N)...)
e1 = Expr(:(=), b, :m)
e2 = Expr(:(=), c, :j)
push!(block.args, e1)
push!(block.args, e2)
block
end
function compareblock5(N::Int, D)
block = Expr(:block)
params = D.parameters
a = Expr(:ref, :A, ntuple(d -> Symbol(:i_, d), N)...)
d = sumprodprecomputed2(N)
push!(d.args, :D_sp)
yₑ = Expr(:(=), :y, Expr(:call, :(>), a, :m))
mₑ = Expr(:(=), :m, Expr(:if, :y, a, :m))
jₑ = Expr(:(=), :j, Expr(:if, :y, d, :j))
push!(block.args, yₑ)
push!(block.args, mₑ)
push!(block.args, jₑ)
block
end
function findmax5_quote(N::Int, D)
block1 = sizeblock(N)
block2 = sizeproductsblock(N)
block3 = Expr(:block, sumprodconstant(N), Expr(:(=), :D_sp, Expr(:call, :-, :D_sp)))
outerloops = outerloopgen(N, D)
block4 = Expr(:block, Expr(:(=), :j, 1), Expr(:(=), :m, Expr(:call, :typemin, :T)))
# push!(outerloops.args, block4)
innerloops = innerloopgen(N, D)
block5 = compareblock5(N, D)
push!(innerloops.args, block5)
push!(block4.args, innerloops)
# push!(outerloops.args, block4)
# push!(outerloops.args, innerloops)
block6 = innerpost(N, D)
# push!(outerloops.args, block6)
push!(block4.args, block6.args...)
push!(outerloops.args, block4)
return quote
$block1
$block2
$block3
$outerloops
end
end
function findequal_quote(N::Int, D)
loops = loopgen(N)
block1 = sizeblock(N)
block2 = compareblock(N, D)
push!(loops.args, block2)
return quote
$block1
$loops
end
end
function findequal_quote2(N::Int, D)
loops = loopgen(N)
block1 = sizeblock(N)
block2 = sizeproductsblock(N)
block3 = Expr(:block, sumprodconstant(N), Expr(:(=), :D_sp, Expr(:call, :-, :D_sp)))
block4 = compareblock2(N, D)
push!(loops.args, block4)
return quote
$block1
$block2
$block3
@turbo $loops
end
end
# function findequal_quote3(N::Int, D)
# loops = loopgen(N)
# block1 = sizeblock(N)
# block2 = sizeproductsblock(N)
# block3 = Expr(:block, sumprodconstant(N), Expr(:(=), :D_sp, Expr(:call, :-, :D_sp)))
# block4 = compareblock3(N, D)
# push!(loops.args, block4)
# return quote
# $block1
# $block2
# $block3
# @turbo $loops
# end
# end
function findequal_quote4(N::Int, D)
loops = loopgen(N)
block = compareblock4(N, D)
push!(loops.args, block)
return quote
@turbo $loops
end
end
function findequal_quote6(N::Int, D)
loops = loopgen(N)
block1 = sizeblock(N)
block2 = sizeproductsblock(N)
block3 = Expr(:block, sumprodconstant(N), Expr(:(=), :D_sp, Expr(:call, :-, :D_sp)))
block4 = compareblock6(N, D)
push!(loops.args, block4)
return quote
$block1
$block2
$block3
@turbo $loops
end
end
@generated function findequal!(C::AbstractArray{Tₒ, N}, A::AbstractArray{T, N},
B::AbstractArray{T, N}, dims::D) where {Tₒ, T, N, D}
findequal_quote2(N, D)
end
# @generated function findequal3!(C::AbstractArray{Tₒ, N}, A::AbstractArray{T, N},
# B::AbstractArray{T, N}, dims::D) where {Tₒ, T, N, D}
# findequal_quote3(N, D)
# end
@generated function findequal4!(C::AbstractArray{Tₒ, N}, A::AbstractArray{T, N},
B::AbstractArray{T, N}, dims::D) where {Tₒ, T, N, D}
findequal_quote4(N, D)
end
@generated function findmax5!(C::AbstractArray{Tₒ, N}, A::AbstractArray{T, N},
B::AbstractArray{T, N}, dims::D) where {Tₒ, T, N, D}
findmax5_quote(N, D)
end
@generated function findequal6!(C::AbstractArray{Tₒ, N}, A::AbstractArray{T, N},
B::AbstractArray{T, N}, dims::D) where {Tₒ, T, N, D}
findequal_quote6(N, D)
end
A = reshape([1:(4*3*5);], 4, 3, 5);
A = rand(1:10, 4, 3, 5);
dims = (2,);
Dᴮ′ = ntuple(d -> d ∈ dims ? StaticInt(1) : size(A, d), ndims(A));
findequal_quote(ndims(A), typeof(Dᴮ′))
findequal_quote2(ndims(A), typeof(Dᴮ′))
findequal_quote3(ndims(A), typeof(Dᴮ′))
findequal_quote6(ndims(A), typeof(Dᴮ′))
findmax5_quote(ndims(A), typeof(Dᴮ′))
# C0 = deepcopy(C);
B = maximum(A, dims=dims);
C = ones(Int, size(B));
CartesianIndices(A)[C] == argmax(A, dims=dims)
compareblock(ndims(A), typeof(Dᴮ′))
compareblock2(ndims(A), typeof(Dᴮ′))
compareblock3(ndims(A), typeof(Dᴮ′))
function lvfindmax(A::AbstractArray{T, N}, dims::NTuple{M, Int}) where {T, N, M}
B = lvmaximum(A, dims=dims)
Dᴮ′ = ntuple(d -> d ∈ dims ? StaticInt(1) : size(A, d), N)
C = ones(Int, size(B))
findequal!(C, A, B, Dᴮ′)
C
end
C2 = lvfindmax(A, dims)
CartesianIndices(A)[C2] == argmax(A, dims=dims)
LinearIndices(A)[argmax(A, dims=dims)]
# function lvfindmax3(A::AbstractArray{T, N}, dims::NTuple{M, Int}) where {T, N, M}
# B = lvmaximum(A, dims=dims)
# Dᴮ′ = ntuple(d -> d ∈ dims ? StaticInt(1) : size(A, d), N)
# C = ones(Int, size(B))
# findequal3!(C, A, B, Dᴮ′)
# C
# end
# C3 = lvfindmax3(A, dims)
# function lvfindmax4(A::AbstractArray{T, N}, dims::NTuple{M, Int}) where {T, N, M}
# B = lvmaximum(A, dims=dims)
# Dᴮ′ = ntuple(d -> d ∈ dims ? StaticInt(1) : size(A, d), N)
# C = similar(A, Bool)
# findequal4!(C, A, B, Dᴮ′)
# C
# end
# C4 = lvfindmax4(A, dims)
# reshape(findall(C4), size(B)) == argmax(A, dims=dims) == reshape(CartesianIndices(A)[C4], size(B))
function lvfindmax6(A::AbstractArray{T, N}, dims::NTuple{M, Int}) where {T, N, M}
B = maximum(A, dims=dims)
Dᴮ′ = ntuple(d -> d ∈ dims ? StaticInt(1) : size(A, d), N)
C = ones(Int, size(B))
findequal6!(C, A, B, Dᴮ′)
B, CartesianIndices(A)[C]
end
B6, C6 = lvfindmax6(A, dims)
lvfindmax6(A, dims) == findmax(A, dims=dims)
function lvfindmax5(A::AbstractArray{T, N}, dims::NTuple{M, Int}) where {T, N, M}
Dᴮ′ = ntuple(d -> d ∈ dims ? StaticInt(1) : size(A, d), N)
B = similar(A, Dᴮ′)
C = similar(B, Int)
findmax5!(C, A, B, Dᴮ′)
B, CartesianIndices(A)[C]
end
C5 = lvfindmax5(A, dims)
CartesianIndices(A)[C5] == argmax(A, dims=dims)
lvfindmax5(A, dims)
findmax(A, dims=dims)
lvfindmax5(A, dims) == findmax(A, dims=dims)
function lvfindequal(A::AbstractArray{T, 3}, B::AbstractArray{T, 3}) where {T}
C = similar(B, Int)
D_1 = size(A, 1)
D_2 = size(A, 2)
D_3 = size(A, 3)
D_12 = D_1 * D_2
D_sp = (*)(D_1) + D_1 * D_2
D_sp = -D_sp
@turbo for i_3 = axes(A, 3), i_1 = axes(A, 1)
j = 1
m = B[i_1, 1, i_3]
for i_2 = axes(A, 2)
# j = if A[i_1, i_2, i_3] == m
# i_1 + D_1 * i_2 + D_12 * i_3 + D_sp
# else
# j
# end
yes = A[i_1, i_2, i_3] == m
j = yes ? i_1 + D_1 * i_2 + D_12 * i_3 + D_sp : j
end
C[i_1, 1, i_3] = j
end
C
end
C5 = lvfindequal(A, B)
CartesianIndices(A)[C5] == argmax(A, dims=dims)
# an experiment -- seems to work
function lvfindmax5(A::AbstractArray{T, 3}, dims::NTuple{M, Int}) where {T, M}
Dᴮ′ = ntuple(d -> d ∈ dims ? StaticInt(1) : size(A, d), 3)
B = similar(A, Dᴮ′)
C = similar(B, Int)
D_1 = size(A, 1)
D_2 = size(A, 2)
D_3 = size(A, 3)
D_12 = D_1 * D_2
D_sp = (*)(D_1) + D_1 * D_2
D_sp = -D_sp
@turbo for i_3 = axes(A, 3), i_1 = axes(A, 1)
j = 1
m = typemin(T)
for i_2 = axes(A, 2)
# j = if A[i_1, i_2, i_3] == m
# i_1 + D_1 * i_2 + D_12 * i_3 + D_sp
# else
# j
# end
yes = A[i_1, i_2, i_3] > m
m = yes ? A[i_1, i_2, i_3] : m
j = yes ? i_1 + D_1 * i_2 + D_12 * i_3 + D_sp : j
end
B[i_1, 1, i_3] = m
C[i_1, 1, i_3] = j
end
B, C
end
B5, C5 = lvfindmax5(A, (1,))
A[C5] == B5
b5, c5 = findmax(A, dims=1)
b5 == B5
c5 == CartesianIndices(A)[C5]
|
{"hexsha": "d088025761b786a04f7965b5425efef71ba6be9a", "size": 14299, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/lvfindmax_v1.jl", "max_stars_repo_name": "andrewjradcliffe/VectorizedArgMinMax.jl", "max_stars_repo_head_hexsha": "8c5789142b95e5cb348d3970c08341f3bd818e0d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/lvfindmax_v1.jl", "max_issues_repo_name": "andrewjradcliffe/VectorizedArgMinMax.jl", "max_issues_repo_head_hexsha": "8c5789142b95e5cb348d3970c08341f3bd818e0d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/lvfindmax_v1.jl", "max_forks_repo_name": "andrewjradcliffe/VectorizedArgMinMax.jl", "max_forks_repo_head_hexsha": "8c5789142b95e5cb348d3970c08341f3bd818e0d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.7276507277, "max_line_length": 100, "alphanum_fraction": 0.5205958459, "num_tokens": 5184}
|
"""
A collection of IO related functions to support
1. reading scale factors
2. reading optimal scale factors
3. transforming scale factors
Author : Mike Stanley
Created : May 12, 2020
Modified : March 29, 2022
================================================================================
"""
from glob import glob
import json
import netCDF4 as nc4
import numpy as np
from os.path import expanduser
import pandas as pd
import PseudoNetCDF as pnc
from tqdm import tqdm
import xbpch
# operational constants
BASE_DIR = expanduser('~') + '/Research/Carbon_Flux'
COLUMN_LIST_FP = BASE_DIR + '/data/gosat_meta_data/gosat_columns.txt'
def read_sf_objs(base_df_dir, sf_prefix):
"""
Reads in all files in directory with provided scale factor prefix.
E.g. ./scale_factors/sf_*
where base_df_dir == 'scale_factors' and sf_prefix == 'sf_'
Parameters:
base_df_dir (str) : base directory where all scale factors can be found
sf_prefix (str) : prefix for each scale factor file
Returns:
list of sf objects
NOTE:
- tracerinfo and diaginfo files must be present in the given directory
- all scale factor files are assumed to have the same prefix form
"""
# obtain the scale factor file names (NOTE: file order doesn't matter)
file_names = glob(base_df_dir + '/' + sf_prefix + '*')
return [pnc.pncopen(fn, format='bpch') for fn in file_names]
def create_sf_arr(list_of_sf_objs, var_oi='IJ-EMS-$_CO2bal'):
"""
Creates a 4D stacked array all scale factors across all OSSEs
and months.
Parameters:
list_of_sf_objs (list) : list of pnc objects -- inputting the output
from read_sf_objs will work well
var_oi (str) : the variable of interest in each of the above
elements
Returns:
- numpy array (# iterations, lon, lat)
- longitude array
- latitude array
"""
# extract the scale factors from each object
extr_arrs = [sf_i.variables[var_oi].array()[0, :, :, :]
for sf_i in list_of_sf_objs]
# make sure that longitude comes before latitude
extr_arrs_ll = []
for sf_i in extr_arrs:
if sf_i.shape[1] != 72:
extr_arrs_ll.append(np.swapaxes(sf_i, axis1=1, axis2=2))
else:
extr_arrs_ll.append(sf_i)
# stack the above
stacked_arrs = np.stack(extr_arrs_ll, axis=0)
# obtain longitude and latitude
lon = list_of_sf_objs[0].variables['longitude'].array()
lat = list_of_sf_objs[0].variables['latitude'].array()
return stacked_arrs, lon, lat
def read_opt_sfs(file_path):
"""
Read in a numpy file containing optimal scale factors
Parameters:
file_path (str) : loction of optimal scale factors (numpy .npy file)
Returns:
numpy array containing optimal scale factors (M x 72 x 46)
"""
assert file_path[-3:] == 'npy'
opt_sf = np.load(
file=file_path
)
return opt_sf
def read_geo_info(file_path):
"""
Reads geographic point and region infomation from a json containing
individual locations and regions.
Parameters:
file_path (str) : path to the json file
Return:
dictionary with geographic info
"""
with open(file_path) as json_file:
geo_dict = json.load(json_file)
# pop out the "information" key \\ useless
geo_dict.pop('information')
return geo_dict
def get_single_loc_info(geo_dict, location_nm):
"""
Retrieves point and extent information for a dictionary of the form
returned by read_geo_info
Parameters:
geo_dict (dict) :
location_nm (str) :
Returns:
tuple - (lon, lat) (tuple), extent (list)
"""
# get lon/lat point
lon_lat = tuple(geo_dict['locations'][location_nm]['point'])
# get the extent
extent_lst = geo_dict['locations'][location_nm]['extent']
return lon_lat, extent_lst
def get_regional_info(geo_dict, region_nm):
"""
Retrieves point and extent information for a dictionary of the form
returned by read_geo_info
Parameters:
geo_dict (dict) :
region_nm (str) :
Returns:
tuple - lon_pts, lat_pts
"""
# get the reference location name
ref_loc = geo_dict['regions'][region_nm]['reference_point']
# get the information for the above location
pt, ext = get_single_loc_info(geo_dict=geo_dict, location_nm=ref_loc)
# get lat/lon perturb values
lon_perturb = geo_dict['regions'][region_nm]['lon_perturb']
lat_perturb = geo_dict['regions'][region_nm]['lat_perturb']
# create the nump arrays for the grid
lon_pts = np.arange(lon_perturb[0], lon_perturb[1]) + pt[0]
lat_pts = np.arange(lat_perturb[0], lat_perturb[1]) + pt[1]
return lon_pts, lat_pts
def create_netcdf_flux_file(
write_loc,
lon, lat, time, co2_vals,
co2_field_nm='CO2_SRCE_CO2bf',
dims=(72, 46, 8)
):
"""
Create a netcdf file for a single time instance of flux
The array of interest contained within is Time x lon x lat (72 x 46)
Parameters:
write_loc (str) : file path destination
lon (np arr) :
lat (np arr) :
co2_vals (np arr) : T x Lon x Lat array
co2_field_nm (str) : name of co2 field in the netcdf file
dims (tuple) : lon/lat/time array size tuple
Returns:
None - writes netcdf file to path specified in write_loc
"""
assert len(co2_vals.shape) == 3
# create and save netcdf file
f = nc4.Dataset(write_loc, 'w', format='NETCDF4')
# create dimensions
f.createDimension('lon', dims[0])
f.createDimension('lat', dims[1])
f.createDimension('time', dims[2])
# build variables
longitude = f.createVariable('Longitude', 'f4', 'lon')
latitude = f.createVariable('Latitude', 'f4', 'lat')
co2_srce = f.createVariable(co2_field_nm, 'f4', ('time', 'lon', 'lat'))
# passing data into variables
longitude[:] = lon
latitude[:] = lat
co2_srce[:, :, :] = co2_vals
# close the dataset
f.close()
def generate_nc_files(
bpch_files, output_dir, tracer_path, diag_path,
co2_var_nm='CO2_SRCE_CO2bf',
dims=(72, 46, 8)
):
"""
Creates one netcdf file for each binary punch file path provided in
bpch_files.
e.g.
input - [nep.geos.4x5.001, nep.geos.4x5.002] <- bpch files
output = [nep.geos.4x5.001, nep.geos.4x5.002] <- netcdf files
Parameters:
bpch_files (str) : an ordered sequential collection of daily
bpch files
output_dir (str) : output directory for netcdf files
tracer_path (str) : path to tracer file
diag_path (str) : path to diag file
co2_var_nm (str) : name of co2 variable of interest
dims (tuple) : lon/lat/time array size tuple
Returns:
None - write netcdf file to path in output_file
"""
# read in the binary punch files
bpch_data = xbpch.open_mfbpchdataset(
bpch_files,
dask=True,
tracerinfo_file=tracer_path,
diaginfo_file=diag_path
)
# create new output file names
output_file_nms = [
output_dir + '/' + fp.split('/')[-1] for fp in bpch_files
]
# extract non-time dependent info from first bpch file
lon = bpch_data.variables['lon'].values
lat = bpch_data.variables['lat'].values
time = bpch_data.variables['time'].values
co2_arr = bpch_data.variables[co2_var_nm].values
# create time indices to extract each day
time_idxs = np.arange(
0, dims[2] * len(output_file_nms)
).reshape(len(output_file_nms), dims[2])
# create netcdf files
for time_count, file_nm in enumerate(output_file_nms):
# find the time indices for this file
time_idx = time_idxs[time_count, :]
# create netcdf file with time_count index co2 values
create_netcdf_flux_file(
write_loc=file_nm,
lon=lon,
lat=lat,
time=time[time_idx],
co2_vals=co2_arr[time_idx, :, :],
co2_field_nm=co2_var_nm
)
def open_netcdf_flux(file_path, co2_field_nm='CO2_SRCE_CO2bf'):
"""
Open a single netcdf file as generated by generate_nc_files and return
- co2 values
- lon
- lat
in tuple
Parameters:
file_path (str) :
co2_field_nm (str) : name of co2 field
Returns:
co2 values, lon, lat (all numpy arrays)
"""
# read in the file
f_in = nc4.Dataset(file_path, 'r')
# extract arrays
co2_arr = np.array(f_in.variables[co2_field_nm][:, :, :])
lon = np.array(f_in.variables['Longitude'][:])
lat = np.array(f_in.variables['Latitude'][:])
# close the file
f_in.close()
return co2_arr, lon, lat
def read_flux_files(
file_dir,
file_pre,
tracer_fp=None,
diag_fp=None
):
"""
Since scale factors and results are examined on a monthly time-scale, raw
3hr flux files need to be processed to produce a monthly flux for each grid
point.
Array objects within have 72x46 dimensions
Assumptions -
1. flux files are bpch files
Parameters:
file_dir (str) : directory where files are stored
file_pre (str) : prefix for flux files, e.g. nep.geos.4x5.2010
tracer_fp (str) : path to relevant tracer file
(if none, will look in file_dir)
diag_fp (str) : path to relevant diag file
(if none, will look in file_dir)
Returns:
xbpch object which will contain a flux of interest in additiona to
dimension parameters (e.g. lon/lat/lev)
"""
if tracer_fp:
tracer_fp_1 = tracer_fp
else:
tracer_fp_1 = file_dir + '/tracerinfo.dat'
if diag_fp:
diag_fp_1 = diag_fp
else:
diag_fp_1 = file_dir + '/diaginfo.dat'
# find the flux file names
file_names = sorted(
[file_nm for file_nm in glob(file_dir + '/%s*' % file_pre)]
)
assert len(file_names) > 0
# read in all the prior fluxes
fluxes = xbpch.open_mfbpchdataset(
file_names,
dask=True,
tracerinfo_file=tracer_fp_1,
diaginfo_file=diag_fp_1
)
return fluxes
def generate_txt_files(
bpch_files, output_dir, tracer_path, diag_path,
co2_var_nm='CO2_SRCE_CO2bf',
dims=(8, 72, 46)
):
"""
Creates one txt file for each binary punch file path provided in
bpch_files. The expected dimension of each day's flux file is shown in
the "dims" variable.
When flattening arrays, the indices move fastest on the right side,
so, latitidue is moving the fastest, followed by longitude, followed by
time.
e.g.
input - [nep.geos.4x5.001, nep.geos.4x5.002] <- bpch files
output = [nep.geos.4x5.001, nep.geos.4x5.002] <- txt files
Parameters:
bpch_files (str) : an ordered sequential collection of daily
bpch files
output_dir (str) : output directory for netcdf files
tracer_path (str) : path to tracer file
diag_path (str) : path to diag file
co2_var_nm (str) : name of co2 variable of interest
dims (tuple) : lon/lat/time array size tuple
Returns:
None - write txt file to path in output_file
"""
# read in the binary punch files
bpch_data = xbpch.open_mfbpchdataset(
bpch_files,
dask=True,
tracerinfo_file=tracer_path,
diaginfo_file=diag_path
)
# extract the array from the above
bpch_arr = bpch_data[co2_var_nm].values
# create new output file names
output_file_nms = [
output_dir + '/' + fp.split('/')[-1] for fp in bpch_files
]
# create time indices to extract each day
time_idxs = np.arange(
0, dims[0] * len(output_file_nms)
).reshape(len(output_file_nms), dims[0])
# for each output file name, generate a new text file
for time_count, output_file_nm in enumerate(output_file_nms):
# find the time indices for this file
time_idx = time_idxs[time_count, :]
# create a flattened version of the above data with the time filter
data_arr = bpch_arr[time_idx, :, :]
assert data_arr.shape == dims
data_flat = data_arr.flatten()
# write to file
np.savetxt(fname=output_file_nm, X=data_flat)
def generate_txt_files_np(
flux_arr, bpch_files, output_dir,
dims=(8, 72, 46)
):
"""
Operates the same as generate_txt_files above, but takes a numpy array as
input in addition to the bpch file paths. The bpch file paths are still
includes so that new files names remain consistent with old formats.
Creates one txt file for each binary punch file path provided in
bpch_files. The expected dimension of each day's flux file is shown in
the "dims" variable.
When flattening arrays, the indices move fastest on the right side,
so, latitidue is moving the fastest, followed by longitude, followed by
time.
e.g.
input - [nep.geos.4x5.001, nep.geos.4x5.002] <- bpch files
output = [nep.geos.4x5.001, nep.geos.4x5.002] <- txt files
Parameters:
flux_arr (np arr) : array of fluxes to be written
bpch_files (list) : an ordered sequential collection of daily
bpch files
output_dir (str) : output directory for netcdf files
dims (tuple) : lon/lat/time array size tuple
Returns:
None - write txt file to path in output_file
"""
# create new output file names
output_file_nms = [
output_dir + '/' + fp.split('/')[-1] for fp in bpch_files
]
# create time indices to extract each day
time_idxs = np.arange(
0, dims[0] * len(output_file_nms)
).reshape(len(output_file_nms), dims[0])
# for each output file name, generate a new text file
for time_count, output_file_nm in enumerate(output_file_nms):
# find the time indices for this file
time_idx = time_idxs[time_count, :]
# create a flattened version of the above data with the time filter
data_arr = flux_arr[time_idx, :, :]
assert data_arr.shape == dims
data_flat = data_arr.flatten()
# write to file
np.savetxt(fname=output_file_nm, X=data_flat)
def read_flux_txt_files(flux_files):
"""
Utility to read flux files of the type created by generate_txt_files.
In particular, these are arrays with shape 8x72x46, where the indices are
moving fastest to slowest right to left.
Parameters:
flux_files (list) : sequential list of files to read
"""
flux_arrs = []
for file_nm in tqdm(flux_files):
flux_arrs.append(
np.loadtxt(file_nm).reshape(8, 72, 46)
)
return np.concatenate(flux_arrs)
def find_time_idxs(start, end, fluxes):
"""
Find the numpy arr indices between two month indexes (counting from 0)
Parameters:
start (int) : start month index
end (int) : end month index
fluxes (xbpch obj) : i.e. output of read_flux_files
Returns:
numpy array with time indices
"""
assert start < end
if end > 12:
less = np.where(
fluxes.time.values >= np.datetime64('1985-%i-01' % start)
)[0]
geq = np.where(
fluxes.time.values >= np.datetime64('1985-%i-01' % start)
)[0]
elif start > 9:
less = np.where(
fluxes.time.values < np.datetime64('1985-%i-01' % end)
)[0]
geq = np.where(
fluxes.time.values >= np.datetime64('1985-%i-01' % start)
)[0]
elif end > 9:
less = np.where(
fluxes.time.values < np.datetime64('1985-%i-01' % end)
)[0]
geq = np.where(
fluxes.time.values >= np.datetime64('1985-0%i-01' % start)
)[0]
else:
less = np.where(
fluxes.time.values < np.datetime64('1985-0%i-01' % end)
)[0]
geq = np.where(
fluxes.time.values >= np.datetime64('1985-0%i-01' % start)
)[0]
# find the intersection between the above
time_idxs = np.intersect1d(geq, less)
return time_idxs
def find_month_idxs(fluxes, month_list):
"""
Find the indices for each month for a given xbpch obj.
Parameters:
fluxes (xbpch object) : i.e. output of read_flux_files
month_list (list) : list of months to be returned in dict
Returns:
dictionary of month abbreviations with numpy array values
NOTE:
- this function can only handle a single year.
"""
# find the month indices
month_idxs = {
'jan': None, 'feb': None, 'mar': None,
'apr': None, 'may': None, 'jun': None,
'jul': None, 'aug': None, 'sep': None,
'oct': None, 'nov': None, 'dec': None
}
for month_idx, month in enumerate(month_idxs.keys()):
# find the start and end values
start_val = month_idx + 1
end_val = month_idx + 2
# find the time indices
month_idxs[month] = find_time_idxs(
start=start_val,
end=end_val,
fluxes=fluxes
)
# only keep months of interest
month_dict = {
key: value for key, value in month_idxs.items() if key in month_list
}
return month_dict
def read_cfn_files(file_dir):
"""
Read the cost function output files from GEOS-Chem Adjoint Runs.
These files should be organized in one directory to work with this function
Parameters:
file_dir (str) :
Returns:
float - the cost function evaluation
"""
# get file names for cost functions
cfn_fp = glob(file_dir + '/cfn*')
cfn = []
for i, fp in enumerate(cfn_fp):
with open(fp, 'r') as f:
cfn.append(float(f.readlines()[0].replace(
' %i ' % (i + 1), ''
).replace(' ', '').replace('\n', '')))
return cfn
"""
IO WITH GOSAT OBSERVATIONS
"""
def get_ij(lon, lat, disize=5, djsize=4, iipar=72, jjpar=46):
"""
Given a lon/lat, returns the corresponding grid coordinate for 4x5 grid.
NOTE:
1. This function is essentially copied from GET_IJ in
/code/modified/grid_mod.f
2. d(i/j)size are default as noted in /code/CMN_SIZE
3. (ii/jj)par_l are default as noted in /code/CMN_SIZE for 4x5 grid
4. Be aware that I changed some of the indexing since fortran indexes from 1
Parameters:
lon (float) : longitude
lat (float) : latitude
disize (float) : size (in degree) of a longitude grid box
djsize (float) : size (in degree) of a longitude grid box
iipar (int) : limit of longitude index
jjpar (int) : limit of latitude index
Returns:
tlon, tlat (longitude grid coord, latitude grid coord)
"""
tlon = int((lon + 180.) / disize + 0.5)
tlat = int((lat + 90.) / djsize + 0.5)
if tlon >= iipar:
tlon -= iipar
# if tlat > jjpar:
# tlat -= jjpar
# check for impossible points
if (tlon >= iipar) | (tlat >= jjpar) | (tlon < 0) | (tlat < 0):
print(tlon, tlat)
raise ValueError
return int(tlon), int(tlat)
def read_gosat_data(fp):
"""
Read in and process GOSAT Data.
Parameters:
fp (str) : file path to GOSAT Obs
Returns:
List of lists with satellite observations
"""
# read in the file
gs_data = []
with open(fp, 'r') as f:
for line in f.readlines():
gs_data.append(line.replace('\n', '').split(', '))
# convert strings to floats
gs_data = [[float(num) for num in line] for line in gs_data]
return gs_data
def create_gosat_df(fp, column_list_fp=COLUMN_LIST_FP):
"""
Read in GOSAT file using read_gosat_data and then creates a pandas
dataframe with the following columns:
1. lon
2. lat
3. xco2
4. xco2 uncertainty
5.-10. year, month, day, hour, min, sec
Note, the key observation used to establish this data is that the meat of
each observation is on every 7th line.
Parameters:
fp (str) : path to file
column_list_fp (str) : path to column names file
Returns:
pandas dataframe with the above columns
"""
# read in the column names
with open(column_list_fp, 'r') as f:
col_names = f.readlines()
col_names = [i.replace('\n', '') for i in col_names]
# read in the file
raw_gosat_file = read_gosat_data(fp)
# get the observation indices
obs_idxs = np.arange(0, len(raw_gosat_file), 7)
# create a shorted observation list
obs_list = [raw_gosat_file[idx] for idx in obs_idxs]
# return a pandas dataframe
return pd.DataFrame(obs_list, columns=col_names)
def create_gosat_df_year(obs_dir, year, column_list_fp=COLUMN_LIST_FP):
"""
reads in all simulated GOSAT observation files from directory for a given
year and generates a pandas dataframe with the following columns:
1. lon
2. lat
3. xco2
4. xco2 uncertainty
5.-10. year, month, day, hour, min, sec
Parameters:
obs_dir (str) : directory with GOSAT observations of interest
year (int) : year of observation
column_list (str) : list of column names, specified above
Returns:
pandas dataframe with the above columns
"""
# get all file names in the given directory
all_fps = glob(obs_dir + '/*')
# only keep those from our year of interest
fps_year = [i for i in all_fps if int(i[-12:-8]) == year]
# for each file path, apply create_gosat_df()
obs_dfs = [
create_gosat_df(fp, column_list_fp=column_list_fp) for fp in fps_year
]
# concatenate these dfs together
obs_df = pd.concat(obs_dfs, ignore_index=True)
# sort the above by time stamps
obs_df.sort_values(['oyear', 'omonth', 'oday'], inplace=True)
return obs_df.reset_index(drop=True)
|
{"hexsha": "f98d0988f4f989dd7d3f97f4565928d908dc2505", "size": 22245, "ext": "py", "lang": "Python", "max_stars_repo_path": "carbonfluxtools/io.py", "max_stars_repo_name": "mcstanle/carbonfluxtools", "max_stars_repo_head_hexsha": "9cb428a16ebb0b96e3cc08c3fdbac3e71751fbfc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "carbonfluxtools/io.py", "max_issues_repo_name": "mcstanle/carbonfluxtools", "max_issues_repo_head_hexsha": "9cb428a16ebb0b96e3cc08c3fdbac3e71751fbfc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "carbonfluxtools/io.py", "max_forks_repo_name": "mcstanle/carbonfluxtools", "max_forks_repo_head_hexsha": "9cb428a16ebb0b96e3cc08c3fdbac3e71751fbfc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.6662371134, "max_line_length": 80, "alphanum_fraction": 0.6237356709, "include": true, "reason": "import numpy", "num_tokens": 5899}
|
import numpy as np
import torch
import networks
from string import ascii_letters, punctuation, digits
alphabet = ascii_letters + punctuation + digits + " "
def to_ord(c, all_chars, alphabet):
if not (c in all_chars):
alphabet += c
all_chars[c] = all_chars["counter"]
all_chars["counter"] = all_chars["counter"] + 1
return all_chars[c]
def encode_token(t, alphabet, max_length=90):
M = max_length
C = len(alphabet)
all_chars = {c: idx for idx, c in enumerate(alphabet)}
all_chars["counter"] = len(all_chars)
x = [[to_ord(c, all_chars, alphabet) for c in t]]
index = 0
encode = np.zeros((C, M), dtype=np.float32)
encode[np.array(x[index]), np.arange(len(x[index]))] = 1.0
return torch.from_numpy(encode)
def pytorch_cos_sim(a, b):
if not isinstance(a, torch.Tensor):
a = torch.tensor(a)
if not isinstance(b, torch.Tensor):
b = torch.tensor(b)
if len(a.shape) == 1:
a = a.unsqueeze(0)
if len(b.shape) == 1:
b = b.unsqueeze(0)
a_norm = torch.nn.functional.normalize(a, p=2, dim=1)
b_norm = torch.nn.functional.normalize(b, p=2, dim=1)
return torch.mm(a_norm, b_norm.transpose(0, 1))
def main():
t1 = encode_token("test", alphabet).unsqueeze(0)
t2 = encode_token("test2", alphabet).unsqueeze(0)
emb = networks.TwoLayerCNN(C=len(alphabet), M=90, embedding=128, channel=8, mtc_input=1)
v1 = emb(t1)
v2 = emb(t2)
print(pytorch_cos_sim(v1, v2))
# emb = torch.load("model.torch", map_location=torch.device('cpu')).embedding_net
if __name__ == '__main__':
main()
|
{"hexsha": "fe84096702c23ad222b834b8fa048071e86d7a42", "size": 1622, "ext": "py", "lang": "Python", "max_stars_repo_path": "inference.py", "max_stars_repo_name": "imvladikon/string-embed", "max_stars_repo_head_hexsha": "49e5ab0ada37b497dac51974aff16eeac65627a0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "inference.py", "max_issues_repo_name": "imvladikon/string-embed", "max_issues_repo_head_hexsha": "49e5ab0ada37b497dac51974aff16eeac65627a0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "inference.py", "max_forks_repo_name": "imvladikon/string-embed", "max_forks_repo_head_hexsha": "49e5ab0ada37b497dac51974aff16eeac65627a0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.037037037, "max_line_length": 92, "alphanum_fraction": 0.6411837238, "include": true, "reason": "import numpy", "num_tokens": 463}
|
import warnings
warnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')
warnings.filterwarnings("ignore")
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = '2'
import networkx as nx
from sklearn.linear_model import LogisticRegression
from ge import LINE
from ge.classify import read_node_label, Classifier
from ge.utils import show
def evaluate_embeddings(embeddings):
X, Y = read_node_label('../data/flight/labels-usa-airports.txt', skip_head=True)
tr_frac = 0.8
print("Training classifier using {:.2f}% nodes...".format(
tr_frac * 100))
clf = Classifier(embeddings=embeddings, clf=LogisticRegression(solver='liblinear'))
return clf.split_train_evaluate(X, Y, tr_frac)
if __name__ == "__main__":
G = nx.read_edgelist('../data/flight/usa-airports.edgelist', create_using=nx.Graph(), nodetype=None)
# model = LINE(G, embedding_size=128, order='first')
# model = LINE(G, embedding_size=128, order='second')
model = LINE(G, embedding_size=128, order='all')
model.train(batch_size=1024, epochs=10, verbose=2)
embeddings = model.get_embeddings()
metric=evaluate_embeddings(embeddings)
print(metric)
show(metric)
|
{"hexsha": "74179540ca45752e69da5991b744580a68d2c8d7", "size": 1200, "ext": "py", "lang": "Python", "max_stars_repo_path": "demo/line_classification.py", "max_stars_repo_name": "237085795/GraphEmbedding_annotion", "max_stars_repo_head_hexsha": "973ee7dad5e65585407800720e4beb7137687a0e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "demo/line_classification.py", "max_issues_repo_name": "237085795/GraphEmbedding_annotion", "max_issues_repo_head_hexsha": "973ee7dad5e65585407800720e4beb7137687a0e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "demo/line_classification.py", "max_forks_repo_name": "237085795/GraphEmbedding_annotion", "max_forks_repo_head_hexsha": "973ee7dad5e65585407800720e4beb7137687a0e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.3333333333, "max_line_length": 104, "alphanum_fraction": 0.7333333333, "include": true, "reason": "import networkx", "num_tokens": 298}
|
## ---------------------------------------------------------------------------- ##
# 08/03/2015 #
# #
# www.henesis.eu #
# #
# Alessandro Bacchini - alessandro.bacchini@henesis.eu #
# #
# Copyright (c) 2015, Henesis s.r.l. part of Camlin Group #
# #
# The MIT License (MIT) #
# #
# Permission is here by granted, free of charge, to any person obtaining a copy #
# of this software and associated documentation files (the "Software"), to deal #
# in the Software without restriction, including without limitation the rights #
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #
# copies of the Software, and to permit persons to whom the Software is #
# furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all #
# copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #
# SOFTWARE. #
## ---------------------------------------------------------------------------- ##
import unittest
import pyerg
import numpy as np
ERG_1_FILENAME = "../test-data/Test-Dataset-1_175937.erg"
#ERG_2_FILENAME = "../data/test_data2.erg"
#ERG_3_FILENAME = "../data/test_data3.erg"
#ERG_4_FILENAME = "../data/fortran_data.erg"
class TestPyergReader(unittest.TestCase):
def setUp(self):
self.parser = pyerg.Reader()
def tearDown(self):
self.pareser = None
def test_Open(self):
parser = self.parser
parser.open(ERG_1_FILENAME)
self.assertEquals(parser.recordSize(), 56)
self.assertEquals(parser.numQuanities(), 12)
self.assertEquals(parser.records(), 602372)
self.assertTrue(parser.isErg())
#parser.open(ERG_2_FILENAME)
#self.assertEquals(parser.recordSize(), 56)
#self.assertEquals(parser.numQuanities(), 12)
#self.assertEquals(parser.records(), 602372)
#self.assertTrue(parser.isErg())
#self.assertRaises(NameError, parser.open, ERG_3_FILENAME)
#parser.open(ERG_4_FILENAME)
#self.assertEquals(parser.recordSize(), 48)
#self.assertEquals(parser.numQuanities(), 10)
#self.assertEquals(parser.records(), 13410)
#self.assertFalse(parser.isErg())
#self.assertTrue(parser.isFortran())
def test_Has(self):
parser = self.parser
parser.open(ERG_1_FILENAME)
self.assertTrue(parser.has("Data_8"))
self.assertTrue(parser.has("Data_1"))
self.assertTrue(parser.has("Data_2"))
self.assertTrue(parser.has("Data_3"))
self.assertTrue(parser.has("Data_4"))
self.assertTrue(parser.has("UserOut_00"))
self.assertTrue(parser.has("UserOut_01"))
self.assertTrue(parser.has("UserOut_02"))
self.assertTrue(parser.has("Data_5"))
self.assertTrue(parser.has("Data_6"))
self.assertTrue(parser.has("Data_7"))
self.assertTrue(parser.has("Data_9"))
self.assertFalse(parser.has("Data_10"))
self.assertFalse(parser.has("none"))
self.assertFalse(parser.has("$none$"))
#parser.open(ERG_4_FILENAME)
#self.assertTrue(parser.has("Data_8"))
#self.assertTrue(parser.has("data_0"))
#self.assertTrue(parser.has("data_1"))
#self.assertTrue(parser.has("data_2"))
#self.assertTrue(parser.has("data_3"))
#self.assertTrue(parser.has("data_4"))
#self.assertTrue(parser.has("data_5"))
#self.assertTrue(parser.has("data_6"))
#self.assertTrue(parser.has("data_7"))
#self.assertTrue(parser.has("data_8"))
#self.assertFalse(parser.has("$none$"))
def test_Index(self):
parser = self.parser
parser.open(ERG_1_FILENAME)
self.assertEquals(parser.index("Data_8"), 0)
self.assertEquals(parser.index("Data_1"), 1)
self.assertEquals(parser.index("Data_2"), 2)
self.assertEquals(parser.index("Data_3"), 3)
self.assertEquals(parser.index("Data_4"), 4)
self.assertEquals(parser.index("UserOut_00"), 5)
self.assertEquals(parser.index("UserOut_01"), 6)
self.assertEquals(parser.index("UserOut_02"), 7)
self.assertEquals(parser.index("Data_5"), 8)
self.assertEquals(parser.index("Data_6"), 9)
self.assertEquals(parser.index("Data_7"), 10)
self.assertEquals(parser.index("Data_9"), 11)
self.assertRaises(NameError, parser.index, "Data_10")
self.assertRaises(NameError, parser.index, "none")
self.assertRaises(NameError, parser.index, "$none$")
#parser.open(ERG_4_FILENAME)
#self.assertEquals(parser.index("Data_8"), 9)
#self.assertEquals(parser.index("data_0"), 0)
#self.assertEquals(parser.index("data_1"), 1)
#self.assertEquals(parser.index("data_2"), 2)
#self.assertEquals(parser.index("data_3"), 3)
#self.assertEquals(parser.index("data_4"), 4)
#self.assertEquals(parser.index("data_5"), 5)
#self.assertEquals(parser.index("data_6"), 6)
#self.assertEquals(parser.index("data_7"), 7)
#self.assertEquals(parser.index("data_8"), 8)
#self.assertRaises(NameError, parser.index, "$none$")
def test_DatasetSize(self):
parser = self.parser
parser.open(ERG_1_FILENAME)
rows = parser.records()
self.assertEquals(parser.quantitySize(0), 8*rows)
self.assertEquals(parser.quantitySize(1), 4*rows)
self.assertEquals(parser.quantitySize(2), 4*rows)
self.assertEquals(parser.quantitySize(3), 4*rows)
self.assertEquals(parser.quantitySize(4), 4*rows)
self.assertEquals(parser.quantitySize(5), 4*rows)
self.assertEquals(parser.quantitySize(6), 4*rows)
self.assertEquals(parser.quantitySize(7), 4*rows)
self.assertEquals(parser.quantitySize(8), 4*rows)
self.assertEquals(parser.quantitySize(9), 4*rows)
self.assertEquals(parser.quantitySize(10), 4*rows)
self.assertEquals(parser.quantitySize(11), 4*rows)
self.assertRaises(NameError, parser.quantitySize, 12)
self.assertEquals(parser.quantitySize("Data_8"), 8*rows)
#parser.open(ERG_4_FILENAME)
#rows = parser.records()
#for i in xrange(parser.numQuanities()):
# self.assertEquals(parser.quantitySize(i), 4*rows)
#self.assertEquals(parser.quantitySize("data_0"), 4*rows)
#self.assertEquals(parser.quantitySize("data_4"), 4*rows)
#self.assertEquals(parser.quantitySize("Data_8"), 4*rows)
def test_DatasetName(self):
parser = self.parser
parser.open(ERG_1_FILENAME)
self.assertEquals(parser.quantityName(0), "Data_8")
self.assertEquals(parser.quantityName(1), "Data_1")
self.assertEquals(parser.quantityName(2), "Data_2")
self.assertEquals(parser.quantityName(3), "Data_3")
self.assertEquals(parser.quantityName(4), "Data_4")
self.assertEquals(parser.quantityName(5), "UserOut_00")
self.assertEquals(parser.quantityName(6), "UserOut_01")
self.assertEquals(parser.quantityName(7), "UserOut_02")
self.assertEquals(parser.quantityName(8), "Data_5")
self.assertEquals(parser.quantityName(9), "Data_6")
self.assertEquals(parser.quantityName(10), "Data_7")
self.assertEquals(parser.quantityName(11), "Data_9")
self.assertRaises(NameError, parser.quantityName, 12)
def test_DatasetUnit(self):
parser = self.parser
parser.open(ERG_1_FILENAME)
self.assertEquals(parser.quantityUnit(0), "s")
self.assertEquals(parser.quantityUnit(1), "m")
self.assertEquals(parser.quantityUnit(2), "rad/s")
self.assertEquals(parser.quantityUnit(3), "Nm")
self.assertEquals(parser.quantityUnit(4), "")
self.assertEquals(parser.quantityUnit(5), "")
self.assertEquals(parser.quantityUnit(6), "")
self.assertEquals(parser.quantityUnit(7), "")
self.assertEquals(parser.quantityUnit(8), "m/s")
self.assertEquals(parser.quantityUnit(9), "m/s")
self.assertEquals(parser.quantityUnit(10), "m/s^2")
self.assertEquals(parser.quantityUnit(11), "")
self.assertRaises(NameError, parser.quantityUnit, 12)
def test_ReadAll(self):
parser = self.parser
parser.open(ERG_1_FILENAME)
data = parser.readAll()
self.assertIn('Data_8', data.keys())
t = data['Data_8']
self.assertEquals(len(t), parser.records())
def test_Read(self):
parser = self.parser
parser.open(ERG_1_FILENAME)
t = parser.read('Data_8')
self.assertEquals(len(t), parser.records())
t2 = parser.read('Data_8', start=10, count=90)
self.assertEquals(len(t2), 90)
self.assertTrue(np.all(t2 == t[10:100]))
class TestPyerg(unittest.TestCase):
def test_read(self):
parser = pyerg.Reader()
parser.open(ERG_1_FILENAME)
t0 = parser.read('Data_8')
data = pyerg.read(ERG_1_FILENAME)
self.assertIn('Data_8', data.keys())
t1 = data['Data_8']
self.assertTrue(np.all(t0 == t1))
def test_CanRead(self):
self.assertTrue(pyerg.can_read(ERG_1_FILENAME))
#self.assertTrue(pyerg.can_read(ERG_2_FILENAME))
#self.assertFalse(pyerg.can_read(ERG_3_FILENAME))
#self.assertTrue(pyerg.can_read(ERG_4_FILENAME))
def test_version(self):
self.assertTrue(isinstance(pyerg.__version__, str))
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "b776e75457dfca6c2e5420e1ed117a69bb2f2d00", "size": 10951, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/test_pyerg.py", "max_stars_repo_name": "henesissrl/pyerg", "max_stars_repo_head_hexsha": "7793257a46fc083c387c4d30b8620173f0362abc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-08-14T23:26:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-14T20:48:44.000Z", "max_issues_repo_path": "test/test_pyerg.py", "max_issues_repo_name": "henesissrl/pyerg", "max_issues_repo_head_hexsha": "7793257a46fc083c387c4d30b8620173f0362abc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2018-10-23T21:45:49.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-12T07:58:15.000Z", "max_forks_repo_path": "test/test_pyerg.py", "max_forks_repo_name": "henesissrl/pyerg", "max_forks_repo_head_hexsha": "7793257a46fc083c387c4d30b8620173f0362abc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2017-05-29T09:03:07.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-19T13:15:02.000Z", "avg_line_length": 44.1572580645, "max_line_length": 82, "alphanum_fraction": 0.5942836271, "include": true, "reason": "import numpy", "num_tokens": 2369}
|
# Common libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Restrict minor warnings
import warnings
warnings.filterwarnings('ignore')
# Import test and train data
df_train = pd.read_csv('../input/train.csv')
df_Test = pd.read_csv('../input/test.csv')
df_test = df_Test
# First 5 data points
df_train.head()
# Datatypes of the attributes
df_train.dtypes
pd.set_option('display.max_columns', None) # we need to see all the columns
df_train.describe()
# From both train and test data
df_train = df_train.drop(['Soil_Type7', 'Soil_Type15'], axis = 1)
df_test = df_test.drop(['Soil_Type7', 'Soil_Type15'], axis = 1)
# Also drop 'Id'
df_train = df_train.iloc[:,1:]
df_test = df_test.iloc[:,1:]
size = 10
corrmat = df_train.iloc[:,:size].corr()
f, ax = plt.subplots(figsize = (10,8))
sns.heatmap(corrmat,vmax=0.8,square=True);
data = df_train.iloc[:,:size]
# Get name of the columns
cols = data.columns
# Calculate the pearson correlation coefficients for all combinations
data_corr = data.corr()
# Threshold ( only highly correlated ones matter)
threshold = 0.5
corr_list = []
data_corr
# Sorting out the highly correlated values
for i in range(0, size):
for j in range(i+1, size):
if data_corr.iloc[i,j]>= threshold and data_corr.iloc[i,j]<1\
or data_corr.iloc[i,j] <0 and data_corr.iloc[i,j]<=-threshold:
corr_list.append([data_corr.iloc[i,j],i,j])
# Sorting the values
s_corr_list = sorted(corr_list,key= lambda x: -abs(x[0]))
# print the higher values
for v,i,j in s_corr_list:
print("%s and %s = %.2f" % (cols[i], cols[j], v))
df_train.iloc[:,:10].skew()
# Pair wise scatter plot with hue being 'Cover_Type'
for v,i,j in s_corr_list:
sns.pairplot(data = df_train, hue='Cover_Type', size= 6, x_vars=cols[i], y_vars=cols[j])
plt.show()
# A violin plot is a hybrid of a box plot and a kernel density plot, which shows peaks in the data.
cols = df_train.columns
size = len(cols) - 1 # We don't need the target attribute
# x-axis has target attributes to distinguish between classes
x = cols[size]
y = cols[0:size]
for i in range(0, size):
sns.violinplot(data=df_train, x=x, y=y[i])
plt.show()
df_train.Wilderness_Area2.value_counts()
### Group one-hot encoded variables of a category into one single variable
cols = df_train.columns
r,c = df_train.shape
# Create a new dataframe with r rows, one column for each encoded category, and target in the end
new_data = pd.DataFrame(index= np.arange(0,r), columns=['Wilderness_Area', 'Soil_Type', 'Cover_Type'])
# Make an entry in data for each r for category_id, target_value
for i in range(0,r):
p = 0;
q = 0;
# Category1_range
for j in range(10,14):
if (df_train.iloc[i,j] == 1):
p = j-9 # category_class
break
# Category2_range
for k in range(14,54):
if (df_train.iloc[i,k] == 1):
q = k-13 # category_class
break
# Make an entry in data for each r
new_data.iloc[i] = [p,q,df_train.iloc[i, c-1]]
# plot for category1
sns.countplot(x = 'Wilderness_Area', hue = 'Cover_Type', data = new_data)
plt.show()
# Plot for category2
plt.rc("figure", figsize = (25,10))
sns.countplot(x='Soil_Type', hue = 'Cover_Type', data= new_data)
plt.show()
# Checking the value count for different soil_types
for i in range(10, df_train.shape[1]-1):
j = df_train.columns[i]
print (df_train[j].value_counts())
# Let's drop them
df_train = df_train.drop(['Soil_Type8', 'Soil_Type25'], axis=1)
df_test = df_test.drop(['Soil_Type8', 'Soil_Type25'], axis=1)
df_train1 = df_train # To be used for algos like SVM where we need normalization and StandardScaler
df_test1 = df_test # To be used under normalization and StandardScaler
# Checking for data transformation (take only non-categorical values)
df_train.iloc[:,:10].skew()
#Horizontal_Distance_To_Hydrology
from scipy import stats
plt.figure(figsize=(8,6))
sns.distplot(df_train1['Horizontal_Distance_To_Hydrology'], fit = stats.norm)
fig = plt.figure(figsize=(8,6))
res = stats.probplot(df_train1['Horizontal_Distance_To_Hydrology'], plot=plt)
df_train1['Horizontal_Distance_To_Hydrology'] = np.sqrt(df_train1['Horizontal_Distance_To_Hydrology'])
# Plot again after sqrt transformation
plt.figure(figsize=(8,6))
sns.distplot(df_train1['Horizontal_Distance_To_Hydrology'], fit = stats.norm)
fig = plt.figure(figsize=(8,6))
res = stats.probplot(df_train1['Horizontal_Distance_To_Hydrology'], plot=plt)
#Vertical_Distance_To_Hydrology
plt.figure(figsize=(8,6))
sns.distplot(df_train1['Vertical_Distance_To_Hydrology'], fit = stats.norm)
fig = plt.figure(figsize=(8,6))
res = stats.probplot(df_train1['Vertical_Distance_To_Hydrology'], plot=plt)
#Horizontal_Distance_To_Roadways
plt.figure(figsize=(8,6))
sns.distplot(df_train1['Horizontal_Distance_To_Roadways'], fit=stats.norm)
fig = plt.figure(figsize=(8,6))
res = stats.probplot(df_train1['Horizontal_Distance_To_Roadways'], plot=plt)
df_train1['Horizontal_Distance_To_Roadways'] = np.sqrt(df_train1['Horizontal_Distance_To_Roadways'])
# Plot again after sqrt transformation
plt.figure(figsize=(8,6))
sns.distplot(df_train1['Horizontal_Distance_To_Roadways'], fit = stats.norm)
fig = plt.figure(figsize=(8,6))
res = stats.probplot(df_train1['Horizontal_Distance_To_Roadways'], plot=plt)
#Hillshade_9am
fig = plt.figure(figsize=(8,6))
sns.distplot(df_train1['Hillshade_9am'],fit=stats.norm)
fig = plt.figure(figsize=(8,6))
res = stats.probplot(df_train1['Hillshade_9am'],plot=plt)
df_train1['Hillshade_9am'] = np.square(df_train1['Hillshade_9am'])
# Plot again after square transformation
plt.figure(figsize=(8,6))
sns.distplot(df_train1['Hillshade_9am'], fit = stats.norm)
fig = plt.figure(figsize=(8,6))
res = stats.probplot(df_train1['Hillshade_9am'], plot=plt)
# Hillshade_Noon
fig = plt.figure(figsize=(8,6))
sns.distplot(df_train1['Hillshade_Noon'],fit=stats.norm)
fig = plt.figure(figsize=(8,6))
res = stats.probplot(df_train1['Hillshade_Noon'],plot=plt)
df_train1['Hillshade_Noon'] = np.square(df_train1['Hillshade_Noon'])
# Plot again after square transformation
fig = plt.figure(figsize=(8,6))
sns.distplot(df_train1['Hillshade_Noon'],fit=stats.norm)
fig = plt.figure(figsize=(8,6))
res = stats.probplot(df_train1['Hillshade_Noon'],plot=plt)
# Horizontal_Distance_To_Fire_Points
plt.figure(figsize=(8,6))
sns.distplot(df_train1['Horizontal_Distance_To_Fire_Points'], fit=stats.norm)
plt.figure(figsize=(8,6))
res = stats.probplot(df_train1['Horizontal_Distance_To_Fire_Points'],plot=plt)
df_train1['Horizontal_Distance_To_Fire_Points'] = np.sqrt(df_train1['Horizontal_Distance_To_Fire_Points'])
# Plot again after sqrt transformation
plt.figure(figsize=(8,6))
sns.distplot(df_train1['Horizontal_Distance_To_Fire_Points'], fit=stats.norm)
plt.figure(figsize=(8,6))
res = stats.probplot(df_train1['Horizontal_Distance_To_Fire_Points'],plot=plt)
# To be used in case of algorithms like SVM
df_test1[['Horizontal_Distance_To_Hydrology','Horizontal_Distance_To_Fire_Points'\
,'Horizontal_Distance_To_Roadways']] = np.sqrt(df_test1[['Horizontal_Distance_To_Hydrology',\
'Horizontal_Distance_To_Fire_Points','Horizontal_Distance_To_Roadways']])
# To be used in case of algorithms like SVM
df_test1[['Hillshade_9am','Hillshade_Noon']] = np.square(df_test1[['Hillshade_9am','Hillshade_Noon']])
from sklearn.preprocessing import StandardScaler
# Taking only non-categorical values
Size = 10
X_temp = df_train.iloc[:,:Size]
X_test_temp = df_test.iloc[:,:Size]
X_temp1 = df_train1.iloc[:,:Size]
X_test_temp1 = df_test1.iloc[:,:Size]
X_temp1 = StandardScaler().fit_transform(X_temp1)
X_test_temp1 = StandardScaler().fit_transform(X_test_temp1)
r,c = df_train.shape
X_train = np.concatenate((X_temp,df_train.iloc[:,Size:c-1]),axis=1)
X_train1 = np.concatenate((X_temp1, df_train1.iloc[:,Size:c-1]), axis=1) # to be used for SVM
y_train = df_train.Cover_Type.values
from sklearn import svm
from sklearn.model_selection import train_test_split
#from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.model_selection import RandomizedSearchCV,GridSearchCV
# Setting parameters
x_data, x_test_data, y_data, y_test_data = train_test_split(X_train1,y_train,test_size=0.2, random_state=123)
svm_para = [{'kernel':['rbf'],'C': [1,10,100,100]}]
#classifier = GridSearchCV(svm.SVC(),svm_para,cv=3,verbose=2)
#classifier.fit(x_data,y_data)
#classifier.best_params_
#classifier.grid_scores_
# Parameters optimized using the code in above cell
C_opt = 10 # reasonable option
clf = svm.SVC(C=C_opt,kernel='rbf')
clf.fit(X_train1,y_train)
clf.score(X_train1,y_train)
# y_pred = clf.predict(X_test1)
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.metrics import classification_report
# setting parameters
x_data, x_test_data, y_data, y_test_data = train_test_split(X_train,y_train,test_size= 0.3, random_state=0)
etc_para = [{'n_estimators':[20,30,100], 'max_depth':[5,10,15], 'max_features':[0.1,0.2,0.3]}]
# Default number of features is sqrt(n)
# Default number of min_samples_leaf is 1
ETC = GridSearchCV(ExtraTreesClassifier(),param_grid=etc_para, cv=10, n_jobs=-1)
ETC.fit(x_data, y_data)
ETC.best_params_
ETC.grid_scores_
print ('Best accuracy obtained: {}'.format(ETC.best_score_))
print ('Parameters:')
for key, value in ETC.best_params_.items():
print('\t{}:{}'.format(key,value))
# Classification Report
Y_pred = ETC.predict(x_test_data)
target = ['class1', 'class2','class3','class4','class5','class6','class7' ]
print (classification_report(y_test_data, Y_pred, target_names=target))
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
def plot_learning_curve(model,title, X, y,n_jobs = 1, ylim = None, cv = None,train_sizes = np.linspace(0.1, 1, 5)):
# Figrue parameters
plt.figure(figsize=(10,8))
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel('Training Examples')
plt.ylabel('Score')
train_sizes, train_score, test_score = learning_curve(model, X, y, cv = cv, n_jobs=n_jobs, train_sizes=train_sizes)
# Calculate mean and std
train_score_mean = np.mean(train_score, axis=1)
train_score_std = np.std(train_score, axis=1)
test_score_mean = np.mean(test_score, axis=1)
test_score_std = np.std(test_score, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_score_mean - train_score_std, train_score_mean + train_score_std,\
alpha = 0.1, color = 'r')
plt.fill_between(train_sizes, test_score_mean - test_score_std, test_score_mean + test_score_std,\
alpha = 0.1, color = 'g')
plt.plot(train_sizes, train_score_mean, 'o-', color="r", label="Training score")
plt.plot(train_sizes, test_score_mean, 'o-', color="g", label="Cross-validation score")
plt.legend(loc = "best")
return plt
# 'max_features': 0.3, 'n_estimators': 100, 'max_depth': 15, 'min_samples_leaf: 1'
etc = ExtraTreesClassifier(bootstrap=True, oob_score=True, n_estimators=100, max_depth=10, max_features=0.3, \
min_samples_leaf=1)
etc.fit(X_train, y_train)
# yy_pred = etc.predict(X_test)
etc.score(X_train, y_train)
# Plotting learning curve
title = 'Learning Curve (ExtraTreeClassifier)'
# cross validation with 50 iterations to have a smoother curve
cv = ShuffleSplit(n_splits=50, test_size=0.2, random_state=0)
model = etc
plot_learning_curve(model,title,X_train, y_train, n_jobs=-1,ylim=None,cv=cv)
plt.show()
|
{"hexsha": "b8c6376e51944fb83146db60cd199788362ea62d", "size": 11541, "ext": "py", "lang": "Python", "max_stars_repo_path": "kaggle/forest-cover-type-prediction/script_38.py", "max_stars_repo_name": "josepablocam/janus-public", "max_stars_repo_head_hexsha": "4713092b27d02386bdb408213d8edc0dc5859eec", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "kaggle/forest-cover-type-prediction/script_38.py", "max_issues_repo_name": "josepablocam/janus-public", "max_issues_repo_head_hexsha": "4713092b27d02386bdb408213d8edc0dc5859eec", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "kaggle/forest-cover-type-prediction/script_38.py", "max_forks_repo_name": "josepablocam/janus-public", "max_forks_repo_head_hexsha": "4713092b27d02386bdb408213d8edc0dc5859eec", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.2178571429, "max_line_length": 119, "alphanum_fraction": 0.7398838922, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3188}
|
from sys import argv
import numpy as np
import re
import itertools
import scipy.misc
import os
import array
from scipy.misc import imsave
class asmprocessor(object):
'''
Generate features from ASM files
'''
# helper methods
def asmToPng(self, asmFile, loc): # djaihfaig.asm
f = open(asmFile)
ln = os.path.getsize(asmFile) # orginal asm file size
witdth = int(ln**0.5) # define a sqrt(length) as its witdth
rem = ln%witdth # reminder
a = array.array("B") #
a.fromfile(f, ln-rem)
g = np.reshape(a, (len(a)/witdth, witdth))
g = np.uint8(g)
imsave(loc + asmFile.split('/')[-1].split('.')[0] + '.png', g)
# convert image to list
def readImage(self, filename):
'''
convert image to a list, keep first [numThreshold] pixels
'''
f = open(filename,'rb')
ln = os.path.getsize(filename) # length of file in bytes
width = 256
rem = ln%width
a = array.array("B") # uint8 array
a.fromfile(f,ln-rem)
f.close()
g = np.reshape(a,(len(a)/width,width))
g = np.uint8(g)
g.resize((numThreshold,))
return [filename.split('/')[-1].split('.')[0]] + list(g)
if __name__ == '__main__':
if len(argv) != 4:
print"""
Usage: python %s [asm_data_path] [png_data_path] [numThreshold]
""" % argv[0]
exit(1)
asmprc = asmprocessor()
path = argv[1]
pngpath = argv[2]
numThreshold=int(argv[3])
for filename in os.listdir(path):
asmprc.asmToPng(path + '/' + filename, pngpath + '/')
result = []
for pngfile in os.listdir(pngpath):
result.append(asmprc.readImage(pngpath + '/' + pngfile))
f = open('asmfeature.txt', 'w')
for i in range(len(result)):
f.write(str(result[i])+'\n')
f.close()
|
{"hexsha": "16e3e335fbd9af42bd52a7721d9f6783a930f774", "size": 1879, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/preprocess/AsmPreProcessing.py", "max_stars_repo_name": "ankit-vaghela30/Distributed-Malware-classification", "max_stars_repo_head_hexsha": "5479b5a9590c1ec436d937b287b7ffe08ff568b1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/preprocess/AsmPreProcessing.py", "max_issues_repo_name": "ankit-vaghela30/Distributed-Malware-classification", "max_issues_repo_head_hexsha": "5479b5a9590c1ec436d937b287b7ffe08ff568b1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/preprocess/AsmPreProcessing.py", "max_forks_repo_name": "ankit-vaghela30/Distributed-Malware-classification", "max_forks_repo_head_hexsha": "5479b5a9590c1ec436d937b287b7ffe08ff568b1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.6323529412, "max_line_length": 71, "alphanum_fraction": 0.5641298563, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 522}
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import pathlib
import sys
from collections import defaultdict
import numpy as np
import torch
from torch.utils.data import DataLoader
from common.args import Args
from common.subsample import MaskFunc
from common.utils import save_reconstructions
from data import transforms
from data.mri_data import SliceData
from models.unet.unet_model import UnetModel
class DataTransform:
"""
Data Transformer for running U-Net models on a test dataset.
"""
def __init__(self, resolution, which_challenge, mask_func=None):
"""
Args:
resolution (int): Resolution of the image.
which_challenge (str): Either "singlecoil" or "multicoil" denoting the dataset.
mask_func (common.subsample.MaskFunc): A function that can create a mask of
appropriate shape.
"""
if which_challenge not in ('singlecoil', 'multicoil'):
raise ValueError(f'Challenge should either be "singlecoil" or "multicoil"')
self.resolution = resolution
self.which_challenge = which_challenge
self.mask_func = mask_func
def __call__(self, kspace, target, attrs, fname, slice):
"""
Args:
kspace (numpy.Array): k-space measurements
target (numpy.Array): Target image
attrs (dict): Acquisition related information stored in the HDF5 object
fname (pathlib.Path): Path to the input file
slice (int): Serial number of the slice
Returns:
(tuple): tuple containing:
image (torch.Tensor): Normalized zero-filled input image
mean (float): Mean of the zero-filled image
std (float): Standard deviation of the zero-filled image
fname (pathlib.Path): Path to the input file
slice (int): Serial number of the slice
"""
kspace = transforms.to_tensor(kspace)
if self.mask_func is not None:
seed = tuple(map(ord, fname))
masked_kspace, _ = transforms.apply_mask(kspace, self.mask_func, seed)
else:
masked_kspace = kspace
# Inverse Fourier Transform to get zero filled solution
image = transforms.ifft2(masked_kspace)
# Crop input image
image = transforms.complex_center_crop(image, (self.resolution, self.resolution))
# Absolute value
image = transforms.complex_abs(image)
# Apply Root-Sum-of-Squares if multicoil data
if self.which_challenge == 'multicoil':
image = transforms.root_sum_of_squares(image)
# Normalize input
image, mean, std = transforms.normalize_instance(image)
image = image.clamp(-6, 6)
return image, mean, std, fname, slice
def create_data_loaders(args):
mask_func = None
if args.mask_kspace:
mask_func = MaskFunc(args.center_fractions, args.accelerations)
data = SliceData(
root=args.data_path / f'{args.challenge}_{args.data_split}',
transform=DataTransform(args.resolution, args.challenge, mask_func),
sample_rate=1.,
challenge=args.challenge
)
data_loader = DataLoader(
dataset=data,
batch_size=args.batch_size,
num_workers=4,
pin_memory=True,
)
return data_loader
def load_model(checkpoint_file):
checkpoint = torch.load(checkpoint_file)
args = checkpoint['args']
model = UnetModel(1, 1, args.num_chans, args.num_pools, args.drop_prob).to(args.device)
if args.data_parallel:
model = torch.nn.DataParallel(model)
model.load_state_dict(checkpoint['model'])
return model
def run_unet(args, model, data_loader):
model.eval()
reconstructions = defaultdict(list)
with torch.no_grad():
for (input, mean, std, fnames, slices) in data_loader:
input = input.unsqueeze(1).to(args.device)
recons = model(input).to('cpu').squeeze(1)
for i in range(recons.shape[0]):
recons[i] = recons[i] * std[i] + mean[i]
reconstructions[fnames[i]].append((slices[i].numpy(), recons[i].numpy()))
reconstructions = {
fname: np.stack([pred for _, pred in sorted(slice_preds)])
for fname, slice_preds in reconstructions.items()
}
return reconstructions
def main(args):
data_loader = create_data_loaders(args)
model = load_model(args.checkpoint)
reconstructions = run_unet(args, model, data_loader)
save_reconstructions(reconstructions, args.out_dir)
def create_arg_parser():
parser = Args()
parser.add_argument('--mask-kspace', action='store_true',
help='Whether to apply a mask (set to True for val data and False '
'for test data')
parser.add_argument('--data-split', choices=['val', 'test'], required=True,
help='Which data partition to run on: "val" or "test"')
parser.add_argument('--checkpoint', type=pathlib.Path, required=True,
help='Path to the U-Net model')
parser.add_argument('--out-dir', type=pathlib.Path, required=True,
help='Path to save the reconstructions to')
parser.add_argument('--batch-size', default=16, type=int, help='Mini-batch size')
parser.add_argument('--device', type=str, default='cuda', help='Which device to run on')
return parser
if __name__ == '__main__':
args = create_arg_parser().parse_args(sys.argv[1:])
main(args)
|
{"hexsha": "534b353c8b3d08ed38463e0dd11087335dcd4455", "size": 5699, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/unet/run_unet.py", "max_stars_repo_name": "vaibhavsaxena11/fastMRI", "max_stars_repo_head_hexsha": "9e1f1574ce25ee56e4c4e35c3b916119d4259ec5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2021-06-16T20:04:27.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-17T18:57:37.000Z", "max_issues_repo_path": "models/sensitivity/run_sensitivity.py", "max_issues_repo_name": "seansegal/fastMRI", "max_issues_repo_head_hexsha": "44ebd517d792c5f6e66c64c004d0e0603057e7e1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/sensitivity/run_sensitivity.py", "max_forks_repo_name": "seansegal/fastMRI", "max_forks_repo_head_hexsha": "44ebd517d792c5f6e66c64c004d0e0603057e7e1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-06-19T18:00:58.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-14T05:08:16.000Z", "avg_line_length": 37.2483660131, "max_line_length": 92, "alphanum_fraction": 0.647306545, "include": true, "reason": "import numpy", "num_tokens": 1258}
|
import sys
print(sys.version)
from mpi4py import MPI
comm = MPI.COMM_WORLD
num_procs = comm.Get_size()
rank = comm.Get_rank()
from sklearn.multiclass import OneVsRestClassifier
rank_i = rank%3
import gc
import pandas as pd
import numpy as np
import os
from sklearn.model_selection import GridSearchCV, cross_val_score, cross_validate, KFold
import pickle
df = pd.read_csv('0_labelled_documents.csv')
df = (df
.query('driver_coded==1')
.query('relevant==1')
.sort_values('id')
.sample(frac=1, random_state=1)
.reset_index(drop=True)
)
df.loc[df['representative_relevant_sample']==1,'random_sample'] = 1
print(df.shape)
def KFoldRandom(n_splits, X, no_test, shuffle=False, discard=True):
kf = KFold(n_splits=n_splits, shuffle=shuffle)
for train, test in kf.split(X):
if not discard:
train = list(train) + [x for x in test if x in no_test]
test = [x for x in test if x not in no_test]
yield (train, test)
from sklearn.metrics import roc_curve, accuracy_score, roc_auc_score, precision_recall_curve, f1_score
from sklearn.metrics import precision_score, recall_score
def evaluate_preds(y_true, y_pred, targets):
res = {}
for average in ["micro","macro","weighted", "samples"]:
try:
res[f'ROC AUC {average}'] = roc_auc_score(y_true, y_pred, average=average)
except:
res[f'ROC AUC {average}'] = np.NaN
res[f'F1 {average}'] = f1_score(y_true, y_pred.round(), average=average)
res[f'precision {average}'] = precision_score(y_true, y_pred.round(), average=average)
res[f'recall {average}'] = recall_score(y_true, y_pred.round(), average=average)
for i, target in enumerate(targets):
try:
res[f'ROC AUC - {target}'] = roc_auc_score(y_true[:,i], y_pred[:,i])
except:
res[f'ROC AUC - {target}'] = np.NaN
res[f'precision - {target}'] = precision_score(y_true[:,i], y_pred[:,i].round())
res[f'recall - {target}'] = recall_score(y_true[:,i], y_pred[:,i].round())
res[f'F1 - {target}'] = f1_score(y_true[:,i], y_pred[:,i].round())
res[f'accuracy - {target}'] = accuracy_score(y_true[:,i], y_pred[:,i].round())
res[f'n_target - {target}'] = y_true[:,i].sum()
return res
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import SVC
targets = ['6 - Temperature','6 - Precipitation','6 - Other']
df['labels'] = list(df[targets].values)
class_weight = {}
for i, t in enumerate(targets):
cw = df[(df['random_sample']==1) & (df[t]==0)].shape[0] / df[(df['random_sample']==1) & (df[t]==1)].shape[0]
class_weight[i] = cw
class_weight
y = np.matrix(df[targets])
pipeline = Pipeline([
('vect', TfidfVectorizer()),
('clf', OneVsRestClassifier(SVC(probability=True))),
])
parameters = [
{
'vect__max_df': (0.5, 0.75, 1.0),
'vect__min_df': (10, 15, 20),
'vect__ngram_range': ((1, 1), (1, 2)),
'clf__estimator__kernel': ['rbf'],
'clf__estimator__gamma': [1e-3, 1e-4],
'clf__estimator__C': [1, 10, 100, 1000],
'clf__estimator__class_weight': [None, 'balanced']
},
{
'vect__max_df': (0.5, 0.75, 1.0),
'vect__min_df': (10, 15, 20),
'vect__ngram_range': ((1, 1), (1, 2)),
'clf__estimator__kernel': ['linear'],
'clf__estimator__C': [1, 10, 100, 1000],
'clf__estimator__class_weight': [None, 'balanced']
}
]
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
outer_cv = KFoldRandom(3, df.index, df[df['random_sample']!=1].index, discard=False)
outer_scores = []
clfs = []
for k, (train, test) in enumerate(outer_cv):
if k!=rank_i:
continue
inner_cv = KFoldRandom(3, train, df[df['random_sample']!=1].index, discard=False)
clf = GridSearchCV(pipeline, parameters, scoring="f1_macro", n_jobs=8, verbose=1, cv=inner_cv)
clf.fit(df.loc[train, 'content'], y[train])
inner_scores = pd.DataFrame(clf.cv_results_)
inner_scores.to_csv(f'cv_3/svm_inner_drivers_{rank_i}.csv', index=False)
y_pred = clf.predict_proba(df.loc[test,'content'])
ai = np.expand_dims(np.argmax(y_pred, axis=1), axis=1)
maximums = np.maximum(y_pred.max(1),0.51)
np.put_along_axis(y_pred, ai, maximums.reshape(ai.shape), axis=1)
eps = evaluate_preds(y[test], y_pred, targets)
best_params = inner_scores.sort_values('mean_test_score',ascending=False).to_dict('records')[0]['params']
for key, value in best_params.items():
eps[key] = value
eps["rank_k"] = rank_i
pd.DataFrame([eps]).to_csv(f'cv_3/svm_outer_drivers_{rank_i}.csv',index=False)
|
{"hexsha": "4af6de3bd5a29c812e249ab17d97523b5c9c15fa", "size": 4810, "ext": "py", "lang": "Python", "max_stars_repo_path": "analysis/2-cluster-ml-scripts/cv_svm_drivers.py", "max_stars_repo_name": "mcallaghan/regional-impacts-map", "max_stars_repo_head_hexsha": "10b95189255e5f626f94bc140ed16b7bcd7ca33e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-10-11T23:40:03.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-26T16:49:29.000Z", "max_issues_repo_path": "analysis/2-cluster-ml-scripts/cv_svm_drivers.py", "max_issues_repo_name": "mcallaghan/regional-impacts-map", "max_issues_repo_head_hexsha": "10b95189255e5f626f94bc140ed16b7bcd7ca33e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "analysis/2-cluster-ml-scripts/cv_svm_drivers.py", "max_forks_repo_name": "mcallaghan/regional-impacts-map", "max_forks_repo_head_hexsha": "10b95189255e5f626f94bc140ed16b7bcd7ca33e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-10-17T04:35:20.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-13T14:48:34.000Z", "avg_line_length": 35.1094890511, "max_line_length": 112, "alphanum_fraction": 0.6457380457, "include": true, "reason": "import numpy", "num_tokens": 1392}
|
#!/usr/bin/env python
# -*- coding: utf-8; -*-
# Copyright (c) 2020, 2022 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from IPython.core.display import display
from cycler import cycler
import matplotlib as mpl
import re
from ads.common import logger
mpl.rcParams["image.cmap"] = "BuGn"
mpl.rcParams["axes.prop_cycle"] = cycler(
color=["teal", "blueviolet", "forestgreen", "peru", "y", "dodgerblue", "r"]
)
from ads.evaluations.evaluation_plot import EvaluationPlot
from ads.evaluations.statistical_metrics import ModelEvaluator
from ads.dataset.dataset_with_target import ADSDatasetWithTarget
from ads.common.model import ADSModel
from ads.common.decorator.runtime_dependency import runtime_dependency
class ADSEvaluator(object):
"""ADS Evaluator class. This class holds field and methods for creating and using
ADS evaluator objects.
Attributes
----------
evaluations : list[DataFrame]
list of evaluations.
is_classifier : bool
Whether the model has a non-empty `classes_` attribute indicating the presence of class labels.
legend_labels : dict
List of legend labels. Defaults to `None`.
metrics_to_show : list[str]
Names of metrics to show.
models : list[ads.common.model.ADSModel]
The object built using `ADSModel.from_estimator()`.
positive_class : str or int
The class to report metrics for binary dataset, assumed to be true.
show_full_name :bool
Whether to show the name of the evaluator in relevant contexts.
test_data : ads.common.data.ADSData
Test data to evaluate model on.
training_data : ads.common.data.ADSData
Training data to evaluate model.
Positive_Class_names : list
Class attribute listing the ways to represent positive classes
Methods
-------
add_metrics(func, names)
Adds the listed metics to the evaluator it is called on
del_metrics(names)
Removes listed metrics from the evaluator object it is called on
add_models(models, show_full_name)
Adds the listed models to the evaluator object
del_models(names)
Removes the listed models from the evaluator object
show_in_notebook(plots, use_training_data, perfect, baseline, legend_labels)
Visualize evalutation plots in the notebook
calculate_cost(tn_weight, fp_weight, fn_weight, tp_weight, use_training_data)
Returns a cost associated with the input weights
"""
Positive_Class_Names = ["yes", "y", "t", "true", "1"]
def __init__(
self,
test_data,
models,
training_data=None,
positive_class=None,
legend_labels=None,
show_full_name=False,
):
"""Creates an ads evaluator object.
Parameters
----------
test_data : ads.common.data.ADSData instance
Test data to evaluate model on.
The object can be built using `ADSData.build()`.
models : list[ads.common.model.ADSModel]
The object can be built using `ADSModel.from_estimator()`.
Maximum length of the list is 3
training_data : ads.common.data.ADSData instance, optional
Training data to evaluate model on and compare metrics against test data.
The object can be built using `ADSData.build()`
positive_class : str or int, optional
The class to report metrics for binary dataset. If the target classes is True or False,
positive_class will be set to True by default. If the dataset is multiclass or multilabel,
this will be ignored.
legend_labels : dict, optional
List of legend labels. Defaults to `None`.
If legend_labels not specified class names will be used for plots.
show_full_name : bool, optional
Show the name of the evaluator object. Defaults to `False`.
Examples
--------
>>> train, test = ds.train_test_split()
>>> model1 = MyModelClass1.train(train)
>>> model2 = MyModelClass2.train(train)
>>> evaluator = ADSEvaluator(test, [model1, model2])
>>> legend_labels={'class_0': 'one', 'class_1': 'two', 'class_2': 'three'}
>>> multi_evaluator = ADSEvaluator(test, models=[model1, model2],
... legend_labels=legend_labels)
"""
self.evaluations = []
if isinstance(training_data, ADSDatasetWithTarget):
training_data, _ = training_data.train_test_split(test_size=0.0)
if isinstance(test_data, ADSDatasetWithTarget):
test_data, _ = test_data.train_test_split(test_size=0.0)
self.test_data = test_data
self.training_data = training_data
self.classes = []
self.is_classifier = (
hasattr(models[0], "classes_") and models[0].classes_ is not None
)
pclass = positive_class
if self.is_classifier:
self.classes = list(models[0].classes_)
if len(self.classes) == 2:
self.metrics_to_show = [
"accuracy",
"hamming_loss",
"precision",
"recall",
"f1",
"auc",
]
if positive_class is None or positive_class not in self.classes:
pclass = next(
(
x
for x in list(self.classes)
if str(x).lower() in ADSEvaluator.Positive_Class_Names
),
self.classes[0],
)
logger.info(
f"Using {pclass} as the positive class. Use `positive_class` to set this value."
)
else:
# Multi-class
self.metrics_to_show = [
"accuracy",
"hamming_loss",
"precision_weighted",
"precision_micro",
"recall_weighted",
"recall_micro",
"f1_weighted",
"f1_micro",
]
else:
# Regression
self.metrics_to_show = ["r2_score", "mse", "mae"]
self.positive_class = pclass
self.legend_labels = legend_labels
for m in models:
if not (isinstance(m, ADSModel)):
try:
m = ADSModel.from_estimator(m.est)
except:
logger.info("This model cannot be converted to an ADS Model.")
self.evaluations = [pd.DataFrame(), pd.DataFrame()]
self.model_names = []
self.add_models(models, show_full_name=show_full_name)
def add_metrics(self, funcs, names):
"""Adds the listed metrics to the evaluator object it is called on.
Parameters
----------
funcs : list
The list of metrics to be added. This function will be provided `y_true`
and `y_pred`, the true and predicted values for each model.
names : list[str])
The list of metric names corresponding to the functions.
Returns
-------
Nothing
Examples
--------
>>> def f1(y_true, y_pred):
... return np.max(y_true - y_pred)
>>> evaluator = ADSEvaluator(test, [model1, model2])
>>> evaluator.add_metrics([f1], ['Max Residual'])
>>> evaluator.metrics
Output table will include the desired metric
"""
if len(funcs) != len(names):
raise ValueError("Could not find 1 unique name for each function")
for name, f in zip(names, funcs):
f_res = []
for m in self.evaluations[1].columns:
res = f(
self.evaluations[1][m]["y_true"], self.evaluations[1][m]["y_pred"]
)
f_res.append(res)
pd_res = pd.DataFrame(
[f_res], columns=self.evaluations[1].columns, index=[name]
)
self.evaluations[1] = pd.concat([self.evaluations[1], pd_res])
if self.evaluations[0].shape != (0, 0):
f_res = []
for m in self.evaluations[0].columns:
res = f(
self.evaluations[0][m]["y_true"],
self.evaluations[0][m]["y_pred"],
)
f_res.append(res)
pd_res = pd.DataFrame(
[f_res], columns=self.evaluations[0].columns, index=[name]
)
self.evaluations[0] = pd.concat([self.evaluations[0], pd_res])
if name not in self.metrics_to_show:
self.metrics_to_show.append(name)
setattr(self, "train_evaluations", self.evaluations[0])
setattr(self, "test_evaluations", self.evaluations[1])
def del_metrics(self, names):
"""Removes the listed metrics from the evaluator object it is called on.
Parameters
----------
names : list[str]
The list of names of metrics to be deleted. Names can be found by calling
`evaluator.test_evaluations.index`.
Returns
-------
None
`None`
Examples
--------
>>> evaluator = ADSEvaluator(test, [model1, model2])
>>> evaluator.del_metrics(['mse])
>>> evaluator.metrics
Output table will exclude the desired metric
"""
self.evaluations[1].drop(index=names, inplace=True)
if self.evaluations[0].shape != (0, 0):
self.evaluations[0].drop(index=names, inplace=True)
self.metrics_to_show = [met for met in self.metrics_to_show if met not in names]
def add_models(self, models, show_full_name=False):
"""Adds the listed models to the evaluator object it is called on.
Parameters
----------
models : list[ADSModel]
The list of models to be added
show_full_name : bool, optional
Whether to show the full model name. Defaults to False.
** NOT USED **
Returns
-------
Nothing
Examples
--------
>>> evaluator = ADSEvaluator(test, [model1, model2])
>>> evaluator.add_models("model3])
"""
if type(models) is list:
total_train_metrics = self.evaluations[0]
total_test_metrics = self.evaluations[1]
for i, m in enumerate(models):
# if hasattr(m, 'classes_') != self.is_classifier:
# raise ValueError("All models should belong to same problem type.")
# calculate evaluations on testing and training data (if X_train is not None)
m_name = self._get_model_name(m.name)
if self.training_data is not None:
y_pred, y_score = self._score_data(m, self.training_data.X)
train_metrics = ModelEvaluator(
y_true=self.training_data.y,
y_pred=y_pred,
model_name=m_name,
classes=m.classes_ if self.is_classifier else None,
y_score=y_score,
positive_class=self.positive_class,
).get_metrics()
total_train_metrics = pd.concat(
[total_train_metrics, train_metrics], axis=1
)
y_pred, y_score = self._score_data(m, self.test_data.X)
test_metrics = ModelEvaluator(
y_true=self.test_data.y,
y_pred=y_pred,
model_name=m_name,
classes=m.classes_ if self.is_classifier else None,
y_score=y_score,
positive_class=self.positive_class,
).get_metrics()
total_test_metrics = pd.concat(
[total_test_metrics, test_metrics], axis=1, sort=False
)
self.evaluations = [total_train_metrics, total_test_metrics]
setattr(self, "train_evaluations", self.evaluations[0])
setattr(self, "test_evaluations", self.evaluations[1])
def del_models(self, names):
"""Removes the listed models from the evaluator object it is called on.
Parameters
----------
names : list[str]
the list of models to be delete. Names are the model names by default, and
assigned internally when conflicts exist. Actual names can be found using
`evaluator.test_evaluations.columns`
Returns
-------
Nothing
Examples
--------
>>> model3.rename("model3")
>>> evaluator = ADSEvaluator(test, [model1, model2, model3])
>>> evaluator.del_models([model3])
"""
if type(names) is list:
self.model_names = [n for n in self.model_names if n not in names]
self.evaluations[1].drop(columns=names, inplace=True)
if self.evaluations[0].shape != (0, 0):
self.evaluations[0].drop(columns=names, inplace=True)
def show_in_notebook(
self,
plots=None,
use_training_data=False,
perfect=False,
baseline=True,
legend_labels=None,
):
"""Visualize evaluation plots.
Parameters
----------
plots : list, optional
Filter the plots that are displayed. Defaults to None. The name of the plots are as below:
- regression - residuals_qq, residuals_vs_fitted
- binary classification - normalized_confusion_matrix, roc_curve, pr_curve
- multi class classification - normalized_confusion_matrix, precision_by_label, recall_by_label, f1_by_label
use_training_data : bool, optional
Use training data to generate plots. Defaults to `False`.
By default, this method uses test data to generate plots
legend_labels : dict, optional
Rename legend labels, that used for multi class classification plots. Defaults to None.
legend_labels dict keys are the same as class names. legend_labels dict values are strings.
If legend_labels not specified class names will be used for plots.
Returns
-------
None
Nothing. Outputs several evaluation plots as specified by `plots`.
Examples
--------
>>> evaluator = ADSEvaluator(test, [model1, model2])
>>> evaluator.show_in_notebook()
>>> legend_labels={'class_0': 'green', 'class_1': 'yellow', 'class_2': 'red'}
>>> multi_evaluator = ADSEvaluator(test, [model1, model2],
... legend_labels=legend_labels)
>>> multi_evaluator.show_in_notebook(plots=["normalized_confusion_matrix",
... "precision_by_label", "recall_by_label", "f1_by_label"])
"""
# get evaluations
if use_training_data:
if self.training_data is None:
raise ValueError(
"Training data is not provided. Re-build ADSData with training and test data"
)
model_evaluation = self.evaluations[0]
else:
model_evaluation = self.evaluations[1]
legend_labels = (
legend_labels if legend_labels is not None else self.legend_labels
)
# pass to plotting class
EvaluationPlot.plot(
model_evaluation, plots, len(self.classes), perfect, baseline, legend_labels
)
def calculate_cost(
self, tn_weight, fp_weight, fn_weight, tp_weight, use_training_data=False
):
"""Returns a cost associated with the input weights.
Parameters
----------
tn_weight : int, float
The weight to assign true negatives in calculating the cost
fp_weight : int, float
The weight to assign false positives in calculating the cost
fn_weight : int, float
The weight to assign false negatives in calculating the cost
tp_weight : int, float
The weight to assign true positives in calculating the cost
use_training_data : bool, optional
Use training data to pull the metrics. Defaults to False
Returns
-------
:class:`pandas.DataFrame`
DataFrame with the cost calculated for each model
Examples
--------
>>> evaluator = ADSEvaluator(test, [model1, model2])
>>> costs_table = evaluator.calculate_cost(0, 10, 1000, 0)
"""
if len(self.classes) != 2:
raise ValueError(
"The calculate_cost api is not supported for non-binary classification datasets."
)
cost_per_model = []
if use_training_data:
if self.training_data is None:
raise ValueError(
"Training data is not provided. Re-build ADSData with training and test data."
)
ev = self.evaluations[0]
else:
ev = self.evaluations[1]
list_of_model = ev.columns
for m in list_of_model:
tn, fp, fn, tp = ev[m]["raw_confusion_matrix"].ravel()
cost_per_model.append(
tn * tn_weight + fp * fp_weight + fn * fn_weight + tp * tp_weight
)
cost_df = pd.DataFrame({"model": list_of_model, "cost": cost_per_model})
return cost_df
class EvaluationMetrics(object):
"""Class holding evaluation metrics.
Attributes
----------
ev_test : list
evaluation test metrics
ev_train : list
evaluation training metrics
use_training : bool
use training data
less_is_more : list
metrics list
Methods
-------
show_in_notebook()
Shows visualization metrics as a color coded table
"""
DEFAULT_LABELS_MAP = {
"accuracy": "Accuracy",
"hamming_loss": "Hamming distance",
"kappa_score_": "Cohen's kappa coefficient",
"precision": "Precision",
"recall": "Recall",
"f1": "F1",
"auc": "ROC AUC",
}
def __init__(
self, ev_test, ev_train, use_training=False, less_is_more=None, precision=4
):
self.ev_test = ev_test
self.ev_train = ev_train
self.use_training = use_training
self.precision = precision
if isinstance(less_is_more, list):
self.less_is_more = [
"hamming_loss",
"hinge_loss",
"mse",
"mae",
] + less_is_more
else:
self.less_is_more = ["hamming_loss", "hinge_loss", "mse", "mae"]
def __repr__(self):
self.show_in_notebook()
return ""
@property
def precision(self):
return self._precision
@precision.setter
def precision(self, value):
"""
Set precision to @property of the class.
"""
if not isinstance(value, int):
if not (isinstance(value, float) and value.is_integer()):
raise TypeError("'value' must be integer")
value = int(value)
if value < 0:
raise ValueError("'value' must be non-negative")
self._precision = value
def show_in_notebook(self, labels=DEFAULT_LABELS_MAP):
"""
Visualizes evaluation metrics as a color coded table.
Parameters
----------
labels : dictionary
map printing specific labels for metrics display
Returns
-------
Nothing
"""
def highlight_max(s):
"""Highlight the maximum in a Series yellow.
Parameters
----------
s : series object
the series being evaluated
Returns
-------
list
containing background color data or empty if not max
"""
if s.name not in self.less_is_more:
is_max = s == s.max()
else:
is_max = s == s.min()
return ["background-color: lightgreen" if v else "" for v in is_max]
table_styles = [
dict(props=[("text-align", "right")]),
dict(selector="caption", props=[("caption-side", "top")]),
]
def _pretty_label(df, labels, copy=False):
"""
Output specified labels in proper format.
If the labels are provided in then used them. Otherwise, use default.
Parameters
----------
labels : dictionary
map printing specific labels for metrics display
Returns
-------
dataframe
dataframe with index names modified according to input labels
"""
if copy:
df = df.copy()
for k, v in labels.items():
df.rename(index={k: v}, inplace=True)
return df
@runtime_dependency(
module="ipywidgets", object="HTML", install_from="oracle-ads[notebook]"
)
def _display_metrics(df, data_name, labels, precision):
"""
display metrics on web page
Parameters
----------
df : dataframe
metrics in dataframe format
data_name : string
name of data given metrics df describe
labels : dictionary
map printing specific labels for metrics display
precision : int
precision for metrics display
Returns
-------
Nothing
"""
display(
HTML(
_pretty_label(df, labels)
.style.apply(highlight_max, axis=1)
.set_precision(precision)
.set_properties(**{"text-align": "center"})
.set_table_attributes("class=table")
.set_caption(
'<div align="left"><b style="font-size:20px;">'
+ "Evaluation Metrics ("
+ data_name
+ "):</b></div>"
)
.render()
)
)
_display_metrics(self.ev_test, "testing data", labels, self.precision)
if self.use_training:
_display_metrics(self.ev_train, "training data", labels, self.precision)
@property
def raw_metrics(self, metrics=None, use_training_data=False):
"""Returns the raw metric numbers
Parameters
----------
metrics : list, optional
Request metrics to pull. Defaults to all.
use_training_data : bool, optional
Use training data to pull metrics. Defaults to False
Returns
-------
dict
The requested raw metrics for each model. If `metrics` is `None` return all.
Examples
--------
>>> evaluator = ADSEvaluator(test, [model1, model2])
>>> raw_metrics_dictionary = evaluator.raw_metrics()
"""
[train_met, test_met] = self.evaluations
test_d = test_met.to_dict()
if use_training_data and train_met is not None:
train_d = train_met.add_suffix("_train").to_dict()
test_d.update(train_d)
for m, data in test_d.items():
ret = dict()
for k, v in data.items():
if isinstance(v, np.ndarray):
ret[k] = v.tolist()
else:
ret[k] = v
test_d[m] = ret
return test_d
@property
def metrics(self):
"""Returns evaluation metrics
Returns
-------
metrics
HTML representation of a table comparing relevant metrics.
Examples
--------
>>> evaluator = ADSEvaluator(test, [model1, model2])
>>> evaluator.metrics
Outputs table displaying metrics.
"""
ev_test = self.evaluations[1].loc[self.metrics_to_show]
use_training = self.evaluations[0].shape != (0, 0)
ev_train = (
self.evaluations[0].loc[self.metrics_to_show] if use_training else None
)
return ADSEvaluator.EvaluationMetrics(ev_test, ev_train, use_training)
"""
Internal methods
"""
def _get_model_name(self, name, show_full_name=False):
name_edit = re.sub(r" ?\([^)]+\)", "", name)
## if name only has '(' without ')', the code above wouldnt remove the argument followed by '('.
if "(" in name_edit and not show_full_name:
name_edit = name.split("(")[0]
logger.info("Use `show_full_name=True` to show the full model name.")
if name_edit in self.model_names:
name_edit += "_1"
num_tries = 1
while name_edit in self.model_names:
num_tries += 1
name_edit = name_edit[:-1] + str(num_tries)
if num_tries == 1:
logger.info(
f"The name '{name_edit[:-2]}' is used by multiple models. "
f"Use the `rename()` method to change the name."
)
self.model_names.append(name_edit)
return name_edit
def _score_data(self, est, X):
y_pred = est.predict(X)
y_score = None
# we will compute y_score only for binary classification cases because only for binary classification can
# we use it for ROC Curves and AUC etc
if self.is_classifier and hasattr(est.est, "predict_proba"):
if len(est.classes_) == 2:
# positive label index is assumed to be 0 if the ADSModel does not have a positive class defined
positive_class_index = 0
# For prediction probability, we only consider the positive class.
if self.positive_class is not None:
if self.positive_class not in list(est.classes_):
raise ValueError(
"Invalid positive class '%s' for model %s. Positive class should be one of %s."
% (
self.positive_class,
est.est.__class__.__name__,
list(est.classes_),
)
)
positive_class_index = list(est.classes_).index(self.positive_class)
y_score = est.predict_proba(X)[:, positive_class_index]
else:
y_score = est.predict_proba(X)
return y_pred, y_score
|
{"hexsha": "04518e986c99553cc048008e586d5a08ac2c2d66", "size": 27831, "ext": "py", "lang": "Python", "max_stars_repo_path": "ads/evaluations/evaluator.py", "max_stars_repo_name": "oracle/accelerated-data-science", "max_stars_repo_head_hexsha": "d594ed0c8c1365daf4cf9e860daebc760fa9a24b", "max_stars_repo_licenses": ["UPL-1.0", "Apache-2.0"], "max_stars_count": 20, "max_stars_repo_stars_event_min_datetime": "2022-02-22T19:07:09.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T17:21:42.000Z", "max_issues_repo_path": "ads/evaluations/evaluator.py", "max_issues_repo_name": "oracle/accelerated-data-science", "max_issues_repo_head_hexsha": "d594ed0c8c1365daf4cf9e860daebc760fa9a24b", "max_issues_repo_licenses": ["UPL-1.0", "Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ads/evaluations/evaluator.py", "max_forks_repo_name": "oracle/accelerated-data-science", "max_forks_repo_head_hexsha": "d594ed0c8c1365daf4cf9e860daebc760fa9a24b", "max_forks_repo_licenses": ["UPL-1.0", "Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.1575433912, "max_line_length": 124, "alphanum_fraction": 0.5440336316, "include": true, "reason": "import numpy", "num_tokens": 5538}
|
"""
Interpolation for BASTA: Across/between tracks
"""
import os
import sys
import copy
import h5py
import numpy as np
from tqdm import tqdm
from scipy import spatial
from scipy import interpolate
from basta import sobol_numbers
from basta import interpolation_helpers as ih
from basta import plot_interp as ip
# ======================================================================================
# Interpolation helper routines
# ======================================================================================
def _check_sobol(grid, res):
"""
Checks and unpacks whether sobol interpolation has been requested, and if Cartesian
interpolation has been requested for Sobol grid, which is invalid.
Parameters
----------
grid : h5py file
Handle of grid to process
res : dict
Dictionary of all the inputted resolution parameters
Returns
-------
sobol : float/bool
The scale value for across resolution if Sobol interpolation, False if not Sobol
"""
# Read gridtype from header | Allow for usage of both h5py 2.10.x and 3.x.x
# --> If things are encoded as bytes, they must be made into standard strings
gridtype = grid["header/library_type"][()]
if isinstance(gridtype, bytes):
gridtype = gridtype.decode("utf-8")
# Check type and inputted scale resolution
if "sobol" in gridtype.lower():
if res["scale"] < 1.0:
errstr = "For Sobol type grid only an increase in tracks via the 'scale' "
errstr += "parameter is possible, please enter a value > 1."
raise KeyError(errstr)
sobol = res["scale"]
elif "cartesian" in gridtype.lower():
if res["scale"] > 1.0:
sobol = res["scale"]
else:
sobol = False
elif "isochrones" in gridtype.lower():
if res["scale"] < 1.0:
errstr = "For isochrone grids only an increase in isochrones via the "
errstr += "'scale' parameter is possible, please enter a value > 1."
raise KeyError(errstr)
sobol = res["scale"]
else:
raise KeyError(
"Interpolation not possible for grid of type {0}".format(gridtype)
)
# Highlight redundant resolution for the user
if sobol:
for var in res:
if (var not in ["scale", "baseparam"]) and res[var] != 0:
prtstr = "Gridresolution in '{0}' is set but ignored, ".format(var)
prtstr += "as 'scale' is set for Sobol interpolation."
print(prtstr)
return sobol
def _calc_across_points(
base, baseparams, tri, sobol, outbasename, debug=False, verbose=False
):
"""
Determine the new points for tracks in the base parameters, for either Cartesian
or Sobol sampling. For Sobol, it determines a new Sobol sampling which satisfy an
increase in number of tracks given a scale value. For Cartesian, given a set of
whole numbers for each interpolation dimension, it will assign equally spaced points
between the existing points, with possible 'overflow' in the positive direction
of the parameter.
Also plots the old vs new base of the interpolation, no plot for dim(base) = 1,
corner plot for dim(base) > 2.
Parameters
----------
base : array
The current base of the grid, formed as (number of tracks, parameters in base).
baseparams : dict
Dictionary of the parameters forming the grid, with the required resolution of
the parameters.
tri : object
Triangulation of the base.
sobol : float/bool
Scale resolution for Sobol-sampled interpolation, False for Cartesian.
outbasename : str
Name of the outputted plot of the base.
Returns
-------
newbase : array
A base of the new points in the base, same structure as input base.
trindex : array
List of simplexes of the new points, for determination of the enveloping
tracks.
"""
if not sobol:
# Cartesian routine. Stores arrays of points to be added and all points
newbase = None
wholebase = copy.deepcopy(base)
# For each interpolation parameter, add the desired number of points
for i, (par, res) in enumerate(baseparams.items()):
newpoints = None
# Unique values of the parameter
uniq = np.unique(wholebase[:, i])
# New spacing in parameter
diff = np.mean(np.diff(uniq)) / (res + 1)
# For each requested new point, add an offsetted copy of the base
for j in range(res):
points = wholebase.copy()
points[:, i] += diff * (j + 1)
if type(newpoints) != np.ndarray:
newpoints = points
else:
newpoints = np.vstack((newpoints, points))
# Update the arrays
wholebase = np.vstack((wholebase, newpoints))
if type(newbase) != np.ndarray:
newbase = newpoints
else:
newbase = np.vstack((newbase, newpoints))
# Find all points within triangulation
mask = tri.find_simplex(newbase)
newbase = newbase[mask != -1]
trindex = tri.find_simplex(newbase)
elif sobol:
# Check that we increase the number of tracks
lorgbase = len(base)
lnewbase = int(lorgbase * sobol)
assert lnewbase > lorgbase
ndim = len(baseparams)
l_trim = 1
# Try sampling the parameter space, and retry until increase met
while l_trim / sobol < lorgbase:
# Extract Sobol sequences
lnewbase = int(lnewbase * 1.2)
sob_nums = np.zeros((lnewbase, ndim))
iseed = 1
for i in range(lnewbase):
iseed, sob_nums[i, :] = sobol_numbers.i8_sobol(ndim, iseed)
# Assign parameter values by sequence
newbase = []
for npar in range(ndim):
Cmin = min(base[:, npar])
Cmax = max(base[:, npar])
newbase.append((Cmax - Cmin) * sob_nums[:, npar] + Cmin)
# Remove points outside subgrid
newbase = np.asarray(newbase).T
mask = tri.find_simplex(newbase)
newbase = newbase[mask != -1]
l_trim = len(newbase[:, 0])
# Compute new simplex list
trindex = tri.find_simplex(newbase)
# Plot of old vs. new base of subgrid
if len(baseparams) > 1 and (debug or verbose):
outname = outbasename.split(".")[-2] + "_all"
outname += "." + outbasename.split(".")[-1]
success = ip.base_corner(baseparams, base, newbase, tri, sobol, outname)
if success:
print(
"Initial across interpolation base has been plotted in",
"figure",
outname,
)
return newbase, trindex
# ======================================================================================
# Interpolation across tracks
# ======================================================================================
def _interpolate_across(
grid,
outfile,
resolution,
limits,
intpolparams,
basepath="grid/",
intpol_freqs=False,
along_var="xcen",
outbasename="",
debug=False,
verbose=False,
):
"""
Interpolates a grid across the tracks, within a box of observational limits.
Parameters
----------
grid : h5py file
Handle of grid to process
outfile : h5py file
Handle of output grid to write to
resolution : dict
Required resolution. Must contain "param" with a valid parameter name from the
grid and "value" with the desired precision/resolution.
limits : dict
Constraints on the selection in the grid. Must be valid parameter names in the
grid. Example of the form: {'Teff': [5000, 6000], 'FeH': [-0.2, 0.2]}
intpolparams : list
List of parameters to be interpolated, avoid interpolating *everything*.
basepath : str, optional
Path in the grid where the tracks are stored. The default value applies to
standard grids of tracks. It must be modified for isochrones!
intpol_freqs : bool
Whether or not to interpolate individual oscillation frequencies.
along_var : str
User-defined parameter to use as base along the track in interpolation routine.
outbasename : str
Name and destionaion of plot for old vs new base of grid.
debug : bool, optional
Activate debug mode. Will print extra info upon a failed interpolation of a track.
verbose : bool, optional
Print information to console and make simple diagnostic plots. Will be
automatically set by debug.
Returns
-------
grid : h5py file
Handle of grid to process
outfile : h5py file
Handle of output grid to write to
fail : bool
Boolean to indicate whether the routine has failed or succeeded
"""
print("\n********************\nAcross interpolation\n********************")
# Parameters possibly in header
headvars = [
"tracks",
"isochs",
"massini",
"age",
"FeHini",
"MeHini",
"yini",
"alphaMLT",
"ove",
"gcut",
"eta",
"alphaFe",
"dif",
]
# Determine whether the grid is iscohrones or tracks (convert to allow all h5py's)
gridtype = grid["header/library_type"][()]
if isinstance(gridtype, bytes):
gridtype = gridtype.decode("utf-8")
if "track" in gridtype:
isomode = False
modestr = "track"
dname = "dage"
# Determine the number to assign the new tracks
tracklist = list(grid[basepath + "tracks"].items())
newnum = max([int(f[0].split("track")[-1]) for f in tracklist]) + 1
numfmt = len(tracklist[0][0].split("track")[-1])
# Form basis of varied parameters
bpars = [par.decode("UTF-8") for par in grid["header/active_weights"]]
baseparams = {par: resolution[par] for par in bpars}
const_vars = {}
for par in headvars:
if (par not in bpars) and (par in grid["header"]):
const_vars[par] = grid[os.path.join("header", par)][0]
# Collect the headvars, as they are constant along the track
headvars = list(np.unique(list(bpars) + list(const_vars)))
sobol = _check_sobol(grid, resolution)
elif "isochrone" in gridtype:
isomode = True
modestr = "isochrone"
dname = "dmass"
newnum = 0
# Parameters for forming basis
bpars = [par.decode("UTF-8") for par in grid["header/active_weights"]]
baseparams = {par: resolution[par] for par in bpars}
const_vars = {}
isochhead = os.path.join("header", basepath)
for par in headvars:
if (par not in bpars) and (par in grid[isochhead]):
const_vars[par] = grid[os.path.join(isochhead, par)][0]
# Only propagate the present parameters
headvars = list(np.unique(list(bpars) + list(const_vars)))
sobol = _check_sobol(grid, resolution)
# Check frequency limits
if "freqs" in limits:
freqlims = limits["freqs"]
del limits["freqs"]
# Extract tracks/isochrones within user-specified limits
print("Locating limits and restricting sub-grid ... ", flush=True)
selectedmodels = ih.get_selectedmodels(grid, basepath, limits, cut=False)
# If Cartesian method, save tracks/isochrones within limits to new grid
fail = False
if grid != outfile and not sobol:
for name, index in selectedmodels:
if not isomode:
index2d = np.array(np.transpose([index, index]))
if not (any(index) and sum(index) > 2):
outfile[os.path.join(name, "FeHini_weight")] = -1
else:
# Write everything from the old grid to the new in the region
for key in grid[name].keys():
keypath = os.path.join(name, key)
if "_weight" in key:
outfile[keypath] = grid[keypath][()]
elif "osc" in key:
if intpol_freqs:
outfile[keypath] = grid[keypath][index2d]
else:
outfile[keypath] = grid[keypath][index]
# Form the base array for interpolation
base = np.zeros((len(selectedmodels), len(baseparams)))
for i, name in enumerate(selectedmodels):
for j, bpar in enumerate(baseparams):
parm = grid[basepath + name][bpar][0]
base[i, j] = parm
# Determine the base params for new tracks
print("\nBuilding triangulation ... ", end="", flush=True)
triangulation = spatial.Delaunay(base)
new_points, trindex = _calc_across_points(
base, baseparams, triangulation, sobol, outbasename, debug, verbose
)
print("done!")
# List of tracknames for accessing grid
tracknames = list(selectedmodels)
# List to sort out failed tracks/isochrones at the end
success = np.ones(len(new_points[:, 0]), dtype=bool)
# Set up for debugging during run
if debug:
debugpath = "intpolout"
if not os.path.exists(debugpath):
os.mkdir(debugpath)
#############
# Main loop #
#############
numnew = len(new_points)
print("Interpolating {0} tracks/isochrones ... ".format(numnew))
# Use a progress bar (with the package tqdm; will write to stderr)
pbar = tqdm(total=numnew, desc="--> Progress", ascii=True)
# Use tqdm for progress bar
for tracknum, (point, tind) in enumerate(zip(new_points, trindex)):
# Update progress bar in the start of the loop to count skipped tracks
pbar.update(1)
# Directory of the track/isochrone
if not isomode:
libname = (
basepath + "tracks/track" + str(int(newnum + tracknum)).zfill(numfmt)
)
else:
FeH = point[bpars.index("FeHini")]
age = point[bpars.index("age")]
libname = basepath + "FeH={0:.4f}/age={1:.4f}".format(FeH, age)
# Form the basis of interpolation, and collect minmax of the along track variable
ind = triangulation.simplices[tind]
count = sum([sum(selectedmodels[tracknames[i]]) for i in ind])
intbase = np.zeros((count, len(bpars) + 1))
y = np.zeros((count))
minmax = np.zeros((len(ind), 3))
ir = 0
# Loop over the enveloping tracks
for j, i in enumerate(ind):
track = tracknames[i]
bvar = grid[basepath + track][along_var][selectedmodels[track]]
minmax[j, :] = [min(bvar), max(bvar), abs(np.median(np.diff(bvar)))]
for k, a in enumerate(list(bvar)):
intbase[k + ir, : len(base[i])] = base[i]
intbase[k + ir, -1] = a
ir += len(bvar)
minmax = [max(minmax[:, 0]), min(minmax[:, 1]), np.mean(minmax[:, 2])]
if minmax[0] > minmax[1]:
warstr = "Warning: Interpolating {0} {1} ".format(
modestr, newnum + tracknum
)
warstr += "was aborted due to no overlap in {0}".format(along_var)
warstr += " of the enveloping {0}!".format(modestr)
print(warstr)
success[tracknum] = False
outfile[os.path.join(libname, "FeHini_weight")] = -1
continue
# Assume equal spacing, but approximately the same number of points
try:
Npoints = abs(int(np.ceil((minmax[1] - minmax[0]) / minmax[2])))
except:
prtstr = "Choice of base parameter '{:s}' resulted".format(along_var)
prtstr += " in an error when determining it's variance along the "
prtstr += "{:s}, consider choosing another.".format(modestr)
raise ValueError(prtstr)
# The base along the new track
newbvar = np.linspace(minmax[0], minmax[1], Npoints)
newbase = np.ones((len(newbvar), len(bpars) + 1))
for i, p in enumerate(point):
newbase[:, i] *= p
newbase[:, -1] = newbvar
sub_triangle = spatial.Delaunay(intbase)
try:
# Interpolate and write each individual parameter, apart from oscillations
for key in intpolparams:
keypath = os.path.join(libname, key)
# Weights are given a placeholder value
if "_weight" in key:
outfile[keypath] = 1.0
elif key == along_var:
outfile[keypath] = newbase[:, -1]
elif "name" in key:
outfile[keypath] = len(newbase[:, -1]) * [b"interpolated-entry"]
elif ("osc" in key) or (key in const_vars):
continue
else:
ir = 0
for j, i in enumerate(ind):
track = tracknames[i]
yind = selectedmodels[track]
y[ir : ir + sum(yind)] = grid[basepath + track][key][yind]
ir += sum(yind)
intpol = interpolate.LinearNDInterpolator(sub_triangle, y)
newparam = intpol(newbase)
if any(np.isnan(newparam)):
nan = "{0} {1} had NaN value(s)!".format(
modestr, newnum + tracknum
)
raise ValueError(nan)
outfile[keypath] = newparam
# Dealing with oscillations
if intpol_freqs:
osc = []
osckey = []
sections = [0]
for i in ind:
# Extract the oscillation fequencies and id's
track = tracknames[i]
for model in np.where(selectedmodels[track])[0]:
osc.append(grid[basepath + track]["osc"][model])
osckey.append(grid[basepath + track]["osckey"][model])
sections.append(len(osc))
newosckey, newosc = ih.interpolate_frequencies(
fullosc=osc,
fullosckey=osckey,
agevec=intbase,
newagevec=newbase,
sections=sections,
freqlims=freqlims,
debug=debug,
trackid=newnum + tracknum,
)
Npoints = len(newosc)
# Writing variable length arrays to an HDF5 file is a bit tricky,
# but can be done using datasets with a special datatype.
# --> Here we follow the approach from BASTA/make_tracks
dsetosc = outfile.create_dataset(
name=os.path.join(libname, "osc"),
shape=(Npoints, 2),
dtype=h5py.special_dtype(vlen=np.float),
)
dsetosckey = outfile.create_dataset(
name=os.path.join(libname, "osckey"),
shape=(Npoints, 2),
dtype=h5py.special_dtype(vlen=np.int),
)
for i in range(Npoints):
dsetosc[i] = newosc[i]
dsetosckey[i] = newosckey[i]
# Dealing with constants of the track
outfile[os.path.join(libname, "FeHini_weight")] = 1.0
for par, parval in zip(baseparams, point):
keypath = os.path.join(libname, par)
try:
outfile[keypath]
except:
outfile[keypath] = np.ones(len(newbase[:, -1])) * parval
for par in const_vars:
keypath = os.path.join(libname, par)
if par in ["tracks", "isochs"]:
continue
try:
outfile[keypath]
except:
outfile[keypath] = np.ones(len(newbase[:, -1])) * const_vars[par]
# Bayesian weight along track
par = "massfin" if dname == "dmass" else "age"
parpath = os.path.join(libname, par)
keypath = os.path.join(libname, dname)
outfile[keypath] = ih.bay_weights(outfile[parpath])
if debug:
debugnum = str(int(newnum + tracknum)).zfill(numfmt)
plotpath = os.path.join(
debugpath, "debug_kiel_{0}.png".format(debugnum)
)
if not os.path.exists(plotpath):
try:
tracks = [tracknames[i] for i in ind]
selmods = [selectedmodels[t] for t in tracks]
ip.across_debug(
grid,
outfile,
basepath,
along_var,
libname,
tracks,
selmods,
plotpath,
)
print(
"Plotted debug Kiel for {0} {1}".format(modestr, debugnum)
)
except:
print(
"Debug plotting failed for {0} {1}".format(
modestr, debugnum
)
)
except KeyboardInterrupt:
print("BASTA interpolation stopped manually. Goodbye!")
sys.exit()
except:
# If it fails, delete progress for the track, and just mark it as failed
try:
del outfile[libname]
except:
None
success[tracknum] = False
print("Error:", sys.exc_info()[1])
outfile[os.path.join(libname, "FeHini_weight")] = -1
print("Interpolation failed for {0}".format(libname))
if debug:
print("Point at:")
[print(name, value, ", ") for name, value in zip(bpars, point)]
print("Simplex formed by the {0}s:".format(modestr))
print(", ".join([tracknames[i] for i in ind]))
####################
# End of main loop #
####################
pbar.close()
# Plot the new resulting base
plotted = ip.base_corner(
baseparams, base, new_points[success], triangulation, sobol, outbasename
)
if plotted:
print("Across interpolation base has been plotted in figure", outbasename)
# Remove all previous tracks, to conserve sobol homogeniety
if grid == outfile and sobol:
for name in tracknames:
namepath = os.path.join(basepath, name)
del outfile[namepath]
# Re-add frequency limits for combined approaches
if intpol_freqs:
limits["freqs"] = freqlims
# Write the new tracks to the header, and recalculate the weights
outfile = ih._extend_header(outfile, basepath, headvars)
outfile = ih._recalculate_weights(outfile, basepath, headvars)
return grid, outfile, fail
|
{"hexsha": "5de7b835ebe51944c4573c9759dfb24c36ab9d9d", "size": 23458, "ext": "py", "lang": "Python", "max_stars_repo_path": "basta/interpolation_across.py", "max_stars_repo_name": "BASTAcode/BASTA", "max_stars_repo_head_hexsha": "6de8b8b866787d6745c4e77378bb94e0bab97090", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2021-10-01T06:46:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-04T09:59:33.000Z", "max_issues_repo_path": "basta/interpolation_across.py", "max_issues_repo_name": "BASTAcode/BASTA", "max_issues_repo_head_hexsha": "6de8b8b866787d6745c4e77378bb94e0bab97090", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "basta/interpolation_across.py", "max_forks_repo_name": "BASTAcode/BASTA", "max_forks_repo_head_hexsha": "6de8b8b866787d6745c4e77378bb94e0bab97090", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-10-17T10:20:38.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-03T20:37:24.000Z", "avg_line_length": 37.7745571659, "max_line_length": 90, "alphanum_fraction": 0.543396709, "include": true, "reason": "import numpy,from scipy", "num_tokens": 5316}
|
! { dg-do run }
!
! PR fortran/38669
! Loop bounds temporaries used before being defined for elemental subroutines
!
! Original testcase by Harald Anlauf <anlauf@gmx.de>
program gfcbu84_main
implicit none
integer :: jplev, k_lev
integer :: p(42)
real :: r(42)
integer, pointer :: q(:)
jplev = 42
k_lev = 1
call random_number (r)
p = 41 * r + 1
allocate (q(jplev))
q = 0
call tq_tvgh (q(k_lev:), p(k_lev:))
if (any (p /= q)) call abort
q = 0
call tq_tvgh (q(k_lev:), (p(k_lev:)))
if (any (p /= q)) call abort
q = 0
call tq_tvgh (q(k_lev:), (p(p(k_lev:))))
if (any (p(p) /= q)) call abort
deallocate (q)
contains
elemental subroutine tq_tvgh (t, p)
integer ,intent (out) :: t
integer ,intent (in) :: p
t=p
end subroutine tq_tvgh
end program gfcbu84_main
|
{"hexsha": "7c7875bbfcd3945e89e407a7bb144fd0c69a1ad8", "size": 852, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "tests/CompileTests/Fortran_tests/gfortranTestSuite/gfortran.dg/elemental_subroutine_7.f90", "max_stars_repo_name": "maurizioabba/rose", "max_stars_repo_head_hexsha": "7597292cf14da292bdb9a4ef573001b6c5b9b6c0", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 488, "max_stars_repo_stars_event_min_datetime": "2015-01-09T08:54:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T07:15:46.000Z", "max_issues_repo_path": "tests/CompileTests/Fortran_tests/gfortranTestSuite/gfortran.dg/elemental_subroutine_7.f90", "max_issues_repo_name": "sujankh/rose-matlab", "max_issues_repo_head_hexsha": "7435d4fa1941826c784ba97296c0ec55fa7d7c7e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 174, "max_issues_repo_issues_event_min_datetime": "2015-01-28T18:41:32.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:51:05.000Z", "max_forks_repo_path": "tests/CompileTests/Fortran_tests/gfortranTestSuite/gfortran.dg/elemental_subroutine_7.f90", "max_forks_repo_name": "sujankh/rose-matlab", "max_forks_repo_head_hexsha": "7435d4fa1941826c784ba97296c0ec55fa7d7c7e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 146, "max_forks_repo_forks_event_min_datetime": "2015-04-27T02:48:34.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-04T07:32:53.000Z", "avg_line_length": 20.7804878049, "max_line_length": 77, "alphanum_fraction": 0.5950704225, "num_tokens": 308}
|
import os
os.environ['THEANO_FLAGS'] = 'device=gpu'
import numpy as np
import theano as th
import theano.tensor as tt
rng = np.random
N = 400 # training sample size
feats = 784 # number of input variables
## generate a dataset: D = (input_values, target_class)
D = (rng.randn(N, feats), rng.randint(size=N, low=0, high=2))
training_steps = 10000
x = tt.dmatrix('x')
y = tt.dvector('y')
w = th.shared(rng.randn(feats), name='w')
b = th.shared(0., name='b')
print('Initial model:')
print(w.get_value())
print(b.get_value())
## building computation graph
prob = 1 / (1 + tt.exp(-tt.dot(x, w) - b))
pred = prob > 0.5
# cross-entropy loss function
xent = -y * tt.log(prob) - (1-y) * tt.log(1 - prob)
cost = xent.mean() + 0.01 * (w**2).sum()
gw, gb = tt.grad(cost, [w, b])
## compile graph
train = th.function(
inputs=[x, y],
outputs=[pred, xent],
updates=[(w, w - 0.1*gw), (b, b - 0.1*gb)]
)
predict = th.function(
inputs=[x],
outputs=pred
)
## train
for i in range(training_steps):
pred, err = train(D[0], D[1])
print('Final model:')
print(w.get_value())
print(b.get_value())
print('# target values on D:')
print(D[1])
print('# prediction on D:')
p = predict(D[0])
print(p)
print('# differences:')
print(np.abs(p - D[1]))
|
{"hexsha": "ee1eaaa08b44d93087aafe499a7e8a7947483838", "size": 1264, "ext": "py", "lang": "Python", "max_stars_repo_path": "logistic.regression.py", "max_stars_repo_name": "metapsycho/learning.theano", "max_stars_repo_head_hexsha": "3600083a6a9dbc76615aa1e650c38ff8f83653cf", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "logistic.regression.py", "max_issues_repo_name": "metapsycho/learning.theano", "max_issues_repo_head_hexsha": "3600083a6a9dbc76615aa1e650c38ff8f83653cf", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "logistic.regression.py", "max_forks_repo_name": "metapsycho/learning.theano", "max_forks_repo_head_hexsha": "3600083a6a9dbc76615aa1e650c38ff8f83653cf", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.4237288136, "max_line_length": 61, "alphanum_fraction": 0.6234177215, "include": true, "reason": "import numpy,import theano", "num_tokens": 401}
|
import brica
import numpy as np
import cv2
import math
from oculoenv.geom import Matrix4
class HP(object):
""" Hippocampal formation module.
Create allocentric panel image.
"""
def __init__(self):
self.timing = brica.Timing(2, 1, 0)
# Allocentric panel map image
self.map_image = np.zeros((128, 128, 3), dtype=np.uint8)
# Allocentric panel map image
self.not_overlaid_map_image = np.zeros((128, 128, 3), dtype=np.uint8)
# blured Allocentric panel map image
self.blured_map_image = np.zeros((128, 128, 3), dtype=np.uint8)
# copied from retina.py
width = 128
self.blur_rates, self.inv_blur_rates = self._create_rate_datas(width, sigma=0.48, clipping_gain=1.8, gain=1.0) # original param are sigma=0.32, clipping_gain=1.2, gain=1.0
self.inv_blur_rates += 0.2
self.inv_blur_rates = np.clip(self.inv_blur_rates, 0.0, 1.0)
self.blur_rates = 1.0 - self.inv_blur_rates
self.gray_rates, self.inv_gray_rates = self._create_rate_datas(width, gain=0.5)
self.inv_gray_rates += 0.6
self.inv_gray_rates = np.clip(self.inv_gray_rates, 0.0, 1.0)
self.gray_rates = 1.0 - self.inv_gray_rates
def __call__(self, inputs):
if 'from_retina' not in inputs:
raise Exception('HP did not recieve from Environment')
# This image input from environment is a kind of cheat and not biologically
# acculate.
if inputs['from_retina'] is not None:
image, angle = inputs['from_retina'] # (128, 128, 3), (2)
# Transform input image into allocentric panel image
transforemed_image = self._extract_transformed_image(image, angle)
# Overlay into existing map image
self._overlay_extracted_image(self.map_image, transforemed_image)
self.not_overlaid_map_image = transforemed_image
self.blured_map_image = self._create_inv_retina_image(transforemed_image)
return dict(to_pfc=(angle, self.map_image, self.not_overlaid_map_image, self.blured_map_image))
def _get_perspective_mat(self, fovy, aspect_ratio, znear, zfar):
ymax = znear * math.tan(fovy * math.pi / 360.0)
xmax = ymax * aspect_ratio
t = 2.0 * znear
t2 = 2.0 * xmax
t3 = 2.0 * ymax
t4 = zfar - znear
m = [[t/t2, 0.0, 0.0, 0.0],
[0.0, t/t3, 0.0, 0.0],
[0.0, 0.0, (-zfar-znear)/t4, -1.0],
[0.0, 0.0, (-t*zfar)/t4, 0.0]]
m = np.transpose(np.array(m, dtype=np.float32))
mat = Matrix4(m)
return mat
def _extract_transformed_image(self, image, angle):
# In order to use black color as a blank mask, set lower clip value for
# input image
mask_threshold = 3
image = np.clip(image, mask_threshold, 255)
angle_h = angle[0]
angle_v = angle[1]
m0 = Matrix4()
m1 = Matrix4()
m0.set_rot_x(angle_v)
m1.set_rot_y(angle_h)
camera_mat = m1.mul(m0)
camera_mat_inv = camera_mat.invert()
camera_fovy = 50
pers_mat = self._get_perspective_mat(camera_fovy, 1.0, 0.04, 100.0)
mat = pers_mat.mul(camera_mat_inv)
plane_distance = 3.0
point_srcs = [[ 1.0, 1.0, -plane_distance, 1.0],
[-1.0, 1.0, -plane_distance, 1.0],
[-1.0,-1.0, -plane_distance, 1.0],
[ 1.0,-1.0, -plane_distance, 1.0]]
point_src_2ds = []
point_dst_2ds = []
for point_src in point_srcs:
ps_x = (point_src[0] * 0.5 + 0.5) * 127.0
ps_y = (-point_src[1] * 0.5 + 0.5) * 127.0
point_src_2ds.append([ps_x, ps_y])
p = mat.transform(np.array(point_src, dtype=np.float32))
w = p[3]
x = p[0]/w
y = p[1]/w
pd_x = (x * 0.5 + 0.5) * 127.0
pd_y = (-y * 0.5 + 0.5) * 127.0
point_dst_2ds.append([pd_x, pd_y])
point_src_2ds = np.float32(point_src_2ds)
point_dst_2ds = np.float32(point_dst_2ds)
h,w,c = image.shape
M = cv2.getPerspectiveTransform(point_dst_2ds, point_src_2ds)
transformed_image = cv2.warpPerspective(image, M, (h,w))
return transformed_image
def _overlay_extracted_image(self, base_image, ext_image):
GRID_DIVISION = 8
GRID_WIDTH = 128 // GRID_DIVISION
for ix in range(GRID_DIVISION):
pixel_x = GRID_WIDTH * ix
for iy in range(GRID_DIVISION):
pixel_y = GRID_WIDTH * iy
base_region_image = base_image[pixel_y:pixel_y+GRID_WIDTH,
pixel_x:pixel_x+GRID_WIDTH, :]
ext_region_image = ext_image[pixel_y:pixel_y+GRID_WIDTH,
pixel_x:pixel_x+GRID_WIDTH, :]
ext_region_image_sum = np.sum(ext_region_image, axis=2)
has_zero = np.any(ext_region_image_sum==0)
if not has_zero:
base_image[pixel_y:pixel_y+GRID_WIDTH,
pixel_x:pixel_x+GRID_WIDTH, :] = ext_region_image // 2 + base_region_image // 2
# copied from retina.py
def _gauss(self, x, sigma):
sigma_sq = sigma * sigma
return 1.0 / np.sqrt(2.0 * np.pi * sigma_sq) * np.exp(-x*x/(2 * sigma_sq))
# copied from retina.py
def _create_rate_datas(self, width, sigma=0.32, clipping_gain=1.2, gain=1.0):
""" Create mixing rate.
Arguments:
width: (int) width of the target image.
sigma: (float) standard deviation of the gaussian.
clipping_gain: (float) To make the top of the curve flat, apply gain > 1.0
gain: (float) Final gain for the mixing rate.
e.g.) if gain=0.8, mixing rates => 0.2~1.0
Returns:
Float ndarray (128, 128, 1): Mixing rates and inverted mixing rates.
"""
rates = [0.0] * (width * width)
hw = width // 2
for i in range(width):
x = (i - hw) / float(hw)
for j in range(width):
y = (j - hw) / float(hw)
r = np.sqrt(x*x + y*y)
rates[j*width + i] = self._gauss(r, sigma=sigma)
rates = np.array(rates)
# Normalize
rates = rates / np.max(rates)
# Make top flat by multipying and clipping
rates = rates * clipping_gain
rates = np.clip(rates, 0.0, 1.0)
# Apply final gain
if gain != 1.0:
rates = rates * gain + (1-gain)
rates = rates.reshape([width, width, 1])
inv_rates = 1.0 - rates
return rates, inv_rates
# copied from retina.py
def _create_blur_image(self, image):
h = image.shape[0]
w = image.shape[1]
# Resizeing to 1/2 size
resized_image0 = cv2.resize(image,
dsize=(h//2, w//2),
interpolation=cv2.INTER_LINEAR)
# Resizeing to 1/4 size
resized_image1 = cv2.resize(resized_image0,
dsize=(h//4, w//4),
interpolation=cv2.INTER_LINEAR)
# Resizeing to 1/8 size
resized_image2 = cv2.resize(resized_image1,
dsize=(h//8, w//8),
interpolation=cv2.INTER_LINEAR)
# Resizing to original size
blur_image = cv2.resize(resized_image2,
dsize=(h, w),
interpolation=cv2.INTER_LINEAR)
# Conver to Grayscale
gray_blur_image = cv2.cvtColor(blur_image, cv2.COLOR_BGR2GRAY)
gray_blur_image = np.reshape(gray_blur_image,
[gray_blur_image.shape[0],
gray_blur_image.shape[0], 1])
gray_blur_image = np.tile(gray_blur_image, 3)
return blur_image, gray_blur_image
# copied from retina.py
def _create_inv_retina_image(self, image):
blur_image, gray_blur_image = self._create_blur_image(image)
# Mix original and blur image
#blur_mix_image = image * self.blur_rates + blur_image * self.inv_blur_rates
blur_mix_image = image * self.inv_blur_rates + blur_image * self.blur_rates
# Mix blur mixed image and gray blur image.
#gray_mix_image = blur_mix_image * self.gray_rates + gray_blur_image * self.inv_gray_rates
gray_mix_image = blur_mix_image * self.inv_gray_rates + gray_blur_image * self.gray_rates
return gray_mix_image.astype(np.uint8)
|
{"hexsha": "e45e981c4366368b072ad9463eb0986999333acd", "size": 8867, "ext": "py", "lang": "Python", "max_stars_repo_path": "application/functions/hp.py", "max_stars_repo_name": "miyosuda/oculomotor", "max_stars_repo_head_hexsha": "78e7ec61a808d058116c69bff1ea71ecf117c126", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-12T11:26:17.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-12T11:26:17.000Z", "max_issues_repo_path": "application/functions/hp.py", "max_issues_repo_name": "miyosuda/oculomotor", "max_issues_repo_head_hexsha": "78e7ec61a808d058116c69bff1ea71ecf117c126", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "application/functions/hp.py", "max_forks_repo_name": "miyosuda/oculomotor", "max_forks_repo_head_hexsha": "78e7ec61a808d058116c69bff1ea71ecf117c126", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-11-27T07:00:06.000Z", "max_forks_repo_forks_event_max_datetime": "2018-11-27T07:00:06.000Z", "avg_line_length": 38.8903508772, "max_line_length": 179, "alphanum_fraction": 0.5624224653, "include": true, "reason": "import numpy", "num_tokens": 2391}
|
program phaml_master
use phaml
implicit none
type(phaml_solution_type) :: soln
call phaml_create(soln,nproc=4)
call phaml_solve_pde(soln,print_grid_when=PHASES,print_grid_who=MASTER, &
max_eq=500, mg_cycles=5, &
reftype=H_ADAPTIVE)
call phaml_destroy(soln)
end program phaml_master
|
{"hexsha": "27e50d7114297424c7d3dcf32225c589469c942e", "size": 324, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "testdir/test_refinement/test02.f90", "max_stars_repo_name": "qsnake/phaml", "max_stars_repo_head_hexsha": "8925b4c32657bbd9f81cd5f8f9d6739151c66fec", "max_stars_repo_licenses": ["mpich2"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-09-07T15:46:34.000Z", "max_stars_repo_stars_event_max_datetime": "2018-09-07T15:46:34.000Z", "max_issues_repo_path": "testdir/test_refinement/test02.f90", "max_issues_repo_name": "qsnake/phaml", "max_issues_repo_head_hexsha": "8925b4c32657bbd9f81cd5f8f9d6739151c66fec", "max_issues_repo_licenses": ["mpich2"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "testdir/test_refinement/test02.f90", "max_forks_repo_name": "qsnake/phaml", "max_forks_repo_head_hexsha": "8925b4c32657bbd9f81cd5f8f9d6739151c66fec", "max_forks_repo_licenses": ["mpich2"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.4545454545, "max_line_length": 73, "alphanum_fraction": 0.725308642, "num_tokens": 92}
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import tensorflow as tf
import oneflow as flow
from collections import OrderedDict
import oneflow.typing as oft
import test_global_storage
from test_util import (
GenArgDict,
GenArgList,
type_name_to_flow_type,
type_name_to_np_type,
)
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
def RunOneflowBinaryOp(device_type, flow_op, x, y, data_type):
flow.clear_default_session()
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
flow_type = type_name_to_flow_type[data_type]
@flow.global_function(type="train", function_config=func_config)
def FlowJob(
x: oft.Numpy.Placeholder(x.shape, dtype=flow_type),
y: oft.Numpy.Placeholder(y.shape, dtype=flow_type),
):
with flow.scope.placement(device_type, "0:0"):
x += flow.get_variable(
name="x",
shape=x.shape,
dtype=flow_type,
initializer=flow.zeros_initializer(),
trainable=True,
)
y += flow.get_variable(
name="y",
shape=y.shape,
dtype=flow_type,
initializer=flow.zeros_initializer(),
trainable=True,
)
loss = flow_op(x, y)
flow.optimizer.SGD(
flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0
).minimize(loss)
flow.watch_diff(x, test_global_storage.Setter("x_diff"))
flow.watch_diff(y, test_global_storage.Setter("y_diff"))
return loss
# Oneflow
out = FlowJob(x, y).get().numpy()
x_diff = test_global_storage.Get("x_diff")
y_diff = test_global_storage.Get("y_diff")
return out, x_diff, y_diff
def RunTensorFlowBinaryOp(tf_op, x, y):
# TensorFlow
with tf.GradientTape(persistent=True) as tape:
x = tf.Variable(x)
y = tf.Variable(y)
out = tf_op(x, y)
x_diff = tape.gradient(out, x)
y_diff = tape.gradient(out, y)
return out.numpy(), x_diff, y_diff
def compare_with_tensorflow(
test_case,
device_type,
flow_op,
tf_op,
x_shape,
y_shape,
data_type,
x_minval=-10,
x_maxval=10,
y_minval=-10,
y_maxval=10,
compare_grad=True,
out_rtol=1e-5,
out_atol=1e-5,
diff_rtol=1e-5,
diff_atol=1e-5,
):
test_case.assertTrue(device_type in ["gpu", "cpu"])
np_type = type_name_to_np_type[data_type]
x = np.random.uniform(low=x_minval, high=x_maxval, size=x_shape).astype(np_type)
y = np.random.uniform(low=y_minval, high=y_maxval, size=y_shape).astype(np_type)
of_out, of_x_diff, of_y_diff, = RunOneflowBinaryOp(
device_type, flow_op, x, y, data_type
)
tf_out, tf_x_diff, tf_y_diff = RunTensorFlowBinaryOp(tf_op, x, y)
test_case.assertTrue(
np.allclose(of_out, tf_out, rtol=out_rtol, atol=out_atol, equal_nan=True)
)
if compare_grad:
test_case.assertTrue(
np.allclose(
of_x_diff,
tf_x_diff.numpy(),
rtol=diff_rtol,
atol=diff_atol,
equal_nan=True,
)
)
test_case.assertTrue(
np.allclose(
of_y_diff,
tf_y_diff.numpy(),
rtol=diff_rtol,
atol=diff_atol,
equal_nan=True,
)
)
flow.clear_default_session()
@flow.unittest.skip_unless_1n1d()
class TestBinaryElementwiseOps(flow.unittest.TestCase):
def test_floordiv(test_case):
arg_dict = OrderedDict()
arg_dict["test_case"] = [test_case]
arg_dict["device_type"] = ["gpu", "cpu"]
arg_dict["flow_op"] = [flow.math.floordiv]
arg_dict["tf_op"] = [tf.math.floordiv]
arg_dict["x_shape"] = [(5, 5,)]
arg_dict["y_shape"] = [(5, 5,)]
arg_dict["data_type"] = ["float32", "double"]
arg_dict["x_minval"] = [-10]
arg_dict["x_maxval"] = [10]
arg_dict["y_minval"] = [1]
arg_dict["y_maxval"] = [10]
arg_dict["compare_grad"] = [False]
for arg in GenArgList(arg_dict):
compare_with_tensorflow(*arg)
def test_pow(test_case):
arg_dict = OrderedDict()
arg_dict["test_case"] = [test_case]
arg_dict["device_type"] = ["gpu", "cpu"]
arg_dict["flow_op"] = [flow.math.pow]
arg_dict["tf_op"] = [tf.math.pow]
arg_dict["x_shape"] = [(5, 5,)]
arg_dict["y_shape"] = [(5, 5,)]
arg_dict["data_type"] = ["float32", "double"]
arg_dict["x_minval"] = [1]
arg_dict["x_maxval"] = [5]
arg_dict["y_minval"] = [1]
arg_dict["y_maxval"] = [5]
arg_dict["compare_grad"] = [True]
for arg in GenArgList(arg_dict):
compare_with_tensorflow(*arg)
def test_xdivy(test_case):
arg_dict = OrderedDict()
arg_dict["test_case"] = [test_case]
arg_dict["device_type"] = ["gpu", "cpu"]
arg_dict["flow_op"] = [flow.math.xdivy]
arg_dict["tf_op"] = [tf.math.xdivy]
arg_dict["x_shape"] = [(5, 5,)]
arg_dict["y_shape"] = [(5, 5,)]
arg_dict["data_type"] = ["float32", "double"]
arg_dict["x_minval"] = [1]
arg_dict["x_maxval"] = [100]
arg_dict["y_minval"] = [1]
arg_dict["y_maxval"] = [10]
arg_dict["compare_grad"] = [True]
for arg in GenArgList(arg_dict):
compare_with_tensorflow(*arg)
def test_xlogy(test_case):
arg_dict = OrderedDict()
arg_dict["test_case"] = [test_case]
arg_dict["device_type"] = ["gpu", "cpu"]
arg_dict["flow_op"] = [flow.math.xlogy]
arg_dict["tf_op"] = [tf.math.xlogy]
arg_dict["x_shape"] = [(5, 5,)]
arg_dict["y_shape"] = [(5, 5,)]
arg_dict["data_type"] = ["float32", "double"]
arg_dict["x_minval"] = [1]
arg_dict["x_maxval"] = [5]
arg_dict["y_minval"] = [1]
arg_dict["y_maxval"] = [5]
arg_dict["compare_grad"] = [True]
for arg in GenArgList(arg_dict):
compare_with_tensorflow(*arg)
def test_atan2(test_case):
arg_dict = OrderedDict()
arg_dict["test_case"] = [test_case]
arg_dict["device_type"] = ["gpu", "cpu"]
arg_dict["flow_op"] = [flow.math.atan2]
arg_dict["tf_op"] = [tf.math.atan2]
arg_dict["x_shape"] = [(5, 5,)]
arg_dict["y_shape"] = [(5, 5,)]
arg_dict["data_type"] = ["float32", "double"]
arg_dict["x_minval"] = [1]
arg_dict["x_maxval"] = [5]
arg_dict["y_minval"] = [1]
arg_dict["y_maxval"] = [5]
arg_dict["compare_grad"] = [True]
for arg in GenArgList(arg_dict):
compare_with_tensorflow(*arg)
if __name__ == "__main__":
unittest.main()
|
{"hexsha": "73a545bf284bada93a9c22257df929bd444101d3", "size": 7577, "ext": "py", "lang": "Python", "max_stars_repo_path": "oneflow/python/test/ops/test_binary_elementwise_ops.py", "max_stars_repo_name": "MaoXianXin/oneflow", "max_stars_repo_head_hexsha": "6caa52f3c5ba11a1d67f183bac4c1559b2a58ef5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-22T00:43:08.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-22T00:43:08.000Z", "max_issues_repo_path": "oneflow/python/test/ops/test_binary_elementwise_ops.py", "max_issues_repo_name": "MaoXianXin/oneflow", "max_issues_repo_head_hexsha": "6caa52f3c5ba11a1d67f183bac4c1559b2a58ef5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "oneflow/python/test/ops/test_binary_elementwise_ops.py", "max_forks_repo_name": "MaoXianXin/oneflow", "max_forks_repo_head_hexsha": "6caa52f3c5ba11a1d67f183bac4c1559b2a58ef5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.1059322034, "max_line_length": 84, "alphanum_fraction": 0.5960142537, "include": true, "reason": "import numpy", "num_tokens": 1990}
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
# In[ ]:
df = pd.read_csv("../../../input/sammy123_lower-back-pain-symptoms-dataset/Dataset_spine.csv",usecols=['Col1','Col2','Col3','Col4','Col5','Col6','Col7','Col8','Col9','Col10','Col11','Col12','Class_att'])
# In[ ]:
df.columns = ['pelvic_incidence','pelvic_tilt','lumbar_lordosis_angle', 'sacral_slope','pelvic_radius','degree_spondylolisthesis', 'pelvic_slope','Direct_tilt','thoracic_slope','cervical_tilt', 'sacrum_angle','scoliosis_slope','Class_att']
# In[ ]:
df.head()
# In[ ]:
features = df[['pelvic_incidence','pelvic_tilt','lumbar_lordosis_angle', 'sacral_slope','pelvic_radius','degree_spondylolisthesis', 'pelvic_slope','Direct_tilt','thoracic_slope','cervical_tilt', 'sacrum_angle','scoliosis_slope']]
# In[ ]:
targetVars = df.Class_att
# In[ ]:
feature_train,feature_test,target_train,target_test = train_test_split(features, targetVars, test_size=0.3)
# In[ ]:
model = LogisticRegression()
# In[ ]:
fitted_model = model.fit(feature_train, target_train)
# In[ ]:
predictions = fitted_model.predict(feature_test)
# In[ ]:
accuracy_score(target_test,predictions)
|
{"hexsha": "f69c000a36e679d888035f29675813f39879fb5b", "size": 1443, "ext": "py", "lang": "Python", "max_stars_repo_path": "relancer-exp/original_notebooks/sammy123_lower-back-pain-symptoms-dataset/logistic-regression.py", "max_stars_repo_name": "Chenguang-Zhu/relancer", "max_stars_repo_head_hexsha": "bf1a175b77b7da4cff12fbc5de17dd55246d264d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-05T22:27:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-05T22:27:49.000Z", "max_issues_repo_path": "relancer-exp/original_notebooks/sammy123_lower-back-pain-symptoms-dataset/logistic-regression.py", "max_issues_repo_name": "Chenguang-Zhu/relancer", "max_issues_repo_head_hexsha": "bf1a175b77b7da4cff12fbc5de17dd55246d264d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "relancer-exp/original_notebooks/sammy123_lower-back-pain-symptoms-dataset/logistic-regression.py", "max_forks_repo_name": "Chenguang-Zhu/relancer", "max_forks_repo_head_hexsha": "bf1a175b77b7da4cff12fbc5de17dd55246d264d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.24, "max_line_length": 240, "alphanum_fraction": 0.735966736, "include": true, "reason": "import numpy", "num_tokens": 400}
|
"""Utility functions"""
import os
import numpy as np
TRANSFER_COST = 2 * 60 # Default transfer time is 2 minutes
LARGE_NUMBER = 2147483647 # Earliest arrival time at start of algorithm
TRANSFER_TRIP = None
def mkdir_if_not_exists(name: str) -> None:
"""Create directory if not exists"""
if not os.path.exists(name):
os.makedirs(name)
def str2sec(time_str: str) -> int:
"""
Convert hh:mm:ss to seconds since midnight
:param time_str: String in format hh:mm:ss
"""
split_time = time_str.strip().split(":")
if len(split_time) == 3:
# Has seconds
hours, minutes, seconds = split_time
return int(hours) * 3600 + int(minutes) * 60 + int(seconds)
minutes, seconds = split_time
return int(minutes) * 60 + int(seconds)
def sec2str(scnds: int, show_sec: bool = False) -> str:
"""
Convert hh:mm:ss to seconds since midnight
:param show_sec: only show :ss if True
:param scnds: Seconds to translate to hh:mm:ss
"""
scnds = np.round(scnds)
hours = int(scnds / 3600)
minutes = int((scnds % 3600) / 60)
seconds = int(scnds % 60)
return (
"{:02d}:{:02d}:{:02d}".format(hours, minutes, seconds)
if show_sec
else "{:02d}:{:02d}".format(hours, minutes)
)
|
{"hexsha": "e1a7cace872f3deebd5b268f671323710574589e", "size": 1287, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyraptor/util.py", "max_stars_repo_name": "yfredrix/pyraptor", "max_stars_repo_head_hexsha": "a00b1d5576cd4126611483409e293d283cb7917d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-01-12T17:19:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-14T15:45:18.000Z", "max_issues_repo_path": "pyraptor/util.py", "max_issues_repo_name": "yfredrix/pyraptor", "max_issues_repo_head_hexsha": "a00b1d5576cd4126611483409e293d283cb7917d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-02-21T15:53:21.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-21T15:53:21.000Z", "max_forks_repo_path": "pyraptor/util.py", "max_forks_repo_name": "yfredrix/pyraptor", "max_forks_repo_head_hexsha": "a00b1d5576cd4126611483409e293d283cb7917d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2022-02-04T06:58:50.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-24T06:34:28.000Z", "avg_line_length": 27.3829787234, "max_line_length": 72, "alphanum_fraction": 0.6285936286, "include": true, "reason": "import numpy", "num_tokens": 357}
|
Require Import compcert.lib.Coqlib.
Require Import VST.msl.Coqlib2.
(* deliberately imported here *)
Require Import Coq.Wellfounded.Inclusion.
Require Import Coq.Wellfounded.Inverse_Image.
(* ssreflect *)
From mathcomp.ssreflect Require Import ssreflect ssrbool ssrnat ssrfun eqtype seq fintype finfun.
Set Implicit Arguments.
Require Import VST.concurrency.pos.
Require Import VST.concurrency.cast.
(** * Well-Founded Orders *)
(** This file defines a generalized lexicographic order on dependently
typed products. It exposes the following interface: *)
(** Assumptions:
- [Variable N : nat.]
- [Variable N_pos : (0 < N)%coq_nat.]
- [Variable types : 'I_N -> Type.]
- [Variable ords : forall i : 'I_N, types i -> types i -> Prop.]
- [Variable wf_ords : forall i : 'I_N, well_founded (@ords i).] *)
(** Exported:
- [t : Type]
- [mk : (forall i : 'I_N, types i) -> t]
- [get : (i : 'I_N) (d : t), types i]
- [set : {i : 'I_N} (d : t) (x : types i), t]
- [ord : t -> t -> Prop]
- [wf_ord : well_founded ord]
- [ord_set : (i : 'I_N) (d : t) (x : types i),
ords i x (get i d) -> ord (set d x) d]
- [gss : (i : 'I_N) (x : types i) (d : t), get i (set i x d) = x]
- [gso : (i j : 'I_N) (x : types i) (d : t),
i <> j -> get j (set i x d) = get j d] *)
(** In addition, it defines a WF order on the [\Sigma]-type
[\Sigma ix : 'I_N. T ix] *)
Lemma ord_dec (N : nat) (i j : 'I_N) : {i=j} + {~i=j}.
Proof.
case: i; case: j=> m pf n pf'; move: pf pf'; case: (eq_nat_dec n m).
by move=> -> pf pf'; left; f_equal; apply: proof_irr.
by move=> neq pf pf'; right; case.
Qed.
Lemma wf_eta {A: Type}{f: A -> A -> Prop} :
well_founded f -> well_founded (fun a1 a2 => f a1 a2).
Proof.
by move=> H1; have ->: ((fun a1 a2 => f a1 a2) = f) by do 2 extensionality.
Qed.
Lemma wf_eta' {A: Type}{f: A -> A -> Prop} :
well_founded (fun a1 a2 => f a1 a2) -> well_founded f.
Proof.
by move=> H1; have <-: ((fun a1 a2 => f a1 a2) = f) by do 2 extensionality.
Qed.
Lemma wf_funct {A B: Type}{R: B -> B -> Prop}(f: A -> B) :
well_founded R -> well_founded (fun a1 a2 => R (f a1) (f a2)).
Proof.
move=> H1; set (F := fun a b => f a = b).
apply: (wf_incl _ _
(fun x y: A => exists2 b : B, F x b & forall c : B, F y c -> R b c)).
by move=> a1 a2 HR; exists (f a1); rewrite/F=> //; move=> b <-.
by apply: wf_inverse_rel.
Qed.
Module Type LEX.
Parameter t : forall (N : pos) (types : 'I_N -> Type), Type.
Arguments t {N} types.
Parameter ord : forall (N : pos)
(types : 'I_N -> Type) (ords : forall i : 'I_N, types i -> types i -> Prop),
t types -> t types -> Prop.
Parameter wf_ord : forall (N : pos)
(types : 'I_N -> Type) (ords : forall i : 'I_N, types i -> types i -> Prop)
(wf_ords : forall i : 'I_N, well_founded (ords i)),
well_founded (ord ords).
Section lex.
Variable N : pos.
Variable types : 'I_N -> Type.
Variable ords : forall i : 'I_N, types i -> types i -> Prop.
Variable wf_ords : forall i : 'I_N, well_founded (@ords i).
Notation t := (t types).
Notation ord := (@ord N types ords).
Parameter mk : (forall i : 'I_N, types i) -> t.
Parameter get : forall i : 'I_N, t -> types i.
Parameter set : forall (i : 'I_N) (x : types i), t -> t.
Parameter ord_upd :
forall (i : 'I_N) (x : types i) (d : t),
ords x (get i d) -> ord (set x d) d.
Parameter gss :
forall (i : 'I_N) (x : types i) (d : t), get i (set x d) = x.
Parameter gso :
forall (i j : 'I_N) (x : types i) (d : t), i <> j ->
get j (set x d) = get j d.
End lex. End LEX.
Module Lex : LEX. Section lex.
Variable N : pos.
Variable types : 'I_N -> Type.
Variable ords : forall i : 'I_N, types i -> types i -> Prop.
Variable wf_ords : forall i : 'I_N, well_founded (@ords i).
Lemma N_minus_lt (n : nat) : ((N - S n) < N)%N.
Proof. case: N=> m pf; rewrite -minusE; apply/ltP=> /=; omega. Qed.
Definition t : Type := forall i : 'I_N, types i.
Definition mk : t -> t := id.
Fixpoint ty' (n : nat) : Type :=
match n with
| O => unit
| S n' => (types (Ordinal (N_minus_lt n')) * ty' n')%type
end.
Fixpoint ty_intro' (n : nat) (data : t) : ty' n :=
match n as m in nat return ty' m with
| O => tt
| S n' => (data (Ordinal (N_minus_lt n')), ty_intro' n' data)
end.
Definition ty := ty' N.
Definition ty_intro := ty_intro' N.
Fixpoint ord' (n : nat) : ty' n -> ty' n -> Prop :=
match n as m in nat return ty' m -> ty' m -> Prop with
| O => fun d1 d2 => False
| S n' => fun d1 d2 =>
lex_ord (@ords (Ordinal (N_minus_lt n'))) (ord' n') d1 d2
end.
Lemma wf_ord' n : well_founded (ord' n).
Proof.
rewrite/ord'; move: wf_ords; move: ords; clear ords wf_ords.
induction n=> ords WF; first by constructor.
apply: wf_eta; apply: wf_lex_ord; first by [].
by apply: (IHn ords).
Qed.
Definition ord (d1 d2 : t) := ord' N (ty_intro d1) (ty_intro d2).
Lemma wf_ord : well_founded ord.
Proof. by rewrite/ord; apply: wf_funct; apply: wf_ord'. Qed.
Definition cast_ty (T1 T2: Type) (pf: T1=T2) (x : T1) : T2.
Proof.
rewrite pf in x; refine x.
Defined.
Lemma types_eq (i j : 'I_N) : i=j -> types i=types j.
Proof. by move=> ->. Defined.
Definition get i (d : t) : types i := d i.
Definition set i (new_i : types i) (d : t) : t :=
fun j : 'I_N =>
match ord_dec i j with
| left pf => cast_ty (types_eq pf) new_i
| right _ => d j
end.
Implicit Arguments set [].
Lemma gss i x d : get i (set i x d) = x.
Proof.
rewrite/get/set; case: (ord_dec i i)=> //.
rewrite/cast_ty/eq_rect_r/eq_rect/types_eq/eq_ind_r/eq_ind/eq_rect=> e.
by rewrite (UIP_refl _ _ e).
Qed.
Lemma gso i j x d : i<>j -> get j (set i x d) = get j d.
Proof. by rewrite/get/set; case: (ord_dec i j). Qed.
Local Open Scope nat_scope.
Lemma ord_upd' : forall i data1 data2 num_cores,
@ords i (get i data2) (get i data1) ->
(N - num_cores <= i < N)%nat -> (num_cores <= N)%nat ->
(forall j : 'I_N, j < i -> get j data1=get j data2) ->
ord' num_cores (ty_intro' num_cores data2) (ty_intro' num_cores data1).
Proof.
move=> [i pf] d1 d2 num_cores H1; move/andP=> []; move/leP=> H2; move/ltP.
induction num_cores as [|n]; first by rewrite -minusE /= in H2=> /= H3; omega.
move=> /= H3 H4 H5; case: (ord_dec (Ordinal pf) (Ordinal (N_minus_lt n))).
by move=> EQ; apply: lex_ord_left; rewrite -EQ.
rewrite/get in H5; move=> NEQ; rewrite -H5; have NEQ2: (i <> N - n.+1)
by move=> H6; apply: NEQ; subst; f_equal; apply: proof_irr.
have: (N - S n < i) by apply/ltP; simpl in H2; omega.
move/ltP=> H6; apply lex_ord_right; apply: IHn=> //=.
by move: H6; rewrite -minusE=> H6; omega.
by move: (ltP H4)=> H7; apply/leP; omega.
have H6: (i <> N - n.+1)
by move=> H6; apply: NEQ; subst; f_equal; apply: proof_irr.
by move: H2=> /= H2; apply/ltP; omega.
Qed.
Lemma ord_upd i x d : ords x (get i d) -> ord (set i x d) d.
Proof.
move=> A; apply ord_upd' with (i := i)=> //.
rewrite gss=> //.
have B: (O <= i < N) by apply/andP; split.
rewrite -minusE; move: (andP B)=> {A B}[]A B.
move: (leP A)=> A'; move: (ltP B)=> B'; apply/andP; split.
- by apply/leP; omega.
- by apply/ltP; omega.
move=> j Hlt; rewrite gso=> //.
case: i x A Hlt; case: j=> /= i pf new_i A Hlt j pf'=> H1.
by move: pf'; case: H1=> ->; move/ltP=> ?; omega.
Qed.
End lex.
End Lex.
(** The following defines a wf-order on the type
\Sigma ix : 'I_N. T ix *)
Section sig_ord.
Variable N : pos.
Variable T : 'I_N -> Type.
Variable ords : forall ix : 'I_N, T ix -> T ix -> Prop.
Variable ords_wf : forall ix, well_founded (@ords ix).
Definition sig_data := {ix : 'I_N & T ix}.
Definition sig_ord (x y : sig_data) :=
exists pf : projT1 x = projT1 y,
(@ords (projT1 x))
(projT2 x)
(cast_ty (lift_eq _ (sym_eq pf)) (projT2 y)).
Lemma wf_sig_ord : well_founded sig_ord.
Proof.
move=> []i.
apply (well_founded_induction (@ords_wf i)).
move=> x IH.
apply: Acc_intro=> []j []/=pf H2.
have H3: @ords i (cast _ pf (projT2 j)) x.
{ move: pf H2; case: j=> /= ix j pf; subst ix.
by rewrite !cast_ty_erefl. }
case: (IH _ H3)=> H4.
apply: Acc_intro=> y H5; apply: H4.
have pf2: projT1 y = i.
{ by case: H5=> pf0 _; rewrite pf0. }
exists pf2=> /=.
move: pf2; set r := projT1 y; subst i.
rewrite cast_ty_erefl.
move=> pf2; move: H5; subst r; case=> pf3.
by have ->: sym_eq pf3 = sym_eq pf2 by apply: proof_irr.
Qed.
End sig_ord.
|
{"author": "ildyria", "repo": "coq-verif-tweetnacl", "sha": "8181ab4406cefd03ab0bd53d4063eb1644a2673d", "save_path": "github-repos/coq/ildyria-coq-verif-tweetnacl", "path": "github-repos/coq/ildyria-coq-verif-tweetnacl/coq-verif-tweetnacl-8181ab4406cefd03ab0bd53d4063eb1644a2673d/packages/coq-vst/coq-vst.2.0/concurrency/wf_lemmas.v"}
|
using LineEdit
include("keymaps.jl")
|
{"hexsha": "f19840451339360944e3a49a3bae6e749ee43501", "size": 38, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "UnofficialJuliaMirror/LineEdit.jl-3f7aa1fa-0fbc-58b7-89cd-01d1190b12a7", "max_stars_repo_head_hexsha": "52685d387a516e41a733baba1102439190a79204", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "UnofficialJuliaMirror/LineEdit.jl-3f7aa1fa-0fbc-58b7-89cd-01d1190b12a7", "max_issues_repo_head_hexsha": "52685d387a516e41a733baba1102439190a79204", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-09-04T17:00:53.000Z", "max_issues_repo_issues_event_max_datetime": "2019-10-09T08:34:26.000Z", "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "UnofficialJuliaMirror/LineEdit.jl-3f7aa1fa-0fbc-58b7-89cd-01d1190b12a7", "max_forks_repo_head_hexsha": "52685d387a516e41a733baba1102439190a79204", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2016-07-12T02:14:49.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-01T12:27:15.000Z", "avg_line_length": 9.5, "max_line_length": 21, "alphanum_fraction": 0.7631578947, "num_tokens": 11}
|
using TensorIntegration
using Test
using LinearAlgebra
@testset "Expectation of uniform by simpson rule, 1D/2D/3D" begin
# add test for basic integrals
N = 5
# uniform expectation
a = 1.
b = 3.
pdf(x, a, b) = 1.0 ./ (b-a)
grid, weights = TensorIntegration.simpson(a, b, N)
Exp = dot((grid.*pdf.(grid, a, b)),weights)
@test Exp ≈ (a+b)/2.
# now in 2d
N2d = [5, 5]
a2d = [1., 1.]
b2d = [2., 2.]
pdf(x, a, b, d) = 1.0 ./ (b[d]-a[d])
grid, weights = TensorIntegration.tensor_simpson(a2d, b2d, N2d)
Exp1 = dot((grid[:,1].*pdf(grid, a2d, b2d, 1)), weights)
Exp2 = dot((grid[:,2].*pdf(grid, a2d, b2d, 2)), weights)
@test Exp1[1] ≈ (a2d[1]+b2d[1])/2.
@test Exp2[1] ≈ (a2d[2]+b2d[2])/2.
# now in 3d
N3d = [5, 5, 5]
a3d = [1., 1., 0.1]
b3d = [2., 2., 3.3]
pdf(x, a::Array, b::Array) = 1.0 ./ prod(b3d.-a3d)
grid, weights = TensorIntegration.tensor_simpson(a3d, b3d, N3d)
Exp1 = dot((grid[:,1].*pdf(grid, a3d, b3d)), weights)
Exp2 = dot((grid[:,2].*pdf(grid, a3d, b3d)), weights)
Exp3 = dot((grid[:,3].*pdf(grid, a3d, b3d)), weights)
@test isapprox(Exp1, (a3d[1]+b3d[1])/2.)
@test isapprox(Exp2, (a3d[2]+b3d[2])/2.)
@test isapprox(Exp3, (a3d[3]+b3d[3])/2.)
# now with one dimension only 1 point
N3d = [5, 5, 1]
a3d = [1., 1., 1.]
b3d = [2., 2., 2.]
pdf(x, a::Array, b::Array) = 1.0 ./prod(b3d.-a3d)
grid, weights = TensorIntegration.tensor_simpson(a3d, b3d, N3d)
Exp1 = dot((grid[:,1].*pdf(grid, a3d, b3d)), weights)
Exp2 = dot((grid[:,2].*pdf(grid, a3d, b3d)), weights)
Exp3 = dot((grid[:,3].*pdf(grid, a3d, b3d)), weights)
@test isapprox(Exp1, (a3d[1]+b3d[1])/2.)
@test isapprox(Exp2, (a3d[2]+b3d[2])/2.)
@test isapprox(Exp3, (a3d[3]+b3d[3])/2.)
end
|
{"hexsha": "d1be127699238c0037cfdeff642651821a0055a3", "size": 1830, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "chrished/TensorIntegration.jl", "max_stars_repo_head_hexsha": "c97a57557d46eed491f2a3acf104fcd02890471b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "chrished/TensorIntegration.jl", "max_issues_repo_head_hexsha": "c97a57557d46eed491f2a3acf104fcd02890471b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "chrished/TensorIntegration.jl", "max_forks_repo_head_hexsha": "c97a57557d46eed491f2a3acf104fcd02890471b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.0, "max_line_length": 67, "alphanum_fraction": 0.5448087432, "num_tokens": 824}
|
#!/bin/python3
# encoding: utf-8
import sys
import numpy as np
from time import time
'''
x
[0, 2] => idx start 0, end 3
[3, 5] => idx start 3, end 6
[6, 8] => idx start 6, end 9
((0 + (r_idx // 3 * 3)): (3 + (r_idx // 3 * 3)), (0 + (c_idx // 3 * 3)): (3 + (c_idx // 3 * 3)))
np.random.randint(1, 10)
'''
sys.setrecursionlimit(10 ** 7)
np.random.seed(int(time() % 1000))
TRIALS = [(0, 0, 0)]
def padding(input_values, rollback=False):
MAX_ROW, MAX_COL = input_values.shape
# if it is rollback
if rollback:
if len(TRIALS) == 0:
raise Exception('No possible result!')
i, j, prev_val = TRIALS.pop()
valid_digit = False
for num in range(prev_val+1, 10):
input_values[i, j] = num
valid_digit = value_chk(input_values, i, j)
if valid_digit: # if value fits current position
TRIALS.append((i, j, num))
return padding(input_values)
if not valid_digit: # if not updated
# clear value
input_values[i, j] = 0
# and rollback again
return padding(input_values, True)
else:
# if new position
for i in range(MAX_ROW):
for j in range(MAX_COL):
if input_values[i, j] == 0:
valid_digit = False
for num in range(1, 10):
input_values[i, j] = num
valid_digit = value_chk(input_values, i, j)
if valid_digit: # if value fits current position
TRIALS.append((i, j, num))
return padding(input_values)
# if no digit fits, rollback
if not valid_digit:
input_values[i, j] = 0
return padding(input_values, True)
return input_values
def value_chk(val_mtx, row_idx, col_idx):
val = val_mtx[row_idx, col_idx]
return (dup_cnt(val_mtx[row_idx, :], val) == 1
and dup_cnt(val_mtx[:, col_idx], val) == 1
and dup_cnt(val_mtx[(0 + (row_idx // 3 * 3)): (3 + (row_idx // 3 * 3)), (0 + (col_idx // 3 * 3)): (3 + (col_idx // 3 * 3))].flatten(), val) == 1)
def dup_cnt(tar_arr, val):
cnt = 0
for e in tar_arr:
if e == val:
cnt += 1
return cnt
if __name__ == '__main__':
i1 = np.array([
[5, 3, 0, 0, 7, 0, 0, 0, 0],
[6, 0, 0, 1, 9, 5, 0, 0, 0],
[0, 9, 8, 0, 0, 0, 0, 6, 0],
[8, 0, 0, 0, 6, 0, 0, 0, 3],
[4, 0, 0, 8, 0, 3, 0, 0, 1],
[7, 0, 0, 0, 2, 0, 0, 0, 6],
[0, 6, 0, 0, 0, 0, 2, 8, 0],
[0, 0, 0, 4, 1, 9, 0, 0, 5],
[0, 0, 0, 0, 8, 0, 0, 7, 9]
])
print('Original input:\n', i1)
result = padding(i1)
print('Result:\n', result)
# result check
for i in range(result.shape[0]):
for j in range(result.shape[1]):
if not value_chk(result, i, j):
raise Exception("Unvalid result! ({}, {})".format(i, j))
|
{"hexsha": "c99c9b261d2068fe9c60d5d654b34c3a8117a520", "size": 3050, "ext": "py", "lang": "Python", "max_stars_repo_path": "py_code/sudoku.py", "max_stars_repo_name": "xiangnan-fan/proj01", "max_stars_repo_head_hexsha": "856b1a444a526fa35e3fc1328669526429fd56af", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "py_code/sudoku.py", "max_issues_repo_name": "xiangnan-fan/proj01", "max_issues_repo_head_hexsha": "856b1a444a526fa35e3fc1328669526429fd56af", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "py_code/sudoku.py", "max_forks_repo_name": "xiangnan-fan/proj01", "max_forks_repo_head_hexsha": "856b1a444a526fa35e3fc1328669526429fd56af", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.8080808081, "max_line_length": 153, "alphanum_fraction": 0.4868852459, "include": true, "reason": "import numpy", "num_tokens": 1004}
|
""" FlowNet model written in TF2/Keras
https://arxiv.org/pdf/1504.06852.pdf
"""
from typing import Dict, Tuple, Optional, Union
from pathlib import Path
from copy import deepcopy
from datetime import datetime
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
import utils_io as uio
import utils
from config import CONFIG_FLOWNET, CONFIG_TRAINING
class MalformedNetworkType(Exception):
"""The provided network type doesn't match one of 'simple' or 'correlation'."""
class FlowNet:
""" FlowNetSimple model from the Computer Vision Group of Freiburg.
https://lmb.informatik.uni-freiburg.de/
https://lmb.informatik.uni-freiburg.de/Publications/2015/DFIB15/flownet.pdf
"""
def __init__(self, config: Dict):
self.config = config
self.model = self._construct_network(config)
def __getattr__(self, attr):
""" Rather than potentially override any of the tf.keras.Model methods by subclassing and defining new methods,
create a composition class with self.model:tf.keras.Model and allow attribute calls directly against self.model
"""
return getattr(self.model, attr)
@staticmethod
def get_simple_model(config: Dict) -> tf.keras.Model:
inputs = tf.keras.Input(shape=(384, 512, 6))
""" Contracting part """
conv_1 = tf.keras.layers.Conv2D(name='conv1', filters=64, kernel_size=7, strides=2, padding='same', activation=tf.keras.activations.relu)(inputs)
conv_2 = tf.keras.layers.Conv2D(name='conv2', filters=128, kernel_size=5, strides=2, padding='same', activation=tf.keras.activations.relu)(conv_1)
conv_3 = tf.keras.layers.Conv2D(name='conv3', filters=256, kernel_size=5, strides=2, padding='same', activation=tf.keras.activations.relu)(conv_2)
conv_3_1 = tf.keras.layers.Conv2D(name='conv3_1', filters=256, kernel_size=3, strides=1, padding='same', activation=tf.keras.activations.relu)(conv_3)
conv_4 = tf.keras.layers.Conv2D(name='conv4', filters=512, kernel_size=3, strides=2, padding='same', activation=tf.keras.activations.relu)(conv_3_1)
conv_4_1 = tf.keras.layers.Conv2D(name='conv4_1', filters=512, kernel_size=3, strides=1, padding='same', activation=tf.keras.activations.relu)(conv_4)
conv_5 = tf.keras.layers.Conv2D(name='conv5', filters=512, kernel_size=3, strides=2, padding='same', activation=tf.keras.activations.relu)(conv_4_1)
conv_5_1 = tf.keras.layers.Conv2D(name='conv5_1', filters=512, kernel_size=3, strides=1, padding='same', activation=tf.keras.activations.relu)(conv_5)
conv_6 = tf.keras.layers.Conv2D(name='conv6', filters=1024, kernel_size=3, strides=2, padding='same', activation=tf.keras.activations.relu)(conv_5_1)
conv_6_1 = tf.keras.layers.Conv2D(name='conv6_1', filters=1024, kernel_size=3, strides=1, padding='same', activation=tf.keras.activations.relu)(conv_6)
""" The paper itself doesn't have this documented but all implementations, including the original authors, use an extra flow path in the code. """
predict_6 = tf.keras.layers.Conv2D(name='predict_6', filters=2, kernel_size=3, strides=1, padding='same', activation=None)(conv_6_1)
""" Expanding part """
upconv_5 = tf.keras.layers.Conv2DTranspose(name='upconv_5', filters=512, kernel_size=(4, 4), strides=2, padding='same', activation=tf.keras.activations.relu)(conv_6)
flow_6 = tf.keras.layers.Conv2DTranspose(name='flow_6', filters=2, kernel_size=(4, 4), strides=2, padding='same', activation=tf.keras.activations.relu)(predict_6)
concat_5 = tf.keras.layers.Concatenate(name='concat_5', axis=-1)([upconv_5, conv_5_1, flow_6])
predict_5 = tf.keras.layers.Conv2D(name='predict_5', filters=2, kernel_size=3, strides=1, padding='same', activation=None)(concat_5)
upconv_4 = tf.keras.layers.Conv2DTranspose(name='upconv_4', filters=256, kernel_size=(4, 4), strides=2, padding='same', activation=tf.keras.activations.relu)(concat_5)
flow_5 = tf.keras.layers.Conv2DTranspose(name='flow_5', filters=2, kernel_size=(4, 4), strides=2, padding='same', activation=tf.keras.activations.relu)(predict_5)
concat_4 = tf.keras.layers.Concatenate(name='concat_4', axis=-1)([upconv_4, conv_4_1, flow_5])
predict_4 = tf.keras.layers.Conv2D(name='predict_4', filters=2, kernel_size=3, strides=1, padding='same', activation=None)(concat_4)
upconv_3 = tf.keras.layers.Conv2DTranspose(name='upconv_3', filters=128, kernel_size=(4, 4), strides=2, padding='same', activation=tf.keras.activations.relu)(concat_4)
flow_4 = tf.keras.layers.Conv2DTranspose(name='flow_4', filters=2, kernel_size=(4, 4), strides=2, padding='same', activation=tf.keras.activations.relu)(predict_4)
concat_3 = tf.keras.layers.Concatenate(name='concat_3', axis=-1)([upconv_3, conv_3_1, flow_4])
predict_3 = tf.keras.layers.Conv2D(name='predict_3', filters=2, kernel_size=3, strides=1, padding='same', activation=None)(concat_3)
upconv_2 = tf.keras.layers.Conv2DTranspose(name='upconv_2', filters=64, kernel_size=(4, 4), strides=2, padding='same', activation=tf.keras.activations.relu)(concat_3)
flow_3 = tf.keras.layers.Conv2DTranspose(name='flow_3', filters=2, kernel_size=(4, 4), strides=2, padding='same', activation=tf.keras.activations.relu)(predict_3)
concat_2 = tf.keras.layers.Concatenate(name='concat_2', axis=-1)([upconv_2, conv_2, flow_3])
predict_2 = tf.keras.layers.Conv2D(name='predict_2', filters=2, kernel_size=3, strides=1, padding='same', activation=None)(concat_2)
upconv_1 = tf.keras.layers.Conv2DTranspose(name='upconv_1', filters=64, kernel_size=(4, 4), strides=2, padding='same', activation=tf.keras.activations.relu)(concat_2)
flow_2 = tf.keras.layers.Conv2DTranspose(name='flow_2', filters=2, kernel_size=(4, 4), strides=2, padding='same', activation=tf.keras.activations.relu)(predict_2)
concat_1 = tf.keras.layers.Concatenate(name='concat_1', axis=-1)([upconv_1, conv_1, flow_2])
predict_1 = tf.keras.layers.Conv2D(name='predict_1', filters=2, kernel_size=3, strides=1, padding='same', activation=None)(concat_1)
if config['training']:
return tf.keras.Model(inputs=inputs, outputs=[predict_6, predict_5, predict_4, predict_3, predict_2, predict_1])
return tf.keras.Model(inputs=inputs, outputs=predict_1)
def disable_training(self):
""" After training is finished, run this method to have self.model predict a single array rather than a list of 6 arrays
"""
self.model = tf.keras.Model(inputs=self.model.layers[0].input, outputs=self.model.layers[-1].output)
def enable_training(self):
""" If you need to re-enable training, run this method to have self.model predict the list of 6 predictions
"""
output_layers = [layer.output for layer in self.model.layers if 'predict' in layer.name]
self.model = tf.keras.Model(inputs=self.model.layers[0].input, outputs=output_layers)
@staticmethod
def get_corr_model(config: Dict) -> tf.keras.Model:
raise NotImplementedError("The correlation model hasn't been implemented.")
@staticmethod
def _construct_network(config: Dict) -> tf.keras.Model:
if config['architecture'] == 'simple':
return FlowNet.get_simple_model(config)
if config['architecture'] == 'corr':
return FlowNet.get_corr_model(config)
raise MalformedNetworkType(f"{config['architecture']}: {MalformedNetworkType.__doc__}")
class DataGenerator:
""" Instantiate then call instance.next_train() to get a generator for training images/labels
call instance.next_val() to get a generator for validation images/labels
"""
def __init__(self,
network_type: str,
flo_normalization: Tuple[float, float],
root_path: Path,
batch_size: int,
validation_batch_size: int,
train_ratio: Union[float, int] = 1,
test_ratio: Union[float, int] = 0,
shuffle: bool = False,
augmentations: Optional[Dict] = None):
self.network_type = network_type
images = list(root_path.glob('*1.ppm'))
self.train, self.val, self.test = utils.get_train_val_test(images, train_ratio, test_ratio, shuffle)
self.batch_size = batch_size
self.validation_batch_size = validation_batch_size
self.replace = True
self.flo_normalization = flo_normalization
self.augmentations = augmentations
def next_train(self):
while True:
images = np.random.choice(self.train, self.batch_size, replace=self.replace)
img1 = [uio.read(str(img)) for img in images]
img2 = [uio.read(str(img).replace('1.ppm', '2.ppm')) for img in images]
label = [uio.read(str(img).replace('img1.ppm', 'flow.flo')) for img in images]
img1 = utils.normalize_images(img1)
img2 = utils.normalize_images(img2)
label = utils.normalize_flo(label, self.flo_normalization)
if not self.augmentations is None:
img1, img2, label = self._augment(img1, img2, label)
if self.network_type == 'simple':
images = np.concatenate([img1, img2], axis=-1)
elif self.network_type == 'correlation':
raise NotImplementedError()
else:
raise MalformedNetworkType(f'{self.network_type}: {MalformedNetworkType.__doc__}')
yield (images, np.array(label))
def next_val(self):
while True:
images = np.random.choice(self.val, self.validation_batch_size, replace=False)
img1 = [uio.read(str(img)) for img in images]
img2 = [uio.read(str(img).replace('1.ppm', '2.ppm')) for img in images]
label = [uio.read(str(img).replace('img1.ppm', 'flow.flo')) for img in images]
img1 = utils.normalize_images(img1)
img2 = utils.normalize_images(img2)
label = utils.normalize_flo(label, self.flo_normalization)
if self.network_type == 'simple':
images = np.concatenate([img1, img2], axis=-1)
elif self.network_type == 'correlation':
raise NotImplementedError()
else:
raise MalformedNetworkType(f'{self.network_type}: {MalformedNetworkType.__doc__}')
yield (images, np.array(label))
def _augment(self, img1, img2, label):
# Augmentations are more awkward because of the Siamese architecture, I can't justify applying different color transforms to each image independently
# I'm 100 certain there is a better way to do this as this is extremely inefficient with each call likely containing some portion of each other call.
r = np.random.rand(len(self.augmentations))
r_inc = 0 # This, with r, are used to randomly turn on/off augmentations so that not every augmentation is applied each time
r_onoff = 2/5
if 'brightness' in self.augmentations and r[r_inc] <= r_onoff:
rdm = np.random.rand(self.batch_size) * self.augmentations['brightness']
def brt(x, idx): return tf.image.adjust_brightness(x, rdm[idx])
img1 = tf.stack([brt(im, idx) for idx, im in enumerate(img1)], axis=0)
img2 = tf.stack([brt(im, idx) for idx, im in enumerate(img2)], axis=0)
r_inc += 1
if 'multiplicative_colour' in self.augmentations and r[r_inc] <= r_onoff:
rdm = np.random.rand(self.batch_size, 3) * (self.augmentations['multiplicative_colour'][1] -
self.augmentations['multiplicative_colour'][0]) + self.augmentations['multiplicative_colour'][0]
def mc(x, idx): return x * rdm[idx]
img1 = tf.clip_by_value(tf.stack([mc(im, idx) for idx, im in enumerate(img1)], axis=0), clip_value_min=0, clip_value_max=1)
img2 = tf.clip_by_value(tf.stack([mc(im, idx) for idx, im in enumerate(img2)], axis=0), clip_value_min=0, clip_value_max=1)
r_inc += 1
if 'gamma' in self.augmentations and r[r_inc] <= r_onoff:
rdm = np.random.rand(self.batch_size) * (self.augmentations['gamma'][1] - self.augmentations['gamma'][0]) + self.augmentations['gamma'][0]
def gam(x, idx): return tf.image.adjust_gamma(x, gamma=rdm[idx])
img1 = tf.stack([gam(im, idx) for idx, im in enumerate(img1)], axis=0)
img2 = tf.stack([gam(im, idx) for idx, im in enumerate(img2)], axis=0)
r_inc += 1
if 'contrast' in self.augmentations and r[r_inc] <= r_onoff:
rdm = np.random.rand(self.batch_size) * (self.augmentations['contrast'][1] - self.augmentations['contrast'][0]) + self.augmentations['contrast'][0]
def cts(x, idx): return tf.image.adjust_contrast(x, contrast_factor=rdm[idx])
img1 = tf.stack([cts(im, idx) for idx, im in enumerate(img1)], axis=0)
img2 = tf.stack([cts(im, idx) for idx, im in enumerate(img2)], axis=0)
r_inc += 1
if 'gaussian_noise' in self.augmentations and r[r_inc] <= r_onoff:
rdm = np.random.rand(self.batch_size) * self.augmentations['gaussian_noise']
def gau(x, idx): return x + tf.random.normal(x.shape, mean=0.0, stddev=rdm[idx], dtype=x.dtype)
img1 = tf.clip_by_value(tf.stack([gau(im, idx) for idx, im in enumerate(img1)], axis=0), clip_value_min=0, clip_value_max=1)
img2 = tf.clip_by_value(tf.stack([gau(im, idx) for idx, im in enumerate(img2)], axis=0), clip_value_min=0, clip_value_max=1)
r_inc += 1
return img1, img2, label
class EndPointError(tf.keras.losses.Loss):
""" EndPointError is the Euclidean distance between the predicted flow vector and the ground truth averaged over all pixels.
The resizing is required because the loss is calculated for each flow prediction which occur at different stride levels,
resizing effectively averages at that scale.
"""
def call(self, y_true, y_pred):
return K.sqrt(K.sum(K.square(tf.image.resize(y_true, y_pred.shape[1:3]) - y_pred), axis=1, keepdims=True))
def load_images(image_name: str):
""" Debug function to load the first image for visualization
"""
root_path = Path(r'C:\Users\andre\Documents\Python\FlowNet_TF2\data\FlyingChairs_release\data')
flo_path = root_path / f'{image_name}_flow.flo'
img1_path = root_path / f'{image_name}_img1.ppm'
img2_path = root_path / f'{image_name}_img2.ppm'
flo = uio.read(str(flo_path))
img1 = uio.read(str(img1_path))
img2 = uio.read(str(img2_path))
img = np.expand_dims(np.concatenate([img1, img2], axis=-1), axis=0)
# fig, ax = plt.subplots(ncols=2, nrows=2)
# ax[0,0].imshow(img1)
# ax[0,1].imshow(img2)
# ax[1,0].imshow(flo[...,0])
# ax[1,1].imshow(flo[...,1])
# plt.show()
return img, np.expand_dims(flo, axis=0)
def show_images(simple_images, label):
"""
"""
import matplotlib.pyplot as plt
fig, ax = plt.subplots(ncols=2, nrows=2)
ax[0, 0].imshow(simple_images[..., :3])
ax[0, 1].imshow(simple_images[..., 3:])
ax[1, 0].imshow(label[..., 0])
ax[1, 1].imshow(label[..., 1])
plt.show()
def main():
config_network = deepcopy(CONFIG_FLOWNET)
config_training = deepcopy(CONFIG_TRAINING)
# On first run, populate the min, max scaling values for the flo dataset
# min, max = utils.get_training_min_max(config_training['img_path'])
flownet = FlowNet(config_network)
loss = EndPointError()
flownet.compile(optimizer=tf.keras.optimizers.Adam(),
loss=[loss, loss, loss, loss, loss, loss],
loss_weights=config_training['loss_weights'][::-1])
data_generator = DataGenerator(config_network['architecture'],
config_network['flo_normalization'],
config_training['img_path'],
config_training['batch_size'],
config_training['validation_batch_size'],
config_training['train_ratio'],
config_training['test_ratio'],
config_training['shuffle'],
config_training['augmentations'])
log_dir = f"logs/fit/{datetime.now().strftime('%Y%m%d-%H%M%S')}"
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
checkpoint_filepath = f"checkpoint/{datetime.now().strftime('%Y%m%d-%H%M%S')}"
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_filepath,
save_weights_only=False,
monitor='val_loss',
mode='min',
save_best_only=True)
if not config_training['pretrained_path'] is None:
flownet.model = tf.keras.models.load_model(config_training['pretrained_path'], custom_objects={'EndPointError': EndPointError})
# flownet.fit(x=data_generator.next_train(),
# epochs=10,
# verbose=1,
# steps_per_epoch=22872 // config_training['batch_size'],
# validation_data=data_generator.next_val(),
# validation_steps=4,
# validation_batch_size=config_training['validation_batch_size'],
# callbacks=[tensorboard_callback, model_checkpoint_callback],
# # use_multiprocessing=True
# )
flownet.disable_training()
#
# Temporary debugging and visualization
#
img, flo = load_images(image_name="22868")
norm_img = utils.normalize_images(img)
predicted_flo = flownet.predict(norm_img)
predicted_flo = utils.denormalize_flo(predicted_flo, config_network['flo_normalization'])
predicted_flo = tf.image.resize(predicted_flo, (384, 512)).numpy()
import matplotlib.pyplot as plt
scale_min = np.min([np.min(flo), np.min(predicted_flo)])
scale_max = np.max([np.max(flo), np.max(predicted_flo)])
fig, ax = plt.subplots(ncols=2, nrows=3)
ax[0, 0].imshow(img[0, ..., :3])
ax[0, 1].imshow(img[0, ..., 3:])
ax[0, 0].set_ylabel('Input images')
ax[1, 0].imshow(flo[0, ..., 0], vmin=scale_min, vmax=scale_max)
ax[1, 1].imshow(flo[0, ..., 1], vmin=scale_min, vmax=scale_max)
ax[1, 0].set_ylabel('Ground truth flows')
ax[2, 0].imshow(predicted_flo[0, ..., 0], vmin=scale_min, vmax=scale_max)
ax[2, 1].imshow(predicted_flo[0, ..., 1], vmin=scale_min, vmax=scale_max)
ax[2, 0].set_ylabel('Predicted flows')
plt.show()
print('stall')
if __name__ == "__main__":
main()
|
{"hexsha": "b57020279726feabd70a804e5df2d7a6a9ae30bf", "size": 19143, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/model.py", "max_stars_repo_name": "andrewlstewart/FlowNet_v1_TF2", "max_stars_repo_head_hexsha": "eb21cfca227c21707db57e9e9a0cd359ab849cdb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2021-01-19T04:01:34.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-16T21:24:53.000Z", "max_issues_repo_path": "src/model.py", "max_issues_repo_name": "andrewlstewart/FlowNet_v1_TF2", "max_issues_repo_head_hexsha": "eb21cfca227c21707db57e9e9a0cd359ab849cdb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-11-13T18:58:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:40:39.000Z", "max_forks_repo_path": "src/model.py", "max_forks_repo_name": "andrewlstewart/FlowNet_v1_TF2", "max_forks_repo_head_hexsha": "eb21cfca227c21707db57e9e9a0cd359ab849cdb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 54.6942857143, "max_line_length": 175, "alphanum_fraction": 0.6494802278, "include": true, "reason": "import numpy", "num_tokens": 4838}
|
# -*- coding: utf-8 -*-
"""
Chance pair distance timeseries
Created on Sat Oct 12 13:42:52 2019
@author: Gebruiker
"""
import numpy as np
import pandas as pd
def ComputeDistance(ID1,ID2,Data_Mediterrenean):
id1 = [] #select only the 1st ID from all Mediterrenean data
id2 = [] #select only the 2nd ID from all Mediterrenean data
for i in range(len(Data_Mediterrenean[0])):
if Data_Mediterrenean[0,i] == ID1: #select right ID
id1 +=[[Data_Mediterrenean[1,i],Data_Mediterrenean[2,i],Data_Mediterrenean[3,i]]] #save latitude, longitude, time
if Data_Mediterrenean[0,i] == ID2: #select right ID
id2 +=[[Data_Mediterrenean[1,i],Data_Mediterrenean[2,i],Data_Mediterrenean[3,i]]] #save latitude, longitude, time
id1 = np.asarray(id1) #save as array for easy indexing
id2 = np.asarray(id2) #save as array for easy indexing
distance = [] #generate empty distance timeseries
time = [] #generate corresponding timeaxis
for i in range(len(id1)): #compare all measurement data
for j in range(len(id2)):
if id1[i,2]==id2[j,2]: # if the time is equal
distance += [np.sqrt((id1[i,0]-id2[j,0])**2+(id1[i,1]-id2[j,1])**2)] #compute distance in km and add to timeseries
time += [id1[i,2]] #add timestamp to timeaxis
mind = distance.index(min(distance)) #find the index of the minimum separation distance to slice both 'distance' and 'time'
d1 = list(reversed(distance[:mind+1])) #slice the timeseries up to the minimum and reverse it to create a backward timeseries
d2 = distance[mind:] #slice the timeseries from the minimum onwards to create a forward timeseries
t1 = list(reversed(time[:mind+1])) #slice te timeaxis in the same way as the timeseries
t2 = time[mind:] #slice te timeaxis in the same way as the timeseries
for n in range(len(t1)-1): #check for continuity
if t1[n]-1 != t1[n+1]: #In backward timeaxis each next timestep should be 1 smaller
t1 = t1[:n] #slice continuous timeaxis
d1 = d1[:n] #slice corresponding backward distance timeseries
break #stop for-loop when discontinuity is found
for n in range(len(t2)-1): #do the same for the forward timeseries
if t2[n]+1 != t2[n+1]:
t2 = t2[:n]
d2 = d2[:n]
break
return distance,time,d1,d2,t1,t2,mind
if __name__ == "__main__":
nd = np.genfromtxt('Data/MedSeaIDs.txt',delimiter=',')
pairs = np.genfromtxt('Data/UnPair.txt', delimiter=',')
for i in range(len(pairs)):
d,t,d1,d2,t1,t2,mind = ComputeDistance(pairs[i,0],pairs[i,1],nd)
np.savetxt('Data/BackwardsDistances/BDPair{0}.csv'.format(i),np.asarray((d1,t1)),delimiter = ',')
np.savetxt('Data/ForwardDistances/FDPair{0}.csv'.format(i),np.asarray((d2,t2)),delimiter = ',')
|
{"hexsha": "0c244070276a4ef7271e0e438beb65dfba603ee5", "size": 2861, "ext": "py", "lang": "Python", "max_stars_repo_path": "computedistance.py", "max_stars_repo_name": "reint-fischer/MAIOproject", "max_stars_repo_head_hexsha": "564fd60b4835657a5f9f9a58b4dc822d80895f8d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "computedistance.py", "max_issues_repo_name": "reint-fischer/MAIOproject", "max_issues_repo_head_hexsha": "564fd60b4835657a5f9f9a58b4dc822d80895f8d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "computedistance.py", "max_forks_repo_name": "reint-fischer/MAIOproject", "max_forks_repo_head_hexsha": "564fd60b4835657a5f9f9a58b4dc822d80895f8d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 52.9814814815, "max_line_length": 130, "alphanum_fraction": 0.6560643132, "include": true, "reason": "import numpy", "num_tokens": 851}
|
import sys
import unittest
import numpy as np
from embryovision.attentionbox import BoundingBoxCalculator, find_bounding_box
LABEL_SHAPE = (500, 500)
class TestConvenienceFunction(unittest.TestCase):
def test_does_the_same_as_calculator(self):
box_zona = (20, 20, 33, 33)
box_well = (10, 10, 70, 70)
box_cell = (25, 25, 15, 15)
labels = create_embryo_like_labels(box_zona, box_well, box_cell)
calc = BoundingBoxCalculator()
out_from_calc = calc.find_bounding_box(labels)
out_from_convenience = find_bounding_box(labels)
self.assertEqual(out_from_calc, out_from_convenience)
def test_honors_box_size(self):
box_zona = (20, 20, 33, 33)
box_well = (30, 30, 70, 70)
box_cell = (25, 25, 15, 15)
labels = create_embryo_like_labels(box_zona, box_well, box_cell)
box_side = 200
out_from_convenience = find_bounding_box(
labels, box_side=box_side)
self.assertEqual(out_from_convenience[2:], (box_side, box_side))
def test_returns_centered_box_on_empty_labels(self):
empty_labels = np.zeros(LABEL_SHAPE, dtype='int')
empty_labels[:] = BoundingBoxCalculator.outside_well_label
box = find_bounding_box(empty_labels)
image_center = [i // 2 for i in LABEL_SHAPE]
box_center = [box[0] + box[2] / 2, box[1] + box[3] / 2]
self.assertEqual(image_center, box_center)
def test_returns_valid_box_on_random_labels(self):
np.random.seed(1033)
random_labels = np.random.randint(
low=0, high=4, size=LABEL_SHAPE, dtype='int')
box = find_bounding_box(random_labels)
for i in range(2):
self.assertGreaterEqual(box[i], 0)
self.assertLessEqual(box[i] + box[i + 2], LABEL_SHAPE[i])
class TestBoundingBoxCalculator(unittest.TestCase):
def test_find_bounding_box_keeps_all_zona_and_within_zona(self):
calc = BoundingBoxCalculator()
box_zona = (20, 20, 33, 33)
box_well = (10, 10, 70, 70)
box_cell = (25, 25, 15, 15)
labels = create_embryo_like_labels(box_zona, box_well, box_cell)
out = calc.find_bounding_box(labels)
cropped = labels[out[0]: out[0] + out[2], out[1]: out[1] + out[3]]
for which_label in [calc.zona_label, calc.inside_zona_label]:
n_pixels_raw = (labels == which_label).sum()
n_pixels_cropped = (cropped == which_label).sum()
self.assertEqual(n_pixels_raw.sum(), n_pixels_cropped.sum())
def test_init_stores_box_side(self):
side = 100
calc = BoundingBoxCalculator(side)
self.assertEqual(calc.box_side, side)
def test_raises_value_error_when_shape_is_incorrect(self):
calc = BoundingBoxCalculator()
correct_shape = calc.image_shape
wrong_shape = [i + 10 for i in correct_shape]
image_wrong = np.zeros(wrong_shape)
self.assertRaises(ValueError, calc.find_bounding_box, image_wrong)
def test_default_box_side_is_328(self):
calc = BoundingBoxCalculator()
self.assertEqual(calc.box_side, 328)
def test_init_sets_up_box_halfside(self):
side = 100
calc = BoundingBoxCalculator(side)
self.assertEqual(calc._box_halfside, side / 2)
def test_expand_box_to_correct_size_gives_correct_width_and_height(self):
side = 31
calc = BoundingBoxCalculator(side)
np.random.seed(1130)
box_initial = np.random.randint(low=0, high=LABEL_SHAPE[0], size=4)
box_recentered = calc._expand_box_to_correct_size(box_initial)
self.assertEqual(box_recentered[2:], (side, side))
def test_find_bounding_box_returns_correct_shape(self):
side = 300
calc = BoundingBoxCalculator(side)
box_zona = (20, 20, 15, 15)
box_well = (30, 30, 70, 70)
box_cell = (25, 25, 5, 5)
labels = create_embryo_like_labels(box_zona, box_well, box_cell)
out = calc._find_bounding_box(labels)
self.assertEqual(out[2:], (side, side))
def test_find_bounding_box_returns_indexable_of_correct_size(self):
box_side = 300
calc = BoundingBoxCalculator(box_side)
box_well = (10, 10, 70, 70)
box_zona = (20, 20, 33, 33)
box_cell = (25, 25, 15, 15)
labels = create_embryo_like_labels(box_zona, box_well, box_cell)
out = calc.find_bounding_box(labels)
cropped = labels[out[0]: out[0] + out[2], out[1]: out[1] + out[3]]
self.assertEqual(cropped.shape, (box_side, box_side))
def test_find_mask_center(self):
center = (72, 36)
height = 11
width = 23
bbox = (center[0] - height // 2, center[1] - width // 2, height, width)
mask = create_mask(bbox)
calc = BoundingBoxCalculator(40)
predicted = calc._find_mask_center(mask)
self.assertEqual(center, predicted)
def test_mask_well_interior(self):
calc = BoundingBoxCalculator(40)
np.random.seed(1123)
labels = np.random.randint(
low=0, high=4, size=LABEL_SHAPE, dtype='int8')
inside_well = (labels != calc.outside_well_label)
predicted = calc._mask_well_interior(labels)
self.assertTrue(np.all(inside_well == predicted))
def test_mask_embryo(self):
calc = BoundingBoxCalculator(40)
np.random.seed(1123)
labels = np.random.randint(
low=0, high=4, size=LABEL_SHAPE, dtype='int8')
embryo = ((labels == calc.zona_label) |
(labels == calc.inside_zona_label))
predicted = calc._mask_embryo(labels)
self.assertTrue(np.all(embryo == predicted))
def test_calculate_minimal_bounding_box_for(self):
bbox = (65, 22, 11, 41)
mask = create_mask(bbox)
calc = BoundingBoxCalculator(40)
minimal_bbox = calc._calculate_minimal_bounding_box_for(mask)
self.assertEqual(bbox, minimal_bbox)
def test_shift_box1_to_enclose_box2_when_already_enclosed(self):
center = (62, 62)
side1 = 20
side2 = 10
box1 = tuple([c - side1 // 2 for c in center]) + (side1, side1)
box2 = tuple([c - side2 // 2 for c in center]) + (side2, side2)
box_shifted = BoundingBoxCalculator._shift_box1_to_enclose_box2(
box1, box2)
self.assertEqual(box_shifted, box1)
def test_shift_box1_to_enclose_box2_when_lower_left_of_box1(self):
box1 = (42, 42, 20, 20)
box2 = (35, 35, 10, 10)
box_shifted = BoundingBoxCalculator._shift_box1_to_enclose_box2(
box1, box2)
box_correct = box2[:2] + box1[2:]
self.assertEqual(box_shifted, box_correct)
def test_shift_box1_to_enclose_box2_when_upper_right_of_box1(self):
box1 = (42, 42, 22, 20)
box2 = (60, 60, 12, 10)
box_shifted = BoundingBoxCalculator._shift_box1_to_enclose_box2(
box1, box2)
upper2 = box2[0] + box2[2]
right2 = box2[1] + box2[3]
height1, width1 = box1[2:]
box_correct = (upper2 - height1, right2 - width1) + box1[2:]
self.assertEqual(box_shifted, box_correct)
def test_clip_box_to_be_within_boxside_when_negative(self):
calc = BoundingBoxCalculator()
box_outside = (-10, -10, 100, 100)
box_clipped = calc._clip_box_to_be_within_image(box_outside)
for i in range(2):
self.assertGreaterEqual(box_clipped[i], 0)
def test_clip_box_to_be_within_boxside_when_too_large(self):
box_side = 300
calc = BoundingBoxCalculator(box_side)
max_size = calc.image_shape
box_outside = (max_size[0] + 10, max_size[1] + 10, 100, 100)
box_clipped = calc._clip_box_to_be_within_image(box_outside)
for i in range(2):
self.assertLessEqual(box_clipped[i] + box_side, max_size[i])
def test_centered_empty_box(self):
calc = BoundingBoxCalculator()
centered_empty_box = calc._centered_empty_box
correct_box = tuple([i // 2 for i in calc.image_shape]) + (0, 0)
self.assertEqual(correct_box, centered_empty_box)
def create_mask(bbox):
lower, left, height, width = bbox
upper = lower + height
right = left + width
mask = np.zeros(LABEL_SHAPE, dtype='bool')
mask[lower:upper, left:right] = True
return mask
def create_embryo_like_labels(box_zona, box_well, box_cell):
mask_zona = create_mask(box_zona)
mask_well = create_mask(box_well)
mask_cell = create_mask(box_cell)
labels = np.full(
LABEL_SHAPE,
BoundingBoxCalculator.outside_well_label, dtype='int')
labels[mask_well] = BoundingBoxCalculator.inside_well_label
labels[mask_zona] = BoundingBoxCalculator.zona_label
labels[mask_cell] = BoundingBoxCalculator.inside_zona_label
return labels
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "6af324ba733749ed13271362d1692a25be18e980", "size": 8940, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_attentionbox.py", "max_stars_repo_name": "briandleahy/embryovision", "max_stars_repo_head_hexsha": "83a271ff71dcdc699e1d83b977a0e366e0870ef4", "max_stars_repo_licenses": ["BSD-4-Clause-UC"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_attentionbox.py", "max_issues_repo_name": "briandleahy/embryovision", "max_issues_repo_head_hexsha": "83a271ff71dcdc699e1d83b977a0e366e0870ef4", "max_issues_repo_licenses": ["BSD-4-Clause-UC"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_attentionbox.py", "max_forks_repo_name": "briandleahy/embryovision", "max_forks_repo_head_hexsha": "83a271ff71dcdc699e1d83b977a0e366e0870ef4", "max_forks_repo_licenses": ["BSD-4-Clause-UC"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.76, "max_line_length": 79, "alphanum_fraction": 0.6568232662, "include": true, "reason": "import numpy", "num_tokens": 2387}
|
[STATEMENT]
lemma real_polynomial_function_divide [intro]:
assumes "real_polynomial_function p" shows "real_polynomial_function (\<lambda>x. p x / c)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. real_polynomial_function (\<lambda>x. p x / c)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. real_polynomial_function (\<lambda>x. p x / c)
[PROOF STEP]
have "real_polynomial_function (\<lambda>x. p x * Fields.inverse c)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. real_polynomial_function (\<lambda>x. p x * inverse c)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
real_polynomial_function p
goal (1 subgoal):
1. real_polynomial_function (\<lambda>x. p x * inverse c)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
real_polynomial_function (\<lambda>x. p x * inverse c)
goal (1 subgoal):
1. real_polynomial_function (\<lambda>x. p x / c)
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
real_polynomial_function (\<lambda>x. p x * inverse c)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
real_polynomial_function (\<lambda>x. p x * inverse c)
goal (1 subgoal):
1. real_polynomial_function (\<lambda>x. p x / c)
[PROOF STEP]
by (simp add: divide_inverse)
[PROOF STATE]
proof (state)
this:
real_polynomial_function (\<lambda>x. p x / c)
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 541, "file": null, "length": 8}
|
function _chol!(A::StridedMatrix{<:BlasFloat}, ::Type{UpperTriangular})
C, info = LAPACK.potrf!('U', A)
return UpperTriangular(C), info
end
function _chol!(A::StridedMatrix{<:BlasFloat}, ::Type{LowerTriangular})
C, info = LAPACK.potrf!('L', A)
return LowerTriangular(C), info
end
|
{"hexsha": "e60bead99f10e92ed0fba8cfcf7f19ffcaf60512", "size": 297, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/cholesky.jl", "max_stars_repo_name": "Red-Portal/IonLinearAlgebra.jl", "max_stars_repo_head_hexsha": "5073647c79abdc816630baa5cb8611fab8a0085a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/cholesky.jl", "max_issues_repo_name": "Red-Portal/IonLinearAlgebra.jl", "max_issues_repo_head_hexsha": "5073647c79abdc816630baa5cb8611fab8a0085a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/cholesky.jl", "max_forks_repo_name": "Red-Portal/IonLinearAlgebra.jl", "max_forks_repo_head_hexsha": "5073647c79abdc816630baa5cb8611fab8a0085a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.7, "max_line_length": 71, "alphanum_fraction": 0.6868686869, "num_tokens": 103}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Credits: Grigorii Sukhorukov, Macha Nikolski
import numpy as np
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import ray
import os
import pathlib
import math
import random
from sklearn.utils import shuffle
import h5py
def reverse_complement(fragment):
"""
provides reverse complement to sequences
Input:
sequences - list with SeqRecord sequences in fasta format
Output:
complementary_sequences -
list with SeqRecord complementary sequences in fasta format
"""
# complementary_sequences = []
# for sequence in sequences:
# complementary_sequence = SeqRecord(
# seq=Seq(sequence.seq).reverse_complement(),
# id=sequence.id + "_reverse_complement",
# )
# complementary_sequences.append(complementary_sequence)
fragment = fragment[::-1].translate(str.maketrans('ACGT', 'TGCA'))
return fragment
def introduce_mutations(seqs, mut_rate, rs=None):
"""
Function that mutates sequences in the entering fasta file
A proportion of nucleotides are changed to other nucleotide
Not yet taking account of mutation for gaps
mut_rate - proportion from 0.0 to 1.0, float
"""
random.seed(a=rs)
assert 0.0 <= mut_rate <= 1.0
mutated_seqs = []
for seq in seqs:
mut_seq = list(str(seq.seq))
l_ = len(mut_seq)
mutated_sites_i = random.sample(range(l_), int(mut_rate * l_))
for mut_site_i in mutated_sites_i:
mut_site = mut_seq[mut_site_i]
mutations = ["A", "C", "T", "G"]
if mut_site in mutations:
mutations.remove(mut_site)
mut_seq[mut_site_i] = random.sample(mutations, 1)[0]
mutated_seq = SeqRecord(
seq=Seq("".join(mut_seq)),
id=seq.id + f"mut_{mut_rate}",
name="",
description="",
)
mutated_seqs.append(mutated_seq)
return mutated_seqs
def separate_by_length(length_, seq_list, fold=None,):
# TODO: add docs
included = []
to_process = []
excluded = 0
for seq_ in seq_list:
l_ = len(seq_.seq)
if l_ >= length_:
if fold is None:
included.append(seq_)
elif l_ < length_ * fold:
included.append(seq_)
else:
to_process.append(seq_)
else:
excluded += 1
print(f"A total of {excluded} sequences was excluded due to being smaller than {length_}")
return included, to_process
def chunks(lst, n):
"""Yield successive n-sized chunks from lst.
https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks"""
for i in range(0, len(lst), n):
yield lst[i:i + n]
def correct(frag):
"""
leaves only unambiguous DNA code (ACTG-)
Input:
frag - string of nucleotides
Output:
pr_frag - corrected string of nucleotides
"""
pr_frag = frag.upper()
pr_frag_s = set(pr_frag)
if pr_frag_s != {"A", "C", "G", "T", "-"}:
for letter in pr_frag_s - {"A", "C", "G", "T", "-"}:
pr_frag = pr_frag.replace(letter, "-")
return pr_frag
def fragmenting(sequences, sl_wind_size, max_gap=0.05, sl_wind_step=None):
"""
slices sequences in fragments by sliding window
based on its size and step.
last fragment is padded by '-'
fragments have ambiguous bases replaced by '-'
fragments with many '-' are discarded
Input:
sequences - list with SeqRecord sequences in fasta format
max_gap - max allowed proportion of '-'
sl_wind_size - sliding window step
sl_wind_step - sliding window step, by default equals
sliding window size (None is replaced by it)
Output:
fragments - list with sequence fragments
"""
if sl_wind_step is None:
sl_wind_step = sl_wind_size
fragments = []
fragments_rc = []
out_sequences = []
for sequence in sequences:
seq = str(sequence.seq)
n_fragments = 1 + max(0, math.ceil((len(seq) - sl_wind_size)/sl_wind_step))
for n in range(n_fragments):
if n + 1 != n_fragments:
frag = seq[n * sl_wind_step: n * sl_wind_step + sl_wind_size]
elif n_fragments == 1:
# padding the shorter fragment to sl_wind_size
frag_short = seq[n * sl_wind_step: n * sl_wind_step + sl_wind_size]
frag = frag_short + (sl_wind_size - len(frag_short)) * "-"
else:
frag = seq[(len(seq) - sl_wind_size):]
# replace ambiguous characters
frag = correct(frag)
assert len(frag) == sl_wind_size, f"{len(frag)} vs {sl_wind_size}"
# skipping sequences with many gaps
if frag.count("-") / sl_wind_size <= max_gap:
fragments.append(frag)
# generating reverse complement
fragments_rc.append(reverse_complement(frag))
fr_seq = SeqRecord(
seq=Seq(frag),
id=f"{sequence.id}_{n*sl_wind_step}_{sl_wind_size}",
name="",
description="",
)
out_sequences.append(fr_seq)
return fragments, fragments_rc, out_sequences
def label_fasta_fragments(sequences, label):
"""
Provides labels to generated fragments stored in fasta
Input:
sequences - list with SeqRecord sequences
label - type of label (bacteria, virus, plant)
Output:
labeled_fragments - list with labeled SeqRecord sequences
"""
assert label in ["virus", "plant", "bacteria"]
labeled_fragments = []
for sequence in sequences:
sequence.id = sequence.id + f"_{label}"
labeled_fragments.append(sequence)
return labeled_fragments
@ray.remote(max_calls=1)
def one_hot_encode(fragments):
"""
produces one-hot matrices from fragments and labels
'-' is given all zeros
Input:
fragments - list with sequence fragments
label - type of label (int <= depth)
label_depth - number of possible labels
Output:
encoded_fragments - list with one-hot encoded fragments
labels - list with one-hot encoded labels
"""
import tensorflow as tf
encoded_fragments = []
map_dict = {"A": 0, "C": 1, "G": 2, "T": 3, "-": -1}
for frag in fragments:
frag_array = np.array(list(frag))
integer_encoded = np.int8(np.vectorize(map_dict.get)(frag_array))
one_hot_encoded = tf.one_hot(integer_encoded, depth=4, dtype=tf.int8).numpy()
encoded_fragments.append(one_hot_encoded)
encoded_fragments = np.stack(encoded_fragments)
return encoded_fragments
def prepare_labels(fragments, label, label_depth):
"""
produces one-hot labels
'-' is given all zeros
Input:
fragments - list with sequence fragments
label - type of label (int <= depth)
label_depth - number of possible labels
Output:
labels - list with one-hot encoded labels
"""
import tensorflow as tf
n_fragments = len(fragments)
labels = np.int8(np.full(n_fragments, label))
labels = tf.one_hot(labels, depth=label_depth).numpy()
return labels
# TODO: write docs for functions
def calculate_total_length(seq_path):
"""
Calculate total length of the sequences in the fasta file.
Needed for weighted sampling
Input:
seq_path - path to the file with sequences
Output:
seq_length - total length of all sequences in the file
"""
seqs = list(SeqIO.parse(seq_path, "fasta"))
seq_length = 0
for seq in seqs:
seq_length += len(seq.seq)
return seq_length
def prepare_seq_lists(in_paths, n_fragments, weights=None,):
"""
selects files with sequences based on extension
and calculates number of fragments to be sampled
Input:
in_paths - list of paths to folder with sequence files. Can be a string also a string
n_fragments - number of fragments to be sampled
weights - upsampling of fragments. fractions should sum to one
Output:
seqs_list - list with path to files with sequences
n_fragments_list - number of fragments to be sampled
lists are zipped to work with ray iterators
"""
# case when we recieve a single sequence file
if type(in_paths) is str and in_paths.endswith(('.fna', '.fasta')):
return [[in_paths, n_fragments]]
else:
# transform string to list
if type(in_paths) is str or type(in_paths) is pathlib.PosixPath:
in_paths = [in_paths]
if weights:
assert len(weights) == len(in_paths)
assert 1.01 > round(sum(weights), 2) > 0.99
else:
l_ = len(in_paths)
weights = [1/l_] * l_
n_fragments_list_all = []
seqs_list_all = []
for in_paths, w_ in zip(in_paths, weights):
seqs_list = []
seq_length_list = []
total_length = 0
for file in os.listdir(in_paths):
if file.endswith("fna") or file.endswith("fasta"):
seq_path = (os.path.join(in_paths, file))
seqs_length = calculate_total_length(seq_path)
seqs_list.append(seq_path)
seq_length_list.append(seqs_length)
total_length += seqs_length
# + 1 may lead to a slightly bigger number than desired
n_fragments_list = [((seq_length / total_length) * n_fragments * w_ + 1) for seq_length in seq_length_list]
n_fragments_list_all.extend(n_fragments_list)
seqs_list_all.extend(seqs_list)
print("list calculation done")
return list(zip(seqs_list_all, n_fragments_list_all))
@ray.remote(max_calls=1)
def sample_fragments(seq_container, length, random_seed=1, limit=None, max_gap=0.05, sl_wind_step=None):
"""
Randomly samples fragments from sequences in the list.
Is a bit cumbersome written to work with ray.
Input:
seq_container - list with each entry containing path to sequence,
and n samples from this sequence.
length - desired length of sampled fragments
Output:
fragments - list with sequence fragments
"""
random.seed(a=random_seed)
total_fragments = []
total_fragments_rc = []
total_seqs = []
for entry in seq_container:
seq = list(SeqIO.parse(entry[0], "fasta"))
n_fragments = entry[1]
seqs = []
fragments = []
fragments_rc = []
counter_1 = 0
counter_2 = 0
while counter_1 < n_fragments:
# select chromosomes if there are any
fragment_full = random.choice(seq)
r_end = len(fragment_full.seq) - length
try:
r_start = random.randrange(r_end)
fragment = SeqRecord(
seq=fragment_full.seq[r_start:(r_start + length)],
id=f"{fragment_full.id}_{length}_{r_start}",
name="",
description="",
)
temp_, temp_rc, _ = fragmenting([fragment], length, max_gap, sl_wind_step=sl_wind_step)
if temp_ and temp_rc:
seqs.append(fragment)
fragments.extend(temp_)
fragments_rc.extend(temp_rc)
counter_1 += 1
except ValueError:
# print(f"{fragment_full.id} has length {len(fragment_full.seq)} and is too short to be sampled")
pass
counter_2 += 1
if limit:
assert counter_2 <= limit * n_fragments, f"While cycle iterated more than {limit}, data is ambiguous." \
f" Only {len(fragments)} fragments were sampled out of {n_fragments}"
total_fragments.extend(fragments)
total_fragments_rc.extend(fragments_rc)
total_seqs.extend(seqs)
# print("sequence sampling done")
return [total_fragments, total_fragments_rc, total_seqs]
def prepare_ds_fragmenting(in_seq, label, label_int, fragment_length, sl_wind_step, max_gap=0.05, n_cpus=1):
if sl_wind_step is None:
sl_wind_step = int(fragment_length / 2)
ray.init(num_cpus=n_cpus, num_gpus=0, include_dashboard=False)
# generating viral fragments and labels
seqs = list(SeqIO.parse(in_seq, "fasta"))
frags, frags_rc, seqs_ = fragmenting(seqs, fragment_length, max_gap=max_gap, sl_wind_step=sl_wind_step)
it = chunks(frags, int(len(frags) / n_cpus + 1))
encoded = np.concatenate(ray.get([one_hot_encode.remote(s) for s in it]))
it = chunks(frags_rc, int(len(frags_rc) / n_cpus + 1))
encoded_rc = np.concatenate(ray.get([one_hot_encode.remote(s) for s in it]))
labs = prepare_labels(frags, label=label_int, label_depth=3)
seqs_ = label_fasta_fragments(seqs_, label=label)
# subsetting to unique fragments
u_encoded, indices = np.unique(encoded, axis=0, return_index=True)
u_encoded_rc = encoded_rc[indices]
u_labs = labs[indices]
u_seqs = [seqs_[i] for i in indices]
assert (np.shape(u_encoded)[0] == np.shape(u_encoded_rc)[0])
print(f"Encoding {label} sequences finished")
print(f"{np.shape(u_encoded)[0]} forward fragments generated")
n_frags = np.shape(u_encoded)[0]
ray.shutdown()
return u_encoded, u_encoded_rc, u_labs, u_seqs, n_frags
def prepare_ds_sampling(in_seqs, fragment_length, n_frags, label, label_int, random_seed, n_cpus=1, limit=100):
ray.init(num_cpus=n_cpus, num_gpus=0, include_dashboard=False)
# generating plant fragments and labels
seqs_list = prepare_seq_lists(in_seqs, n_frags)
it = chunks(seqs_list, int(len(seqs_list) / n_cpus + 1))
frs_ = ray.get(
[sample_fragments.remote(s, fragment_length, random_seed, limit=limit, max_gap=0.05) for s in it])
frags = sum([i[0] for i in frs_], [])
frags_rc = sum([i[1] for i in frs_], [])
seqs_ = sum([i[2] for i in frs_], [])
frags, frags_rc, seqs_ = shuffle(frags, frags_rc, seqs_, random_state=random_seed, n_samples=int(n_frags))
it = chunks(frags, int(len(frags) / n_cpus + 1))
encoded = np.concatenate(ray.get([one_hot_encode.remote(s) for s in it]))
it = chunks(frags_rc, int(len(frags_rc) / n_cpus + 1))
encoded_rc = np.concatenate(ray.get([one_hot_encode.remote(s) for s in it]))
labs = prepare_labels(frags, label=label_int, label_depth=3)
seqs_ = label_fasta_fragments(seqs_, label=label)
assert (np.shape(encoded)[0] == np.shape(encoded_rc)[0])
print(f"Encoding {label} sequences finished")
print(f"{np.shape(encoded)[0]} forward fragments generated")
ray.shutdown()
return encoded, encoded_rc, labs, seqs_, n_frags
def storing_encoded(encoded, encoded_rc, labs, out_path, ):
f = h5py.File(out_path, "w")
f.create_dataset("fragments", data=encoded)
f.create_dataset("fragments_rc", data=encoded_rc)
f.create_dataset("labels", data=labs)
f.close()
print(f"encoded fragments and labels stored in {out_path}")
|
{"hexsha": "5d009255d377c8582b7308a51bb651cd40c8006d", "size": 15177, "ext": "py", "lang": "Python", "max_stars_repo_path": "virhunter/utils/preprocess.py", "max_stars_repo_name": "admincbib/virhunter", "max_stars_repo_head_hexsha": "cf7b9122eeaaee2947c0bf2504b9b57df6580261", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "virhunter/utils/preprocess.py", "max_issues_repo_name": "admincbib/virhunter", "max_issues_repo_head_hexsha": "cf7b9122eeaaee2947c0bf2504b9b57df6580261", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2022-02-07T13:24:40.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T13:59:37.000Z", "max_forks_repo_path": "virhunter/utils/preprocess.py", "max_forks_repo_name": "cbib/virhunter", "max_forks_repo_head_hexsha": "cf7b9122eeaaee2947c0bf2504b9b57df6580261", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.9425, "max_line_length": 126, "alphanum_fraction": 0.6335244119, "include": true, "reason": "import numpy", "num_tokens": 3631}
|
#!/usr/bin/env python
import numpy as np
import argparse
import sys,math
def MLSQ(x,y):
n = len(x)
sx = np.sum(x)
sy = np.sum(y)
sxx = np.dot(x,x)
sxy = np.dot(x,y)
syy = np.dot(y,y)
denom = (n*sxx-sx*sx)
b = (n*sxy - sx*sy)/denom
a = (sy-b*sx)/n
estim = np.array([a,b],dtype=np.float)
sigma2 = syy + n*a*a + b*b*sxx + 2*a*b*sx - 2*a*sy - 2*b*sxy
cov = sigma2 / denom * np.array([[sxx,-sx],[-sx,n]],dtype=np.float)
return estim,cov
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-i","--infiles",nargs="*")
parser.add_argument("-t","--timepoints",default=-1,type=int)
args = parser.parse_args()
data = list()
n = 0
for filename in args.infiles:
print "# load '{}'".format(filename)
try:
data.append(np.genfromtxt(filename))
n += 1
except:
continue
#raise IOError("could not open file")
if n > 0:
gmean = np.power(np.prod(data,axis=0),1./n)
else:
raise IOError("could not open any input file")
for i,conc in enumerate(gmean[:,0]):
if args.timepoints < 0:
tp = len(gmean[i,1:])
else:
tp = args.timepoints
time = np.arange(0,10*tp,10)/60.
celln = gmean[i,1:tp + 2]
mlsq_data = MLSQ(time,np.log(celln))
gr = mlsq_data[0][1]
grDev = np.sqrt(mlsq_data[1][1,1])
print '{:7.4f} {:7.4f} {:7.4f}'.format(conc,gr,grDev)
if __name__ == "__main__":
main()
|
{"hexsha": "1556cd1ee0eccfde2d79228ae6a9533c1a8a7882", "size": 1582, "ext": "py", "lang": "Python", "max_stars_repo_path": "killcurves_GrowthDeathRate.py", "max_stars_repo_name": "lukasgeyrhofer/antibiotics", "max_stars_repo_head_hexsha": "3fc81fa4006e56a65a9596ba79ad4b1a0287f1d1", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "killcurves_GrowthDeathRate.py", "max_issues_repo_name": "lukasgeyrhofer/antibiotics", "max_issues_repo_head_hexsha": "3fc81fa4006e56a65a9596ba79ad4b1a0287f1d1", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "killcurves_GrowthDeathRate.py", "max_forks_repo_name": "lukasgeyrhofer/antibiotics", "max_forks_repo_head_hexsha": "3fc81fa4006e56a65a9596ba79ad4b1a0287f1d1", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.71875, "max_line_length": 74, "alphanum_fraction": 0.5221238938, "include": true, "reason": "import numpy", "num_tokens": 502}
|
classdef ShapeCircle < ShapeGeneral
%ShapeCircle represents a circular geographical selection of events
%
% see also ShapeGeneral, ShapePolygon
properties (SetObservable = true, AbortSet=true)
Radius (1,1) double = 5 % active radius in units defined by the RefEllipsoid
end
methods
function obj=ShapeCircle(varargin)
% SHAPECIRCLE create a circular shape
%
% ShapeCircle() :
% ShapeCircle('dlg') create via a dialg box
%
% CIRCLE: select using circle with a defined radius. define with 2 clicks or mouseover and press "R"
% UNASSIGNED: clear shape
obj@ShapeGeneral();
if ~isempty(varargin)
dosomething
end
report_this_filefun();
%axes(findobj(gcf,'Tag','mainmap_ax')); % should be the map, with lon/lat
obj.Type='circle';
try
ra=ShapeGeneral.ShapeStash.Radius;
catch
ra=obj.Radius;
end
obj.AllowVertexEditing = false;
addlistener(obj, 'Radius', 'PostSet', @obj.notifyShapeChange);
if numel(varargin)==0
do_nothing;
elseif strcmpi(varargin{1},'dlg')
stashedshape = ShapeGeneral.ShapeStash;
sdlg.prompt = ['Choose Radius [',obj.RefEllipsoid.LengthUnit,']:'];
sdlg.value = ra;
sdlg(2).prompt = 'Center X :'; sdlg(2).value=stashedshape.X0;
sdlg(3).prompt = 'Center Y :'; sdlg(3).value=stashedshape.Y0;
[~,cancelled,obj.Radius,obj.Points(1),obj.Points(2)]=smart_inputdlg('Define Circle',sdlg);
if cancelled
beep
disp('Circle creation cancelled by user')
return
end
else
oo=ShapeCircle.selectUsingMouse(gca, obj.RefEllipsoid);
if ~isempty(oo)
obj=oo;
else
return
end
end
end
function val=Outline(obj, col)
if iscartesian(obj.RefEllipsoid)
pts = exp(1i*pi*linspace(0,2*pi,3600)') .* obj.Radius;
x = real(pts)+ obj.X0;
y = imag(pts) + obj.Y0;
val = [x,y];
else
[lat,lon]=reckon(obj.Y0,obj.X0,obj.Radius,(0:.1:360)',obj.RefEllipsoid);
val=[lon, lat];
end
if exist('col','var')
val=val(:,col);
end
end
function moveTo(obj, x, y)
if isnan(obj.Points)
obj.Points=[0 0];
end
moveTo@ShapeGeneral(obj,x,y)
end
function s=toStruct(obj)
s=toStruct@ShapeGeneral(obj);
s.RadiusKm = obj.Radius;
end
function s = toStr(obj)
cardinalDirs='SNWE';
isN=obj.Y0>=0; NS=cardinalDirs(isN+1);
isE=obj.X0>=0; EW=cardinalDirs(isE+3);
s = sprintf('Circle with R:%s %s, centered at ( %s %s, %s %s)',...
num2str(obj.Radius),...
obj.RefEllipsoid.LengthUnit,...
num2str(abs(obj.Y0)), NS,...
num2str(abs(obj.X0)), EW);
end
function summary(obj)
helpdlg(obj.toStr,'Circle');
end
function add_shape_specific_context(obj,c)
uimenu(c,'label','Choose Radius','MenuSelectedFcn',@chooseRadius)
uimenu(c,'label','Snap To N Events','MenuSelectedFcn',@snapToEvents)
function snapToEvents(~,~)
ZG=ZmapGlobal.Data;
nc=inputdlg('Number of events to enclose','Edit Circle',1,{num2str(ZG.ni)});
nc=round(str2double(nc{1}));
if ~isempty(nc) && ~isnan(nc)
ZG.ni=nc;
zmw=get(ancestor(c,'figure'),'UserData');
if isa(zmw,'ZmapWindow')
ca=zmw.rawcatalog;
else
ca=ZG.primeCatalog;
end
[~,obj.Radius]=ca.selectClosestEvents(obj.Y0, obj.X0, [],nc,'DistanceOnly');
obj.Radius=obj.Radius;%+0.005;
end
end
function chooseRadius(~,~)
radiusInputText = ['Choose Radius [',obj.RefEllipsoid.LengthUnit,']'];
nc=inputdlg(radiusInputText,'Edit Circle',1,{num2str(obj.Radius)});
nc=str2double(nc{1});
if ~isempty(nc) && ~isnan(nc)
obj.Radius=nc;
end
end
end
function [mask]=isinterior(obj,otherX, otherY, include_boundary)
% isinterior true if value is within this circle's radius of center. Radius inclusive.
%
% overridden because using polygon approximation is too inaccurate for circles
%
% [mask]=obj.isinterior(otherX, otherY)
if ~exist('include_boundary','var')
include_boundary = true;
end
if isempty(obj.Points)||isnan(obj.Points(1))
mask = ones(size(otherX));
else
otherX(ismissing(otherY))= missing;
otherY(ismissing(otherX))= missing;
% return a vector of size otherX that is true where item is inside polygon
if iscartesian(obj.RefEllipsoid)
dists = sqrt((otherY-obj.Y0).^2 + (otherX-obj.X0).^2);
else
dists = distance(obj.Y0, obj.X0, otherY, otherX, obj.RefEllipsoid);
end
if ~include_boundary
mask = dists < obj.Radius;
else
mask = dists <= obj.Radius;
end
end
end
function finishedMoving(obj, movedObject, deltas)
centerX = mean(bounds2(movedObject.XData));
centerY = mean(bounds2(movedObject.YData));
obj.Radius=obj.Radius.* abs(deltas(3)); % NO NEGATIVE RADII
obj.Points=[centerX,centerY];
end
function save(obj, filelocation, delimiter)
persistent savepath
if ~exist('filelocation','var') || isempty(filelocation)
if isempty(savepath)
savepath = ZmapGlobal.Data.Directories.data;
end
[filename,pathname,filteridx]=uiputfile(...
{'*.mat','MAT-files (*.mat)';...
'*.csv;*.txt;*.dat','ASCII files (*.csv, *.txt, *.dat)'},...
'Save Circle',...
fullfile(savepath,'zmap_shape.mat'));
if filteridx==0
msg.dbdisp('user cancelled shape save');
return
end
filelocation=fullfile(pathname,filename);
end
[savepath,~,ext] = fileparts(filelocation);
if ext==".mat"
zmap_shape = obj; %#ok<NASGU>
save(filelocation,'zmap_shape');
else
if ~exist('delimiter','var'), delimiter = ',';end
radiusName = ['Radius[',shortenLengthUnit(obj.RefEllipsoid.LengthUnit),']'];
tb=table(obj.X0, obj.Y0,obj.Radius,'VariableNames',{'Latitude','Longitude',radiusName});
writetable(tb,filelocation,'Delimiter',delimiter);
end
end
end
methods(Static)
function obj=selectUsingMouse(ax, ref_ellipsoid)
[ss,ok] = selectSegmentUsingMouse(ax,'r', @circ_update);
delete(findobj(gca,'Tag','tmp_circle_outline'));
if ~ok
obj=[];
return
end
obj=ShapeCircle();
obj.Points=ss.xy1;
obj.Radius=ss.dist;
function circ_update(stxy, ~, d)
h=findobj(gca,'Tag','tmp_circle_outline');
if isempty(h)
h=line(nan,nan,'Color','r','DisplayName','Rough Outline','LineWidth',2,'Tag','tmp_circle_outline');
end
if iscartesian(ref_ellipsoid)
pts = exp(1i*pi*linspace(0,2*pi,120)') .* d;
h.XData = real(pts)+ stxy(1);
h.YData = imag(pts) + stxy(2);
else
[lat,lon]=reckon(stxy(2),stxy(1),d,(0:3:360)',ref_ellipsoid);
h.XData=lon;
h.YData=lat;
end
end
end
end
end
|
{"author": "CelsoReyes", "repo": "zmap7", "sha": "3895fcb3ca3073608abe22ca71960eb082fd0d9a", "save_path": "github-repos/MATLAB/CelsoReyes-zmap7", "path": "github-repos/MATLAB/CelsoReyes-zmap7/zmap7-3895fcb3ca3073608abe22ca71960eb082fd0d9a/src/cgr_utils/selections/ShapeCircle.m"}
|
import numpy
N=int(input())
A=list(map(int,input().split()))
print(*numpy.argsort(A)+1)
|
{"hexsha": "ff43bf9d28f8d6bcfa5e391f0a2397998d541711", "size": 87, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/abc142_c_03.py", "max_stars_repo_name": "KoyanagiHitoshi/AtCoder", "max_stars_repo_head_hexsha": "731892543769b5df15254e1f32b756190378d292", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-08-16T16:55:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-11T10:21:40.000Z", "max_issues_repo_path": "code/abc142_c_03.py", "max_issues_repo_name": "KoyanagiHitoshi/AtCoder", "max_issues_repo_head_hexsha": "731892543769b5df15254e1f32b756190378d292", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/abc142_c_03.py", "max_forks_repo_name": "KoyanagiHitoshi/AtCoder", "max_forks_repo_head_hexsha": "731892543769b5df15254e1f32b756190378d292", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.75, "max_line_length": 32, "alphanum_fraction": 0.6896551724, "include": true, "reason": "import numpy", "num_tokens": 23}
|
#!/usr/bin/env python3
__author__ = "Niklas Kroeger"
__email__ = "niklas@kroeger.dev"
__status__ = "Development"
import numpy as np
import tables
class ImageStack(object):
"""
Image stack class that stores data in a pytables table (hdf5 file format)
"""
def __init__(self, filename, dummy_img=None, metadata_cls=None, mode='r'):
"""
Create a new ImageStack based on PyTables EArray
This class opens the hdf5 file given by filename. If an img is given,
an existing file is overwritten with a new file. The ImageStack is
saved in a PyTables EArray, which can be indexed the same way as numpy
arrays. The shape of the ImageStack is (N, *img.shape),
where N is the number of images in the stack.
Parameters
----------
filename : str
path to the h5 file the image stack should be saved in or from
which the stack should be loaded
dummy_img : numpy.ndarray or None
If None the file under filename is opened. If data should be saved
instead, the img should be an example image having the same shape
and dtype as the images that should be stored. This template image
is NOT stored. Images that should be saved have to be added with
self.addImage(img)
metadata_cls : dict
A dict containing keys under which the desired metadata should be
saved. The corresponding values should describe the columns datatype
that should be stored. The datatype definition should be an instance
of a pytables Col object. This can be used to define a new table
layout (names and datatypes for columns) in order to save some
metadata connected to the recorded images. This can be something
like the exposure time of the recorded image, some light intensity
value or a timestamp at which the image was recorded. This allows
keeping the image and information about the acquisition together in
a single file.
Links for further reading:
- https://www.pytables.org/usersguide/introduction.html
- https://www.pytables.org/usersguide/datatypes.html
mode : str
Mode with which filename is opened. Defaults to 'r' for read
access to the given file. If img is not None, the mode is instead
set to 'w' for write access. This overwrites a possibly existing
file with the same filename! If new images should be added to an
existing ImageStack, 'a' can be tried. This also creates a new
file if it does not yet exist
"""
self.filename = filename
self.mode = mode
if dummy_img is not None:
# create new image stack. This overwrites existing files!
self.mode = 'w'
self.atom = tables.Atom.from_dtype(dummy_img.dtype)
self._h5file = tables.open_file(filename=self.filename,
mode=self.mode)
if dummy_img is not None:
# create new image stack in given file
self.data = self._h5file.create_earray(self._h5file.root,
'img_stack',
self.atom,
(0,) + dummy_img.shape)
if metadata_cls is not None:
self.metadata = self._h5file.create_table(self._h5file.root,
'metadata',
metadata_cls)
else:
self.metadata = None
else:
# load the images from the given file
self.data = self._h5file.root.img_stack
try:
self.metadata = self._h5file.root.metadata
except tables.exceptions.NoSuchNodeError as e:
self.metadata = None
@property
def shape(self):
return self.data.shape
def add_image(self, img, metadata=None):
"""
Add a new image to the stack
Parameters
----------
img : numpy.ndarray
Image that should be added to the stack. Has to have the same
shape and dtype as the template image that was passed during
initialization of the ImageStack instance
metadata : dict
The metadata that should be saved alongside the image data. Note
that the data type of the values passed here has to match that given
at construction time of the ImageStack!
"""
self.data.append(np.expand_dims(img, 0))
if metadata is not None:
row = self._get_metadata_row()
for (key, value) in metadata.items():
row[key] = value
row.append()
def has_metadata(self):
"""
Quick way to check if this image stack makes use of the metadata
feature.
Returns
-------
bool
True if there is metadata in this ImageStack. False if there is no
metadata
"""
return isinstance(self.metadata, tables.table.Table)
def _get_metadata_row(self):
"""
Get the current row of the metadata table. This is necessary to add a
metadata entry.
The variable returned by this method is the one you should use to append
new metadata entries. This can be done by simply indexing it like a dict
with the keys corresponding to the fields that were defined in the
metadata_cls during initialization of this ImageStack instance. After
filling all fields with the desired data remember to call row.append()
to actually add the data to the table.
Returns
-------
tables.tableextension.Row
The metadata_table row.
"""
if self.has_metadata():
return self.metadata.row
else:
return None
def __iter__(self):
return self.data.__iter__()
def __getitem__(self, item):
return self.data.__getitem__(item)
def __del__(self):
if self.has_metadata() and self.metadata._v_isopen:
self.metadata.flush()
self._h5file.close()
if __name__ == '__main__':
# Example usage:
# Creating a new stack to save images:
img_shape = (100, 200) # the shape of our images is (100, 200)
dummy_img = np.random.rand(*img_shape)
# Optional: Definition of the metadata that should be saved along the images
# Note that the data type for each field has to be defined here!
metadata = {'exp_time': tables.Int32Col(),
'some_string': tables.StringCol(itemsize=20)}
# Create a new image stack to save some files. Note that the input img
# for the constructor is not saved! See Docstring for more complete
# explanation
stack_write = ImageStack(filename='test_stack.h5', # h5-file for the stack
dummy_img=dummy_img, # sample image that defines our image shape
metadata_cls=metadata, # Optional metadata definition
)
# Now we can append new images to this stack.
for i in range(10):
# we can now overwrite the initial metadata dict with actual data
metadata['exp_time'] = i
metadata['some_string'] = str(i)*(i+1)
stack_write.add_image(np.random.rand(*img_shape), # random image
metadata, # and some pseudo metadata (optional)
)
# the images are automatically saved. To close the file simply destroy the
# stack instance
del stack_write
print('-'*50)
# Reading an existing image stack:
stack_read = ImageStack(filename='test_stack.h5')
# Note image shape. First value is number of images (see Docstring for details)
print(stack_read.shape)
# to iterate over all images:
for i, img in enumerate(stack_read):
print(img.sum())
if stack_read.has_metadata():
print(stack_read.metadata[i])
|
{"hexsha": "b067b01e6da253da82711b0875eefa216c4d4266", "size": 8276, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyImageStack/pyImageStack.py", "max_stars_repo_name": "NiklasKroeger/pyImageStack", "max_stars_repo_head_hexsha": "84bdb951ca5d66241796c174fd12473b459041ef", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyImageStack/pyImageStack.py", "max_issues_repo_name": "NiklasKroeger/pyImageStack", "max_issues_repo_head_hexsha": "84bdb951ca5d66241796c174fd12473b459041ef", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyImageStack/pyImageStack.py", "max_forks_repo_name": "NiklasKroeger/pyImageStack", "max_forks_repo_head_hexsha": "84bdb951ca5d66241796c174fd12473b459041ef", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.1747572816, "max_line_length": 94, "alphanum_fraction": 0.6004108265, "include": true, "reason": "import numpy", "num_tokens": 1679}
|
function h = supportFunction(polygon, varargin)
%SUPPORTFUNCTION Compute support function of a polygon.
%
% H = supportFunction(POLYGON, N)
% uses N points for suport function approximation
%
% H = supportFunction(POLYGON)
% assume 24 points for approximation
%
% H = supportFunction(POLYGON, V)
% where V is a vector, uses vector V of angles to compute support
% function.
%
% See also
% polygons2d, convexification
% ------
% Author: David Legland
% E-mail: david.legland@inrae.fr
% Created: 2004-12-20
% Copyright 2004-2022 INRA - TPV URPOI - BIA IMASTE
N = 24;
u = 0:2*pi/N:2*pi*(1-1/N);
if length(varargin)==1
var = varargin{1};
if length(var)==1
N = var;
u = 0:2*pi/N:2*pi*(1-1/N);
else
u = var;
end
end
% ensure u vertical vector
if size(u, 1)==1
u=u';
end
h = zeros(size(u));
for i=1:length(u)
v = repmat([cos(u(i)) sin(u(i))], [size(polygon, 1), 1]);
h(i) = max(dot(polygon, v, 2));
end
|
{"author": "mattools", "repo": "matGeom", "sha": "1fd2c937064be1ee1f4fd09fbfdf96145ebe5271", "save_path": "github-repos/MATLAB/mattools-matGeom", "path": "github-repos/MATLAB/mattools-matGeom/matGeom-1fd2c937064be1ee1f4fd09fbfdf96145ebe5271/matGeom/polygons2d/supportFunction.m"}
|
import tensorflow as tf
from PIL import Image
import numpy as np
import cv2
def grad_cam_tf(model, im, cls_select, tf_sess, layer, alpha = 0.6, preproc_function = None, reverse_function = None):
image = im.copy()
if len(image) != 4:
# Make image batch-like
image = image[np.newaxis, :,:,:]
if reverse_function is not None:
image_original = reverse_function(image[0])
else:
image_original = image[0]
if preproc_function is not None:
# this preprocessing function generally apply to 4-D array
# TO DO: make it tolerate to 3-D input
image = preproc_function(image.astype('float32'))
H, W = image.shape[1], image.shape[2]
y_c = model.model_ops['output']['prediction1'][0, cls_select]
conv_output = model.sess.graph.get_tensor_by_name(layer)
grads = tf.gradients(y_c, conv_output)
output, grads_val = tf_sess.run([conv_output, grads],
feed_dict = {model.model_ops['input'][0]: image,
model.model_ops['is_training']: False,
tf.keras.backend.learning_phase(): False})
#return output, grads_val
output, grads_val = output[0, :], grads_val[0][0, :,:,:]
weights = np.mean(grads_val, axis = (0, 1))
cam = np.dot(output, weights)
## resize it ##
cam = tf.image.resize_bicubic(images=cam[np.newaxis, : , :, np.newaxis],
size=(H, W))
cam = cam[0, :, :, 0]
cam = tf.maximum(cam, 0)
cam = cam / tf.reduce_max(cam)
cam = tf_sess.run(cam)
# apply color map
mapping = cv2.applyColorMap(np.uint8(255 * (1-cam) ), cv2.COLORMAP_JET)
#mapping = cv2.GaussianBlur(mapping, (3,3), 1)
mapping = np.concatenate((mapping, ((mapping.max(axis=-1) - 128 )*255*alpha)[:,:,np.newaxis]), axis = -1)
# foreground - background
background = Image.fromarray(image_original)
foreground = Image.fromarray(mapping.astype('uint8'))
background.paste(foreground, (0, 0), foreground)
return cam, background
|
{"hexsha": "127ef9af9ee4f60e748e0286f53e113a59de8eae", "size": 2148, "ext": "py", "lang": "Python", "max_stars_repo_path": "model_visualization.py", "max_stars_repo_name": "jimmy15923/Common_tools", "max_stars_repo_head_hexsha": "3e77dc1509ef8ac5173d41d792a170ba6ed98be0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-10-26T09:33:26.000Z", "max_stars_repo_stars_event_max_datetime": "2018-10-26T09:33:26.000Z", "max_issues_repo_path": "model_visualization.py", "max_issues_repo_name": "jimmy15923/Common_tools", "max_issues_repo_head_hexsha": "3e77dc1509ef8ac5173d41d792a170ba6ed98be0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "model_visualization.py", "max_forks_repo_name": "jimmy15923/Common_tools", "max_forks_repo_head_hexsha": "3e77dc1509ef8ac5173d41d792a170ba6ed98be0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.0545454545, "max_line_length": 118, "alphanum_fraction": 0.5926443203, "include": true, "reason": "import numpy", "num_tokens": 540}
|
// This file is part of libigl, a simple c++ geometry processing library.
//
// Copyright (C) 2017 Amir Vaxman <avaxman@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla Public License
// v. 2.0. If a copy of the MPL was not distributed with this file, You can
// obtain one at http://mozilla.org/MPL/2.0/.
#include <igl/shapeup.h>
#include <igl/min_quad_with_fixed.h>
#include <igl/igl_inline.h>
#include <igl/setdiff.h>
#include <igl/cat.h>
#include <igl/PI.h>
#include <Eigen/Core>
#include <vector>
namespace igl
{
//This projection does nothing but render points into projP. Mostly used for "echoing" the global step
IGL_INLINE bool shapeup_identity_projection(const Eigen::PlainObjectBase<Eigen::MatrixXd>& P, const Eigen::PlainObjectBase<Eigen::VectorXi>& SC, const Eigen::PlainObjectBase<Eigen::MatrixXi>& S, Eigen::PlainObjectBase<Eigen::MatrixXd>& projP){
projP.conservativeResize(SC.rows(), 3*SC.maxCoeff());
for (int i=0;i<S.rows();i++){
Eigen::RowVector3d avgCurrP=Eigen::RowVector3d::Zero();
for (int j=0;j<SC(i);j++)
avgCurrP+=P.row(S(i,j))/(double)(SC(i));
for (int j=0;j<SC(i);j++)
projP.block(i,3*j,1,3)=P.row(S(i,j))-avgCurrP;
}
return true;
}
//the projection assumes that the sets are vertices of polygons in order
IGL_INLINE bool shapeup_regular_face_projection(const Eigen::PlainObjectBase<Eigen::MatrixXd>& P, const Eigen::PlainObjectBase<Eigen::VectorXi>& SC, const Eigen::PlainObjectBase<Eigen::MatrixXi>& S, Eigen::PlainObjectBase<Eigen::MatrixXd>& projP){
projP.conservativeResize(SC.rows(), 3*SC.maxCoeff());
for (int currRow=0;currRow<SC.rows();currRow++){
//computing average
int N=SC(currRow);
const Eigen::RowVectorXi SRow=S.row(currRow);
Eigen::RowVector3d avgCurrP=Eigen::RowVector3d::Zero();
Eigen::MatrixXd targetPolygon(N, 3);
Eigen::MatrixXd sourcePolygon(N, 3);
for (int j=0;j<N;j++)
avgCurrP+=P.row(SRow(j))/(double)(N);
for (int j=0;j<N;j++)
targetPolygon.row(j)=P.row(SRow(j))-avgCurrP;
//creating perfectly regular source polygon
for (int j=0;j<N;j++)
sourcePolygon.row(j)<<cos(2*igl::PI*(double)j/(double(N))), sin(2*igl::PI*(double)j/(double(N))),0.0;
//finding closest similarity transformation between source and target
Eigen::MatrixXd corrMat=sourcePolygon.transpose()*targetPolygon;
Eigen::JacobiSVD<Eigen::Matrix3d> svd(corrMat, Eigen::ComputeFullU | Eigen::ComputeFullV);
Eigen::MatrixXd R=svd.matrixU()*svd.matrixV().transpose();
//getting scale by edge length change average. TODO: by singular values
Eigen::VectorXd sourceEdgeLengths(N);
Eigen::VectorXd targetEdgeLengths(N);
for (int j=0;j<N;j++){
sourceEdgeLengths(j)=(sourcePolygon.row((j+1)%N)-sourcePolygon.row(j)).norm();
targetEdgeLengths(j)=(targetPolygon.row((j+1)%N)-targetPolygon.row(j)).norm();
}
double scale=(targetEdgeLengths.cwiseQuotient(sourceEdgeLengths)).mean();
for (int j=0;j<N;j++)
projP.block(currRow,3*j,1,3)=sourcePolygon.row(j)*R*scale;
}
return true;
}
template <
typename DerivedP,
typename DerivedSC,
typename DerivedS,
typename Derivedw>
IGL_INLINE bool shapeup_precomputation(const Eigen::PlainObjectBase<DerivedP>& P,
const Eigen::PlainObjectBase<DerivedSC>& SC,
const Eigen::PlainObjectBase<DerivedS>& S,
const Eigen::PlainObjectBase<DerivedS>& E,
const Eigen::PlainObjectBase<DerivedSC>& b,
const Eigen::PlainObjectBase<Derivedw>& wShape,
const Eigen::PlainObjectBase<Derivedw>& wSmooth,
ShapeupData & sudata)
{
using namespace std;
using namespace Eigen;
sudata.P=P;
sudata.SC=SC;
sudata.S=S;
sudata.b=b;
typedef typename DerivedP::Scalar Scalar;
//checking for consistency of the input
assert(SC.rows()==S.rows());
assert(SC.rows()==wShape.rows());
assert(E.rows()==wSmooth.rows());
assert(b.rows()!=0); //would lead to matrix becoming SPD
sudata.DShape.conservativeResize(SC.sum(), P.rows()); //Shape matrix (integration);
sudata.DClose.conservativeResize(b.rows(), P.rows()); //Closeness matrix for positional constraints
sudata.DSmooth.conservativeResize(E.rows(), P.rows()); //smoothness matrix
//Building shape matrix
std::vector<Triplet<Scalar> > DShapeTriplets;
int currRow=0;
for (int i=0;i<S.rows();i++){
Scalar avgCoeff=1.0/(Scalar)SC(i);
for (int j=0;j<SC(i);j++){
for (int k=0;k<SC(i);k++){
if (j==k)
DShapeTriplets.push_back(Triplet<Scalar>(currRow+j, S(i,k), (1.0-avgCoeff)));
else
DShapeTriplets.push_back(Triplet<Scalar>(currRow+j, S(i,k), (-avgCoeff)));
}
}
currRow+=SC(i);
}
sudata.DShape.setFromTriplets(DShapeTriplets.begin(), DShapeTriplets.end());
//Building closeness matrix
std::vector<Triplet<Scalar> > DCloseTriplets;
for (int i=0;i<b.size();i++)
DCloseTriplets.push_back(Triplet<Scalar>(i,b(i), 1.0));
sudata.DClose.setFromTriplets(DCloseTriplets.begin(), DCloseTriplets.end());
//Building smoothness matrix
std::vector<Triplet<Scalar> > DSmoothTriplets;
for (int i=0; i<E.rows(); i++) {
DSmoothTriplets.push_back(Triplet<Scalar>(i, E(i, 0), -1));
DSmoothTriplets.push_back(Triplet<Scalar>(i, E(i, 1), 1));
}
SparseMatrix<Scalar> tempMat;
igl::cat(1, sudata.DShape, sudata.DClose, tempMat);
igl::cat(1, tempMat, sudata.DSmooth, sudata.A);
//weight matrix
vector<Triplet<Scalar> > WTriplets;
//one weight per set in S.
currRow=0;
for (int i=0;i<SC.rows();i++){
for (int j=0;j<SC(i);j++)
WTriplets.push_back(Triplet<double>(currRow+j,currRow+j,sudata.shapeCoeff*wShape(i)));
currRow+=SC(i);
}
for (int i=0;i<b.size();i++)
WTriplets.push_back(Triplet<double>(SC.sum()+i, SC.sum()+i, sudata.closeCoeff));
for (int i=0;i<E.rows();i++)
WTriplets.push_back(Triplet<double>(SC.sum()+b.size()+i, SC.sum()+b.size()+i, sudata.smoothCoeff*wSmooth(i)));
sudata.W.conservativeResize(SC.sum()+b.size()+E.rows(), SC.sum()+b.size()+E.rows());
sudata.W.setFromTriplets(WTriplets.begin(), WTriplets.end());
sudata.At=sudata.A.transpose(); //for efficieny, as we use the transpose a lot in the iteration
sudata.Q=sudata.At*sudata.W*sudata.A;
return min_quad_with_fixed_precompute(sudata.Q,VectorXi(),SparseMatrix<double>(),true,sudata.solver_data);
}
template <
typename DerivedP,
typename DerivedSC,
typename DerivedS>
IGL_INLINE bool shapeup_solve(const Eigen::PlainObjectBase<DerivedP>& bc,
const std::function<bool(const Eigen::PlainObjectBase<DerivedP>&, const Eigen::PlainObjectBase<DerivedSC>&, const Eigen::PlainObjectBase<DerivedS>&, Eigen::PlainObjectBase<DerivedP>&)>& local_projection,
const Eigen::PlainObjectBase<DerivedP>& P0,
const ShapeupData & sudata,
const bool quietIterations,
Eigen::PlainObjectBase<DerivedP>& P)
{
using namespace Eigen;
using namespace std;
MatrixXd currP=P0;
MatrixXd prevP=P0;
MatrixXd projP;
assert(bc.rows()==sudata.b.rows());
MatrixXd rhs(sudata.A.rows(), 3); rhs.setZero();
rhs.block(sudata.DShape.rows(), 0, sudata.b.rows(),3)=bc; //this stays constant throughout the iterations
if (!quietIterations){
cout<<"Shapeup Iterations, "<<sudata.DShape.rows()<<" constraints, solution size "<<P0.size()<<endl;
cout<<"**********************************************************************************************"<<endl;
}
projP.conservativeResize(sudata.SC.rows(), 3*sudata.SC.maxCoeff());
for (int iter=0;iter<sudata.maxIterations;iter++){
local_projection(currP, sudata.SC,sudata.S,projP);
//constructing the projection part of the (DShape rows of the) right hand side
int currRow=0;
for (int i=0;i<sudata.S.rows();i++)
for (int j=0;j<sudata.SC(i);j++)
rhs.row(currRow++)=projP.block(i, 3*j, 1,3);
DerivedP lsrhs=-sudata.At*sudata.W*rhs;
MatrixXd Y(0,3), Beq(0,3); //We do not use the min_quad_solver fixed variables mechanism; they are treated with the closeness energy of ShapeUp.
min_quad_with_fixed_solve(sudata.solver_data, lsrhs,Y,Beq,currP);
double currChange=(currP-prevP).lpNorm<Infinity>();
if (!quietIterations)
cout << "Iteration "<<iter<<", integration Linf error: "<<currChange<< endl;
prevP=currP;
if (currChange<sudata.pTolerance){
P=currP;
return true;
}
}
P=currP;
return false; //we went over maxIterations
}
}
#ifdef IGL_STATIC_LIBRARY
template bool igl::shapeup_precomputation< typename Eigen::Matrix<double, -1, -1, 0, -1, -1>, typename Eigen::Matrix<int, -1, 1, 0, -1, 1>, typename Eigen::Matrix<int, -1, -1, 0, -1, -1>, typename Eigen::Matrix<double, -1, 1, 0, -1, 1> >(Eigen::PlainObjectBase<Eigen::Matrix<double, -1, -1, 0, -1, -1> > const&, Eigen::PlainObjectBase<Eigen::Matrix<int, -1, 1, 0, -1, 1> > const&, Eigen::PlainObjectBase<Eigen::Matrix<int, -1, -1, 0, -1, -1> > const&, Eigen::PlainObjectBase<Eigen::Matrix<int, -1, -1, 0, -1, -1> > const&, Eigen::PlainObjectBase<Eigen::Matrix<int, -1, 1, 0, -1, 1> > const&, Eigen::PlainObjectBase<Eigen::Matrix<double, -1, 1, 0, -1, 1> > const&, Eigen::PlainObjectBase<Eigen::Matrix<double, -1, 1, 0, -1, 1> > const&, igl::ShapeupData&);
template bool igl::shapeup_solve<typename Eigen::Matrix<double, -1, -1, 0, -1, -1>, typename Eigen::Matrix<int, -1, 1, 0, -1, 1>, typename Eigen::Matrix<int, -1, -1, 0, -1, -1> >(const Eigen::PlainObjectBase<Eigen::Matrix<double, -1, -1, 0, -1, -1> >& bc, const std::function<bool(const Eigen::PlainObjectBase<Eigen::Matrix<double, -1, -1, 0, -1, -1> >&, const Eigen::PlainObjectBase<Eigen::Matrix<int, -1, 1, 0, -1, 1> >&, const Eigen::PlainObjectBase<Eigen::Matrix<int, -1, -1, 0, -1, -1> >&, Eigen::PlainObjectBase<Eigen::Matrix<double, -1, -1, 0, -1, -1> >& ) >& local_projection, const Eigen::PlainObjectBase<Eigen::Matrix<double, -1, -1, 0, -1, -1> >& P0, const igl::ShapeupData & sudata, const bool quietIterations, Eigen::PlainObjectBase<Eigen::Matrix<double, -1, -1, 0, -1, -1> >& P);
#endif
|
{"hexsha": "f352d22bfd58bd3759bc0e944447c8caf6624764", "size": 11060, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "thirdparty/simpleuv/thirdparty/libigl/include/igl/shapeup.cpp", "max_stars_repo_name": "MelvinG24/dust3d", "max_stars_repo_head_hexsha": "c4936fd900a9a48220ebb811dfeaea0effbae3ee", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2392.0, "max_stars_repo_stars_event_min_datetime": "2016-12-17T14:14:12.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T19:40:40.000Z", "max_issues_repo_path": "thirdparty/simpleuv/thirdparty/libigl/include/igl/shapeup.cpp", "max_issues_repo_name": "MelvinG24/dust3d", "max_issues_repo_head_hexsha": "c4936fd900a9a48220ebb811dfeaea0effbae3ee", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 106.0, "max_issues_repo_issues_event_min_datetime": "2018-04-19T17:47:31.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-01T19:44:11.000Z", "max_forks_repo_path": "thirdparty/simpleuv/thirdparty/libigl/include/igl/shapeup.cpp", "max_forks_repo_name": "MelvinG24/dust3d", "max_forks_repo_head_hexsha": "c4936fd900a9a48220ebb811dfeaea0effbae3ee", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 184.0, "max_forks_repo_forks_event_min_datetime": "2017-11-15T09:55:37.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-21T16:30:46.000Z", "avg_line_length": 46.2761506276, "max_line_length": 794, "alphanum_fraction": 0.6118444846, "num_tokens": 3125}
|
"""
Model to approximate cross products of node voltages
```
wdcr[(i,j)] <= wdc[i]*wdc[j]
```
"""
function constraint_voltage_dc(pm::_PM.AbstractWRMModel, n::Int)
wdc = _PM.var(pm, n, :wdc)
wdcr = _PM.var(pm, n, :wdcr)
for (i,j) in _PM.ids(pm, n, :buspairsdc)
JuMP.@constraint(pm.model, [ wdc[i]/sqrt(2), wdc[j]/sqrt(2), wdcr[(i,j)]/sqrt(2), wdcr[(i,j)]/sqrt(2)] in JuMP.RotatedSecondOrderCone() )
end
end
"""
Limits dc branch current
```
p[f_idx] <= wdc[f_bus] * Imax
```
"""
function constraint_dc_branch_current(pm::_PM.AbstractWRMModel, n::Int, f_bus, f_idx, ccm_max, p)
p_dc_fr = _PM.var(pm, n, :p_dcgrid, f_idx)
wdc_fr = _PM.var(pm, n, :wdc, f_bus)
JuMP.@constraint(pm.model, p_dc_fr <= wdc_fr * ccm_max * p^2)
end
############## TNEP constraints ###################
"""
Model to approximate cross products of node voltages
```
wdcr[(i,j)] <= wdc[i]*wdc[j]
```
"""
function constraint_voltage_dc_ne(pm::_PM.AbstractWRMModel, n::Int)
wdc = _PM.var(pm, n, :wdc_ne)
wdc_frto = _PM.var(pm, n, :wdcr_ne)
wdc_du_frto = _PM.var(pm, n, :wdcr_du)
wdc_du_to = _PM.var(pm, n, :wdc_du_to)
wdc_du_fr = _PM.var(pm, n, :wdc_du_fr)
z = _PM.var(pm, n, :branchdc_ne)
for (l,i,j) in pm.ref[:it][:pm][:nw][n][:arcs_dcgrid_from_ne]
wdc_to = []
wdc_fr = []
wdc_to, wdc_fr = contraint_ohms_dc_branch_busvoltage_structure_W(pm, n, i, j, wdc_du_to, wdc_du_fr)
JuMP.@constraint(pm.model, [ wdc_du_to[l]/sqrt(2), wdc_du_fr[l]/sqrt(2), wdc_du_frto[l]/sqrt(2), wdc_du_frto[l]/sqrt(2)] in JuMP.RotatedSecondOrderCone() )
end
end
|
{"hexsha": "3e9ae700a32e59d45d693c6e630d270ba0bdde17", "size": 1622, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/formdcgrid/wrm.jl", "max_stars_repo_name": "hakanergun/PowerModelsACDC.jl", "max_stars_repo_head_hexsha": "8ef219296223306a53e976005bad9ab788cb0171", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 24, "max_stars_repo_stars_event_min_datetime": "2018-07-06T00:16:41.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-07T10:21:23.000Z", "max_issues_repo_path": "src/formdcgrid/wrm.jl", "max_issues_repo_name": "hakanergun/PowerModelsACDC.jl", "max_issues_repo_head_hexsha": "8ef219296223306a53e976005bad9ab788cb0171", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 21, "max_issues_repo_issues_event_min_datetime": "2018-06-21T01:49:40.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-17T11:31:31.000Z", "max_forks_repo_path": "src/formdcgrid/wrm.jl", "max_forks_repo_name": "hakanergun/PowerModelsACDC.jl", "max_forks_repo_head_hexsha": "8ef219296223306a53e976005bad9ab788cb0171", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 13, "max_forks_repo_forks_event_min_datetime": "2019-02-26T15:25:32.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-14T21:32:04.000Z", "avg_line_length": 30.037037037, "max_line_length": 163, "alphanum_fraction": 0.6220715166, "num_tokens": 599}
|
[STATEMENT]
lemma less_multiset\<^sub>H\<^sub>O:
"M < N \<longleftrightarrow> M \<noteq> N \<and> (\<forall>y. count N y < count M y \<longrightarrow> (\<exists>x>y. count M x < count N x))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (M < N) = (M \<noteq> N \<and> (\<forall>y. count N y < count M y \<longrightarrow> (\<exists>x>y. count M x < count N x)))
[PROOF STEP]
by (rule mult\<^sub>H\<^sub>O[folded multp_def less_multiset_def])
|
{"llama_tokens": 177, "file": null, "length": 1}
|
# -*- coding: utf-8 -*-
"""
Functions to evaluate a trained model
Note: The file was more or less taken from Spotlight
"""
import numpy as np
import scipy.stats as st
FLOAT_MAX = np.finfo(np.float32).max
def mrr_score(model, test, train=None):
"""
Compute mean reciprocal rank (MRR) scores. One score
is given for every user with interactions in the test
set, representing the mean reciprocal rank of all their
test items.
Parameters
----------
model: fitted instance of a recommender model
The model to evaluate.
test: :class:`spotlight.interactions.Interactions`
Test interactions.
train: :class:`spotlight.interactions.Interactions`, optional
Train interactions. If supplied, scores of known
interactions will be set to very low values and so not
affect the MRR.
Returns
-------
mrr scores: numpy array of shape (num_users,)
Array of MRR scores for each user in test.
"""
test = test.tocsr()
if train is not None:
train = train.tocsr()
mrrs = []
for user_id, row in enumerate(test):
if not len(row.indices):
continue
predictions = -model.predict(user_id)
if train is not None:
predictions[train[user_id].indices] = FLOAT_MAX
mrr = (1.0 / st.rankdata(predictions)[row.indices]).mean()
mrrs.append(mrr)
return np.array(mrrs)
def _get_precision_recall(predictions, targets, k):
predictions = predictions[:k]
n_hit = len(set(predictions).intersection(set(targets)))
return float(n_hit) / len(predictions), float(n_hit) / len(targets)
def precision_recall_score(model, test, train=None, k=10):
"""
Compute Precision@k and Recall@k scores. One score
is given for every user with interactions in the test
set, representing the Precision@k and Recall@k of all their
test items.
Parameters
----------
model: fitted instance of a recommender model
The model to evaluate.
test: :class:`spotlight.interactions.Interactions`
Test interactions.
train: :class:`spotlight.interactions.Interactions`, optional
Train interactions. If supplied, scores of known
interactions will not affect the computed metrics.
k: int or array of int,
The maximum number of predicted items
Returns
-------
(Precision@k, Recall@k): numpy array of shape (num_users, len(k))
A tuple of Precisions@k and Recalls@k for each user in test.
If k is a scalar, will return a tuple of vectors. If k is an
array, will return a tuple of arrays, where each row corresponds
to a user and each column corresponds to a value of k.
"""
test = test.tocsr()
if train is not None:
train = train.tocsr()
if np.isscalar(k):
k = np.array([k])
precision = []
recall = []
for user_id, row in enumerate(test):
if not len(row.indices):
continue
predictions = -model.predict(user_id)
if train is not None:
rated = train[user_id].indices
predictions[rated] = FLOAT_MAX
predictions = predictions.argsort()
targets = row.indices
user_precision, user_recall = zip(*[
_get_precision_recall(predictions, targets, x)
for x in k
])
precision.append(user_precision)
recall.append(user_recall)
precision = np.array(precision).squeeze()
recall = np.array(recall).squeeze()
return precision, recall
def auc_score(model, test, train=None, auc_selection_seed=42):
"""
See https://arxiv.org/pdf/1508.06091.pdf
Args:
model:
test:
train:
auc_selection_seed:
Returns:
"""
# TODO: Implement known positive removal (not urgent as not applicable for Movielens)
test = test.tocsr()
np.random.seed(auc_selection_seed)
auc_score = []
for user_id, row in enumerate(test):
if not len(row.indices):
continue
# Make predictions for all items
predictions = model.predict(user_id)
pos_targets = row.indices
n_preds = len(pos_targets)
neg_targets = np.setdiff1d(np.arange(len(predictions)), pos_targets)
neg_targets = np.random.choice(neg_targets, size=n_preds, replace=False)
# Obtain predictions for all positives
pos_predictions = predictions[pos_targets]
# Obtain predictions for random set of unobserved that has the same length
# as the positives
neg_predictions = predictions[neg_targets]
# Compare both ratings for ranking distortions, i.e. positive < negative
user_auc_score = (pos_predictions > neg_predictions).sum()/n_preds
auc_score.append(user_auc_score)
return np.array(auc_score)
def rmse_score(model, test):
"""
Compute RMSE score for test interactions.
Parameters
----------
model: fitted instance of a recommender model
The model to evaluate.
test: :class:`spotlight.interactions.Interactions`
Test interactions.
Returns
-------
rmse_score: float
The RMSE score.
"""
predictions = model.predict(test.user_ids, test.item_ids)
ratings = np.clip(test.ratings, 0, 1) # bring -1 to 0
return np.sqrt(((ratings - predictions) ** 2).mean())
|
{"hexsha": "dfd0ea9434b28c0b8722f35f768e97ecbcdd017c", "size": 5436, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/lrann/evaluations.py", "max_stars_repo_name": "FlorianWilhelm/lrann", "max_stars_repo_head_hexsha": "553ae98d48e76d0b827ba3fffa48e20c68dd475d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-05-31T03:11:39.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-28T11:00:46.000Z", "max_issues_repo_path": "src/lrann/evaluations.py", "max_issues_repo_name": "FlorianWilhelm/lrann", "max_issues_repo_head_hexsha": "553ae98d48e76d0b827ba3fffa48e20c68dd475d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/lrann/evaluations.py", "max_forks_repo_name": "FlorianWilhelm/lrann", "max_forks_repo_head_hexsha": "553ae98d48e76d0b827ba3fffa48e20c68dd475d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-11-03T08:55:57.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-03T08:55:57.000Z", "avg_line_length": 25.5211267606, "max_line_length": 89, "alphanum_fraction": 0.640544518, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1229}
|
import sys
sys.path.append('..')
import numpy as np
from common.trainer import Trainer
from common.optimizer import Adam
from common.layers import MatMul, SoftmaxWithLoss
from common.util import preprocess, convert_one_hot
def create_context_target(corpus,window_size=1):
target = corpus[window_size:-window_size]
contexts = []
for idx in range(window_size,len(corpus)-window_size):
cs = []
for t in range(-window_size,window_size+1):
if t == 0:
continue
cs.append(corpus[idx+t])
contexts.append(cs)
return np.array(contexts),np.array(target)
class SimpleCBOW:
def __init__(self,vocab_size,hidden_size):
V, H = vocab_size, hidden_size
#重みの初期化
W_in = 0.01 * np.random.randn(V,H).astype('f')
W_out = 0.01 * np.random.randn(H,V).astype('f')
#レイヤの作成
self.in_layer0 = MatMul(W_in)
self.in_layer1 = MatMul(W_in)
self.out_layer = MatMul(W_out)
self.loss_layer = SoftmaxWithLoss()
#全ての重みと勾配をリストにまとめる
layers = [self.in_layer0, self.in_layer1,self.out_layer]
self.params,self.grads = [],[]
for layer in layers:
self.params += layer.params
self.grads += layer.grads
#メンバ変数に単語の分散表現を設定
self.word_vecs = W_in
def forward(self,contexts,target):
h0 = self.in_layer0.forward(contexts[:,0])
h1 = self.in_layer1.forward(contexts[:,1])
h = (h0+h1) * 0.5
score = self.out_layer.forward(h)
loss = self.loss_layer.forward(score,target)
return loss
def backward(self,dout=1):
ds = self.loss_layer.backward(dout)
da = self.out_layer.backward(ds)
da *= 0.5
self.in_layer1.backward(da)
self.in_layer0.backward(da)
window_size = 1
hidden_size = 5
batch_size = 3
max_eopch = 1000
text = 'You say goodbye and I say hello.'
corpus, word_to_id, id_to_word = preprocess(text)
#print(corpus) #[0 1 2 3 4 1 5 6]
#print(id_to_word) #{0: 'you', 1: 'say', 2: 'goodbye', 3: 'and', 4: 'i', 5: 'hello', 6: '.'}
vocab_size = len(word_to_id)
contexts, target = create_context_target(corpus,window_size)
#print(contexts) #[[0 2],[1 3],[2 4],[3 1],[4 5],[1 6]]
#print(target) #[1 2 3 4 1 5]
target = convert_one_hot(target,vocab_size)
contexts = convert_one_hot(contexts,vocab_size)
model = SimpleCBOW(vocab_size,hidden_size)
optimizer = Adam()
trainer = Trainer(model,optimizer)
trainer.fit(contexts, target, max_eopch,batch_size)
trainer.plot()
word_vecs = model.word_vecs
for word_id,word in id_to_word.items():
print(word,word_vecs[word_id])
|
{"hexsha": "98a45cb57dbad4d99e6624b11760d7ce2aa12040", "size": 2652, "ext": "py", "lang": "Python", "max_stars_repo_path": "ch03/test3.py", "max_stars_repo_name": "gangigammo/deep-learning-2", "max_stars_repo_head_hexsha": "6bce355261d8ad5135c104fca32946aa13dc0ba4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ch03/test3.py", "max_issues_repo_name": "gangigammo/deep-learning-2", "max_issues_repo_head_hexsha": "6bce355261d8ad5135c104fca32946aa13dc0ba4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ch03/test3.py", "max_forks_repo_name": "gangigammo/deep-learning-2", "max_forks_repo_head_hexsha": "6bce355261d8ad5135c104fca32946aa13dc0ba4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.4666666667, "max_line_length": 95, "alphanum_fraction": 0.6481900452, "include": true, "reason": "import numpy", "num_tokens": 779}
|
#include <geometry.h>
#include <tiny_math_types.h>
#define BOOST_AUTO_TEST_MAIN
#include <boost/test/auto_unit_test.hpp>
#include <boost/test/unit_test_suite.hpp>
#include <boost/test/floating_point_comparison.hpp>
#include <boost/test/test_tools.hpp>
BOOST_AUTO_TEST_SUITE(geometry);
BOOST_AUTO_TEST_CASE(inside_sphere_test)
{
typedef tiny::MathTypes<double> MT;
typedef MT::vector3_type V;
typedef MT::real_type T;
typedef MT::value_traits VT;
V const center = V::zero();
T const radius = VT::one();
geometry::Sphere<V> const sphere = geometry::make_sphere(center , radius );
BOOST_CHECK(geometry::is_valid(sphere));
{
V const p = V::make( 0.0, 0.0, 0.0);
bool const test = geometry::inside_sphere( p, sphere);
BOOST_CHECK( test );
}
{
V const p = V::make( 1.0, 0.0, 0.0);
bool const test = geometry::inside_sphere( p, sphere);
BOOST_CHECK( test );
}
{
V const p = V::make( 2.0, 0.0, 0.0);
bool const test = geometry::inside_sphere( p, sphere);
BOOST_CHECK( !test );
}
}
BOOST_AUTO_TEST_SUITE_END();
|
{"hexsha": "6d13c4569350ad77d7229d5e5f60386ee853eed8", "size": 1130, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "PROX/FOUNDATION/GEOMETRY/unit_tests/geometry_inside_sphere/geometry_inside_sphere.cpp", "max_stars_repo_name": "diku-dk/PROX", "max_stars_repo_head_hexsha": "c6be72cc253ff75589a1cac28e4e91e788376900", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2019-11-27T09:44:45.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-13T00:24:21.000Z", "max_issues_repo_path": "PROX/FOUNDATION/GEOMETRY/unit_tests/geometry_inside_sphere/geometry_inside_sphere.cpp", "max_issues_repo_name": "erleben/matchstick", "max_issues_repo_head_hexsha": "1cfdc32b95437bbb0063ded391c34c9ee9b9583b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "PROX/FOUNDATION/GEOMETRY/unit_tests/geometry_inside_sphere/geometry_inside_sphere.cpp", "max_forks_repo_name": "erleben/matchstick", "max_forks_repo_head_hexsha": "1cfdc32b95437bbb0063ded391c34c9ee9b9583b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.1111111111, "max_line_length": 77, "alphanum_fraction": 0.6504424779, "num_tokens": 317}
|
import networkx as nx
from matplotlib import pyplot as plt
import numpy as np
import pytest
import qleet
@pytest.mark.parametrize("ensemble_size", [3, 5])
def test_plot_histogram(ensemble_size):
graph = nx.gnm_random_graph(n=10, m=40)
qaoa = qleet.QAOACircuitMaxCut(graph, p=2)
circuit = qleet.CircuitDescriptor(qaoa.qaoa_circuit, qaoa.params, qaoa.qaoa_cost)
epochs_chart = (0, 1, 5, 10)
plot = qleet.ParameterHistograms(
circuit, ensemble_size=2, epochs_chart=epochs_chart
)
ax = plot.plot()
assert isinstance(ax, np.ndarray), "Array of plots wasn't returned by the plotter."
assert ax.shape == (
len(circuit.parameters),
len(epochs_chart),
), "The shape of the plot returned is not correct."
for subplot in np.reshape(ax, -1):
assert isinstance(
subplot, plt.Axes
), "Matplotlib figure axes were not returned by the plotter."
for _group, data in plot._histograms.items():
data = np.array(data)
assert not np.any(
np.isclose(np.var(data, axis=0), 0)
), "The parameters stayed the same after training, check logging."
|
{"hexsha": "4cd08401fe627ea12137c4a852399c03c19d4b9a", "size": 1159, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/analyzers/test_histograms.py", "max_stars_repo_name": "AnimeshSinha1309/qaoa-optimizer", "max_stars_repo_head_hexsha": "2a93a46bacc99f22f49e7b5121eb3aa9f12c0163", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2021-09-26T18:43:43.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T12:34:01.000Z", "max_issues_repo_path": "tests/analyzers/test_histograms.py", "max_issues_repo_name": "QLemma/qLEET", "max_issues_repo_head_hexsha": "2a93a46bacc99f22f49e7b5121eb3aa9f12c0163", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 12, "max_issues_repo_issues_event_min_datetime": "2021-09-19T13:29:33.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-09T15:22:49.000Z", "max_forks_repo_path": "tests/analyzers/test_histograms.py", "max_forks_repo_name": "QLemma/qLEET", "max_forks_repo_head_hexsha": "2a93a46bacc99f22f49e7b5121eb3aa9f12c0163", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-14T03:02:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-14T03:02:24.000Z", "avg_line_length": 34.0882352941, "max_line_length": 87, "alphanum_fraction": 0.670405522, "include": true, "reason": "import numpy,import networkx", "num_tokens": 301}
|
#!/usr/bin/env python
# K_modularity using weighted edges
import matplotlib.pyplot as plt
import re, os, sys
import networkx as nx
from numpy import linalg as la
from networkx.generators.atlas import *
import numpy as np
import networkx as nx
import random, copy
import math
from scipy.sparse import csr_matrix
import argparse
import logging
import urllib3
import graph_kClusterAlgorithm_functions as QCD
import graphFileUtility_functions as GFU
#
# The Quantum Graph Community Detection Algorithm has been described
# in the following publications. Please cite in your publication.
#
# H. Ushijima-Mwesigwa, C. F. A. Negre, S. M. Mniszewski,
# 2017, Graph Partitioning using Quantum Annealing on the
# D-Wave System, Proceedings of the 2nd International
# Workshop on Post Moore’s Era Supercomputing (PMES), 22-29.
#
# C. F. A. Negre, H. Ushijima-Mwesigwa, S. M. Mniszewski 2020, Detecting
# Multiple Communities using Quantum Annealing on the D-Wave System,
# PLOS ONE 15(2): e0227538. https://doi.org/10.1371/journal.pone.0227538
#
# S. M. Mniszewski, P. A. Dub, S. Tretiak, P. M. Anisimov, Y. Zhang,
# C. F. A. Negre, 2021, Reduction of the Molecular Hamiltonian Matrix using
# Quantum Community Detection, Sci Rep 11, 4099 (2021).
# https://doi.org/10.1038/s41598-021-83561-x#
#
if __name__== '__main__':
urllib3.disable_warnings()
parser = argparse.ArgumentParser(description='Quantum Community Detection')
parser.add_argument('-nparts', type=int, default=2, help='number of parts')
parser.add_argument('-pflag', type=int, default=0, help='plot flag, 0-no 1-yes')
parser.add_argument('-ifile', help='input filename')
parser.add_argument('-ftype', default='mtx', help='input file type (mtx, umtx, zmtx, gml, konect, net, mi')
parser.add_argument('-beta', type=int, default=1, help='beta penalty constant: minimize edge cut')
parser.add_argument('-gamma', type=int, default=-5, help='gamma penalty constant: each node in 1 part')
parser.add_argument('-threshold', type=float, default=0.00, help='threshold value')
parser.add_argument('-label', default='qcd_hybrid', help='label for run')
parser.add_argument('-qsize', type=int, default=64, help='QPU sub-qubo size')
args = parser.parse_args()
print('number parts = ', args.nparts)
print('plot flag = ', args.pflag)
print('input file = ', args.ifile)
print('file type = ', args.ftype)
print('beta = ', args.beta)
print('gamma = ', args.gamma)
print('threshold = ', args.threshold)
print('label = ', args.label)
print('qsize = ', args.qsize)
print('\n')
num_parts = args.nparts
pflag = args.pflag
ifilename = args.ifile
ftype = args.ftype
beta0 = args.beta
gamma0 = args.gamma
threshold = args.threshold
run_label = args.label
qsize = args.qsize
####
# NOTE: node and matrix indexing starts from 0
#
###
# Read and generate graph
graph = GFU.generateGraph(ftype, ifilename, threshold)
A = nx.adjacency_matrix(graph)
print ('\nAdjacency matrix:\n', A.todense())
num_blocks = num_parts
num_nodes = nx.number_of_nodes(graph)
num_edges = nx.number_of_edges(graph)
print ("\n\t Quantum Community Detection: up to %d communities...\n" %num_parts)
print ("Graph has %d nodes and %d edges" %(num_nodes, num_edges))
# Collect results to dictionary
result = {}
result['alg'] = 'LANL_QCD'
result['num_clusters'] = num_parts
result['name'] = ifilename
result['nodes'] = num_nodes
result['edges'] = num_edges
result['size'] = num_nodes * num_parts
result['run_arch'] = 'DWAVE_Hybrid'
result['subqubo_size'] = qsize
beta, gamma, GAMMA = QCD.set_penalty_constant(num_nodes, num_blocks, beta0, gamma0)
mtotal, modularity = QCD.build_mod(A, threshold, num_edges)
print ("\nModularity matrix: \n", modularity)
print ("min value = ", modularity.min())
print ("max value = ", modularity.max())
print ("threshold = ", threshold)
Q = QCD.makeQubo(graph, modularity, beta, gamma, GAMMA, num_nodes, num_parts, num_blocks, threshold)
# Run k-clustering with Hybrid/D-Wave using ocean
ss = QCD.clusterHybrid(Q, num_parts, qsize, run_label, result)
# Process solution
part_number = QCD.process_solution(ss, graph, num_blocks, num_nodes, num_parts, result)
mmetric = QCD.calcModularityMetric(mtotal, modularity, part_number)
print ("\nModularity metric = ", mmetric)
result['modularity_metric'] = mmetric
GFU.write_partFile(part_number, num_nodes, num_parts)
GFU.write_resultFile(result)
if pflag == 1:
GFU.showClusters(part_number, graph)
exit(0)
|
{"hexsha": "8924c6f1bbe406f74ae20e1dd1bf504629c445e9", "size": 4560, "ext": "py", "lang": "Python", "max_stars_repo_path": "algorithm/k-community_detection/quantum_kcommunity_detection_hybrid.py", "max_stars_repo_name": "lanl/Quantum_Graph_Algorithms", "max_stars_repo_head_hexsha": "3e8e63fe98866321b417b8d47cb52a9ce60596bc", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-10T08:35:50.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-10T08:35:50.000Z", "max_issues_repo_path": "algorithm/k-community_detection/quantum_kcommunity_detection_hybrid.py", "max_issues_repo_name": "lanl/Quantum_Graph_Algorithms", "max_issues_repo_head_hexsha": "3e8e63fe98866321b417b8d47cb52a9ce60596bc", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "algorithm/k-community_detection/quantum_kcommunity_detection_hybrid.py", "max_forks_repo_name": "lanl/Quantum_Graph_Algorithms", "max_forks_repo_head_hexsha": "3e8e63fe98866321b417b8d47cb52a9ce60596bc", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5714285714, "max_line_length": 109, "alphanum_fraction": 0.7166666667, "include": true, "reason": "import numpy,from numpy,from scipy,import networkx,from networkx", "num_tokens": 1307}
|
import tactic
import tactic.induction
noncomputable theory
open_locale classical
@[to_additive]
lemma finset_prod_congr_set
{α : Type*} [comm_monoid α] {β : Type*} [fintype β] (s : set β) (f : β → α) (g : s → α)
(w : ∀ (x : β) (h : x ∈ s), f x = g ⟨x, h⟩) (w' : ∀ (x : β), x ∉ s → f x = 1) :
finset.univ.prod f = finset.univ.prod g :=
begin
by_cases hs : s.nonempty,
{ cases hs with d hd,
have h : ∀ (fs : finset β), ∃ (fh : β → s),
(∀ (x : β), x ∈ s → (fh x : s).1 = x) ∧
fs.prod f = (finset.image fh (fs ∩ s.to_finset)).prod g,
{ rintro fs, apply fs.induction_on; clear fs,
{ fsplit,
{ intro x, apply dite (x ∈ s); intro h,
{ exact ⟨_, h⟩ },
{ exact ⟨_, hd⟩ }},
{ simp, intro x, split_ifs; simp [h] }},
{ rintro x fs h₁ h₂, rcases h₂ with ⟨fh, h₂, h₃⟩, use [fh, h₂],
rw finset.prod_insert h₁, change _ * fs.prod f = _,
have hh : ∀ {x y : β}, x ∈ s → y ∈ s → fh x = fh y → x = y,
{ rintro x y hx hy h₄, rw subtype.ext_iff at h₄,
change (fh x).1 = (fh y).1 at h₄, simp [h₂ _ hx, h₂ _ hy] at h₄,
exact h₄ },
by_cases hx : x ∈ s,
{ rw (_ : _ ∩ _ = (insert x (fs ∩ s.to_finset))), swap,
{ apply finset.insert_inter_of_mem, rwa set.mem_to_finset },
rw [finset.image_insert, finset.prod_insert], swap,
{ intro h₄, rw finset.mem_image at h₄, rcases h₄ with ⟨y, hy, h₄⟩,
have hy₁ := finset.mem_of_mem_inter_left hy,
have hy₂ := finset.mem_of_mem_inter_right hy,
rw set.mem_to_finset at hy₂, replace h₄ := hh hy₂ hx h₄,
subst h₄, contradiction },
rw [w _ hx, h₃], congr, ext, simp, exact (h₂ _ hx).symm },
{ rwa [w' _ hx, one_mul, finset.insert_inter_of_not_mem],
rwa set.mem_to_finset }}},
obtain ⟨fh, h₁, h₂⟩ := h finset.univ, convert h₂, ext x, simp,
apply finset.mem_image.mpr, cases x with x hx, simp_rw set.mem_to_finset,
use [x, hx], ext, simp, exact h₁ _ hx },
{ rw set.not_nonempty_iff_eq_empty at hs, subst s, simp,
rw finset.prod_eq_one, rintro x hx, apply w', simp },
end
|
{"author": "user7230724", "repo": "lean-projects", "sha": "ab9a83874775efd18f8c5b867e480bae4d596b31", "save_path": "github-repos/lean/user7230724-lean-projects", "path": "github-repos/lean/user7230724-lean-projects/lean-projects-ab9a83874775efd18f8c5b867e480bae4d596b31/src/other/prod_congr_set.lean"}
|
using HDF5
using Merlin
include("parser.jl")
include("model.jl")
const wordembeds_file = ".data/glove.6B.100d.h5"
#traindoc = CoNLL.read(".data/wsj_00-18.conll")
#testdoc = CoNLL.read(".data/wsj_22-24.conll")
#info("# sentences of train doc: $(length(traindoc))")
#info("# sentences of test doc: $(length(testdoc))")
#traindata = setup_data(traindoc, worddict, chardict)
#testdata = setup_data(testdoc, worddict, chardict)
#info("# words: $(length(worddict))")
#info("# chars: $(length(chardict))")
#traindata, testdata, worddict, chardict
|
{"hexsha": "4febe31f173d93e9f8f48612c35b95dad55a22f2", "size": 544, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "depparsing/main.jl", "max_stars_repo_name": "hshindo/Merlin-Examples", "max_stars_repo_head_hexsha": "a12fd471d5271b99f6d9680d8c768661dca1ea31", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-11-25T00:34:51.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-25T00:34:51.000Z", "max_issues_repo_path": "depparsing/main.jl", "max_issues_repo_name": "hshindo/Merlin-Examples", "max_issues_repo_head_hexsha": "a12fd471d5271b99f6d9680d8c768661dca1ea31", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "depparsing/main.jl", "max_forks_repo_name": "hshindo/Merlin-Examples", "max_forks_repo_head_hexsha": "a12fd471d5271b99f6d9680d8c768661dca1ea31", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.6315789474, "max_line_length": 54, "alphanum_fraction": 0.7132352941, "num_tokens": 172}
|
# Helper data for color parsing
include("names_data.jl")
# Color Parsing
# -------------
const col_pat_hex1 = r"(#|0x)([[:xdigit:]])([[:xdigit:]])([[:xdigit:]])"
const col_pat_hex2 = r"(#|0x)([[:xdigit:]]{2})([[:xdigit:]]{2})([[:xdigit:]]{2})"
const col_pat_rgb = r"rgb\((\d+%?),(\d+%?),(\d+%?)\)"
const col_pat_hsl = r"hsl\((\d+%?),(\d+%?),(\d+%?)\)"
const col_pat_rgba = r"rgba\((\d+%?),(\d+%?),(\d+%?),(\d+(?:\.\d*)?%?)\)"
const col_pat_hsla = r"hsla\((\d+%?),(\d+%?),(\d+%?),(\d+(?:\.\d*)?%?)\)"
# Parse a number used in the "rgb()" or "hsl()" color.
function parse_rgb(num::AbstractString)
if num[end] == '%'
return clamp(parse(Int, num[1:end-1], 10) / 100, 0, 1)
else
return clamp(parse(Int, num, 10) / 255, 0, 1)
end
end
function parse_hsl_hue(num::AbstractString)
if num[end] == '%'
error("hue cannot end in %")
else
return parse(Int, num, 10)
end
end
function parse_hsl_sl(num::AbstractString)
if num[end] != '%'
error("saturation and lightness must end in %")
else
return parse(Int, num[1:end-1], 10) / 100
end
end
# Parse a number used in the alpha field of "rgba()" and "hsla()".
function parse_alpha_num(num::AbstractString)
if num[end] == '%'
return parse(Int, num[1:end-1]) / 100
else
return parse(Float32, num)
end
end
function _parse_colorant(desc::AbstractString)
desc_ = replace(desc, " ", "")
mat = match(col_pat_hex2, desc_)
if mat != nothing
return RGB{N0f8}(parse(Int, mat.captures[2], 16) / 255,
parse(Int, mat.captures[3], 16) / 255,
parse(Int, mat.captures[4], 16) / 255)
end
mat = match(col_pat_hex1, desc_)
if mat != nothing
return RGB{N0f8}((16 * parse(Int, mat.captures[2], 16)) / 255,
(16 * parse(Int, mat.captures[3], 16)) / 255,
(16 * parse(Int, mat.captures[4], 16)) / 255)
end
mat = match(col_pat_rgb, desc_)
if mat != nothing
return RGB{N0f8}(parse_rgb(mat.captures[1]),
parse_rgb(mat.captures[2]),
parse_rgb(mat.captures[3]))
end
mat = match(col_pat_hsl, desc_)
if mat != nothing
return HSL{ColorTypes.eltype_default(HSL)}(parse_hsl_hue(mat.captures[1]),
parse_hsl_sl(mat.captures[2]),
parse_hsl_sl(mat.captures[3]))
end
mat = match(col_pat_rgba, desc_)
if mat != nothing
return RGBA{N0f8}(parse_rgb(mat.captures[1]),
parse_rgb(mat.captures[2]),
parse_rgb(mat.captures[3]),
parse_alpha_num(mat.captures[4]))
end
mat = match(col_pat_hsla, desc_)
if mat != nothing
return HSLA{ColorTypes.eltype_default(HSLA)}(parse_hsl_hue(mat.captures[1]),
parse_hsl_sl(mat.captures[2]),
parse_hsl_sl(mat.captures[3]),
parse_alpha_num(mat.captures[4]))
end
desc_ = lowercase(desc_)
if desc_ == "transparent"
return RGBA{N0f8}(0,0,0,0)
end
if !haskey(color_names, desc_)
error("Unknown color: ", desc)
end
c = color_names[desc_]
return RGB{N0f8}(c[1] / 255, c[2] / 255, c[3] / 255)
end
# note: these exist to enable proper dispatch, since super(Colorant) == Any
_parse_colorant{C<:Colorant,SUP<:Any}(::Type{C}, ::Type{SUP}, desc::AbstractString) = _parse_colorant(desc)
_parse_colorant{C<:Colorant,SUP<:Colorant}(::Type{C}, ::Type{SUP}, desc::AbstractString) = convert(C, _parse_colorant(desc))::C
"""
parse(Colorant, desc)
Parse a color description.
This parses subset of HTML/CSS color specifications. In particular, everything
is supported but: "currentColor".
It does support named colors (though it uses X11 named colors, which are
slightly different than W3C named colors in some cases), "rgb()", "hsl()",
"#RGB", and "#RRGGBB' syntax.
Args:
- `Colorant`: literal "Colorant" will parse according to the `desc`
string (usually returning an `RGB`); any more specific choice will
return a color of the specified type.
- `desc`: A color name or description.
Returns:
An `RGB{N0f8}` color, unless:
- "hsl(h,s,l)" was used, in which case an `HSL` color;
- "rgba(r,g,b,a)" was used, in which case an `RGBA` color;
- "hsla(h,s,l,a)" was used, in which case an `HSLA` color;
- a specific `Colorant` type was specified in the first argument
"""
Base.parse{C<:Colorant}(::Type{C}, desc::AbstractString) = _parse_colorant(C, supertype(C), desc)
Base.parse{C<:Colorant}(::Type{C}, desc::Symbol) = parse(C, string(desc))
Base.parse{C<:Colorant}(::Type{C}, c::Colorant) = c
macro colorant_str(ex)
isa(ex, AbstractString) || error("colorant requires literal strings")
col = parse(Colorant, ex)
:($col)
end
@noinline function ColorTypes.color(str::AbstractString)
Base.depwarn("color(\"$str\") is deprecated, use colorant\"$str\" or parse(Colorant, \"$str\")", :color)
parse(Colorant, str)
end
|
{"hexsha": "2234963cee123828ec61ea5bac4a8f48326e0c9a", "size": 5171, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "public/.julia/v0.5/Colors/src/parse.jl", "max_stars_repo_name": "Giarcr0b/MVO_Tool", "max_stars_repo_head_hexsha": "8f3348b8b56968febca8307acea3ebe1817fccae", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-03-12T03:24:25.000Z", "max_stars_repo_stars_event_max_datetime": "2018-03-12T03:24:25.000Z", "max_issues_repo_path": "public/.julia/v0.5/Colors/src/parse.jl", "max_issues_repo_name": "Giarcr0b/MVO_Tool", "max_issues_repo_head_hexsha": "8f3348b8b56968febca8307acea3ebe1817fccae", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "public/.julia/v0.5/Colors/src/parse.jl", "max_forks_repo_name": "Giarcr0b/MVO_Tool", "max_forks_repo_head_hexsha": "8f3348b8b56968febca8307acea3ebe1817fccae", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.3612903226, "max_line_length": 127, "alphanum_fraction": 0.5847998453, "num_tokens": 1559}
|
[STATEMENT]
lemma fresh_pgwt_same_type:
assumes "finite S" "wf\<^sub>t\<^sub>r\<^sub>m t"
shows "\<Gamma> (fresh_pgwt S (\<Gamma> t)) = \<Gamma> t"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<Gamma> (fresh_pgwt S (\<Gamma> t)) = \<Gamma> t
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<Gamma> (fresh_pgwt S (\<Gamma> t)) = \<Gamma> t
[PROOF STEP]
let ?P = "\<lambda>\<tau>::('fun,'atom) term_type. wf\<^sub>t\<^sub>r\<^sub>m \<tau> \<and> (\<forall>f T. TComp f T \<sqsubseteq> \<tau> \<longrightarrow> 0 < arity f)"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<Gamma> (fresh_pgwt S (\<Gamma> t)) = \<Gamma> t
[PROOF STEP]
{
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<Gamma> (fresh_pgwt S (\<Gamma> t)) = \<Gamma> t
[PROOF STEP]
fix \<tau>
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<Gamma> (fresh_pgwt S (\<Gamma> t)) = \<Gamma> t
[PROOF STEP]
assume "?P \<tau>"
[PROOF STATE]
proof (state)
this:
wf\<^sub>t\<^sub>r\<^sub>m \<tau> \<and> (\<forall>f T. Fun f T \<sqsubseteq> \<tau> \<longrightarrow> 0 < arity f)
goal (1 subgoal):
1. \<Gamma> (fresh_pgwt S (\<Gamma> t)) = \<Gamma> t
[PROOF STEP]
hence "\<Gamma> (fresh_pgwt S \<tau>) = \<tau>"
[PROOF STATE]
proof (prove)
using this:
wf\<^sub>t\<^sub>r\<^sub>m \<tau> \<and> (\<forall>f T. Fun f T \<sqsubseteq> \<tau> \<longrightarrow> 0 < arity f)
goal (1 subgoal):
1. \<Gamma> (fresh_pgwt S \<tau>) = \<tau>
[PROOF STEP]
proof (induction \<tau>)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>x. wf\<^sub>t\<^sub>r\<^sub>m (Var x) \<and> (\<forall>f T. Fun f T \<sqsubseteq> Var x \<longrightarrow> 0 < arity f) \<Longrightarrow> \<Gamma> (fresh_pgwt S (Var x)) = Var x
2. \<And>x1a x2. \<lbrakk>\<And>x2a. \<lbrakk>x2a \<in> set x2; wf\<^sub>t\<^sub>r\<^sub>m x2a \<and> (\<forall>f T. Fun f T \<sqsubseteq> x2a \<longrightarrow> 0 < arity f)\<rbrakk> \<Longrightarrow> \<Gamma> (fresh_pgwt S x2a) = x2a; wf\<^sub>t\<^sub>r\<^sub>m (Fun x1a x2) \<and> (\<forall>f T. Fun f T \<sqsubseteq> Fun x1a x2 \<longrightarrow> 0 < arity f)\<rbrakk> \<Longrightarrow> \<Gamma> (fresh_pgwt S (Fun x1a x2)) = Fun x1a x2
[PROOF STEP]
case (Var a)
[PROOF STATE]
proof (state)
this:
wf\<^sub>t\<^sub>r\<^sub>m (Var a) \<and> (\<forall>f T. Fun f T \<sqsubseteq> Var a \<longrightarrow> 0 < arity f)
goal (2 subgoals):
1. \<And>x. wf\<^sub>t\<^sub>r\<^sub>m (Var x) \<and> (\<forall>f T. Fun f T \<sqsubseteq> Var x \<longrightarrow> 0 < arity f) \<Longrightarrow> \<Gamma> (fresh_pgwt S (Var x)) = Var x
2. \<And>x1a x2. \<lbrakk>\<And>x2a. \<lbrakk>x2a \<in> set x2; wf\<^sub>t\<^sub>r\<^sub>m x2a \<and> (\<forall>f T. Fun f T \<sqsubseteq> x2a \<longrightarrow> 0 < arity f)\<rbrakk> \<Longrightarrow> \<Gamma> (fresh_pgwt S x2a) = x2a; wf\<^sub>t\<^sub>r\<^sub>m (Fun x1a x2) \<and> (\<forall>f T. Fun f T \<sqsubseteq> Fun x1a x2 \<longrightarrow> 0 < arity f)\<rbrakk> \<Longrightarrow> \<Gamma> (fresh_pgwt S (Fun x1a x2)) = Fun x1a x2
[PROOF STEP]
let ?P = "\<lambda>c. c \<notin> S \<and> \<Gamma> (Fun c []) = Var a \<and> public c"
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>x. wf\<^sub>t\<^sub>r\<^sub>m (Var x) \<and> (\<forall>f T. Fun f T \<sqsubseteq> Var x \<longrightarrow> 0 < arity f) \<Longrightarrow> \<Gamma> (fresh_pgwt S (Var x)) = Var x
2. \<And>x1a x2. \<lbrakk>\<And>x2a. \<lbrakk>x2a \<in> set x2; wf\<^sub>t\<^sub>r\<^sub>m x2a \<and> (\<forall>f T. Fun f T \<sqsubseteq> x2a \<longrightarrow> 0 < arity f)\<rbrakk> \<Longrightarrow> \<Gamma> (fresh_pgwt S x2a) = x2a; wf\<^sub>t\<^sub>r\<^sub>m (Fun x1a x2) \<and> (\<forall>f T. Fun f T \<sqsubseteq> Fun x1a x2 \<longrightarrow> 0 < arity f)\<rbrakk> \<Longrightarrow> \<Gamma> (fresh_pgwt S (Fun x1a x2)) = Fun x1a x2
[PROOF STEP]
let ?Q = "\<lambda>c. \<Gamma> (Fun c []) = Var a \<and> public c"
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>x. wf\<^sub>t\<^sub>r\<^sub>m (Var x) \<and> (\<forall>f T. Fun f T \<sqsubseteq> Var x \<longrightarrow> 0 < arity f) \<Longrightarrow> \<Gamma> (fresh_pgwt S (Var x)) = Var x
2. \<And>x1a x2. \<lbrakk>\<And>x2a. \<lbrakk>x2a \<in> set x2; wf\<^sub>t\<^sub>r\<^sub>m x2a \<and> (\<forall>f T. Fun f T \<sqsubseteq> x2a \<longrightarrow> 0 < arity f)\<rbrakk> \<Longrightarrow> \<Gamma> (fresh_pgwt S x2a) = x2a; wf\<^sub>t\<^sub>r\<^sub>m (Fun x1a x2) \<and> (\<forall>f T. Fun f T \<sqsubseteq> Fun x1a x2 \<longrightarrow> 0 < arity f)\<rbrakk> \<Longrightarrow> \<Gamma> (fresh_pgwt S (Fun x1a x2)) = Fun x1a x2
[PROOF STEP]
have " {c. ?Q c} - S = {c. ?P c}"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. {c. \<Gamma> (Fun c []) = Var a \<and> public c} - S = {c. c \<notin> S \<and> \<Gamma> (Fun c []) = Var a \<and> public c}
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
{c. \<Gamma> (Fun c []) = Var a \<and> public c} - S = {c. c \<notin> S \<and> \<Gamma> (Fun c []) = Var a \<and> public c}
goal (2 subgoals):
1. \<And>x. wf\<^sub>t\<^sub>r\<^sub>m (Var x) \<and> (\<forall>f T. Fun f T \<sqsubseteq> Var x \<longrightarrow> 0 < arity f) \<Longrightarrow> \<Gamma> (fresh_pgwt S (Var x)) = Var x
2. \<And>x1a x2. \<lbrakk>\<And>x2a. \<lbrakk>x2a \<in> set x2; wf\<^sub>t\<^sub>r\<^sub>m x2a \<and> (\<forall>f T. Fun f T \<sqsubseteq> x2a \<longrightarrow> 0 < arity f)\<rbrakk> \<Longrightarrow> \<Gamma> (fresh_pgwt S x2a) = x2a; wf\<^sub>t\<^sub>r\<^sub>m (Fun x1a x2) \<and> (\<forall>f T. Fun f T \<sqsubseteq> Fun x1a x2 \<longrightarrow> 0 < arity f)\<rbrakk> \<Longrightarrow> \<Gamma> (fresh_pgwt S (Fun x1a x2)) = Fun x1a x2
[PROOF STEP]
hence "infinite {c. ?P c}"
[PROOF STATE]
proof (prove)
using this:
{c. \<Gamma> (Fun c []) = Var a \<and> public c} - S = {c. c \<notin> S \<and> \<Gamma> (Fun c []) = Var a \<and> public c}
goal (1 subgoal):
1. infinite {c. c \<notin> S \<and> \<Gamma> (Fun c []) = Var a \<and> public c}
[PROOF STEP]
using Diff_infinite_finite[OF assms(1) infinite_typed_consts[of a]]
[PROOF STATE]
proof (prove)
using this:
{c. \<Gamma> (Fun c []) = Var a \<and> public c} - S = {c. c \<notin> S \<and> \<Gamma> (Fun c []) = Var a \<and> public c}
infinite ({c. \<Gamma> (Fun c []) = Var a \<and> public c} - S)
goal (1 subgoal):
1. infinite {c. c \<notin> S \<and> \<Gamma> (Fun c []) = Var a \<and> public c}
[PROOF STEP]
by metis
[PROOF STATE]
proof (state)
this:
infinite {c. c \<notin> S \<and> \<Gamma> (Fun c []) = Var a \<and> public c}
goal (2 subgoals):
1. \<And>x. wf\<^sub>t\<^sub>r\<^sub>m (Var x) \<and> (\<forall>f T. Fun f T \<sqsubseteq> Var x \<longrightarrow> 0 < arity f) \<Longrightarrow> \<Gamma> (fresh_pgwt S (Var x)) = Var x
2. \<And>x1a x2. \<lbrakk>\<And>x2a. \<lbrakk>x2a \<in> set x2; wf\<^sub>t\<^sub>r\<^sub>m x2a \<and> (\<forall>f T. Fun f T \<sqsubseteq> x2a \<longrightarrow> 0 < arity f)\<rbrakk> \<Longrightarrow> \<Gamma> (fresh_pgwt S x2a) = x2a; wf\<^sub>t\<^sub>r\<^sub>m (Fun x1a x2) \<and> (\<forall>f T. Fun f T \<sqsubseteq> Fun x1a x2 \<longrightarrow> 0 < arity f)\<rbrakk> \<Longrightarrow> \<Gamma> (fresh_pgwt S (Fun x1a x2)) = Fun x1a x2
[PROOF STEP]
hence "\<exists>c. ?P c"
[PROOF STATE]
proof (prove)
using this:
infinite {c. c \<notin> S \<and> \<Gamma> (Fun c []) = Var a \<and> public c}
goal (1 subgoal):
1. \<exists>c. c \<notin> S \<and> \<Gamma> (Fun c []) = Var a \<and> public c
[PROOF STEP]
using not_finite_existsD
[PROOF STATE]
proof (prove)
using this:
infinite {c. c \<notin> S \<and> \<Gamma> (Fun c []) = Var a \<and> public c}
infinite {a. ?P a} \<Longrightarrow> \<exists>a. ?P a
goal (1 subgoal):
1. \<exists>c. c \<notin> S \<and> \<Gamma> (Fun c []) = Var a \<and> public c
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
\<exists>c. c \<notin> S \<and> \<Gamma> (Fun c []) = Var a \<and> public c
goal (2 subgoals):
1. \<And>x. wf\<^sub>t\<^sub>r\<^sub>m (Var x) \<and> (\<forall>f T. Fun f T \<sqsubseteq> Var x \<longrightarrow> 0 < arity f) \<Longrightarrow> \<Gamma> (fresh_pgwt S (Var x)) = Var x
2. \<And>x1a x2. \<lbrakk>\<And>x2a. \<lbrakk>x2a \<in> set x2; wf\<^sub>t\<^sub>r\<^sub>m x2a \<and> (\<forall>f T. Fun f T \<sqsubseteq> x2a \<longrightarrow> 0 < arity f)\<rbrakk> \<Longrightarrow> \<Gamma> (fresh_pgwt S x2a) = x2a; wf\<^sub>t\<^sub>r\<^sub>m (Fun x1a x2) \<and> (\<forall>f T. Fun f T \<sqsubseteq> Fun x1a x2 \<longrightarrow> 0 < arity f)\<rbrakk> \<Longrightarrow> \<Gamma> (fresh_pgwt S (Fun x1a x2)) = Fun x1a x2
[PROOF STEP]
thus ?case
[PROOF STATE]
proof (prove)
using this:
\<exists>c. c \<notin> S \<and> \<Gamma> (Fun c []) = Var a \<and> public c
goal (1 subgoal):
1. \<Gamma> (fresh_pgwt S (Var a)) = Var a
[PROOF STEP]
using someI_ex[of ?P]
[PROOF STATE]
proof (prove)
using this:
\<exists>c. c \<notin> S \<and> \<Gamma> (Fun c []) = Var a \<and> public c
\<exists>x. x \<notin> S \<and> \<Gamma> (Fun x []) = Var a \<and> public x \<Longrightarrow> (SOME x. x \<notin> S \<and> \<Gamma> (Fun x []) = Var a \<and> public x) \<notin> S \<and> \<Gamma> (Fun (SOME x. x \<notin> S \<and> \<Gamma> (Fun x []) = Var a \<and> public x) []) = Var a \<and> public (SOME x. x \<notin> S \<and> \<Gamma> (Fun x []) = Var a \<and> public x)
goal (1 subgoal):
1. \<Gamma> (fresh_pgwt S (Var a)) = Var a
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<Gamma> (fresh_pgwt S (Var a)) = Var a
goal (1 subgoal):
1. \<And>x1a x2. \<lbrakk>\<And>x2a. \<lbrakk>x2a \<in> set x2; wf\<^sub>t\<^sub>r\<^sub>m x2a \<and> (\<forall>f T. Fun f T \<sqsubseteq> x2a \<longrightarrow> 0 < arity f)\<rbrakk> \<Longrightarrow> \<Gamma> (fresh_pgwt S x2a) = x2a; wf\<^sub>t\<^sub>r\<^sub>m (Fun x1a x2) \<and> (\<forall>f T. Fun f T \<sqsubseteq> Fun x1a x2 \<longrightarrow> 0 < arity f)\<rbrakk> \<Longrightarrow> \<Gamma> (fresh_pgwt S (Fun x1a x2)) = Fun x1a x2
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>x1a x2. \<lbrakk>\<And>x2a. \<lbrakk>x2a \<in> set x2; wf\<^sub>t\<^sub>r\<^sub>m x2a \<and> (\<forall>f T. Fun f T \<sqsubseteq> x2a \<longrightarrow> 0 < arity f)\<rbrakk> \<Longrightarrow> \<Gamma> (fresh_pgwt S x2a) = x2a; wf\<^sub>t\<^sub>r\<^sub>m (Fun x1a x2) \<and> (\<forall>f T. Fun f T \<sqsubseteq> Fun x1a x2 \<longrightarrow> 0 < arity f)\<rbrakk> \<Longrightarrow> \<Gamma> (fresh_pgwt S (Fun x1a x2)) = Fun x1a x2
[PROOF STEP]
case (Fun f T)
[PROOF STATE]
proof (state)
this:
\<lbrakk>?x2a2 \<in> set T; wf\<^sub>t\<^sub>r\<^sub>m ?x2a2 \<and> (\<forall>f T. Fun f T \<sqsubseteq> ?x2a2 \<longrightarrow> 0 < arity f)\<rbrakk> \<Longrightarrow> \<Gamma> (fresh_pgwt S ?x2a2) = ?x2a2
wf\<^sub>t\<^sub>r\<^sub>m (Fun f T) \<and> (\<forall>fa Ta. Fun fa Ta \<sqsubseteq> Fun f T \<longrightarrow> 0 < arity fa)
goal (1 subgoal):
1. \<And>x1a x2. \<lbrakk>\<And>x2a. \<lbrakk>x2a \<in> set x2; wf\<^sub>t\<^sub>r\<^sub>m x2a \<and> (\<forall>f T. Fun f T \<sqsubseteq> x2a \<longrightarrow> 0 < arity f)\<rbrakk> \<Longrightarrow> \<Gamma> (fresh_pgwt S x2a) = x2a; wf\<^sub>t\<^sub>r\<^sub>m (Fun x1a x2) \<and> (\<forall>f T. Fun f T \<sqsubseteq> Fun x1a x2 \<longrightarrow> 0 < arity f)\<rbrakk> \<Longrightarrow> \<Gamma> (fresh_pgwt S (Fun x1a x2)) = Fun x1a x2
[PROOF STEP]
have f: "0 < arity f"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. 0 < arity f
[PROOF STEP]
using Fun.prems fun_type_inv
[PROOF STATE]
proof (prove)
using this:
wf\<^sub>t\<^sub>r\<^sub>m (Fun f T) \<and> (\<forall>fa Ta. Fun fa Ta \<sqsubseteq> Fun f T \<longrightarrow> 0 < arity fa)
\<Gamma> ?t = Fun ?f ?T \<Longrightarrow> 0 < arity ?f
goal (1 subgoal):
1. 0 < arity f
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
0 < arity f
goal (1 subgoal):
1. \<And>x1a x2. \<lbrakk>\<And>x2a. \<lbrakk>x2a \<in> set x2; wf\<^sub>t\<^sub>r\<^sub>m x2a \<and> (\<forall>f T. Fun f T \<sqsubseteq> x2a \<longrightarrow> 0 < arity f)\<rbrakk> \<Longrightarrow> \<Gamma> (fresh_pgwt S x2a) = x2a; wf\<^sub>t\<^sub>r\<^sub>m (Fun x1a x2) \<and> (\<forall>f T. Fun f T \<sqsubseteq> Fun x1a x2 \<longrightarrow> 0 < arity f)\<rbrakk> \<Longrightarrow> \<Gamma> (fresh_pgwt S (Fun x1a x2)) = Fun x1a x2
[PROOF STEP]
have "\<And>t. t \<in> set T \<Longrightarrow> ?P t"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>t. t \<in> set T \<Longrightarrow> wf\<^sub>t\<^sub>r\<^sub>m t \<and> (\<forall>f T. Fun f T \<sqsubseteq> t \<longrightarrow> 0 < arity f)
[PROOF STEP]
using Fun.prems wf_trm_subtermeq term.le_less_trans Fun_param_is_subterm
[PROOF STATE]
proof (prove)
using this:
wf\<^sub>t\<^sub>r\<^sub>m (Fun f T) \<and> (\<forall>fa Ta. Fun fa Ta \<sqsubseteq> Fun f T \<longrightarrow> 0 < arity fa)
\<lbrakk>wf\<^sub>t\<^sub>r\<^sub>m ?t; ?s \<sqsubseteq> ?t\<rbrakk> \<Longrightarrow> wf\<^sub>t\<^sub>r\<^sub>m ?s
\<lbrakk>?x \<sqsubseteq> ?y; ?y \<sqsubset> ?z\<rbrakk> \<Longrightarrow> ?x \<sqsubset> ?z
?x \<in> set ?X \<Longrightarrow> ?x \<sqsubset> Fun ?f ?X
goal (1 subgoal):
1. \<And>t. t \<in> set T \<Longrightarrow> wf\<^sub>t\<^sub>r\<^sub>m t \<and> (\<forall>f T. Fun f T \<sqsubseteq> t \<longrightarrow> 0 < arity f)
[PROOF STEP]
by metis
[PROOF STATE]
proof (state)
this:
?t2 \<in> set T \<Longrightarrow> wf\<^sub>t\<^sub>r\<^sub>m ?t2 \<and> (\<forall>f T. Fun f T \<sqsubseteq> ?t2 \<longrightarrow> 0 < arity f)
goal (1 subgoal):
1. \<And>x1a x2. \<lbrakk>\<And>x2a. \<lbrakk>x2a \<in> set x2; wf\<^sub>t\<^sub>r\<^sub>m x2a \<and> (\<forall>f T. Fun f T \<sqsubseteq> x2a \<longrightarrow> 0 < arity f)\<rbrakk> \<Longrightarrow> \<Gamma> (fresh_pgwt S x2a) = x2a; wf\<^sub>t\<^sub>r\<^sub>m (Fun x1a x2) \<and> (\<forall>f T. Fun f T \<sqsubseteq> Fun x1a x2 \<longrightarrow> 0 < arity f)\<rbrakk> \<Longrightarrow> \<Gamma> (fresh_pgwt S (Fun x1a x2)) = Fun x1a x2
[PROOF STEP]
hence "\<And>t. t \<in> set T \<Longrightarrow> \<Gamma> (fresh_pgwt S t) = t"
[PROOF STATE]
proof (prove)
using this:
?t2 \<in> set T \<Longrightarrow> wf\<^sub>t\<^sub>r\<^sub>m ?t2 \<and> (\<forall>f T. Fun f T \<sqsubseteq> ?t2 \<longrightarrow> 0 < arity f)
goal (1 subgoal):
1. \<And>t. t \<in> set T \<Longrightarrow> \<Gamma> (fresh_pgwt S t) = t
[PROOF STEP]
using Fun.prems Fun.IH
[PROOF STATE]
proof (prove)
using this:
?t2 \<in> set T \<Longrightarrow> wf\<^sub>t\<^sub>r\<^sub>m ?t2 \<and> (\<forall>f T. Fun f T \<sqsubseteq> ?t2 \<longrightarrow> 0 < arity f)
wf\<^sub>t\<^sub>r\<^sub>m (Fun f T) \<and> (\<forall>fa Ta. Fun fa Ta \<sqsubseteq> Fun f T \<longrightarrow> 0 < arity fa)
\<lbrakk>?x2a2 \<in> set T; wf\<^sub>t\<^sub>r\<^sub>m ?x2a2 \<and> (\<forall>f T. Fun f T \<sqsubseteq> ?x2a2 \<longrightarrow> 0 < arity f)\<rbrakk> \<Longrightarrow> \<Gamma> (fresh_pgwt S ?x2a2) = ?x2a2
goal (1 subgoal):
1. \<And>t. t \<in> set T \<Longrightarrow> \<Gamma> (fresh_pgwt S t) = t
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
?t2 \<in> set T \<Longrightarrow> \<Gamma> (fresh_pgwt S ?t2) = ?t2
goal (1 subgoal):
1. \<And>x1a x2. \<lbrakk>\<And>x2a. \<lbrakk>x2a \<in> set x2; wf\<^sub>t\<^sub>r\<^sub>m x2a \<and> (\<forall>f T. Fun f T \<sqsubseteq> x2a \<longrightarrow> 0 < arity f)\<rbrakk> \<Longrightarrow> \<Gamma> (fresh_pgwt S x2a) = x2a; wf\<^sub>t\<^sub>r\<^sub>m (Fun x1a x2) \<and> (\<forall>f T. Fun f T \<sqsubseteq> Fun x1a x2 \<longrightarrow> 0 < arity f)\<rbrakk> \<Longrightarrow> \<Gamma> (fresh_pgwt S (Fun x1a x2)) = Fun x1a x2
[PROOF STEP]
hence "map \<Gamma> (map (fresh_pgwt S) T) = T"
[PROOF STATE]
proof (prove)
using this:
?t2 \<in> set T \<Longrightarrow> \<Gamma> (fresh_pgwt S ?t2) = ?t2
goal (1 subgoal):
1. map \<Gamma> (map (fresh_pgwt S) T) = T
[PROOF STEP]
by (induct T) auto
[PROOF STATE]
proof (state)
this:
map \<Gamma> (map (fresh_pgwt S) T) = T
goal (1 subgoal):
1. \<And>x1a x2. \<lbrakk>\<And>x2a. \<lbrakk>x2a \<in> set x2; wf\<^sub>t\<^sub>r\<^sub>m x2a \<and> (\<forall>f T. Fun f T \<sqsubseteq> x2a \<longrightarrow> 0 < arity f)\<rbrakk> \<Longrightarrow> \<Gamma> (fresh_pgwt S x2a) = x2a; wf\<^sub>t\<^sub>r\<^sub>m (Fun x1a x2) \<and> (\<forall>f T. Fun f T \<sqsubseteq> Fun x1a x2 \<longrightarrow> 0 < arity f)\<rbrakk> \<Longrightarrow> \<Gamma> (fresh_pgwt S (Fun x1a x2)) = Fun x1a x2
[PROOF STEP]
thus ?case
[PROOF STATE]
proof (prove)
using this:
map \<Gamma> (map (fresh_pgwt S) T) = T
goal (1 subgoal):
1. \<Gamma> (fresh_pgwt S (Fun f T)) = Fun f T
[PROOF STEP]
using fun_type[OF f]
[PROOF STATE]
proof (prove)
using this:
map \<Gamma> (map (fresh_pgwt S) T) = T
\<Gamma> (Fun f ?ts) = Fun f (map \<Gamma> ?ts)
goal (1 subgoal):
1. \<Gamma> (fresh_pgwt S (Fun f T)) = Fun f T
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
\<Gamma> (fresh_pgwt S (Fun f T)) = Fun f T
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<Gamma> (fresh_pgwt S \<tau>) = \<tau>
goal (1 subgoal):
1. \<Gamma> (fresh_pgwt S (\<Gamma> t)) = \<Gamma> t
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
wf\<^sub>t\<^sub>r\<^sub>m ?\<tau>4 \<and> (\<forall>f T. Fun f T \<sqsubseteq> ?\<tau>4 \<longrightarrow> 0 < arity f) \<Longrightarrow> \<Gamma> (fresh_pgwt S ?\<tau>4) = ?\<tau>4
goal (1 subgoal):
1. \<Gamma> (fresh_pgwt S (\<Gamma> t)) = \<Gamma> t
[PROOF STEP]
thus ?thesis
[PROOF STATE]
proof (prove)
using this:
wf\<^sub>t\<^sub>r\<^sub>m ?\<tau>4 \<and> (\<forall>f T. Fun f T \<sqsubseteq> ?\<tau>4 \<longrightarrow> 0 < arity f) \<Longrightarrow> \<Gamma> (fresh_pgwt S ?\<tau>4) = ?\<tau>4
goal (1 subgoal):
1. \<Gamma> (fresh_pgwt S (\<Gamma> t)) = \<Gamma> t
[PROOF STEP]
using assms(1) \<Gamma>_wf'[OF assms(2)] \<Gamma>_wf''
[PROOF STATE]
proof (prove)
using this:
wf\<^sub>t\<^sub>r\<^sub>m ?\<tau>4 \<and> (\<forall>f T. Fun f T \<sqsubseteq> ?\<tau>4 \<longrightarrow> 0 < arity f) \<Longrightarrow> \<Gamma> (fresh_pgwt S ?\<tau>4) = ?\<tau>4
finite S
wf\<^sub>t\<^sub>r\<^sub>m (\<Gamma> t)
Fun ?f ?T \<sqsubseteq> \<Gamma> ?t \<Longrightarrow> 0 < arity ?f
goal (1 subgoal):
1. \<Gamma> (fresh_pgwt S (\<Gamma> t)) = \<Gamma> t
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
\<Gamma> (fresh_pgwt S (\<Gamma> t)) = \<Gamma> t
goal:
No subgoals!
[PROOF STEP]
qed
|
{"llama_tokens": 7856, "file": "Stateful_Protocol_Composition_and_Typing_Typing_Result", "length": 43}
|
using VXI11
using Base.Test
|
{"hexsha": "848f14c4169bebbff7b43c1ee228069244814766", "size": 27, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "Keno/VXI11.jl", "max_stars_repo_head_hexsha": "279808d3f0a99e5d09da028dd322692087f7bf27", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2019-07-09T23:55:10.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-20T04:36:06.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "Keno/VXI11.jl", "max_issues_repo_head_hexsha": "279808d3f0a99e5d09da028dd322692087f7bf27", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "Keno/VXI11.jl", "max_forks_repo_head_hexsha": "279808d3f0a99e5d09da028dd322692087f7bf27", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-07-09T23:55:12.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-20T04:36:19.000Z", "avg_line_length": 13.5, "max_line_length": 15, "alphanum_fraction": 0.8518518519, "num_tokens": 8}
|
#!/usr/bin/env python3
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for package mediapy.
To run this test:
pip install -r requirements.txt
./mediapy_test.py
"""
import io
import os
import pathlib
import re
import tempfile
import unittest.mock as mock
from absl.testing import absltest
from absl.testing import parameterized
import IPython
import mediapy as media
import numpy as np
_TEST_TYPES = ['uint8', 'uint16', 'uint32', 'float32', 'float64']
_TEST_SHAPES1 = [(13, 21, 3), (14, 38, 2), (16, 21, 1), (18, 20), (17, 19)]
_TEST_SHAPES2 = [(128, 128, 3), (128, 160, 1), (160, 128), (64, 64, 3),
(64, 64)]
def _rms_diff(a, b) -> float:
"""Compute the root-mean-square of the difference between two arrays."""
a = np.array(a, dtype=np.float64)
b = np.array(b, dtype=np.float64)
if a.shape != b.shape:
raise ValueError(f'Shapes {a.shape} and {b.shape} do not match.')
return np.sqrt(np.mean(np.square(a - b)))
class MediapyTest(parameterized.TestCase):
"""Tests for mediapy package."""
def assert_all_equal(self, a, b):
if not np.all(np.asarray(a) == np.asarray(b)):
self.fail(f'{a} and {b} differ.')
def assert_all_close(self, a, b, **kwargs):
if not np.allclose(a, b, **kwargs):
self.fail(f'{a} and {b} are not close enough.')
def _check_similar(self, original_array, new_array, max_rms, msg=None):
"""Verifies that the rms error between two arrays is less than max_rms."""
self.assert_all_equal(original_array.shape, new_array.shape)
rms = _rms_diff(new_array, original_array)
self.assertLess(rms, max_rms, msg)
def test_chunked(self):
self.assertEqual(list(media._chunked(range(0), 3)), [])
self.assertEqual(list(media._chunked(range(1), 3)), [(0,)])
self.assertEqual(list(media._chunked(range(2), 3)), [(0, 1)])
self.assertEqual(list(media._chunked(range(3), 3)), [(0, 1, 2)])
self.assertEqual(list(media._chunked(range(4), 3)), [(0, 1, 2), (3,)])
self.assertEqual(list(media._chunked(range(5), 3)), [(0, 1, 2), (3, 4)])
self.assertEqual(list(media._chunked(range(0), 1)), [])
self.assertEqual(list(media._chunked(range(1), 1)), [(0,)])
self.assertEqual(list(media._chunked(range(2), 1)), [(0,), (1,)])
self.assertEqual(list(media._chunked(range(3), 1)), [(0,), (1,), (2,)])
self.assertEqual(list(media._chunked(range(0), None)), [])
self.assertEqual(list(media._chunked(range(1), None)), [(0,)])
self.assertEqual(list(media._chunked(range(2), None)), [(0, 1)])
self.assertEqual(list(media._chunked(range(3), None)), [(0, 1, 2)])
def test_peek_first_on_generator(self):
generator = range(1, 5)
first, generator = media.peek_first(generator)
self.assertEqual(first, 1)
self.assert_all_equal(tuple(generator), [1, 2, 3, 4])
def test_peek_first_on_container(self):
container = [1, 2, 3, 4]
first, container = media.peek_first(container)
self.assertEqual(first, 1)
self.assert_all_equal(tuple(container), [1, 2, 3, 4])
def test_run_string(self):
with mock.patch('sys.stdout', io.StringIO()) as mock_stdout:
media.run('echo "$((17 + 22))"')
self.assertEqual(mock_stdout.getvalue(), '39\n')
with mock.patch('sys.stdout', io.StringIO()) as mock_stdout:
media.run('/bin/bash -c "echo $((17 + 22))"')
self.assertEqual(mock_stdout.getvalue(), '39\n')
with self.assertRaisesRegex(RuntimeError, 'failed with code 3'):
media.run('exit 3')
def test_run_args_sequence(self):
with mock.patch('sys.stdout', io.StringIO()) as mock_stdout:
media.run(['/bin/bash', '-c', 'echo $((17 + 22))'])
self.assertEqual(mock_stdout.getvalue(), '39\n')
def test_to_type(self):
def check(src, dtype, expected):
output = media.to_type(src, dtype)
self.assertEqual(output.dtype.type, np.dtype(dtype).type)
self.assert_all_equal(output, expected)
max32 = 4_294_967_295
b = np.array([False, True, False])
self.assertEqual(b.dtype, bool)
check(b, np.uint8, [0, 255, 0])
check(b, np.uint16, [0, 65535, 0])
check(b, np.uint32, [0, max32, 0])
check(b, np.float32, [0.0, 1.0, 0.0])
check(b, np.float64, [0.0, 1.0, 0.0])
u8 = np.array([3, 255], dtype=np.uint8)
check(u8, 'uint8', [3, 255])
check(u8, 'uint16', [int(3 / 255 * 65535 + 0.5), 65535])
check(u8, 'uint32', [int(3 / 255 * max32 + 0.5), max32])
check(u8, 'float32', [np.float32(3 / 255), 1.0])
check(u8, 'float64', [3 / 255, 1.0])
u16 = np.array([57, 65535], dtype=np.uint16)
check(u16, np.uint8, [0, 255])
check(u16, np.uint16, [57, 65535])
check(u16, np.uint32, [int(57 / 65535 * max32 + 0.5), max32])
check(u16, np.float32, [np.float32(57 / 65535), 1.0])
check(u16, 'float', [57 / 65535, 1.0])
u32 = np.array([100_000, max32], dtype=np.uint32)
check(u32, 'uint8', [0, 255])
check(u32, 'uint16', [2, 65535])
check(u32, 'uint32', u32)
check(u32, 'float32', [np.float32(100_000 / max32), 1.0])
check(u32, 'float64', [100_000 / max32, 1.0])
f32 = np.array([0.0, 0.4, 1.0], dtype=np.float32)
check(f32, np.uint8, [0, int(np.float32(0.4) * 255 + 0.5), 255])
check(f32, np.uint16, [0, int(np.float32(0.4) * 65535 + 0.5), 65535])
check(f32, np.uint32, [0, int(np.float32(0.4) * max32 + 0.5), max32])
check(f32, np.float32, [0.0, np.float32(0.4), 1.0])
check(f32, np.float64, [0.0, np.float32(0.4), 1.0])
f64 = np.array([0.0, 0.4, 1.0], dtype=np.float64)
check(f64, np.uint8, [0, int(0.4 * 255 + 0.5), 255])
check(f64, np.uint16, [0, int(0.4 * 65535 + 0.5), 65535])
check(f64, np.uint32, [0, int(0.4 * max32 + 0.5), max32])
check(f64, np.float32, [0.0, np.float32(0.4), 1.0])
check(f64, np.float, [0.0, 0.4, 1.0])
# An array with data type 'uint64' is possible, but it is awkward to process
# exactly because it requires more than float64 intermediate precision.
def test_to_type_extreme_value(self):
types = ['uint8', 'uint16', 'uint32', 'uint64', 'float32', 'float64']
max_of_type = dict(
bool=True,
uint8=255,
uint16=65535,
uint32=4294967295,
uint64=18446744073709551615,
float32=1.0,
float64=1.0)
for src_dtype in types + ['bool']:
for dst_dtype in types:
for shape in [(), (1,), (2, 2)]:
src_value = max_of_type[src_dtype]
src = np.full(shape, src_value, dtype=src_dtype)
dst = media.to_type(src, dst_dtype)
dst_value = dst.flat[0]
expected_value = max_of_type[dst_dtype]
msg = f'{src_dtype} {dst_dtype} {shape} {src} {dst}'
self.assertEqual(dst.dtype, dst_dtype, msg=msg)
self.assertEqual(dst.shape, src.shape, msg=msg)
self.assertEqual(dst_value, expected_value, msg=msg)
def test_to_float01(self):
self.assert_all_close(
media.to_float01(np.array([0, 1, 128, 254, 255], dtype=np.uint8)),
[0 / 255, 1 / 255, 128 / 255, 254 / 255, 255 / 255])
self.assert_all_close(
media.to_float01(np.array([0, 1, 128, 254, 65535], dtype=np.uint16)),
[0 / 65535, 1 / 65535, 128 / 65535, 254 / 65535, 65535 / 65535])
a = np.array([0.0, 0.1, 0.5, 0.9, 1.0])
self.assertIs(media.to_float01(a), a)
a = np.array([0.0, 0.1, 0.5, 0.9, 1.0], dtype=np.float32)
self.assertIs(media.to_float01(a), a)
def test_to_uint8(self):
self.assert_all_equal(
media.to_uint8(np.array([0, 1, 128, 254, 255], dtype=np.uint8)),
[0, 1, 128, 254, 255])
self.assert_all_close(
media.to_uint8([-0.2, 0.0, 0.1, 0.5, 0.9, 1.0, 1.1]), [
0, 0,
int(0.1 * 255 + 0.5),
int(0.5 * 255 + 0.5),
int(0.9 * 255 + 0.5), 255, 255
])
def test_color_ramp_float(self):
shape = (2, 3)
image = media.color_ramp(shape=shape)
self.assert_all_equal(image.shape[:2], shape)
self.assert_all_close(image, [
[
[0.5 / shape[0], 0.5 / shape[1], 0.0],
[0.5 / shape[0], 1.5 / shape[1], 0.0],
[0.5 / shape[0], 2.5 / shape[1], 0.0],
],
[
[1.5 / shape[0], 0.5 / shape[1], 0.0],
[1.5 / shape[0], 1.5 / shape[1], 0.0],
[1.5 / shape[0], 2.5 / shape[1], 0.0],
],
])
def test_color_ramp_uint8(self):
shape = (1, 3)
image = media.color_ramp(shape=shape, dtype=np.uint8)
self.assert_all_equal(image.shape[:2], shape)
expected = [[
[int(0.5 / shape[0] * 255 + 0.5),
int(0.5 / shape[1] * 255 + 0.5), 0],
[int(0.5 / shape[0] * 255 + 0.5),
int(1.5 / shape[1] * 255 + 0.5), 0],
[int(0.5 / shape[0] * 255 + 0.5),
int(2.5 / shape[1] * 255 + 0.5), 0],
]]
self.assert_all_equal(image, expected)
@parameterized.parameters(np.uint8, 'uint8', 'float32')
def test_moving_circle(self, dtype):
video = media.moving_circle(shape=(256, 256), num_images=10, dtype=dtype)
self.assert_all_equal(video.shape, (10, 256, 256, 3))
mean_image = np.mean(video, axis=0)
expected_mean = 0.329926 if dtype == 'float32' else 84.295
self.assertAlmostEqual(np.std(mean_image), expected_mean, delta=0.001)
def test_rgb_yuv_roundtrip(self):
image = np.array(
[[0, 0, 0], [255, 0, 0], [0, 255, 0], [0, 0, 255], [255, 255, 0],
[0, 255, 255], [255, 0, 255], [255, 255, 255], [128, 128, 128]],
dtype=np.uint8)
new = media.to_uint8(media.rgb_from_yuv(media.yuv_from_rgb(image)))
self.assert_all_close(image, new, atol=1)
def test_rgb_ycbcr_roundtrip(self):
image = np.array(
[[0, 0, 0], [255, 0, 0], [0, 255, 0], [0, 0, 255], [255, 255, 0],
[0, 255, 255], [255, 0, 255], [255, 255, 255], [128, 128, 128]],
dtype=np.uint8)
new = media.to_uint8(media.rgb_from_ycbcr(media.ycbcr_from_rgb(image)))
self.assert_all_close(image, new, atol=1)
def test_pil_image(self):
im = media._pil_image(
np.array([[[10, 11, 12], [40, 41, 42]]], dtype=np.uint8))
self.assertEqual(im.width, 2)
self.assertEqual(im.height, 1)
self.assertEqual(im.mode, 'RGB')
a = np.array(im)
self.assert_all_equal(a.shape, (1, 2, 3))
self.assert_all_equal(a, [[[10, 11, 12], [40, 41, 42]]])
@parameterized.parameters(zip(_TEST_TYPES, _TEST_SHAPES1))
def test_resize_image(self, str_dtype, shape):
dtype = np.dtype(str_dtype)
def create_image(shape):
image = media.color_ramp(shape[:2], dtype=dtype)
return image.mean(
axis=-1).astype(dtype) if len(shape) == 2 else image[..., :shape[2]]
image = create_image(shape)
self.assertEqual(image.dtype, dtype)
new_shape = (17, 19) + shape[2:]
new_image = media.resize_image(image, new_shape[:2])
self.assertEqual(new_image.dtype, dtype)
expected_image = create_image(new_shape)
atol = 0.0 if new_shape == shape else 0.015
self.assert_all_close(
media.to_float01(new_image),
media.to_float01(expected_image),
atol=atol)
@parameterized.parameters(zip(_TEST_TYPES, _TEST_SHAPES2))
def test_resize_video(self, str_dtype, shape):
dtype = np.dtype(str_dtype)
def create_video(shape, num_images=5):
video = media.moving_circle(shape[:2], num_images, dtype=dtype)
return video.mean(
axis=-1).astype(dtype) if len(shape) == 2 else video[..., :shape[2]]
video = create_video(shape)
self.assertEqual(video.dtype, dtype)
new_shape = (17, 19) + shape[2:]
new_video = media.resize_video(video, new_shape[:2])
self.assertEqual(new_video.dtype, dtype)
expected_video = create_video(new_shape)
self._check_similar(
media.to_float01(new_video),
media.to_float01(expected_video),
max_rms=(0.0 if new_shape == shape else 0.07))
def test_read_contents(self):
data = b'Test data'
temp_file = self.create_tempfile(content=data)
new_data = media.read_contents(temp_file)
self.assertEqual(new_data, data)
new_data = media.read_contents(pathlib.Path(temp_file))
self.assertEqual(new_data, data)
def test_read_via_local_file_on_local_file(self):
with tempfile.TemporaryDirectory() as directory_name:
filename = os.path.join(directory_name, 'file')
with open(filename, 'w') as f:
f.write('text')
with media.read_via_local_file(filename) as local_filename:
self.assertEqual(local_filename, filename)
def test_write_via_local_file_on_local_file(self):
with tempfile.TemporaryDirectory() as directory_name:
filename = os.path.join(directory_name, 'file')
with media.write_via_local_file(filename) as local_filename:
self.assertEqual(local_filename, filename)
@parameterized.parameters('uint8', 'uint16')
def test_image_write_read_roundtrip(self, dtype):
image = media.color_ramp((27, 63), dtype=dtype)
if dtype == 'uint16':
# Unfortunately PIL supports only single-channel 16-bit images for now.
image = image[..., 0]
with tempfile.TemporaryDirectory() as directory_name:
path = pathlib.Path(directory_name) / 'test.png'
media.write_image(path, image)
new_image = media.read_image(path, dtype=dtype)
self.assert_all_equal(image.shape, new_image.shape)
self.assertEqual(image.dtype, new_image.dtype)
self.assert_all_equal(image, new_image)
def test_write_image(self):
image = media.color_ramp(shape=(500, 500), dtype=np.uint8)
np.random.seed(1)
image += np.random.randint(0, 10, size=image.shape, dtype=np.uint8)
def get_num_bytes(**kwargs):
with tempfile.TemporaryDirectory() as directory_name:
filename = os.path.join(directory_name, 'test.png')
media.write_image(filename, image, **kwargs)
return os.path.getsize(filename)
self.assertAlmostEqual(get_num_bytes(), 383588, delta=300)
self.assertAlmostEqual(get_num_bytes(optimize=True), 382909, delta=300)
def test_to_rgb(self):
a = np.array([[-0.2, 0.0, 0.2, 0.8, 1.0, 1.2]])
gray_color = lambda x: [x, x, x]
self.assert_all_close(
media.to_rgb(a), [[
gray_color(0.0 / 1.4),
gray_color(0.2 / 1.4),
gray_color(0.4 / 1.4),
gray_color(1.0 / 1.4),
gray_color(1.2 / 1.4),
gray_color(1.4 / 1.4),
]],
atol=0.002)
self.assert_all_close(
media.to_rgb(a, vmin=0.0, vmax=1.0), [[
gray_color(0.0),
gray_color(0.0),
gray_color(0.2),
gray_color(0.8),
gray_color(1.0),
gray_color(1.0),
]],
atol=0.002)
a = np.array([-0.4, 0.0, 0.2])
self.assert_all_close(
media.to_rgb(a, vmin=-1.0, vmax=1.0, cmap='bwr'),
[[0.596078, 0.596078, 1.0], [1.0, 0.996078, 0.996078], [1.0, 0.8, 0.8]],
atol=0.002)
@parameterized.parameters('uint8', 'uint16')
def test_compress_decompress_image_roundtrip(self, dtype):
image = media.color_ramp((27, 63), dtype=dtype)
if dtype == 'uint16':
# Unfortunately PIL supports only single-channel 16-bit images for now.
image = image[..., 0]
data = media.compress_image(image)
new_image = media.decompress_image(data, dtype=dtype)
self.assertEqual(image.shape, new_image.shape)
self.assertEqual(image.dtype, new_image.dtype)
self.assert_all_equal(image, new_image)
def test_show_image(self):
htmls = []
with mock.patch('IPython.display.display', htmls.append):
media.show_image(media.color_ramp())
self.assertLen(htmls, 1)
self.assertIsInstance(htmls[0], IPython.display.HTML)
self.assertLen(re.findall('(?s)<table', htmls[0].data), 1)
self.assertRegex(htmls[0].data, '(?s)<img width=[^<>]*/>')
self.assertLen(re.findall('(?s)<img', htmls[0].data), 1)
def test_show_save_image(self):
with tempfile.TemporaryDirectory() as directory_name:
with media.show_save.to_dir(directory_name):
with mock.patch('IPython.display.display'):
media.show_images({'ramp': media.color_ramp((128, 128))})
filename = os.path.join(directory_name, 'ramp.png')
self.assertTrue(os.path.isfile(filename))
self.assertBetween(os.path.getsize(filename), 200, 1000)
def test_show_image_downsampled(self):
np.random.seed(1)
image = np.random.rand(256, 256, 3)
for downsample in (False, True):
htmls = []
with mock.patch('IPython.display.display', htmls.append):
media.show_image(image, height=64, downsample=downsample)
size_min_max = (10_000, 20_000) if downsample else (200_000, 300_000)
self.assertBetween(len(htmls[0].data), *size_min_max)
def test_show_images_list(self):
htmls = []
with mock.patch('IPython.display.display', htmls.append):
media.show_images([media.color_ramp()] * 2)
self.assertLen(htmls, 1)
self.assertIsInstance(htmls[0], IPython.display.HTML)
self.assertLen(re.findall('(?s)<table', htmls[0].data), 1)
self.assertLen(re.findall('(?s)<img', htmls[0].data), 2)
def test_show_images_dict(self):
htmls = []
with mock.patch('IPython.display.display', htmls.append):
media.show_images({
'title1': media.color_ramp(),
'title2': media.color_ramp()
})
self.assertLen(htmls, 1)
self.assertIsInstance(htmls[0], IPython.display.HTML)
self.assertLen(re.findall('(?s)<table', htmls[0].data), 1)
self.assertRegex(htmls[0].data, '(?s)title1.*<img .*title2.*<img ')
self.assertLen(re.findall('(?s)<img', htmls[0].data), 2)
def test_show_images_over_multiple_rows(self):
htmls = []
with mock.patch('IPython.display.display', htmls.append):
media.show_images([media.color_ramp()] * 5, columns=2)
self.assertLen(htmls, 1)
self.assertIsInstance(htmls[0], IPython.display.HTML)
self.assertLen(re.findall('(?s)<table', htmls[0].data), 3)
self.assertLen(re.findall('(?s)<img', htmls[0].data), 5)
@parameterized.parameters(False, True)
def test_video_non_streaming_write_read_roundtrip(self, use_generator):
shape = (240, 320)
num_images = 10
fps = 40
qp = 20
original_video = media.to_uint8(media.moving_circle(shape, num_images))
video = (
image for image in original_video) if use_generator else original_video
with tempfile.TemporaryDirectory() as directory_name:
filename = os.path.join(directory_name, 'test.mp4')
media.write_video(filename, video, fps=fps, qp=qp)
new_video = media.read_video(filename)
self._check_similar(original_video, new_video, 3.0)
def test_video_streaming_write_read_roundtrip(self):
shape = (62, 744)
num_images = 20
fps = 120
bps = 400_000
with tempfile.TemporaryDirectory() as directory_name:
filename = os.path.join(directory_name, 'test.mp4')
images = []
with media.VideoWriter(filename, shape, fps=fps, bps=bps) as writer:
for image in media.moving_circle(shape, num_images):
image_uint8 = media.to_uint8(image)
writer.add_image(image_uint8)
images.append(image_uint8)
with media.VideoReader(filename) as reader:
self.assertEqual(reader.num_images, num_images)
self.assert_all_equal(reader.shape, shape)
self.assertEqual(reader.fps, fps)
self.assertBetween(reader.bps, 100_000, 500_000)
self.assertEqual(reader.metadata.num_images, reader.num_images)
self.assertEqual(reader.metadata.shape, reader.shape)
self.assertEqual(reader.metadata.fps, reader.fps)
self.assertEqual(reader.metadata.bps, reader.bps)
for index, new_image in enumerate(reader):
self._check_similar(images[index], new_image, 7.0, f'index={index}')
def test_video_streaming_read_write(self):
shape = (400, 400)
num_images = 4
fps = 60
bps = 40_000_000
video = media.to_uint8(media.moving_circle(shape, num_images))
with tempfile.TemporaryDirectory() as directory_name:
filename1 = os.path.join(directory_name, 'test1.mp4')
filename2 = os.path.join(directory_name, 'test2.mp4')
media.write_video(filename1, video, fps=fps, bps=bps)
with media.VideoReader(filename1) as reader:
with media.VideoWriter(
filename2, reader.shape, fps=reader.fps, bps=reader.bps,
encoded_format='yuv420p') as writer:
for image in reader:
writer.add_image(image)
new_video = media.read_video(filename2)
self._check_similar(video, new_video, 3.0)
def test_video_read_write_10bit(self):
shape = (256, 256)
num_images = 4
fps = 60
bps = 40_000_000
horizontal_gray_ramp = media.to_uint(
np.indices(shape)[1] / shape[1], np.uint16)
video = np.broadcast_to(horizontal_gray_ramp, (num_images, *shape))
with tempfile.TemporaryDirectory() as directory_name:
filename = os.path.join(directory_name, 'test3.mp4')
media.write_video(
filename, video, fps=fps, bps=bps, encoded_format='yuv420p10le')
new_video = media.read_video(
filename, dtype=np.uint16, output_format='gray')
self.assertEqual(new_video.dtype, np.uint16)
value_1_of_10bit_encoded_in_16bits = 64
self._check_similar(
video, new_video, max_rms=value_1_of_10bit_encoded_in_16bits * 0.8)
def test_video_read_write_vp9(self):
video = media.moving_circle((256, 256), num_images=4, dtype=np.uint8)
fps = 60
bps = 40_000_000
with tempfile.TemporaryDirectory() as directory_name:
path = pathlib.Path(directory_name) / 'test4.mp4'
media.write_video(path, video, fps=fps, bps=bps, codec='vp9')
new_video = media.read_video(path)
self.assertEqual(new_video.dtype, np.uint8)
self._check_similar(video, new_video, max_rms=5.0)
def test_video_read_write_odd_dimensions(self):
video = media.moving_circle((35, 97), num_images=4, dtype=np.uint8)
fps = 60
bps = 40_000_000
with tempfile.TemporaryDirectory() as directory_name:
path = pathlib.Path(directory_name) / 'test5.mp4'
media.write_video(path, video, fps=fps, bps=bps)
new_video = media.read_video(path)
self.assertEqual(new_video.dtype, np.uint8)
self._check_similar(video, new_video, max_rms=5.0)
def test_compress_decompress_video_roundtrip(self):
video = media.moving_circle((28, 66), num_images=10, dtype=np.uint8)
data = media.compress_video(video)
new_video = media.decompress_video(data)
self.assertEqual(video.shape, new_video.shape)
self.assertEqual(video.dtype, new_video.dtype)
self._check_similar(video, new_video, max_rms=8.0)
def test_html_from_compressed_video(self):
shape = (240, 320)
video = media.moving_circle(shape, 10)
text = media.html_from_compressed_video(
media.compress_video(video), shape[1], shape[0])
self.assertGreater(len(text), 2_000)
self.assertContainsInOrder(
['<video', '<source src="data', 'type="video/mp4"/>', '</video>'], text)
def test_show_video(self):
htmls = []
with mock.patch('IPython.display.display', htmls.append):
media.show_video(media.moving_circle())
self.assertLen(htmls, 1)
self.assertIsInstance(htmls[0], IPython.display.HTML)
self.assertLen(re.findall('(?s)<video', htmls[0].data), 1)
self.assertRegex(htmls[0].data, '(?s)<video .*>.*</video>')
def test_show_video_gif(self):
htmls = []
with mock.patch('IPython.display.display', htmls.append):
media.show_video(media.moving_circle(), codec='gif')
self.assertLen(htmls, 1)
self.assertIsInstance(htmls[0], IPython.display.HTML)
self.assertContainsInOrder(['<img', 'src="data:image/gif'], htmls[0].data)
def test_show_save_video(self):
video = media.moving_circle((32, 32), num_images=10)
with tempfile.TemporaryDirectory() as directory_name:
directory_path = pathlib.Path(directory_name)
with media.show_save.to_dir(directory_path):
with mock.patch('IPython.display.display'):
media.show_videos({'video0': video, 'video1': video})
for i in range(2):
path = directory_path / f'video{i}.mp4'
self.assertTrue(path.is_file())
self.assertBetween(path.stat().st_size, 1500, 3000)
def test_show_video_downsampled(self):
np.random.seed(1)
video = np.random.rand(5, 64, 128, 3)
for downsample in (False, True):
htmls = []
with mock.patch('IPython.display.display', htmls.append):
media.show_video(video, height=32, downsample=downsample)
size_min_max = (8_000, 15_000) if downsample else (40_000, 60_000)
self.assertBetween(len(htmls[0].data), *size_min_max)
def test_show_videos_list(self):
htmls = []
with mock.patch('IPython.display.display', htmls.append):
media.show_videos([media.moving_circle()] * 2)
self.assertLen(htmls, 1)
self.assertIsInstance(htmls[0], IPython.display.HTML)
self.assertLen(re.findall('(?s)<table', htmls[0].data), 1)
self.assertLen(re.findall('(?s)<video', htmls[0].data), 2)
def test_show_videos_dict(self):
htmls = []
with mock.patch('IPython.display.display', htmls.append):
media.show_videos({
'title1': media.moving_circle(),
'title2': media.moving_circle(),
})
self.assertLen(htmls, 1)
self.assertIsInstance(htmls[0], IPython.display.HTML)
self.assertLen(re.findall('(?s)<table', htmls[0].data), 1)
self.assertRegex(htmls[0].data, '(?s)title1.*<video.*title2.*<video')
self.assertLen(re.findall('(?s)<video', htmls[0].data), 2)
def test_show_videos_over_multiple_rows(self):
htmls = []
with mock.patch('IPython.display.display', htmls.append):
media.show_videos([media.moving_circle()] * 12, columns=3)
self.assertLen(htmls, 1)
self.assertIsInstance(htmls[0], IPython.display.HTML)
self.assertLen(re.findall('(?s)<table', htmls[0].data), 4)
self.assertLen(re.findall('(?s)<video', htmls[0].data), 12)
if __name__ == '__main__':
absltest.main()
|
{"hexsha": "b15cee1f520fd65cc9207384183079ad04c5c6c1", "size": 26574, "ext": "py", "lang": "Python", "max_stars_repo_path": "mediapy_test.py", "max_stars_repo_name": "hhoppe/mediapy", "max_stars_repo_head_hexsha": "8a31181da5eab219bde30b1033f8813b6dc3b396", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mediapy_test.py", "max_issues_repo_name": "hhoppe/mediapy", "max_issues_repo_head_hexsha": "8a31181da5eab219bde30b1033f8813b6dc3b396", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mediapy_test.py", "max_forks_repo_name": "hhoppe/mediapy", "max_forks_repo_head_hexsha": "8a31181da5eab219bde30b1033f8813b6dc3b396", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.0210843373, "max_line_length": 80, "alphanum_fraction": 0.6470234063, "include": true, "reason": "import numpy", "num_tokens": 7880}
|
import unittest
import numpy as np
from polynomials_on_simplices.linalg.vector_space_projection import (
subspace_projection_map, vector_oblique_projection_2, vector_projection, vector_rejection)
class TestVectorProjection(unittest.TestCase):
def test_projection(self):
a = np.array([1.3, 1.5, 0])
b = np.array([1, 0, 0])
self.assertTrue(np.array_equal(vector_projection(a, b), np.array([1.3, 0.0, 0.0])))
self.assertTrue(np.array_equal(vector_rejection(a, b), np.array([0.0, 1.5, 0.0])))
def test_oblique_projection(self):
a = np.array([1.0, 1.0])
b = np.array([2.0, 0.0])
c = np.array([1.0, 1.0])
self.assertTrue(np.array_equal(vector_oblique_projection_2(a, b, c), np.array([0.0, 0.0])))
a = np.array([1.0, 1.0])
b = np.array([2.0, 0.0])
c = np.array([1.0, 2.0])
self.assertTrue(np.array_equal(vector_oblique_projection_2(a, b, c), np.array([0.5, 0.0])))
class TestSubspaceProjection(unittest.TestCase):
def test_1d_in_2d(self):
# Test projection onto a 1-dimensional subspace in 2D
origin = np.random.rand(2)
basis = np.random.random_sample((2, 1))
point = np.random.rand(2)
point_projected = subspace_projection_map(basis, origin)(point)
# The difference between the point and it's projection should be orthogonal to all
# basis vectors spanning the subspace
v = point_projected - point
assert np.dot(v, basis[:, 0]) < 1e-12
def test_1d_in_3d(self):
# Test projection onto a 1-dimensional subspace in 3D
origin = np.random.rand(3)
basis = np.random.random_sample((3, 1))
point = np.random.rand(3)
point_projected = subspace_projection_map(basis, origin)(point)
# The difference between the point and it's projection should be orthogonal to all
# basis vectors spanning the subspace
v = point_projected - point
assert np.dot(v, basis[:, 0]) < 1e-12
def test_2d_in_3d(self):
# Test projection onto a 2-dimensional subspace in 3D
origin = np.random.rand(3)
basis = np.random.random_sample((3, 2))
point = np.random.rand(3)
point_projected = subspace_projection_map(basis, origin)(point)
# The difference between the point and it's projection should be orthogonal to all
# basis vectors spanning the subspace
v = point_projected - point
assert np.dot(v, basis[:, 0]) < 1e-12
assert np.dot(v, basis[:, 1]) < 1e-12
def test_3d_in_5d(self):
# Test projection onto a 3-dimensional subspace in 5D
origin = np.random.rand(5)
basis = np.random.random_sample((5, 3))
point = np.random.rand(5)
point_projected = subspace_projection_map(basis, origin)(point)
# The difference between the point and it's projection should be orthogonal to all
# basis vectors spanning the subspace
v = point_projected - point
assert np.dot(v, basis[:, 0]) < 1e-12
assert np.dot(v, basis[:, 1]) < 1e-12
assert np.dot(v, basis[:, 2]) < 1e-12
if __name__ == '__main__':
unittest.main()
|
{"hexsha": "92e56318d1000f06cd15751d94643b2873edcfe8", "size": 3213, "ext": "py", "lang": "Python", "max_stars_repo_path": "polynomials_on_simplices/linalg/test/vector_space_projection_test.py", "max_stars_repo_name": "FAndersson/polynomials_on_simplices", "max_stars_repo_head_hexsha": "f015a4772c817bfa99b0d6b726667a38a174b064", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-17T11:41:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-17T11:41:21.000Z", "max_issues_repo_path": "polynomials_on_simplices/linalg/test/vector_space_projection_test.py", "max_issues_repo_name": "FAndersson/polynomials_on_simplices", "max_issues_repo_head_hexsha": "f015a4772c817bfa99b0d6b726667a38a174b064", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "polynomials_on_simplices/linalg/test/vector_space_projection_test.py", "max_forks_repo_name": "FAndersson/polynomials_on_simplices", "max_forks_repo_head_hexsha": "f015a4772c817bfa99b0d6b726667a38a174b064", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.7108433735, "max_line_length": 99, "alphanum_fraction": 0.6374105198, "include": true, "reason": "import numpy", "num_tokens": 892}
|
\input{header.tex}
%\setlength{\headsep}{-10pt}
\setlength{\parskip}{0.2em}
%\setlength{\textheight}{11 in}
\setlength{\skip\footins}{20pt}
% opening
\title{Plotting large datasets, Part 3: Advanced experiments}
\author{Colin Leach, March 2020}
\date{\vspace{-3ex}}
\hyphenpenalty=1000
\begin{document}
\maketitle
This should really be called ``Things I don't understand yet''. It won't be much use for 400B work, but I've posted it for the tiny minority who might be interested.
Getting away from matplotlib, there are some newer\footnote{In other words, typically less finished and poorly documented} options which use the power of modern graphics cards to handle large numbers of points. More powerful, steeper learning curve.
\section{datashader}
Websites: \url{https://github.com/holoviz/datashader} and \url{https://datashader.org}
This needs the data to be arranged columnwise in a (single) pandas dataframe; fairly typical for modern plotting packages. Used in isolation, datashader will make a 2D image. To add axes, labels, etc, it needs to be hooked into a suitable browser-based package such as Bokeh (usually) or Plotly (under development). This can mean a LOT of extra packages to install from conda or pip and lots of potential conflicts\footnote{The recent release of Bokeh 2.0 broke datashader 0.10.0. For now, use Bokeh 1.4.0 and hope datashader 0.13 is released soon. The developer says he's working on it while in self-isolation after flying home from Covid-hit Spain.}
It was a fight for me to get something working. Documentation is extensive, but sometimes out of date and actively misleading (it took over an hour to change the colormap at about the 20th attempt).
The best result so far starts with a bunch of imports:
\lstdefinestyle{py}{
belowcaptionskip=1\baselineskip,
breaklines=true,
frame=L,
xleftmargin=\parindent,
language=Python,
showstringspaces=false,
basicstyle=\footnotesize\ttfamily,
keywordstyle=\bfseries\color{green!40!black},
commentstyle=\itshape\color{purple!40!black},
identifierstyle=\color{blue},
stringstyle=\color{orange},
}
\lstset{style=py}
\begin{lstlisting}
import pandas as pd
import numpy as np
from matplotlib import cm # use the same colormap as other examples
import holoviews as hv
import holoviews.operation.datashader as hd
hv.extension("bokeh")
hv.output(backend="bokeh")
import datashader as ds
\end{lstlisting}
Then massage the data into the right format and plot it:
\begin{lstlisting}
# convert np.array to pandas dataframe
points_df = pd.DataFrame(disks.T, columns=('x (kpc)','y (kpc)','z (kpc)'))
# convert df to Holoviews points
points = hv.Points(points_df)
# plot in Bokeh
hd.datashade(points, cmap=cm.magma).opts(height=600, width=600)
\end{lstlisting}
The results aren't terrible (see below), but we've still to make the cmap logarithmic, fix the font sizes and add a title. All possible, but perhaps another day...
Meanwhile, the best feature is it lets you (in Jupyter) pan with mouse drags and zoom with the mouse wheel or box selection. Matplotlib can't do that so easily (though the ``\%matplotlib notebook'' IPython magic is an option worth knowing about).
{\centering \includegraphics[scale=0.65]{bokeh_datashade} \par}
\section{ipyvolume}
Works natively with 3D data. Only in the browser as it relies on WebGL rendering and Javascript.
Websites: \url{https://github.com/maartenbreddels/ipyvolume} and \url{https://ipyvolume.readthedocs.io/en/latest/}
Spectacular when the developer (Maarten Breddels, a Dutch astronomer) demonstrates it at conferences, not so easy for the rest of us. I totally failed with this about a year ago, still psyching myself up for another attempt. Anyone else tried it?
\section{All things OpenGL}
Now we're at the heavy end of the options. With pyqt5 + (pyopengl or vispy), an experienced graphics programmer with unlimited time can do virtually anything. Though the same person would probably ignore Python and use C++ with GLSL; probably wise, because the C++ libraries at least have excellent documentation.
I played with this a bit over spring break. At the current rate of progress, I may have a crude prototype working by the end of the Spring 2021 semester (but no promises).
\end{document}
|
{"hexsha": "100f3c07efbc37d1e98c40bc031bbbf4d8595871", "size": 4256, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "animations/howto/advanced_experiments.tex", "max_stars_repo_name": "colinleach/400B_Leach", "max_stars_repo_head_hexsha": "656abe04237d7a8de2cf56e9bfe986c333c62739", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-03-16T12:46:02.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-16T12:46:02.000Z", "max_issues_repo_path": "animations/howto/advanced_experiments.tex", "max_issues_repo_name": "colinleach/400B_Leach", "max_issues_repo_head_hexsha": "656abe04237d7a8de2cf56e9bfe986c333c62739", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "animations/howto/advanced_experiments.tex", "max_forks_repo_name": "colinleach/400B_Leach", "max_forks_repo_head_hexsha": "656abe04237d7a8de2cf56e9bfe986c333c62739", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.8, "max_line_length": 651, "alphanum_fraction": 0.7756109023, "num_tokens": 1094}
|
"""
This module provies a RHESSI `~sunpy.timeseries.TimeSeries` source.
"""
import datetime
import itertools
from collections import OrderedDict
import matplotlib.dates
import matplotlib.pyplot as plt
import numpy as np
from pandas import DataFrame
import astropy.units as u
from astropy.time import TimeDelta
import sunpy.io
from sunpy.time import parse_time
from sunpy.timeseries.timeseriesbase import GenericTimeSeries
from sunpy.util.metadata import MetaDict
from sunpy.visualization import peek_show
__all__ = ['RHESSISummaryTimeSeries']
def uncompress_countrate(compressed_countrate):
"""
Convert the compressed count rate inside of observing summary file from a
compressed byte to a true count rate.
Parameters
----------
compressed_countrate : `byte` array
A compressed count rate returned from an observing summary file.
References
----------
`Hsi_obs_summ_decompress.pro <https://hesperia.gsfc.nasa.gov/ssw/hessi/idl/qlook_archive/hsi_obs_summ_decompress.pro>`_
"""
# Ensure uncompressed counts are between 0 and 255
if (compressed_countrate.min() < 0) or (compressed_countrate.max() > 255):
raise ValueError(
f'Exepected uncompressed counts {compressed_countrate} to in range 0-255')
# TODO Must be a better way than creating entire lookup table on each call
ll = np.arange(0, 16, 1)
lkup = np.zeros(256, dtype='int')
_sum = 0
for i in range(0, 16):
lkup[16 * i:16 * (i + 1)] = ll * 2 ** i + _sum
if i < 15:
_sum = lkup[16 * (i + 1) - 1] + 2 ** i
return lkup[compressed_countrate]
def parse_observing_summary_hdulist(hdulist):
"""
Parse a RHESSI observation summary file.
Parameters
----------
hdulist : `list`
The HDU list from the fits file.
Returns
-------
out : `dict`
Returns a dictionary.
"""
header = hdulist[0].header
reference_time_ut = parse_time(hdulist[5].data.field('UT_REF')[0],
format='utime')
time_interval_sec = hdulist[5].data.field('TIME_INTV')[0]
# label_unit = fits[5].data.field('DIM1_UNIT')[0]
# labels = fits[5].data.field('DIM1_IDS')
labels = ['3 - 6 keV', '6 - 12 keV', '12 - 25 keV', '25 - 50 keV',
'50 - 100 keV', '100 - 300 keV', '300 - 800 keV',
'800 - 7000 keV', '7000 - 20000 keV']
# The data stored in the fits file are "compressed" countrates stored as
# one byte
compressed_countrate = np.array(hdulist[6].data.field('countrate'))
countrate = uncompress_countrate(compressed_countrate)
dim = np.array(countrate[:, 0]).size
time_array = parse_time(reference_time_ut) + \
TimeDelta(time_interval_sec * np.arange(dim) * u.second)
# TODO generate the labels for the dict automatically from labels
data = {'time': time_array, 'data': countrate, 'labels': labels}
return header, data
class RHESSISummaryTimeSeries(GenericTimeSeries):
"""
RHESSI X-ray Summary lightcurve TimeSeries.
The RHESSI mission consists of a single spin-stabilized spacecraft in a low-altitude orbit
inclined 38 degrees to the Earth's equator.
The only instrument on board is a set of 9 Germanium spectrometers with the ability to
obtain high fidelity solar spectra from X rays (down to 3 keV) to gamma rays (1 MeV).
Each spectrometer is coupled to a set of grids with different pitches which enable
fourier-style imaging as the spacecraft spins.
RHESSI provides summary lightcurves in the following passbands:
* 3 - 6 keV
* 6 - 12 keV
* 12 - 25 keV
* 25 - 50 keV
* 50 - 100 keV
* 100 - 300 keV
* 300 - 800 keV
* 800 - 7000 keV
* 7000 - 20000 keV
RHESSI was launched on 5th February 2002.
Examples
--------
>>> import sunpy.data.sample # doctest: +REMOTE_DATA
>>> import sunpy.timeseries
>>> rhessi = sunpy.timeseries.TimeSeries(sunpy.data.sample.RHESSI_TIMESERIES) # doctest: +REMOTE_DATA
>>> rhessi.peek() # doctest: +SKIP
References
----------
* `RHESSI Homepage. <https://hesperia.gsfc.nasa.gov/rhessi3/index.html>`_
* `Mission Paper. <https://doi.org/10.1023/A:1022428818870>`_
"""
# Class attribute used to specify the source class of the TimeSeries.
_source = 'rhessi'
def plot(self, axes=None, **kwargs):
"""
Plots RHESSI Count Rate light curve from a pandas dataframe.
Parameters
----------
axes : `matplotlib.axes.Axes`, optional
The axes on which to plot the TimeSeries. Defaults to current axes.
**kwargs : `dict`
Additional plot keyword arguments that are handed to `~matplotlib.axes.Axes.plot`
functions.
Returns
-------
`~matplotlib.axes.Axes`
The plot axes.
"""
self._validate_data_for_plotting()
if axes is None:
axes = plt.gca()
# These are a matplotlib version of the default RHESSI color cycle
default_colors = ('black', 'tab:pink', 'tab:green', 'tab:cyan',
'tab:olive', 'tab:red', 'tab:blue', 'tab:orange',
'tab:brown')
colors = kwargs.pop('colors', default_colors)
for color, (item, frame) in zip(itertools.cycle(colors),
self.to_dataframe().items()):
axes.plot(self.to_dataframe().index, frame.values,
color=color, label=item, **kwargs)
axes.set_yscale("log")
axes.set_xlabel(datetime.datetime.isoformat(self.to_dataframe().index[0])[0:10])
axes.set_ylabel('Count Rate s$^{-1}$ detector$^{-1}$')
axes.yaxis.grid(True, 'major')
axes.xaxis.grid(False, 'major')
axes.legend()
# TODO: display better tick labels for date range (e.g. 06/01 - 06/05)
formatter = matplotlib.dates.DateFormatter('%H:%M:%S')
axes.xaxis.set_major_formatter(formatter)
axes.fmt_xdata = matplotlib.dates.DateFormatter('%H:%M:%S')
return axes
@peek_show
def peek(self, title="RHESSI Observing Summary Count Rate", **kwargs):
"""
Displays the RHESSI Count Rate light curve by calling
`~sunpy.timeseries.sources.rhessi.RHESSISummaryTimeSeries.plot`.
.. plot::
import sunpy.data.sample
import sunpy.timeseries
rhessi = sunpy.timeseries.TimeSeries(sunpy.data.sample.RHESSI_TIMESERIES, source='RHESSI')
rhessi.peek()
Parameters
----------
title : `str`
The title of the plot. Defaults to "RHESSI Observing Summary Count Rate".
**kwargs : `dict`
Additional plot keyword arguments that are handed to `~matplotlib.axes.Axes.plot`
functions.
"""
fig, ax = plt.subplots()
axes = self.plot(axes=ax, **kwargs)
axes.set_title(title)
fig.autofmt_xdate()
return fig
@classmethod
def _parse_file(cls, filepath):
"""
Parses rhessi FITS data files to create TimeSeries.
Parameters
----------
filepath : `str`
The path to the file you want to parse.
"""
hdus = sunpy.io.read_file(filepath)
return cls._parse_hdus(hdus)
@classmethod
def _parse_hdus(cls, hdulist):
"""
Parses a RHESSI `astropy.io.fits.HDUList` from a FITS file.
Parameters
----------
hdulist : `astropy.io.fits.HDUList`
A HDU list.
"""
header, d = parse_observing_summary_hdulist(hdulist)
# The time of dict `d` is astropy.time, but dataframe can only take datetime
d['time'] = d['time'].datetime
header = MetaDict(OrderedDict(header))
data = DataFrame(d['data'], columns=d['labels'], index=d['time'])
# Add the units data
units = OrderedDict([('3 - 6 keV', u.ct / u.s / u.Unit('detector')),
('6 - 12 keV', u.ct / u.s / u.Unit('detector')),
('12 - 25 keV', u.ct / u.s / u.Unit('detector')),
('25 - 50 keV', u.ct / u.s / u.Unit('detector')),
('50 - 100 keV', u.ct / u.s / u.Unit('detector')),
('100 - 300 keV', u.ct / u.s / u.Unit('detector')),
('300 - 800 keV', u.ct / u.s / u.Unit('detector')),
('800 - 7000 keV', u.ct / u.s / u.Unit('detector')),
('7000 - 20000 keV', u.ct / u.s / u.Unit('detector'))])
# Todo: check units used. https://hesperia.gsfc.nasa.gov/ssw/hessi/doc/guides/hessi_data_access.htm
return data, header, units
@classmethod
def is_datasource_for(cls, **kwargs):
"""
Determines if the file corresponds to a RHESSI X-ray Summary
`~sunpy.timeseries.TimeSeries`.
"""
# Check if source is explicitly assigned
if 'source' in kwargs.keys():
if kwargs.get('source', ''):
return kwargs.get('source', '').lower().startswith(cls._source)
# Check if HDU defines the source instrument
if 'meta' in kwargs.keys():
return kwargs['meta'].get('telescop', '').startswith('HESSI')
|
{"hexsha": "8a189ea1e1100f4687b4079c137ffeb89fb85ad3", "size": 9401, "ext": "py", "lang": "Python", "max_stars_repo_path": "sunpy/timeseries/sources/rhessi.py", "max_stars_repo_name": "RhnSharma/sunpy", "max_stars_repo_head_hexsha": "03700193d287156ca1922eb27c4c2ad50040e53f", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 628, "max_stars_repo_stars_event_min_datetime": "2015-01-14T17:34:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T06:07:50.000Z", "max_issues_repo_path": "sunpy/timeseries/sources/rhessi.py", "max_issues_repo_name": "RhnSharma/sunpy", "max_issues_repo_head_hexsha": "03700193d287156ca1922eb27c4c2ad50040e53f", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": 3983, "max_issues_repo_issues_event_min_datetime": "2015-01-03T11:16:21.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:55:38.000Z", "max_forks_repo_path": "sunpy/timeseries/sources/rhessi.py", "max_forks_repo_name": "RhnSharma/sunpy", "max_forks_repo_head_hexsha": "03700193d287156ca1922eb27c4c2ad50040e53f", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 582, "max_forks_repo_forks_event_min_datetime": "2015-01-14T10:09:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T06:07:12.000Z", "avg_line_length": 35.6098484848, "max_line_length": 123, "alphanum_fraction": 0.5979151154, "include": true, "reason": "import numpy,import astropy,from astropy", "num_tokens": 2409}
|
import json
import keras
import numpy as np
import tensorflow as tf
import keras.backend as K
from .data.vocab import TextEncoder
from musket_text.bert.modeling import BertConfig
from .model import create_transformer
def load_openai_transformer(path: str = './openai/model/', use_attn_mask: bool = True,
use_one_embedding_dropout: bool = False, max_len: int = 512) -> keras.Model:
with open(path + 'params_shapes.json') as f:
shapes = json.load(f)
offsets = np.cumsum([np.prod(shape) for shape in shapes])
init_params = [np.load(path + 'params_{}.npy'.format(n)) for n in range(10)]
init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]
init_params[0] = init_params[0][:min(512, max_len)]
# add special token embedding to token embedding
init_params[1] = np.concatenate(
(init_params[1], np.random.randn(TextEncoder.SPECIAL_COUNT, 768).astype(np.float32) * 0.02), axis=0)
init_params = [np.zeros((TextEncoder.NUM_SEGMENTS, 768)).astype(np.float32)] + init_params # segment embedding
model = create_transformer(embedding_dim=768, embedding_dropout=0.1, vocab_size=40478,
max_len=min(512, max_len), use_attn_mask=use_attn_mask, trainable_pos_embedding=True,
num_heads=12, num_layers=12, use_one_embedding_dropout=use_one_embedding_dropout,
d_hid=4 * 768, attention_dropout=0.1, residual_dropout=0.1)
model.set_weights(init_params)
return model
def load_google_bert(base_location: str = './google_bert/downloads/multilingual_L-12_H-768_A-12/',
use_attn_mask: bool = True, max_len: int = 512, verbose: bool = False,customInputs=None) -> keras.Model:
bert_config = BertConfig.from_json_file(base_location + 'bert_config.json')
init_checkpoint = base_location + 'bert_model.ckpt'
var_names = tf.train.list_variables(init_checkpoint)
check_point = tf.train.load_checkpoint(init_checkpoint)
vocab_size = bert_config.vocab_size #- TextEncoder.BERT_SPECIAL_COUNT - TextEncoder.BERT_UNUSED_COUNT
model = create_transformer(embedding_layer_norm=True, neg_inf=-10000.0, use_attn_mask=use_attn_mask,
vocab_size=vocab_size, accurate_gelu=True, layer_norm_epsilon=1e-12, max_len=max_len,
use_one_embedding_dropout=True, d_hid=bert_config.intermediate_size,
embedding_dim=bert_config.hidden_size, num_layers=bert_config.num_hidden_layers,
num_heads=bert_config.num_attention_heads,
residual_dropout=bert_config.hidden_dropout_prob,
attention_dropout=bert_config.attention_probs_dropout_prob,
customInputs=customInputs)
if K.backend() == 'tensorflow':
weights = [np.zeros(w.shape) for w in model.weights]
else:
weights = [np.zeros(w.get_value().shape) for w in model.weights]
for var_name, _ in var_names:
w_id = None
qkv = None
unsqueeze = False
parts = var_name.split('/')
first_vars_size = 5
if parts[1] == 'embeddings':
n = parts[-1]
if n == 'token_type_embeddings':
w_id = 0
elif n == 'position_embeddings':
w_id = 1
elif n == 'word_embeddings':
w_id = 2
elif n == 'gamma':
w_id = 3
elif n == 'beta':
w_id = 4
else:
raise ValueError()
elif parts[2].startswith('layer_'):
layer_number = int(parts[2][len('layer_'):])
if parts[3] == 'attention':
if parts[-1] == 'beta':
w_id = first_vars_size + layer_number * 12 + 5
elif parts[-1] == 'gamma':
w_id = first_vars_size + layer_number * 12 + 4
elif parts[-2] == 'dense':
if parts[-1] == 'bias':
w_id = first_vars_size + layer_number * 12 + 3
elif parts[-1] == 'kernel':
w_id = first_vars_size + layer_number * 12 + 2
unsqueeze = True
else:
raise ValueError()
elif parts[-2] == 'key' or parts[-2] == 'query' or parts[-2] == 'value':
w_id = first_vars_size + layer_number * 12 + (0 if parts[-1] == 'kernel' else 1)
unsqueeze = parts[-1] == 'kernel'
qkv = parts[-2][0]
else:
raise ValueError()
elif parts[3] == 'intermediate':
if parts[-1] == 'bias':
w_id = first_vars_size + layer_number * 12 + 7
elif parts[-1] == 'kernel':
w_id = first_vars_size + layer_number * 12 + 6
unsqueeze = True
else:
raise ValueError()
elif parts[3] == 'output':
if parts[-1] == 'beta':
w_id = first_vars_size + layer_number * 12 + 11
elif parts[-1] == 'gamma':
w_id = first_vars_size + layer_number * 12 + 10
elif parts[-1] == 'bias':
w_id = first_vars_size + layer_number * 12 + 9
elif parts[-1] == 'kernel':
w_id = first_vars_size + layer_number * 12 + 8
unsqueeze = True
else:
raise ValueError()
if w_id is not None and qkv is None:
if verbose:
print(var_name, ' -> ', model.weights[w_id].name)
if w_id == 1: # pos embedding
weights[w_id][:max_len, :] = check_point.get_tensor(var_name)[:max_len,
:] if not unsqueeze else check_point.get_tensor(var_name)[
None, :max_len, :]
elif w_id == 2: # word embedding
# ours: unk, [vocab], pad, msk(mask), bos(cls), del(use sep again), eos(sep)
# theirs: pad, 99 unused, unk, cls, sep, mask, [vocab]
saved = check_point.get_tensor(var_name) # vocab_size, emb_size
# weights[our_position] = saved[their_position]
weights[w_id] = saved
# weights[w_id][0] = saved[1 + TextEncoder.BERT_UNUSED_COUNT] # unk
# weights[w_id][1:vocab_size] = saved[-vocab_size + 1:]
# weights[w_id][vocab_size + TextEncoder.PAD_OFFSET] = saved[0]
# weights[w_id][vocab_size + TextEncoder.MSK_OFFSET] = saved[4 + TextEncoder.BERT_UNUSED_COUNT]
# weights[w_id][vocab_size + TextEncoder.BOS_OFFSET] = saved[2 + TextEncoder.BERT_UNUSED_COUNT]
# weights[w_id][vocab_size + TextEncoder.DEL_OFFSET] = saved[3 + TextEncoder.BERT_UNUSED_COUNT]
# weights[w_id][vocab_size + TextEncoder.EOS_OFFSET] = saved[3 + TextEncoder.BERT_UNUSED_COUNT]
else:
weights[w_id][:] = check_point.get_tensor(var_name) if not unsqueeze else \
check_point.get_tensor(var_name)[
None, ...]
elif w_id is not None:
if verbose:
print(var_name, ' -> ', model.weights[w_id].name, '::', qkv)
p = {'q': 0, 'k': 1, 'v': 2}[qkv]
if weights[w_id].ndim == 3:
dim_size = weights[w_id].shape[1]
weights[w_id][0, :, p * dim_size:(p + 1) * dim_size] = check_point.get_tensor(
var_name) if not unsqueeze else \
check_point.get_tensor(var_name)[
None, ...]
else:
dim_size = weights[w_id].shape[0] // 3
weights[w_id][p * dim_size:(p + 1) * dim_size] = check_point.get_tensor(var_name)
else:
if verbose:
print('not mapped: ', var_name) # TODO pooler, cls/predictions, cls/seq_relationship
model.set_weights(weights)
# mInputs = model.input
# embOut = [x.output for x in model.layers if x.output.name == "EmbeddingDropOut/Identity:0"][0]
# newModel = keras.Model(inputs=mInputs, outputs=[embOut], name='Test Model')
return model, bert_config
|
{"hexsha": "6a3fa9fd33ea79b3b1574c7e5f9050eb796bdd3f", "size": 8580, "ext": "py", "lang": "Python", "max_stars_repo_path": "musket_text/bert/load.py", "max_stars_repo_name": "petrochenko-pavel-a/musket_text", "max_stars_repo_head_hexsha": "9571b9d554ed66496c911222d319e42242351eb6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "musket_text/bert/load.py", "max_issues_repo_name": "petrochenko-pavel-a/musket_text", "max_issues_repo_head_hexsha": "9571b9d554ed66496c911222d319e42242351eb6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "musket_text/bert/load.py", "max_forks_repo_name": "petrochenko-pavel-a/musket_text", "max_forks_repo_head_hexsha": "9571b9d554ed66496c911222d319e42242351eb6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 54.3037974684, "max_line_length": 125, "alphanum_fraction": 0.5534965035, "include": true, "reason": "import numpy", "num_tokens": 1999}
|
import pickle
import numpy as np
import librosa
import pandas as pd
from raw_audio_segmented_save_data import split_data
path = '/scratch/speech/raw_audio_dataset/raw_audio_segmented_full.pkl'
file = open(path, 'rb')
data = pickle.load(file)
thresh = 32000
sr_standard = 16000
input_new = []
#seq_length_new = []
for i, utterance in enumerate(data['input']):
utterance_new = []
for j, segment in enumerate(utterance):
if len(segment) != thresh:
segment_new = librosa.resample(segment.astype('float'), sr_standard, thresh/(len(segment)/sr_standard))
segment_new = segment_new[0:thresh]
utterance_new.append(segment_new)
print(len(segment_new))
else:
utterance_new.append(segment)
print(len(segment_new))
#seq_length_new.append(len(utterance))
input_new.append(utterance_new)
dataset_updated = {'input': input_new, 'target': data['target'], 'segment_labels': data['segment_labels']}
full_set_new = '/scratch/speech/raw_audio_dataset/raw_audio_segmented_full_equal_lengths.pkl'
train_set_new = '/scratch/speech/raw_audio_dataset/raw_audio_segmented_train_equal_lengths.pkl'
test_set_new = '/scratch/speech/raw_audio_dataset/raw_audio_segmented_test_equal_lengths.pkl'
train, test = split_data(dataset_updated)
with open(full_set_new, 'wb') as f:
pickle.dump(dataset_updated, f)
with open(train_set_new, 'wb') as f:
pickle.dump(train, f)
with open(test_set_new, 'wb') as f:
pickle.dump(test, f)
|
{"hexsha": "15ffffa6106eaafccc94879aa0e1611201167cf4", "size": 1521, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/preprocessing/equalize_segment_lengths.py", "max_stars_repo_name": "dem123456789/Speech-Emotion-Recognition-with-Dual-Sequence-LSTM-Architecture", "max_stars_repo_head_hexsha": "a072cb940201bbcdb2d0f4d0dfa1dde478fa4464", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-08-03T03:13:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-11T08:32:10.000Z", "max_issues_repo_path": "src/preprocessing/equalize_segment_lengths.py", "max_issues_repo_name": "dem123456789/Speech-Emotion-Recognition-with-Dual-Sequence-LSTM-Architecture", "max_issues_repo_head_hexsha": "a072cb940201bbcdb2d0f4d0dfa1dde478fa4464", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-09-08T16:10:38.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-08T16:10:38.000Z", "max_forks_repo_path": "src/preprocessing/equalize_segment_lengths.py", "max_forks_repo_name": "dem123456789/Speech-Emotion-Recognition-with-Dual-Sequence-LSTM-Architecture", "max_forks_repo_head_hexsha": "a072cb940201bbcdb2d0f4d0dfa1dde478fa4464", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-08-03T21:37:21.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-26T02:19:17.000Z", "avg_line_length": 35.3720930233, "max_line_length": 115, "alphanum_fraction": 0.7278106509, "include": true, "reason": "import numpy", "num_tokens": 355}
|
"""Модуль для определения режима потока в кольцевом пространстве"""
import uniflocpy.uTools.uconst as uc
import math
import scipy.optimize as sp
class flow_pattern_annulus_Caetano(object):
"""Класс для определения режимов потока в кольцевом пространстве
по Caetano (1992)"""
def __init__(self):
self.surface_tension_gl_Nm = 0.020
self.rho_liq_kgm3 = 800
self.rho_gas_kgm3 = 50
self.d_cas_in_m = 0.155
self.d_tube_out_m = 0.080
self.rho_mix_kgm3 = 700
self.mu_mix_pasec = 0.01
self.vs_liq_msec = 0.5
self.vs_gas_msec = 0.9
self.vm_msec = self.vs_liq_msec + self.vs_gas_msec
self.concentric_annulus = True
# рассчитанные параметры
self.equation_part = None
self.v_infinite_z_msec = None
self.v_Taylor_bubble_msec = None
self.d_equi_periphery_m = None
self.d_hydr_m = None
self.flow_pattern = None
self.flow_pattern_name = None
self.const_k = None
self.friction_coeff = None
self.d_max_bubble_m = None
self.d_crit_bubble_m = None
self.number_Re = None
self.k_ratio_d = None
self.Fca = None
self.vs_gas_bubble2slug_msec = None
self.vs_gas_dispbubble2slug_msec = None
self.vs_gas_2annular_msec = None
def __friction_coefficient_Gunn_Darling(self, initial_f):
right_part = (4 * math.log(self.number_Re * (initial_f *
(16 / self.Fca) ** (0.45 * math.exp(-(self.number_Re - 3000) / 10 ** 6))
) ** 0.5) - 0.4)
left_part = 1 / (initial_f * (16 / self.Fca) ** (0.45 * math.exp(-(self.number_Re - 3000) / 10 ** 6))) ** 0.5
return right_part - left_part
def __calc_all__(self):
# Bubble Flow Region Existence
# Harmanty (1960)
self.equation_part = (self.surface_tension_gl_Nm *
(self.rho_liq_kgm3 - self.rho_gas_kgm3) * uc.g /
self.rho_liq_kgm3 ** 2) ** 0.25
self.v_infinite_z_msec = 1.53 * self.equation_part
self.d_equi_periphery_m = self.d_cas_in_m + self.d_tube_out_m
# Nicklin (1962)
self.v_Taylor_bubble_msec = 0.35 * (uc.g * self.d_equi_periphery_m) ** 0.5
# Bubble to Slug Flow Transition
if self.concentric_annulus:
self.vs_gas_bubble2slug_msec = self.vs_liq_msec / 4 + 0.306 * self.equation_part
else:
self.vs_gas_bubble2slug_msec = self.vs_liq_msec / 5.67 + 0.230 * self.equation_part
# bubble or slug to dispersed bubble flow transition
# Calderbank (1958)
self.const_k = 0.725 + 4.15 * (self.vs_gas_msec / self.vm_msec) ** 0.5
self.d_hydr_m = self.d_cas_in_m - self.d_tube_out_m
# Broodkey (1967)
self.number_Re = self.rho_mix_kgm3 * self.vm_msec * self.d_hydr_m / self.mu_mix_pasec
self.k_ratio_d = self.d_tube_out_m / self.d_cas_in_m
# Friction coefficient
# TODO дописать формулы для эксентричного положения трубы (сложные какие-то) - или не надо?
self.Fca = (16 * (1 - self.k_ratio_d) ** 2 /
((1 - self.k_ratio_d ** 4) / (1 - self.k_ratio_d ** 2) -
(1 - self.k_ratio_d ** 2) / math.log(1 / self.k_ratio_d)))
if self.number_Re < 3000: # laminar flow
if self.concentric_annulus:
self.friction_coeff = self.Fca / self.number_Re
else: # turbulent flow
if self.concentric_annulus:
self.friction_coeff = float(sp.fsolve(self.__friction_coefficient_Gunn_Darling, 0.000005))
# Hinze (1955)
self.d_max_bubble_m = (self.const_k * (self.surface_tension_gl_Nm / self.rho_liq_kgm3) ** 0.6 *
(2 * self.vm_msec ** 3 / self.d_hydr_m * self.friction_coeff) ** (-0.4))
self.d_crit_bubble_m = (0.4 * self.surface_tension_gl_Nm / (self.rho_liq_kgm3 -
self.rho_gas_kgm3) / uc.g) ** 0.5
# Dispersed bubble to slug flow transition
self.vs_gas_dispbubble2slug_msec = 1.083 * self.vs_liq_msec + 0.796 * self.equation_part
# Transition to Annular flow
self.vs_gas_2annular_msec = 3.1 * (self.surface_tension_gl_Nm *
(self.rho_liq_kgm3 - self.rho_gas_kgm3) * uc.g /
self.rho_gas_kgm3 ** 2) ** 0.25
# определение режима потока
if self.vs_gas_msec >= self.vs_gas_2annular_msec:
self.flow_pattern = 3
self.flow_pattern_name = 'Annular flow pattern - кольцевой режим'
else:
if self.d_crit_bubble_m >= self.d_max_bubble_m and self.vs_gas_msec <= self.vs_gas_dispbubble2slug_msec:
self.flow_pattern = 1
self.flow_pattern_name = 'Dispersed bubble flow pattern - дисперсионно-пузырьковый режим'
else:
if self.v_Taylor_bubble_msec >= self.v_infinite_z_msec and self.vs_gas_msec < self.vs_gas_bubble2slug_msec:
self.flow_pattern = 0
self.flow_pattern_name = 'Bubble flow pattern - пузырьковый режим'
else:
if self.vs_gas_msec >= self.vs_gas_bubble2slug_msec:
self.flow_pattern = 2
self.flow_pattern_name = 'Slug flow pattern - Пробковый или эмульсионный режим'
if self.flow_pattern == 0 or self.flow_pattern == 1:
self.v_infinite_z_msec = 1.53 * self.equation_part
else:
self.v_infinite_z_msec = 2 ** 0.5 * self.equation_part
def calc_pattern(self, vs_liq_msec, vs_gas_msec):
"""
Расчет режима потока по приведенной скорости жидкости и газа
:param vs_liq_msec: приведенная скорость жидкости, м/сек
:param vs_gas_msec: приведенная скорость газа, м/сек
:return: номер режима потока, где
:0: Bubble flow pattern - пузырьковый режим
:1: Dispersed bubble flow pattern - дисперсионно-пузырьковый режим
:2: Slug flow pattern - Пробковый или эмульсионный режим
:3: Annular flow pattern - кольцевой режим
"""
self.vs_gas_msec = vs_gas_msec
self.vs_liq_msec = vs_liq_msec
self.vm_msec = self.vs_gas_msec + self.vs_liq_msec
self.__calc_all__()
return self.flow_pattern
|
{"hexsha": "a84885d82c83ffbfb64e49f7c2c36c00f8702d96", "size": 6536, "ext": "py", "lang": "Python", "max_stars_repo_path": "uniflocpy/uMultiphaseFlow/flow_pattern_annulus_Caetano.py", "max_stars_repo_name": "Shabonasar/unifloc", "max_stars_repo_head_hexsha": "1f12d6b4110a9ff0e10817560ad99d55c9133954", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-02-05T20:02:44.000Z", "max_stars_repo_stars_event_max_datetime": "2019-08-24T13:18:31.000Z", "max_issues_repo_path": "uniflocpy/uMultiphaseFlow/flow_pattern_annulus_Caetano.py", "max_issues_repo_name": "Shabonasar/unifloc", "max_issues_repo_head_hexsha": "1f12d6b4110a9ff0e10817560ad99d55c9133954", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 32, "max_issues_repo_issues_event_min_datetime": "2017-09-29T15:14:59.000Z", "max_issues_repo_issues_event_max_datetime": "2019-12-02T07:17:22.000Z", "max_forks_repo_path": "uniflocpy/uMultiphaseFlow/flow_pattern_annulus_Caetano.py", "max_forks_repo_name": "Shabonasar/unifloc", "max_forks_repo_head_hexsha": "1f12d6b4110a9ff0e10817560ad99d55c9133954", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-05-31T16:14:17.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-31T06:24:29.000Z", "avg_line_length": 41.8974358974, "max_line_length": 123, "alphanum_fraction": 0.6040391677, "include": true, "reason": "import scipy", "num_tokens": 1977}
|
from gan.output import Output, OutputType, Normalization
from pcaputilities import chunk_and_convert_to_training, convertToFeatures, sequences_sample, chunk_and_convert_ps_and_durations, extract_dictionaries_from_activities, convert_to_durations, signatureExtractionAll, all_greedy_activity_conversion, chunk_and_convert_ps
import sys
import glob
import numpy as np
import pickle
import random
import csv
def normalize_packet_sizes(sequences):
normalized_packets = []
num_seqs = []
max_packet_size = 0
for sequence in sequences:
num_seq = [int(x) for x in sequence]
max_packet_size = max(max([abs(x) for x in num_seq]), max_packet_size)
num_seqs.append(num_seq)
for num_seq in num_seqs:
normalized = [(x + max_packet_size) for x in num_seq]
normalized_packets.append(normalized)
return normalized_packets, (max_packet_size * 2) + 1
def normalize_durations(sequences):
max_d = 0.0
num_seqs = []
final_num_seqs = []
for sequence in sequences:
num_seq = [float(x) for x in sequence]
max_d = max(max(num_seq), max_d)
num_seqs.append(num_seq)
for num_seq in num_seqs:
final_num_seq = [x/max_d for x in num_seq]
final_num_seqs.append(final_num_seq)
return final_num_seqs, max_d
def find_max_len(sequences):
max_len = 0
for sequence in sequences:
max_len = max(len(sequence), max_len)
return max_len
currentLabel = 0
max_duration = 0
packet_sizes = []
durations = []
labels = []
directory = sys.argv[1]
extended = directory + '/*/'
paths = glob.glob(extended)
# convert pcaps to packet size sequences
for path in paths:
pcapPath = path + '/*.pcap'
pcapFiles = glob.glob(pcapPath)
for file in pcapFiles:
featureV = convertToFeatures(file)
durationV = convert_to_durations(file)
if len(featureV) != 0:
packet_sizes.append(featureV)
durations.append(durationV)
labels.append(currentLabel)
currentLabel += 1
D = currentLabel # number of devices
# V is vocab size
normalized_p, V = normalize_packet_sizes(packet_sizes)
all_signatures = signatureExtractionAll(normalized_p, 1, 7, 5, 4)
results = all_greedy_activity_conversion(normalized_p, all_signatures)
signatureToTokens, tokensToSignatures = extract_dictionaries_from_activities(results)
V = len(tokensToSignatures)
with open("sigToToken.pkl", mode='wb') as sigFile:
pickle.dump(signatureToTokens, sigFile)
with open("tokenToSig.pkl", mode='wb') as tokenFile:
pickle.dump(tokensToSignatures, tokenFile)
print("signature to tokens")
print(signatureToTokens)
print("tokens to signature")
print(tokensToSignatures)
seq_length = 20
sequences = []
for sequence in results:
sigs = []
for token in sequence:
sigs.append(signatureToTokens[token])
sequences.append(sigs)
r = chunk_and_convert_ps_and_durations(normalized_p, durations, results, seq_length)
packet_sizes = r[0]
for i in range(len(packet_sizes)):
filename = 'real_packet_sizes.txt'
with open(filename, mode='a') as csvfile:
csv_writer = csv.writer(csvfile, delimiter=' ')
c_ps = packet_sizes[i]
csv_writer.writerow(c_ps)
raw_duration = r[1]
sig_duration = r[2]
signatures = r[3]
for i in range(len(raw_duration)):
filename = 'real_durations.txt'
with open(filename, mode='a') as csvfile:
csv_writer = csv.writer(csvfile, delimiter=' ')
c_d = raw_duration[i]
csv_writer.writerow(c_d)
all_tokens = []
for signature in signatures:
tokens = []
for sig in signature:
tokens.append(signatureToTokens[sig])
all_tokens.append(tokens)
sig_duration, max_duration = normalize_durations(sig_duration)
minDicts = dict()
maxDicts = dict()
minDicts[0] = 10000000
maxDicts[0] = 0
def divide_chunks(l, n):
# looping till length l
for i in range(0, len(l), n):
yield l[i:i + n]
all_chunks = []
all_altered_chunks = []
for i in range(len(normalized_p)):
filename = 'real_data4.csv'
with open(filename, mode='a') as csvfile:
csv_writer = csv.writer(csvfile, delimiter=' ')
chunks = divide_chunks(normalized_p[i], seq_length)
for chunk in chunks:
all_chunks.append(chunk)
if len(chunk) == seq_length:
csv_writer.writerow(chunk)
for i in range(len(sequences)):
filename = 'real_data3.csv'
with open(filename, mode='a') as csvfile:
csv_writer = csv.writer(csvfile, delimiter=' ')
chunks = divide_chunks(sequences[i], seq_length)
for chunk in chunks:
all_chunks.append(chunk)
if len(chunk) == seq_length:
new_list = [x for x in chunk]
csv_writer.writerow(new_list)
def extractSequences(fn):
seqs = []
with open(fn, newline='\n') as csvf:
csv_reader = csv.reader(csvf, delimiter=' ')
for row in csv_reader:
seqs.append(row)
return seqs
#
# mapping = dict()
# real_all_seq = extractSequences("real_data.csv")
# for i in range(len(real_all_seq)):
# print("real")
# print(len(real_all_seq[i]))
# print(real_all_seq[i])
# print("altered")
# print(len(all_altered_chunks[i]))
# print(all_altered_chunks[i])
# zipped = dict(zip(real_all_seq[i], all_altered_chunks[i]))
# mapping.update(zipped)
#
# real_sequences = []
# for real_seq in all_chunks:
# real_sequence = []
# for idx in real_seq:
# real_sequence.append(tokensToSignatures[idx])
# real_sequences.append(real_sequence)
#
# final_reals = sequences_sample(real_sequences)
#
# for i in range(len(final_reals)):
# chunks = divide_chunks(final_reals[i], seq_length)
# for chunk in chunks:
# if min(chunk) < minDicts[0]:
# minDicts[0] = min(chunk)
# if max(chunk) > maxDicts[0]:
# maxDicts[0] = max(chunk)
#
# for i in range(len(final_reals)):
# filename = 'real_datac.txt'
# with open(filename, mode='a') as csvfile:
# csv_writer = csv.writer(csvfile, delimiter=' ')
# alteredChunk = list(map(lambda x: x - minDicts[0], final_reals[i]))
# csv_writer.writerow(alteredChunk)
#
# fake_seqs = extractSequences("fake_data.txt")
# fake_sequences = []
# for fake_seq in fake_seqs:
# fake_sequence = []
# for idx in fake_seq:
# fake_sequence.append(tokensToSignatures[mapping[idx]])
# fake_sequences.append(fake_sequence)
#
# final_fakes = sequences_sample(fake_sequences)
#
# for i in range(len(final_fakes)):
# chunks = divide_chunks(final_fakes[i], seq_length)
# for chunk in chunks:
# if min(chunk) < minDicts[0]:
# minDicts[0] = min(chunk)
# if max(chunk) > maxDicts[0]:
# maxDicts[0] = max(chunk)
#
# for i in range(len(final_fakes)):
# filename = 'fake_datac.txt'
# with open(filename, mode='a') as csvfile:
# csv_writer = csv.writer(csvfile, delimiter=' ')
# alteredChunk = list(map(lambda x: x - minDicts[0], final_fakes[i]))
# csv_writer.writerow(alteredChunk)
train_X, train_y = chunk_and_convert_to_training(signatures, raw_duration, max_duration, signatureToTokens, 7)
print(train_X)
print(train_y)
print(len(train_X))
print(len(train_y))
data_feature_output = [
Output(type_=OutputType.DISCRETE, dim=V, normalization=None, is_gen_flag=False),
Output(type_=OutputType.CONTINUOUS, dim=1, normalization=Normalization.ZERO_ONE, is_gen_flag=False)
]
data_attribute_output = [
Output(type_=OutputType.DISCRETE, dim=1, normalization=None, is_gen_flag=False)
]
data_feature = []
data_attribute = []
data_gen_flag = []
for i in range(len(all_tokens)):
packet_size = all_tokens[i]
normalized_duration = sig_duration[i]
label = 0
data_gen = []
data_feat = []
data_attr = [0] * 1
data_attr[label] = 1.0
for j in range(seq_length):
duration = normalized_duration[j]
packet = packet_size[j]
data_gen.append(1.0)
d = V * [0.0]
d[packet] = 1.0
d.append(duration)
data_feat.append(np.array(d, dtype="float32"))
data_gen_flag.append(np.array(data_gen, dtype="float32"))
data_feature.append(np.array(data_feat))
data_attribute.append(np.array(data_attr, dtype="float32"))
print(D)
print(V)
data_feature = np.array(data_feature)
print(data_feature.shape)
data_attribute = np.array(data_attribute)
print(data_attribute.shape)
data_gen_flag = np.array(data_gen_flag)
print(data_gen_flag.shape)
print("Max Duration")
print(max_duration)
np.savez("data/iot/data_train.npz", data_feature=data_feature, data_attribute=data_attribute, data_gen_flag=data_gen_flag)
with open("train_X.pkl", mode='wb') as sigFile:
pickle.dump(train_X, sigFile)
with open("train_y.pkl", mode='wb') as tokenFile:
pickle.dump(train_y, tokenFile)
with open("max_duration.pkl", mode='wb') as tokenFile:
pickle.dump(max_duration, tokenFile)
with open('data/iot/data_feature_output.pkl', 'wb') as fp:
pickle.dump(data_feature_output, fp, protocol=2)
with open('data/iot/data_attribute_output.pkl', 'wb') as fp:
pickle.dump(data_attribute_output, fp, protocol=2)
|
{"hexsha": "9a123200168c6f72c3ff11b890c2cba71340e953", "size": 9007, "ext": "py", "lang": "Python", "max_stars_repo_path": "data_prep.py", "max_stars_repo_name": "pumperknickle/DoppelGANger", "max_stars_repo_head_hexsha": "bb92853f6d3a4d100caab7d5030c94d5064a7e66", "max_stars_repo_licenses": ["BSD-3-Clause-Clear"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data_prep.py", "max_issues_repo_name": "pumperknickle/DoppelGANger", "max_issues_repo_head_hexsha": "bb92853f6d3a4d100caab7d5030c94d5064a7e66", "max_issues_repo_licenses": ["BSD-3-Clause-Clear"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data_prep.py", "max_forks_repo_name": "pumperknickle/DoppelGANger", "max_forks_repo_head_hexsha": "bb92853f6d3a4d100caab7d5030c94d5064a7e66", "max_forks_repo_licenses": ["BSD-3-Clause-Clear"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.8245033113, "max_line_length": 266, "alphanum_fraction": 0.6989008549, "include": true, "reason": "import numpy", "num_tokens": 2291}
|
import numpy as np
from pymoo.model.mutation import Mutation
from pymoo.operators.repair.to_bound import set_to_bounds_if_outside_by_problem
class PolynomialMutation(Mutation):
def __init__(self, eta, prob=None):
super().__init__()
self.eta = float(eta)
if prob is not None:
self.prob = float(prob)
else:
self.prob = None
def _do(self, problem, X, **kwargs):
X = X.astype(float)
Y = np.full(X.shape, np.inf)
if self.prob is None:
self.prob = 1.0 / problem.n_var
do_mutation = np.random.random(X.shape) < self.prob
Y[:, :] = X
xl = np.repeat(problem.xl[None, :], X.shape[0], axis=0)[do_mutation]
xu = np.repeat(problem.xu[None, :], X.shape[0], axis=0)[do_mutation]
X = X[do_mutation]
delta1 = (X - xl) / (xu - xl)
delta2 = (xu - X) / (xu - xl)
mut_pow = 1.0 / (self.eta + 1.0)
rand = np.random.random(X.shape)
mask = rand <= 0.5
mask_not = np.logical_not(mask)
deltaq = np.zeros(X.shape)
xy = 1.0 - delta1
val = 2.0 * rand + (1.0 - 2.0 * rand) * (np.power(xy, (self.eta + 1.0)))
d = np.power(val, mut_pow) - 1.0
deltaq[mask] = d[mask]
xy = 1.0 - delta2
val = 2.0 * (1.0 - rand) + 2.0 * (rand - 0.5) * (np.power(xy, (self.eta + 1.0)))
d = 1.0 - (np.power(val, mut_pow))
deltaq[mask_not] = d[mask_not]
# mutated values
_Y = X + deltaq * (xu - xl)
# back in bounds if necessary (floating point issues)
_Y[_Y < xl] = xl[_Y < xl]
_Y[_Y > xu] = xu[_Y > xu]
# set the values for output
Y[do_mutation] = _Y
# in case out of bounds repair (very unlikely)
Y = set_to_bounds_if_outside_by_problem(problem, Y)
return Y
class PM(PolynomialMutation):
pass
|
{"hexsha": "e6cb2c448234bb74d1b1f24b1b88cd616c157b62", "size": 1908, "ext": "py", "lang": "Python", "max_stars_repo_path": "pymoo/operators/mutation/polynomial_mutation.py", "max_stars_repo_name": "Alaya-in-Matrix/pymoo", "max_stars_repo_head_hexsha": "02d6e7085f5fe88dbd56b2a9f5173abe20c54caf", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-03-28T03:06:35.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-28T03:40:08.000Z", "max_issues_repo_path": "pymoo/operators/mutation/polynomial_mutation.py", "max_issues_repo_name": "Alaya-in-Matrix/pymoo", "max_issues_repo_head_hexsha": "02d6e7085f5fe88dbd56b2a9f5173abe20c54caf", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pymoo/operators/mutation/polynomial_mutation.py", "max_forks_repo_name": "Alaya-in-Matrix/pymoo", "max_forks_repo_head_hexsha": "02d6e7085f5fe88dbd56b2a9f5173abe20c54caf", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-03-31T08:19:13.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T08:19:13.000Z", "avg_line_length": 26.5, "max_line_length": 88, "alphanum_fraction": 0.5398322851, "include": true, "reason": "import numpy", "num_tokens": 586}
|
from flask import Flask, render_template, request, flash, redirect, Response, send_from_directory
import os,time
import torch
import cv2
import numpy as np
import glob
import pathlib
UPLOAD_FOLDER = './components'
DATASET_FOLDER = './components/Test'
ALLOWED_IMAGES = {'jpg', 'jpeg'}
ALLOWED_LABELS = {'txt'}
app = Flask(__name__)
app.secret_key = "super secret key"
app.config['SESSION_TYPE'] = 'filesystem'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['DATASET_FOLDER'] = DATASET_FOLDER
app.config['ALLOWED_IMAGES'] = ALLOWED_IMAGES
app.config['ALLOWED_LABELS'] = ALLOWED_LABELS
@app.route('/')
def index():
return render_template('index.html')
###################################################
########### INTERFACE GALLERY #####################
###################################################
@app.route('/interface', methods=['GET', 'POST'])
def interface():
image_names = [f for f in os.listdir('./components/Test/Images_predites') if not f.startswith('.')]
return render_template("interface.html", image_names=image_names)
@app.route('/upload/<filename>')
def send_image(filename):
return send_from_directory("./components/Test/Images_predites", filename)
###################################################
########### DOCUMENT ##############################
###################################################
@app.route('/doc', methods=['GET'])
def doc():
return render_template('doc.html')
###################################################
########### UPLOADS ###############################
###################################################
@app.route('/upload_pt', methods=['POST','GET'])
def upload_pt():
if request.method == 'POST':
# check if the post request has the file part
if 'userfile' not in request.files:
print("OUI")
flash('No file part')
return redirect(request.url)
file = request.files['userfile']
# If the user does not select a file, the browser submits an
# empty file without a filename.
if file.filename == '':
print("NON")
flash('No selected file')
return redirect(request.url)
else :
complete_path = os.path.join(app.config['UPLOAD_FOLDER'], "weights.pt")
file.save(complete_path)
return redirect("/interface")
def allowed_images(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_IMAGES
def allowed_labels(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_LABELS
@app.route('/upload_images', methods=['GET', 'POST'])
def upload_images():
if request.method == 'POST':
print("POST")
# check if the post request has the file part
if 'upload_images[]' not in request.files:
flash('No file part')
return redirect(request.url)
print(request.files)
for file in request.files.getlist('upload_images[]'):
print(file)
# If the user does not select a file, the browser submits an
# empty file without a filename.
if file.filename == '':
print("No selected file")
flash('No selected file')
return redirect(request.url)
if file and allowed_images(file.filename):
print("File OK")
filename = file.filename
file.save(os.path.join(app.config['DATASET_FOLDER'], filename))
return redirect("/interface")
@app.route('/upload_labels', methods=['GET', 'POST'])
def upload_labels():
if request.method == 'POST':
print("POST")
# check if the post request has the file part
if 'upload_labels[]' not in request.files:
flash('No file part')
return redirect(request.url)
print(request.files)
for file in request.files.getlist('upload_labels[]'):
print(file)
# If the user does not select a file, the browser submits an
# empty file without a filename.
if file.filename == '':
print("No selected file")
flash('No selected file')
return redirect(request.url)
if file and allowed_labels(file.filename):
print("File OK")
filename = file.filename
file.save(os.path.join(app.config['DATASET_FOLDER'], filename))
return redirect("/interface")
###################################################
########### CLEAR CACHE ###########################
###################################################
@app.route('/clear_cache', methods=['GET'])
def clear_cache():
os.system("rm -rf ./components/Test/Images_predites/*")
files = glob.glob('./components/Test/*.jpeg') + glob.glob('./components/Test/*.jpg') + glob.glob('./components/Test/*.txt')
for f in files:
os.remove(f)
return redirect("/interface")
###################################################
########### WEBCAM ################################
###################################################
class Detection:
"""
Class implements Yolo5 model to make inferences on a youtube video using Opencv2.
"""
def __init__(self, capture_index, model_name):
"""
Initializes the class with youtube url and output file.
:param url: Has to be as youtube URL,on which prediction is made.
:param out_file: A valid output file name.
"""
self.capture_index = capture_index
self.model = self.load_model(model_name)
self.classes = self.model.names
self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
print("Using Device: ", self.device)
def get_video_capture(self):
"""
Creates a new video streaming object to extract video frame by frame to make prediction on.
:return: opencv2 video capture object, with lowest quality frame available for video.
"""
return cv2.VideoCapture(self.capture_index)
def load_model(self, model_name):
"""
Loads Yolo5 model from pytorch hub.
:return: Trained Pytorch model.
"""
if model_name:
model = torch.hub.load('ultralytics/yolov5', 'custom', path=model_name, force_reload=False)
else:
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True, force_reload=False)
return model
def score_frame(self, frame):
"""
Takes a single frame as input, and scores the frame using yolo5 model.
:param frame: input frame in numpy/list/tuple format.
:return: Labels and Coordinates of objects detected by model in the frame.
"""
self.model.to(self.device)
frame = [frame]
results = self.model(frame)
labels, cord = results.xyxyn[0][:, -1], results.xyxyn[0][:, :-1]
return labels, cord
def class_to_label(self, x):
"""
For a given label value, return corresponding string label.
:param x: numeric label
:return: corresponding string label
"""
return self.classes[int(x)]
def plot_boxes(self, results, frame):
"""
Takes a frame and its results as input, and plots the bounding boxes and label on to the frame.
:param results: contains labels and coordinates predicted by model on the given frame.
:param frame: Frame which has been scored.
:return: Frame with bounding boxes and labels ploted on it.
"""
labels, cord = results
n = len(labels)
x_shape, y_shape = frame.shape[1], frame.shape[0]
for i in range(n):
row = cord[i]
if row[4] >= 0.3:
x1, y1, x2, y2 = int(row[0]*x_shape), int(row[1]*y_shape), int(row[2]*x_shape), int(row[3]*y_shape)
bgr = (0, 255, 0)
cv2.rectangle(frame, (x1, y1), (x2, y2), bgr, 2)
cv2.putText(frame, self.class_to_label(labels[i]), (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 0.9, bgr, 2)
return frame
def gen():
# Par défaut, le script python nous situe au chemin /var/www/html
detector = Detection(capture_index=0, model_name='./components/weights.pt')
video = detector.get_video_capture()
width = video.get(cv2.CAP_PROP_FRAME_WIDTH) # float `width`
height = video.get(cv2.CAP_PROP_FRAME_HEIGHT) # float `height`
while True:
success, image = video.read()
assert success
frame = cv2.resize(image, (int(width),int(height)))
start_time = time.time()
results = detector.score_frame(frame)
frame = detector.plot_boxes(results, frame)
end_time = time.time()
fps = 1/np.round(end_time - start_time, 2)
cv2.putText(frame, f'FPS: {int(fps)}', (20,70), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (0,255,0), 2)
ret, jpeg = cv2.imencode('.jpg', frame)
frame = jpeg.tobytes()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
@app.route('/video_feed')
def video_feed():
return Response(gen(),
mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route("/stream")
def stream_page():
return render_template("live_streaming.html")
###################################################
########### INFERENCE #############################
###################################################
def make_inferences_on_folder(conf_thres=0.25,iou=0.45):
# Images
imgs=[]
imgs_paths = glob.glob("components/Test/*.jpeg") + glob.glob("components/Test/*.jpg")
for f in imgs_paths:
img = cv2.imread(f)
imgs.append(img)
if len(imgs)!=0:
# Model
model = torch.hub.load('ultralytics/yolov5', 'custom', path='components/weights.pt') # default
model.conf = conf_thres # confidence threshold (0-1)
model.iou = iou # NMS IoU threshold (0-1)
results = model(imgs, size=640) # custom inference size
# Results
results.save(save_dir="components/Test/Images_predites")
@app.route("/inference", methods=['POST'])
def inference():
if request.method == 'POST':
conf_thres = request.form['conf_threshold']
iou = request.form['iou']
make_inferences_on_folder(conf_thres=float(conf_thres),iou=float(iou))
return redirect("/interface")
if __name__ == '__main__':
app.run(debug=True)
|
{"hexsha": "3f9342e3eb91fc40464c8efb54e30fb9a6b39136", "size": 10611, "ext": "py", "lang": "Python", "max_stars_repo_path": "app.py", "max_stars_repo_name": "Dodalpaga/YOLO-Object-Detection-Template", "max_stars_repo_head_hexsha": "cd93d9a9f571976e6fc47cf86c9bc7145a0654c1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "app.py", "max_issues_repo_name": "Dodalpaga/YOLO-Object-Detection-Template", "max_issues_repo_head_hexsha": "cd93d9a9f571976e6fc47cf86c9bc7145a0654c1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "app.py", "max_forks_repo_name": "Dodalpaga/YOLO-Object-Detection-Template", "max_forks_repo_head_hexsha": "cd93d9a9f571976e6fc47cf86c9bc7145a0654c1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.1013986014, "max_line_length": 127, "alphanum_fraction": 0.5645085289, "include": true, "reason": "import numpy", "num_tokens": 2272}
|
#!/usr/bin/env python
# coding: utf-8
# # The Shockley-Queisser limit
#
# By Steven J. Byrnes ([https://sjbyrnes.com/](https://sjbyrnes.com/)). This document lives at [https://github.com/sbyrnes321/SolarCellEfficiencyLimits](https://github.com/sbyrnes321/SolarCellEfficiencyLimits). Please email me any feedback: steven.byrnes@gmail.com
#
# The Shockley-Queisser (SQ) limit is a famous limit on the maximal possible efficiency of solar cells, limited only by fundamental physics. It applies to most solar cell designs in the world, except for "tandem solar cells" and some additional obscure exceptions (discussed at the end of the document). The most important parameter in the SQ model is the bandgap of the semiconductor: If the gap is right, the efficiency can be up to 34%, if the gap is way off, the efficiency limit may be much smaller. [Here is the original SQ paper](http://dx.doi.org/10.1063/1.1736034), but it’s also covered in every solar-cell textbook.
#
# I’m using NREL’s data for the solar spectrum (AM1.5G) and intensity (1000 W/m²). In the original SQ paper, they assumed that
# the sun had a 6000-kelvin blackbody spectrum. So my graphs and values are slightly different. However, other papers and books
# that use AM1.5G spectrum get the same results as I do, for example [link 1](http://www.opticsinfobase.org/abstract.cfm?URI=OSE-2010-SWA1), [link 2](http://www.opticsinfobase.org/abstract.cfm?URI=OSE-2010-SWC4), *Practical Handbook of Photovoltaics* p128-9,
# [link 3](http://dx.doi.org/10.1109/T-ED.1984.21594).
#
# I copied many of these graphs into the Wikipedia article on this topic - [http://en.wikipedia.org/wiki/Shockley-Queisser_limit](http://en.wikipedia.org/wiki/Shockley-Queisser_limit)
#
# In this document you will find:
#
# * A plot of the SQ efficiency limit as a function of bandgap
# * A plot of the SQ limit on short-circuit current, on open-circuit voltage, and on fill-factor, as a function of bandgap
# * A breakdown of exactly which factors lower the SQ limit for which bandgaps
# * A list of some "loopholes" to exceed the SQ limit.
#
# Enjoy!
#
# <p style="font-size:80%">Pronunciation of "Queisser": Hans-Joachim Queisser was German, so a German-speaker helped me guess how the name is pronounced. He guesses that "Queisser" rhymes with "nicer". ("Qu" as in "quick", "ei" as in "Einstein", "ss" as in "kiss", "er" as in "teacher"). (Thanks Florian!)</p>
#
# Note: If you run all the code in this file, including re-creating all the graphs, it may take a few hours. (I made no effort to write efficient code.)
#
# ## General program setup
#
# This document is a mix of text and Python code, written using [Jupyter Notebook](http://jupyter.org/) (You can install Jupyter notebook through [Anaconda](https://www.anaconda.com/distribution/).)
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
import scipy.interpolate, scipy.integrate, pandas, sys
assert sys.version_info >= (3,6), 'Requires Python 3.6+'
# One more package: A units-and-constants package I wrote: http://pypi.python.org/pypi/numericalunits
#
# Example: <span style="color:blue">x = 5 * cm</span> means "x equals 5 centimeters".
#
# Example: <span style="color:blue">y = x / mm</span> means "y is the numerical value of x in millimeters".
# In[2]:
from numericalunits import W, K, nm, m, cm, s, eV, meV, V, mA, c0, hPlanck, kB, e
# ## Program inputs
#
# Solar cell temperature is 300 kelvin:
# In[3]:
Tcell = 300 * K
# The incident light intensity and spectrum is assumed to be the NREL AM1.5G spectrum, which approximates the light coming from the sun and sky at a typical latitude on a clear day. For more information go to https://www.nrel.gov/grid/solar-resource/spectra.html
# In[4]:
# worksheet = pandas.read_excel('https://www.nrel.gov/grid/solar-resource/assets/data/astmg173.xls')
worksheet = pandas.read_excel('/Users/lwheeler/Code/pv-window-bem/Data/astmg173.xls')
downloaded_array = np.array(worksheet)
# Wavelength is in column 0, AM1.5G data is column 2
AM15 = downloaded_array[1:, [0,2]]
# The first line should be 280.0 , 4.7309E-23
# The last line should be 4000.0, 7.1043E-03
# print(AM15)
# Tack on the appropriate units:
# In[5]:
AM15[:,0] *= nm
AM15[:,1] *= W / m**2 / nm
# The NREL data spans the following spectral range (in terms of both photon-wavelength and photon-frequency):
# In[6]:
λ_min = 280 * nm
λ_max = 4000 * nm
E_min = hPlanck * c0 / λ_max
E_max = hPlanck * c0 / λ_min
# Interpolate to get a continuous function which I will be able to do integrals on:
# In[7]:
AM15interp = scipy.interpolate.interp1d(AM15[:,0], AM15[:,1])
# Here’s the plot, it looks correct:
# In[8]:
λs = np.linspace(λ_min, λ_max, num=500)
y_values = np.array([AM15interp(x) for x in λs])
plt.plot(λs / nm , y_values / (W/m**2/nm))
plt.xlabel("Wavelength (nm)")
plt.ylabel("Spectral intensity (W/m$^2$/nm)")
plt.title("Light from the sun");
# ## Properties of incident sunlight
# ### Put solar spectrum data in more convenient form
# It’s a bit more convenient for me to change the units for the solar spectrum, so that I can easily do integrals over photon energy, rather than wavelength, and calculate the number of photons instead of their energy. Therefore, I’ll define the function "SPhotonsPerTEA" which stands for Solar Photons per unit Time, per unit photon Energy-range, per unit Area of the solar cell (assuming the cell is facing normal to the sun). To convert from the AM1.5 data to these new units, the formula is:
#
# $\text{SPhotonsPerTEA} = \frac{d(\text{number of photons per unit time per unit area})}{dE} = \frac{d(\text{photon power per unit area})}{d\lambda} \; \frac{(\text{number of photons per unit time per unit area})}{(\text{photon power per unit area})} \left| \frac{d\lambda}{dE} \right| = $
# $ = (\text{AM1.5 spectrum}) \; \frac{1}{\text{photon energy}} \; \frac{hc}{E^2}$
#
# (I used $\left| \frac{d\lambda}{dE} \right| = \left| \frac{d}{dE} (\frac{hc}{E}) \right| = \frac{hc}{E^2}$.)
# In[9]:
def SPhotonsPerTEA(Ephoton):
λ = hPlanck * c0 / Ephoton
return AM15interp(λ) * (1 / Ephoton) * (hPlanck * c0 / Ephoton**2)
# Example: The following calculation means that there are $1.43 \times 10^{18}$ solar photons with energy between 2eV and 2.001eV that hit a 1-square-meter patch each second:
# In[10]:
print(SPhotonsPerTEA(2 * eV) * (1 * meV) * (1 * m**2) * (1 * s))
# Next: The "Solar constant" is the sun's total irradiance. If I did this right, it should be 1000 watts/meter$^2$, because that's how NREL normalized their data.
# In[11]:
PowerPerTEA = lambda E : E * SPhotonsPerTEA(E)
# quad() is ordinary integration; full_output=1 is (surprisingly) how you hide
# the messages warning about poor accuracy in integrating.
solar_constant = scipy.integrate.quad(PowerPerTEA,E_min,E_max, full_output=1)[0]
print(solar_constant / (W/m**2))
# Close enough!
#
# ### Photons above bandgap
#
# For a given bandgap, I’m defining a function which is the total number of solar photons with energy above the bandgap, per unit
# time, per unit ground area on earth.
# In[12]:
def solar_photons_above_gap(Egap):
return scipy.integrate.quad(SPhotonsPerTEA, Egap, E_max, full_output=1)[0]
# Here's an example: This means that $2.76 \times 10^{21}$ photons with energy above 1.1eV hit a one-square-meter patch of ground in one second:
# In[13]:
print(solar_photons_above_gap(1.1 * eV) * (1 * m**2) * (1 * s))
# Here's a plot:
# In[14]:
Egap_list = np.linspace(0.4 * eV, 3 * eV, num=100)
y_values = np.array([solar_photons_above_gap(E) for E in Egap_list])
plt.plot(Egap_list / eV , y_values / (1e21 * m**-2 * s**-1))
plt.xlabel("Bandgap energy (eV)")
plt.ylabel("Photons above bandgap ($10^{21}$ m$^{-2} \cdot $s$^{-1}$)");
# ## Solar cell recombination and J-V curve
#
# ### Recombination rate
#
# In the best possible case, the only cause of electron-hole-pair recombination is radiative recombination. Radiative recombination occurs when an electron and hole collide, so it depends on how many electrons and holes there are, or more specifically it depends
# on the electron and hole QFLs.
#
# ### Recombination rate when electron QFL = hole QFL ("QFL" is "Quasi-Fermi Level")
#
# This is the case where electron QFL = hole QFL throughout the semiconductor. An example is the solar cell at zero bias in the dark. Then it’s in thermal equilibrium and its radiation can be calculated by the blackbody formula – more specifically, assuming it’s a perfect blackbody above the bandgap and white-body below the bandgap. We also assume isotropic radiation from the top surface, and a mirror on the bottom surface.
#
# Let RR0 be the "Radiative Recombination rate at 0 QFL splitting", (per solar-cell area). By the blackbody formula:
# $$\text{RR0} = \frac{2\pi}{c^2 h^3} \int_{E_{gap}}^{\infty} \frac{E^2 dE}{\exp(E/(k_B T_{cell})) - 1}$$
# In[15]:
def RR0(Egap):
integrand = lambda E : E**2 / (np.exp(E / (kB * Tcell)) - 1)
integral = scipy.integrate.quad(integrand, Egap, E_max, full_output=1)[0]
return ((2 * np.pi) / (c0**2 * hPlanck**3)) * integral
# ## Recombination rate when electron QFL and hole QFL are split
#
# By kinetic theory, the radiative recombination rate is proportional to the product of electron concentration
# and hole concentration, $p\times n$. If you move the electron QFL up towards the conduction band by energy $E$,
# the electron concentration increases by $\exp(-E/kT)$. Likewise, if you move the hole QFL down towards the
# valence band by E, the hole concentration increases by $\exp(E/k_BT)$. Either way,
# $p\times n \propto \exp(E/k_BT)$, where $E$ is the QFL energy splitting.
#
# In the best possible case, the QFL splitting is equal to the external voltage (in reality, it may be larger
# than the external voltage). Therefore, the lowest possible radiative recombaniton rate is:
#
# $$\text{Recomb rate} = e \text{RR0} \exp(e V / k_B T_{cell}),$$
#
# where $V$ is the external voltage.
#
# <p style="font-size:80%">Note for pedants: I’m using the expression for radiative recombination $\frac{2\pi}{c^2 h^3} \exp(eV/k_B T_{cell})\int_{E_{gap}}^\infty \frac{E^2 dE}{\exp(E/k_B T_{cell})-1}.$ This isn't quite right: A more accurate expression is: $\frac{2\pi}{c^2 h^3} \int_{E_{gap}}^\infty \frac{E^2 dE}{\exp((E-eV)/k_B T_{cell})-1}.$ The difference is negligible except for tiny tiny bandgaps (less than 200meV). For explanation see <a href="http://dx.doi.org/10.1109/T-ED.1980.19950">link</a> or <a href="http://dx.doi.org/10.1007/BF00901283">link</a>. (Thanks Ze’ev!)</p>
#
# ## J-V curve
# The current is from the electron-hole pairs that are created but which don’t recombine. In the best case, all the solar photons possible are absorbed, while none recombine except radiatively. This gives:
#
# $$J = e (\text{SolarPhotonsAboveGap} - \text{RR0} (\exp(e V / k_B T_{cell}) - 1 ))$$
#
# where $J$ is the current per unit area, and $V$ is the forward bias on the junction. The "-1" on the right accounts for spontaneous
# generation of e-h pairs through thermal fluctuations at 300K. I will leave out the "-1" below because
# $\text{RR0} \ll \text{SolarPhotonsAboveGap}$, at least in the range of bandgaps that I'm plotting.
# In[16]:
def current_density(voltage, Egap):
return e * (solar_photons_above_gap(Egap) - RR0(Egap) * np.exp(e * voltage / (kB * Tcell)))
# ## Open-circuit voltage, short-circuit current
# In[17]:
def JSC(Egap):
return current_density(0, Egap)
def VOC(Egap):
return (kB * Tcell / e) * np.log(solar_photons_above_gap(Egap) / RR0(Egap))
# **Example:** An ideal 1.1eV-bandgap solar cell has a short-circuit current of 44 mA/cm², and an open-circuit voltage of 0.86V.
# In[18]:
print(JSC(1.1 * eV) / (mA / cm**2))
print(VOC(1.1 * eV) / V)
# **Plot:**
# In[19]:
Egap_list = np.linspace(0.4 * eV, 3 * eV, num=100)
JSC_list = np.array([JSC(E) for E in Egap_list])
plt.plot(Egap_list / eV , JSC_list / (mA / cm**2))
plt.xlabel("Bandgap energy (eV)")
plt.ylabel("Ideal short-circuit current (mA/cm$^2$)")
plt.title("Ideal short-circuit current as a function of bandgap")
plt.xlim(0.4, 3)
plt.ylim(0,70);
print("After plot 1")
# In[20]:
Egap_list = np.linspace(0.4 * eV, 3 * eV, num=20)
VOC_list = np.array([VOC(E) for E in Egap_list])
plt.plot(Egap_list / eV , VOC_list / V,
np.linspace(0,3) , np.linspace(0,3), '--')
plt.xlabel("Bandgap energy (eV)")
plt.ylabel("Ideal open-circuit voltage (V)")
plt.title("Ideal open-circuit voltage as a function of bandgap\n(dashed is bandgap)")
plt.xlim(0.4,3);
print("After plot 2")
# ## Ideal bandgap and maximum efficiency
#
# Given what we’ve already done, it’s now simple to calculate the ideal bandgap and efficiency, by numerically maximizing the product JV for each bandgap. The "maximum power point" (MPP) is the point on the JV curve at which this maximum occurs, the maximum power is the power generated at the MPP, and the efficiency is the power divided by the solar constant (i.e. incoming light power).
#
# In[21]:
# SciPy only comes with minimization, not maximization. Let's fix that...
from scipy.optimize import fmin
def fmax(func_to_maximize, initial_guess=0):
"""return the x that maximizes func_to_maximize(x)"""
func_to_minimize = lambda x : -func_to_maximize(x)
return fmin(func_to_minimize, initial_guess, disp=False)[0]
def V_mpp(Egap):
""" voltage at max power point """
return fmax(lambda voltage : voltage * current_density(voltage, Egap))
def J_mpp(Egap):
""" current at max power point """
return current_density(V_mpp(Egap), Egap)
def max_power(Egap):
voltage = V_mpp(Egap)
return voltage * current_density(voltage, Egap)
def max_efficiency(Egap):
return max_power(Egap) / solar_constant
# **Example:** An ideal 1.1eV-bandgap solar cell has an efficiency of 32.9%.
# In[22]:
print(max_efficiency(1.1 * eV))
# **Plot:** The famous SQ efficiency limit!
#
# In[23]:
Egap_list = np.linspace(0.4 * eV, 3 * eV, num=100)
eff_list = np.array([max_efficiency(E) for E in Egap_list])
plt.plot(Egap_list / eV , 100 * eff_list)
plt.xlabel("Bandgap energy (eV)")
plt.ylabel("Max efficiency (%)")
plt.title("SQ efficiency limit as a function of bandgap")
plt.xlim(0.4, 3)
plt.ylim(0,35);
print("After plot 3")
# ### Fill factor
# In[24]:
def fill_factor(Egap):
return max_power(Egap) / (JSC(Egap) * VOC(Egap))
# **Plot:**
# In[25]:
Egap_list = np.linspace(0.4 * eV, 3 * eV, num=30)
FF_list = np.array([fill_factor(E) for E in Egap_list])
plt.plot(Egap_list / eV , FF_list)
plt.xlabel("Bandgap energy (eV)")
plt.ylabel("Ideal fill factor")
plt.title("Ideal fill factor as a function of bandgap")
plt.xlim(0.4, 3)
plt.ylim(0,1);
print("After plot 4")
# # Quantifying losses
#
# We split the incoming light power into five parts:
#
# * Power converted into useful electricity;
# * Power of below-bandgap photons, which is wasted power because these photons are not absorbed;
# * Excess photon energy beyond the bandgap, which is wasted because the electron and hole just immediately relax to the band edges. For example, for a 1eV-bandgap semiconductor, a 3eV photon creates the same electron-hole pair as a 1.01eV photon. All the 2eV of extra energy carried by a 3eV photon in that case is wasted.
# * Power lost due to electron-hole
# recombination at the max-power-point,
# which is wasted as heat;
# * Power lost because the voltage of the cell at the max-power-point is less than the bandgap.
#
# To say the same thing using equations:
#
# $(\text{Light power in}) = V_{MPP} \times I_{MPP}$<br>
# $ \qquad + (\text{Power of below-bandgap photons})$<br>
# $ \qquad + (\text{Power of above-bandgap photons} - \text{Number of above-bandgap photons} \times \text{Bandgap energy})$<br>
# $ \qquad + ((\text{Number of above-bandgap photons}) - I_{MPP} / e) \times (\text{Bandgap energy})$<br>
# $ \qquad + I_{MPP} \times (\text{Bandgap voltage} - V_{MPP})$<br>
#
# I’ll write everything as a fraction of the incident light power.
# In[26]:
def useful_electricity(Egap):
return max_efficiency(Egap)
def below_gap_energy(Egap):
integrand = lambda E : E * SPhotonsPerTEA(E)
return scipy.integrate.quad(integrand, E_min, Egap, full_output=1)[0] / solar_constant
def excess_beyond_gap(Egap):
integrand = lambda E : (E - Egap) * SPhotonsPerTEA(E)
return scipy.integrate.quad(integrand, Egap, E_max, full_output=1)[0] / solar_constant
def mpp_recombination(Egap):
return (solar_photons_above_gap(Egap) - J_mpp(Egap) / e) * Egap / solar_constant
def mpp_voltage_is_less_than_gap(Egap):
return J_mpp(Egap) * (Egap / e - V_mpp(Egap)) / solar_constant
# **Example:** Accounting for the energy and losses of an ideal 1.1eV solar cell. Everything adds up to 100% (within numerical accuracy) as expected.
# In[27]:
breakdown = (useful_electricity(1.1 * eV),
below_gap_energy(1.1 * eV),
excess_beyond_gap(1.1 * eV),
mpp_recombination(1.1 * eV),
mpp_voltage_is_less_than_gap(1.1 * eV))
print(breakdown)
# In[28]:
sum(breakdown)
# **Plot**
# In[29]:
Egap_list = np.linspace(0.4 * eV, 3 * eV, num=100)
loss_list = [[useful_electricity(Egap), below_gap_energy(Egap), excess_beyond_gap(Egap),
mpp_recombination(Egap), mpp_voltage_is_less_than_gap(Egap)] for Egap in Egap_list]
loss_list = np.array(loss_list)
# cumulative sums to stack the contributions on top of each other
loss_list = np.cumsum(loss_list,axis=1)
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.fill_between(Egap_list / eV, 0, loss_list[:,0], facecolor="k")
ax1.fill_between(Egap_list / eV, loss_list[:,0], loss_list[:,1], facecolor="m")
ax1.fill_between(Egap_list / eV, loss_list[:,1], loss_list[:,2], facecolor="g")
ax1.fill_between(Egap_list / eV, loss_list[:,2], loss_list[:,3], facecolor="b")
ax1.fill_between(Egap_list / eV, loss_list[:,3], 1, facecolor="0.75")
plt.title('POWER GOES TO...\n'
'Useful electricity (black);\n'
'Below-gap photons (magenta);\n'
'e-h relaxation to the band edges (green);\n'
'Current loss from radiative recombination (blue)\n'
'Voltage is less than bandgap (gray)')
plt.xlabel('Bandgap (eV)')
plt.ylabel('Fraction of incident light power')
plt.xlim(0.4, 3)
plt.ylim(0,1);
# # Partial list of "loopholes" to exceed the SQ limit
#
# Exceeding the SQ limit is a sort of obsession of solar cell physicists. The standard reference book on this subject is *Third Generation Photovoltaics* by Martin Green. Here is a partial list.
#
# ## Tandem solar cells
#
# Tandem solar cells can and do exceed the SQ limit. In the SQ model, it is assumed that photons with energy below the bandgap are not absorbed at all, and with energy far above the bandgap are absorbed but all that excess energy is wasted. A tandem solar cell allows high-energy photons to excite a large-bandgap solar cell, and lower-energy photons to excite a smaller-bandgap solar cell. Usually 2 or 3 or 4 solar cells are used. Tandem solar cells are widely used and available. They are used commercially to make the very highest-efficiency most-expensive solar cells. They are also used commercially in some lower-end solar cells, such as amorphous silicon.
#
# In the above derivation, where exactly is the loophole that tandem cells take advantage of? It is the step where I wrote "In the best possible case, the QFL splitting is equal to the external voltage (in reality, it may be larger than the external voltage)." Normally, this is true because, to get a net flow of photogenerated electrons into the cathode lead, the electron QFL has to tilt downward towards the cathode lead, and likewise to get a net flow of photogenerated holes into the anode lead, the hole QFL has to tilt upwards towards the cathode lead. However, in a tandem cell, the QFLs repeatedly split apart and come back together, such that the external voltage can be much greater than the QFL splitting at any given point.
#
# ## Light concentration
#
# The sun is a small point in the sky, so sunlight can be focused to very high concentrations (in theory as much as 50,000X!) Higher concentrations generally lead to higher efficiencies, assuming the solar panel does not heat up too much. In other words, if you focus sunlight to make it 100X more intense on a solar cell, the power can go up by *more* than 100X in the SQ model. Although light concentration is commonly used in practice for solar cells, the possible modest efficiency gain is *not* the *primary* reason that concentrated solar cells are used, in my understanding. More important is that small solar cells with huge mirrors/lenses can sometimes be cheaper than huge solar cells with no mirrors/lenses.
#
# ## Anisotropic radiation
#
# If you engineer a solar cell to only radiate in one direction, its theoretical efficiency increases. (The direction it radiates has to point towards the sun, otherwise light cannot get in.) The theoretical increase here is the same as if you were doing light-concentration. Unfortunately, in practice, it is not as helpful as you might hope, because it reduces radiative recombination but does not reduce nonradiative recombination.
#
# ## Hot electrons
#
# Electrons are usually excited to an energy far above the conduction-band-minimum, and holes far below the valence-band-maximum. Usually they relax to the band edges very quickly. If you can stop that relaxation, you can theoretically exceed the SQ limit by a lot. It seems to be very difficult in practice, and so far there is not even a proof-of-principle laboratory demonstration of a complete hot-electron device.
#
# ## Multiple-exciton generation
#
# In the SQ calculation, it is assumed that each photon above the bandgap creates just one electron-hole pair. In theory, a photon at twice the bandgap could produce two e-h pairs, at 3X the bandgap could produce 3, etc. This is called "Multiple-exciton generation" (MEG). There has been a lot of work on this in recent years, particularly in quantum dots, and for many years there was controversy over whether MEG had been observed. The "smoking gun" for MEG–i.e., unarguable proof of its occurrance–is a device with "external quantum efficiency" above 100% (i.e., more than one electron flows through the ammeter, for each photon coming into the device). After a lot of work, that milestone was finally reached in December 2011: [link](http://dx.doi.org/10.1126/science.1209845). So now we know for sure that MEG is possible in some devices. Of course, we are still a long way away from seeing a commercial device that takes advantage of MEG.
#
# ## Intermediate-band solar cell
#
# In the SQ calculation, it is assumed that photons below the bandgap are wasted. Instead, it is possible to have an energy level in the gap, and excite electrons from the valence to conduction band in two steps. There have been a few laboratory proof-of-principle demonstrations of the concept: [link 1](http://dx.doi.org/10.1103/PhysRevLett.106.028701), [link 2](http://dx.doi.org/10.1063/1.3166863), [link 3](http://dx.doi.org/10.1103/PhysRevLett.97.247701). So far there is no demonstration that intermediate-band solar cells can have high efficiencies in practice.
#
# ## Chemical upconversion
#
# In the SQ calculation, it is assumed that photons below the bandgap are wasted. Instead, it is possible to incorporate an upconversion chemical that absorbs two low-energy photons and emits one high-energy photon. Such chemicals exist, and upconverting solar cells have been demonstrated, but so far these have not been incorporated into commercial solar cells, I think primarily because the chemicals are expensive rare-earth compounds, and they can only upconvert within a narrow wavelength range. (But I’m not sure.)
#
# ## Chemical downconversion
#
# In the SQ calculation, it is assumed that each photon above the bandgap creates just one electron-hole pair. In theory, a photon at twice the bandgap could produce two e-h pairs, at 3X the bandgap could produce 3, etc. The idea of "downconversion" is to incorporate a downconversion chemical that absorbs one very-high-energy photon and re-radiates two lower-energy photons (lower-energy but still above the bandgap) that add up to the energy of the original photon. The system is only worthwhile if each incoming high-energy photon is converted to *more than 1 on average* outgoing above-bandgap photon. This requires high downconversion efficiency and very low loss, and this benchmark that has not yet been reached (as far as I know).
#
# ## Non-blackbody solar cell and "photon recycling"
#
# Say you have a material with too small a bandgap, like 0.5eV, but you want it to use it in a solar cell anyway. You are at a disadvantage because the Shockley-Queisser efficiency limit for 0.5eV bandgap is much lower than the limit for, say, 1.2eV bandgap. However, what you can do is put your solar cell behind a dichroic mirror or coating which reflects all light with frequency below 1.2eV and transmits all light with frequency above 1.2eV. If you do this, the 0.5eV-bandgap material behaves as if it had a 1.2eV bandgap, and in particular it has the same theoretical efficiency limit as if it had a 1.2eV bandgap. What’s happening is that most of the photons emitted in electron-hole recombination events in the semiconductor are reflected right back into the semiconductor, where they are reabsorbed into a new electron-hole pair. Therefore you can get a huge density of electrons and holes with very little net radiative recombination. These electrons and holes fill up the conduction and valence bands until the "Dynamic Burstein-Moss"-shifted bandgap is close to 1.2eV. Another way to look at it is, you have made the material into a whitebody below 1.2eV and a blackbody above 1.2eV, which is the same as a 1.2eV bandgap material. See [link](http://dx.doi.org/10.1063/1.3682101) for more details.
#
# More generally, when a radiative recombination photon is re-absorbed to make a new electron-hole pair, as in the previous paragraph, it’s sometimes called "Photon recycling". Photon recycling is important for modeling solar cells, for example <a href="http://dx.doi.org/10.1016/0927-0248(93)90142-P">link</a>. However, photon recycling cannot on its own make a solar cell overcome the overall Shockley-Queisser limit of ~34%. (An exception is the anisotropic radiation design, see above.) Moreover, photon recycling reduces net radiative recombination, but does not reduce nonradiative recombination, so it’s not as helpful as you might otherwise hope.
|
{"hexsha": "5e3ba66a20ef0a70dbd60cdb6bba98c5d0cfff3e", "size": 26673, "ext": "py", "lang": "Python", "max_stars_repo_path": "sq.py", "max_stars_repo_name": "NREL/PVwindow", "max_stars_repo_head_hexsha": "df7091c9d1ebd280aca53c50015e3b1ee7a3183e", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sq.py", "max_issues_repo_name": "NREL/PVwindow", "max_issues_repo_head_hexsha": "df7091c9d1ebd280aca53c50015e3b1ee7a3183e", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sq.py", "max_forks_repo_name": "NREL/PVwindow", "max_forks_repo_head_hexsha": "df7091c9d1ebd280aca53c50015e3b1ee7a3183e", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 51.7922330097, "max_line_length": 1308, "alphanum_fraction": 0.7292767968, "include": true, "reason": "import numpy,import scipy,from scipy", "num_tokens": 7500}
|
#include <Eigen/Core>
#include <Eigen/Geometry>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <pcl/io/pcd_io.h>
#include <pcl/visualization/cloud_viewer.h>
#include <pcl/common/transforms.h>
#include <iostream>
#include <fstream>
#include <string>
int main(int argc, char* argv[])
{
if(argc != 3)
{
std::cout << "Need 2 argument for Area and Room!" << std::endl;
return 0;
}
std::string Area_num = argv[1];
std::string Area_name = "Area_" + Area_num;
std::string Room_name = argv[2];
pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZRGB>);
pcl::PCDWriter Pclwriter;
std::ifstream read_file;
std::cout << "Opening : ../Stanford3dDataset_v1.2_Aligned_Version/"
+ Area_name + "/" + Room_name + "/" + Room_name + ".txt" << std::endl;
read_file.open("../Stanford3dDataset_v1.2_Aligned_Version/"
+ Area_name + "/" + Room_name + "/" + Room_name + ".txt", ios::binary);
std::string line;
float pos;
float color;
pcl::PointXYZRGB point;
float midpoint[3] = {0};
std::vector<pcl::PointXYZRGB> PointVec;
while(std::getline(read_file, line))
{
std::stringstream lineinput(line);
lineinput >> pos;
point.x = pos;
lineinput >> pos;
point.y = pos;
lineinput >> pos;
point.z = pos;
lineinput >> color;
point.r = int(color);
lineinput >> color;
point.g = int(color);
lineinput >> color;
point.b = int(color);
PointVec.push_back(point);
midpoint[0] -= point.x;
midpoint[1] -= point.y;
midpoint[2] -= point.z;
}
cloud->width = PointVec.size();
cloud->height = 1;
cloud->resize(cloud->height * cloud->width);
for(uint i = 0;i < PointVec.size();i++)
{
cloud->points[i] = PointVec[i];
}
std::cout << "File read successfully" << std::endl;
Eigen::Affine3f transform = Eigen::Affine3f::Identity();
Eigen::Vector3f translation;
translation << midpoint[0]/PointVec.size(), midpoint[1]/PointVec.size(), midpoint[2]/PointVec.size();
transform.translate(translation);
pcl::transformPointCloud(*cloud, *cloud, transform);
pcl::visualization::CloudViewer viewer("Dataset Visualizer");
viewer.showCloud(cloud);
while(!viewer.wasStopped ())
{
}
// Pclwriter.write("../savings/" + Area_num + "_" + Room_num + ".pcd",*(cloud));
return 0;
}
|
{"hexsha": "be400482b1b99a5c435d1eba605549f3b9baf4a5", "size": 2526, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "data/run_dataset_visualizer.cpp", "max_stars_repo_name": "StarRealMan/StarNet", "max_stars_repo_head_hexsha": "5fd36b4a545a494eb4dc6d309469696b5d2f8abb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2020-12-02T14:16:26.000Z", "max_stars_repo_stars_event_max_datetime": "2020-12-02T14:20:15.000Z", "max_issues_repo_path": "data/run_dataset_visualizer.cpp", "max_issues_repo_name": "StarRealMan/StarNet", "max_issues_repo_head_hexsha": "5fd36b4a545a494eb4dc6d309469696b5d2f8abb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data/run_dataset_visualizer.cpp", "max_forks_repo_name": "StarRealMan/StarNet", "max_forks_repo_head_hexsha": "5fd36b4a545a494eb4dc6d309469696b5d2f8abb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1.0, "max_forks_repo_forks_event_min_datetime": "2020-12-02T15:04:40.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-02T15:04:40.000Z", "avg_line_length": 26.3125, "max_line_length": 105, "alphanum_fraction": 0.5958036421, "num_tokens": 655}
|
# -*- coding: utf-8 -*-
"""
# Light field images: input_Cam000-080.png
# All viewpoints = 9x9(81)
# -- LF viewpoint ordering --
# 00 01 02 03 04 05 06 07 08
# 09 10 11 12 13 14 15 16 17
# 18 19 20 21 22 23 24 25 26
# 27 28 29 30 31 32 33 34 35
# 36 37 38 39 40 41 42 43 44
# 45 46 47 48 49 50 51 52 53
# 54 55 56 57 58 59 60 61 62
# 63 64 65 66 67 68 69 70 71
# 72 73 74 75 76 77 78 79 80
# We use star-shape 9x9 viewpoints
# for depth estimation
#
# 00 04 08
# 10 13 16
# 20 22 24
# 30 31 32
# 36 37 38 39 40 41 42 43 44
# 48 49 50
# 56 58 60
# 64 67 70
# 72 76 80
"""
#import numpy as np
import numpy as np
import os, sys
from datasets import hci, lytro
import cv2
import time
from epinet_fun.func_pfm import write_pfm
#from epinet_fun.func_makeinput import make_epiinput
from epinet_fun.func_makeinput import make_multiinput
from epinet_usefuls import infer_cos_from_corner
from epinet_fun.func_cepinetmodel import define_cepinet
from tensorflow import keras
#from tensorflow.keras.utils import plot_model
import matplotlib.pyplot as plt
from shutil import copyfile
import inspect
from glob import glob
###CONFIG####
os.environ["CUDA_VISIBLE_DEVICES"]="0"
ds= hci("/path/to/heidelberg_full_data/") #'hci' # lytro, hci, lytro_lenslet
samples = ds.samples
#samples = ["town","kitchen","museum","vinyl"]
run_dir = '/media/emre/Data/cepinet_runs/08-05-12-40_greek/'
model_iter = '0034'
corner_coordss = ((11,11),)#"all"
#corner_coords = (0,0) # for hci: 0,0 : NW 0,8: NE 8,0: SW 8,8: SE
#corner_code = "SE"
'''set Model parameters '''
model_conv_depth=7
model_filt_num=70
model_cw = 9
############
path_weight = glob(run_dir + 'logs/*'+model_iter+"*")[0]
test_dir = run_dir + "tests/"
output_dir = test_dir + model_iter + '__' + time.strftime("%d-%m-%H-%M")+'__all_corners__'+ds.ds_name+'/'
#output_dir = '/media/emre/Data/cepinet_runs/08-05-12-40_greek/tests/0034__09-05-14-02__all_corners__lytro/'
Setting02_AngualrViews = range(model_cw) # number of views ( 0~8 for 9x9 )
#if __name__ == '__main__':
# Input : input_Cam000-080.png
# Depth output : image_name.pfm
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if not os.path.exists(output_dir+'pfms/'):
os.makedirs(output_dir+'pfms/')
curr_file = inspect.getfile(inspect.currentframe()) # script filename (usually with path)
copyfile(curr_file,output_dir + curr_file.split("/")[-1])
# GPU setting ( gtx 1080ti - gpu0 )
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
'''
/// Setting 1. LF Images Directory
Setting01_LFdir = 'synthetic': Test synthetic LF images (from 4D Light Field Benchmark)
"A Dataset and Evaluation Methodology for
Depth Estimation on 4D Light Fields".
http://hci-lightfield.iwr.uni-heidelberg.de/
Setting01_LFdir = 'Lytro': Test real LF images(Lytro)
'''
'''
/// Setting 2. Angular Views
# ------ 5x5 viewpoints -----
#
# 20 22 24
# 30 31 32
# 38 39 40 41 42
# 48 49 50
# 56 58 60
#
# ---------------------------
# ------ 9x9 viewpoints -----
#
# 00 04 08
# 10 13 16
# 20 22 24
# 30 31 32
# 36 37 38 39 40 41 42 43 44
# 48 49 50
# 56 58 60
# 64 67 70
# 72 76 80
#
# ---------------------------
'''
img_scale=1 # 1 for small_baseline(default) <3.5px,
# 0.5 for large_baseline images < 7px
img_scale_inv=int(1/img_scale)
model_learning_rate=0
corner_dict = ds.get_corner_dict(model_cw)
if corner_coordss == "all":
corner_coordss = corner_dict.keys()
for corner_coords in corner_coordss:
corner_code = corner_dict[corner_coords]
image_h = ds.ir_max
image_w = ds.ic_max
if corner_code in ("NE","SW"):
model_512=define_cepinet(round(img_scale*image_w),
round(img_scale*image_h),
model_cw,
model_conv_depth,
model_filt_num,
model_learning_rate)
else:
model_512=define_cepinet(round(img_scale*image_h),
round(img_scale*image_w),
model_cw,
model_conv_depth,
model_filt_num,
model_learning_rate)
''' Model Initialization '''
model_512.load_weights(path_weight)
dum_sz=model_512.input_shape[0]
dum=np.zeros((1,dum_sz[1],dum_sz[2],dum_sz[3]),dtype=np.float32)
dummy=model_512.predict([dum, dum, dum],batch_size=1)
""" Depth Estimation """
center_offset = infer_cos_from_corner(corner_coords,corner_code,an=ds.iar_max,model_cw=model_cw)
print ( "center offset: " )
print ( center_offset )
print ("corner code:")
print(corner_code)
for sample in samples:
(val_90d , val_0d, val_M45d, impaths)=make_multiinput(sample,
Setting02_AngualrViews,
ds=ds,center_offsets=center_offset, net_type="corner", corner_code = corner_code)
start=time.clock()
# predict
val_output_tmp=model_512.predict([ val_90d[:,::img_scale_inv,::img_scale_inv],
val_0d[:,::img_scale_inv,::img_scale_inv],
val_M45d[:,::img_scale_inv,::img_scale_inv]],
batch_size=1);
if corner_code=="NE":
val_output_tmp = np.rot90(val_output_tmp,axes=(2,1))
elif corner_code=="SW":
val_output_tmp = np.rot90(val_output_tmp,axes=(1,2))
elif corner_code=="SE":
val_output_tmp = np.rot90(val_output_tmp,k=2,axes=(2,1))
runtime=time.clock() - start
plt.imshow(val_output_tmp[0,:,:,0])
plt.imsave(output_dir + sample + "_" + str(corner_coords[0])+ "_" + str(corner_coords[1]) + '.png',val_output_tmp[0,:,:,0])
plt.show()
print("runtime: %.5f(s)" % runtime)
# save .pfm file
pfm_path = output_dir+"pfms/"+ sample + "_" +str(corner_coords[0])+ "_" + str(corner_coords[1]) + '.pfm'
write_pfm(val_output_tmp[0,:,:,0], pfm_path )
print('pfm file saved in ' + pfm_path)
|
{"hexsha": "0839d6e887afb113940e4f47eba9d051b1e37d9a", "size": 7085, "ext": "py", "lang": "Python", "max_stars_repo_path": "cEPINET_test.py", "max_stars_repo_name": "marmus12/CornerView", "max_stars_repo_head_hexsha": "f76cd1cb4c402c59bafbf66b5e038c2d1ab9610b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-03-27T13:36:18.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-28T13:56:15.000Z", "max_issues_repo_path": "cEPINET_test.py", "max_issues_repo_name": "marmus12/CornerView", "max_issues_repo_head_hexsha": "f76cd1cb4c402c59bafbf66b5e038c2d1ab9610b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cEPINET_test.py", "max_forks_repo_name": "marmus12/CornerView", "max_forks_repo_head_hexsha": "f76cd1cb4c402c59bafbf66b5e038c2d1ab9610b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.8937007874, "max_line_length": 142, "alphanum_fraction": 0.5378969654, "include": true, "reason": "import numpy", "num_tokens": 1947}
|
\documentclass{article}
\usepackage{alltt}
\newcommand{\RewriteGen}{{\sf RewriteGen}}
\newcommand{\KW}[2]{\newcommand{#1}[1]{{\bf #2}\ }}
\newcommand{\END}{{\bf end}}
\newcommand{\OF}{{\bf of}}
\KW{\FUN}{fun} \KW{\VAL}{val}
\KW{\CASE}{case} \KW{\LET}{let}
\KW{\INCLUDE}{include} \KW{\SIGNATURE}{signature} \KW{\DATATYPE}{datatype}
\newcommand{\SIG}{{\bf sig}}
\KW{\IN}{in}
\KW{\AND}{and}
\title{\RewriteGen{} 1.0 Manual}
\author{Allen Leung\thanks{New York University. Email:{\tt leunga@cs.nyu.edu}}}
\begin{document}
\maketitle
\section{Introduction}
Often, given a set of recursively
datatype definitions, {\em natural} generalizations for
functions such as \verb|map|,
\verb|fold|, and \verb|app| exist, defined by the inductive structure
of the datatypes. However, in Standard ML, these functions
must be painstakingly written for each datatype. Furthermore, whenever
the definition of the datatype changes, these must be modified to match
the new definitions. This is often an error prone process.
\RewriteGen\ is a source-to-source translator that automates some of the
mundane bookkeeping of writing transformations on user defined datatypes.
It can automatically generate rewriters, app and fold functions.
Originally, the core transformation routines of \RewriteGen\
are developed for the MDGen machine description generator.
But they turn out to be useful for writing MDGen itself.
The result is this tool.
\section{An Example}
Suppose that in file \verb|"wff.sig"|
we are given a datatype definition for well-formed formulas in logic:
\begin{alltt}
\SIGNATURE WFF =
\SIG
\DATATYPE wff = FALSE | TRUE | VAR \OF string
| AND \OF wff * wff
| OR \OF wff * wff
| NOT \OF wff
\VAL simplify : wff -> wff
\END
\end{alltt}
\noindent and we want to implement the function \verb|simplify|,
which simplifes complex wffs.
We can do this by writing the template:
\begin{alltt}
\FUN simplify e =
Generic.rewrite(
\LET \INCLUDE "wff.sig"
\FUN wff (NOT FALSE) = TRUE
| wff (NOT TRUE) = FALSE
| wff (NOT(NOT x)) = x
| wff (AND(TRUE,x)) = x
| wff (AND(x,TRUE)) = x
| wff (AND(FALSE,x)) = FALSE
| wff (AND(x,FALSE)) = FALSE
| wff (OR(TRUE,x)) = TRUE
| wff (OR(x,TRUE)) = TRUE
| wff (OR(FALSE,x)) = x
| wff (OR(x,FALSE)) = x
\IN rewrite'wff e
\END)
\end{alltt}
The \verb|Generic.rewrite|
template will read the datatype specification from the
file \verb|"wff.sig"|,
then create a rewriting function for the type \verb|wff| using the
normalization rules specified by the user as the function \verb|wff|.
The generated rewriter
will try to apply the normalization rules\footnote{Once} for each
subpart of a wff. The rewriting function
is called \verb|rewrite'wff|.
In this example, the template is transformed into the following SML code:
\begin{alltt}
\FUN simplify e =
\LET \FUN rewrite'wff redex =
\LET \VAL redo = rewrite'wff
\IN \CASE redex \OF
FALSE => redex
| TRUE => redex
| VAR string => redex
| AND(wff1, wff2) =>
(\CASE (rewrite'wff wff1, rewrite'wff wff2) \OF
(TRUE, x) => x
| (x, TRUE) => x
| (FALSE, x) => FALSE
| (x, FALSE) => FALSE
| arg => AND arg
)
| OR(wff1, wff2) =>
(\CASE (rewrite'wff wff1, rewrite'wff wff2) \OF
(TRUE, x) => TRUE
| (x, TRUE) => TRUE
| (FALSE, x) => x
| (x, FALSE) => x
| arg => OR arg
)
| NOT wff =>
(\CASE rewrite'wff wff \OF
FALSE => TRUE
| TRUE => FALSE
| NOT x => x
| arg => NOT arg
)
)
\END
\IN rewrite'wff e
\END
\end{alltt}
\subsection{App and fold functions}
App and fold functions can also be easily generated.
For example, suppose we want to write a function that counts all occurrances
of \verb|NOT| in a wff. This can be specified with the
\verb|Generic.app| template:
\begin{alltt}
\FUN countNots e =
\LET \VAL count = ref 0
\IN Generic.app(
\LET \INCLUDE "wff.sig"
\FUN wff (NOT _) = count := !count + 1
| wff _ = ()
\IN app'wff e;
!count
\END)
\END
\end{alltt}
Alternatively, we can implement the same function
functionally using the \verb|Generic.fold| template:
\begin{alltt}
\FUN countNots2 e =
Generic.fold(
\LET include "wff.sig"
\FUN wff (NOT _, n) = n+1
| wff (_, n) = n
\IN fold'wff(e, 0)
\END)
\end{alltt}
Similarly, a function that enumerates all the identifiers in a wff
can be written as:
\begin{alltt}
\FUN allVars e =
Generic.fold(
\LET include "wff.sig"
\FUN wff (VAR v, vs) = v::vs
| wff (_, vs) = vs
\IN fold'wff(e, [])
\END)
\end{alltt}
This function has type \verb|addVars : wff -> string list|.
\section{Multiple Sorts}
Real life applications usually make use of multiple
mutually recursive datatypes. In general, mutually recursive
transformations functions such as rewrite, fold, and app, can be also defined
by specifying a set of rules for each of the datatypes that are involved
in the recursion.
For example, suppose the abstract syntax tree
for a simple function programming language includes
the datatypes \verb|expr| (expression),
\verb|decl| (declaratons).
\begin{alltt}
\DATATYPE expr =
LET \OF decl * expr
| APPLY \OF expr * expr
| ID \OF string
| LAMBDA \OF string * expr
| {\em others}
\AND decl =
VAL \OF string * expr
| VALREC \OF string * expr
| LOCAL \OF decl * decl
| {\em others}
\end{alltt}
We can define a rewriter that renames every identifier
from lowercase to uppercase as follows:
\begin{alltt}
\FUN renameToUpperCase program =
Generic.rewrite(
\LET {\em include datatype definitions}
\FUN expr (ID x) = ID(String.map Char.toUpper x)
\AND decl d = d
\IN rewrite'decl program
\END
)
\end{alltt}
Note that:
\begin{enumerate}
\item the function \verb|decl| is necessary to tell that
\RewriteGen\ that the type \verb|decl| involved in the recursion
in this set of rewriting rules.
\item the functions \verb|decl| and \verb|expr| must be defined
mutual recursively with the \AND{} connector.
\end{enumerate}
\section{Lists and other Type Constructors}
Suppose we have the following definition of datatype
\verb|exp| (expression), where we make use of lists.
\begin{alltt}
\DATATYPE exp = OPER \OF string * exp list
| VAR \OF string
| {\em others}
\end{alltt}
Let's say this definition is contained in the file \verb|"exp-def.sig"|.
We can define a rewriter that translates all unary expression $+e$
into $e$ as follows:
\begin{alltt}
\FUN transform e =
Generic.rewrite
( \LET \INCLUDE "exp-def.sig"
\DATATYPE 'a list = nil | :: \OF 'a * 'a list
\FUN list x = x
\FUN exp (OPER("+",[x])) = x
\IN rewrite'exp e
\END
)
\end{alltt}
Note that we have to make the definition of
type \verb|list| visible\footnote{We can also import it from some file
via \INCLUDE.}, and define the function \verb|list| to tell \RewriteGen{}
that type \verb|list| is involved in the rewriting.
In general, user defined polymorphic datatypes are also supported.
We generate code by arity raising\footnote{More on this later.}.
Of course, we cannot define transformations that make use of
polymorphic recursion, since SML cannot type such programs.
\section{Running \RewriteGen}
\RewriteGen\ is a source-to-source translator. First, you should
build the system by running the script \verb|"build.sml"| in this
directory. This will create an SML image called \verb|rwgen.<arch>|.
To run the program, type:
\begin{alltt}
sml @SMLload=rwgen {\em input-filename} > {\em output-filename}
\end{alltt}
\noindent on the Unix commmand.
The input file can contain arbitrary SML code. \RewriteGen\ looks
for templates like \verb|Generic.rewrite|, \verb|Generic.app|,
and \verb|Generic.fold|, transform those templates, and
leave the rest of the code unchanged.
\section{Bugs and Missing Features}
There are too many to list, but some important ones are:
\begin{enumerate}
\item
\RewriteGen\ is understands only the syntax of SML but not its semantics,
so it can get horribly confused with scoping rules and/or variable captures.
\item \RewriteGen\ can get confused with \verb|infix|, \verb|infixr|
declarations. Try to avoid them when possible.
\item
The generated code for a rewriter is not ideal. Better algorithms
should be used in the future.
\item
Currently conditional patterns (i.e. patterns with guards)
are not supported but this should be available in the next version,
once I hook up the match compiler into the tool.
\item
Subterms built with type constructors, such as lists, are currently not
handled entirely properly.
\item
Binary polytypic functions like \verb|equal|, or \verb|zip|, are currently
missing. Also we should also be able to generate, pretty printers,
parsers, pickers and unpickers. But these are currently missing.
\item
Ideally, there should be a generic mechanism for defining new
generic templates. This is probably the best way of extending the
tool. I'll consider how to add this later.
\end{enumerate}
\end{document}
|
{"hexsha": "6e5c38101bba048c97750ec893907824f7c98f10", "size": 9756, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "build/lib/sml/mlrisc-lib/Tools/Doc/rewrite-gen.tex", "max_stars_repo_name": "Bxc8214/mlton-test", "max_stars_repo_head_hexsha": "153db2d029f5191b26d68361922be34eabf4cac9", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-01-12T07:08:31.000Z", "max_stars_repo_stars_event_max_datetime": "2019-01-12T07:08:31.000Z", "max_issues_repo_path": "lib/mlrisc-lib/MLRISC/Tools/Doc/rewrite-gen.tex", "max_issues_repo_name": "Bxc8214/mlton-test", "max_issues_repo_head_hexsha": "153db2d029f5191b26d68361922be34eabf4cac9", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/mlrisc-lib/MLRISC/Tools/Doc/rewrite-gen.tex", "max_forks_repo_name": "Bxc8214/mlton-test", "max_forks_repo_head_hexsha": "153db2d029f5191b26d68361922be34eabf4cac9", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.8484848485, "max_line_length": 82, "alphanum_fraction": 0.6468839688, "num_tokens": 2730}
|
"""
Tools for nonparametric statistics, mainly density estimation and regression.
For an overview of this module, see docs/source/nonparametric.rst
"""
from statsmodels.tools._testing import PytestTester
test = PytestTester()
|
{"hexsha": "bdd0fc3a075b4a0f46ac85251bf1990ca5cc8695", "size": 229, "ext": "py", "lang": "Python", "max_stars_repo_path": "venv/Lib/site-packages/statsmodels/nonparametric/__init__.py", "max_stars_repo_name": "EkremBayar/bayar", "max_stars_repo_head_hexsha": "aad1a32044da671d0b4f11908416044753360b39", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6931, "max_stars_repo_stars_event_min_datetime": "2015-01-01T11:41:55.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T17:03:24.000Z", "max_issues_repo_path": "venv/Lib/site-packages/statsmodels/nonparametric/__init__.py", "max_issues_repo_name": "EkremBayar/bayar", "max_issues_repo_head_hexsha": "aad1a32044da671d0b4f11908416044753360b39", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6137, "max_issues_repo_issues_event_min_datetime": "2015-01-01T00:33:45.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T22:53:17.000Z", "max_forks_repo_path": "venv/Lib/site-packages/statsmodels/nonparametric/__init__.py", "max_forks_repo_name": "EkremBayar/bayar", "max_forks_repo_head_hexsha": "aad1a32044da671d0b4f11908416044753360b39", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2608, "max_forks_repo_forks_event_min_datetime": "2015-01-02T21:32:31.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T07:38:30.000Z", "avg_line_length": 22.9, "max_line_length": 77, "alphanum_fraction": 0.7991266376, "include": true, "reason": "from statsmodels", "num_tokens": 48}
|
import logging
import argparse
import ast
from collections import OrderedDict
import torch
import os
import scipy
from datetime import datetime
import time
import math
import random
import numpy as np
import sys
import cv2
irange = range
def mylogger(logpath='./param.log'):
logger = logging.getLogger('mylogger')
logger.setLevel('DEBUG')
logger.propagate = False
BASIC_FORMAT = "%(asctime)s:%(levelname)s:%(message)s"
DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
formatter = logging.Formatter(BASIC_FORMAT, DATE_FORMAT)
fhlr = logging.FileHandler(logpath) #
fhlr.setFormatter(formatter)
logger.addHandler(fhlr)
str_sharp = '#####################################################################'
logger.info(str_sharp +'Record Experiment Information and Conditions\n')
# logger.info(' Experiment Setting and Running Logs\n\n')
chlr = logging.StreamHandler() #
chlr.setFormatter(formatter)
chlr.setLevel('DEBUG') #
logger.addHandler(chlr)
def parseArg ():
parseArgs = argparse.ArgumentParser(description='Arguments for project.')
# parseArgs.add_argument('--attr_loss_type',type=str, default='triplet', help='the type of loss function for attributes')
parseArgs.add_argument('--batch',type=int, default= 200, help='Number of batch size')
parseArgs.add_argument('--baseline', action='store_true', default=False, help='the mode of training baseline or our model')
parseArgs.add_argument('--dataset',type=str,default = 'CUB_200_2011', help='specify the training dataset')
parseArgs.add_argument('--epoch',type=int, default= 0, help='the num of training epoch for restore,0 means training from scrach')
parseArgs.add_argument('--exp_att',type=str, default='CUB_test', help='the name of current experiment')
parseArgs.add_argument('--gpu_ids',type=str, default= '', help='the ids of GPUs')
parseArgs.add_argument('--gamma',type=float, default=1.0, help='the factor of MSE Loss')
parseArgs.add_argument('--img_size',type=int, default=299, help='the size of image (299,299)')
parseArgs.add_argument('--info', '-I', type=str, default='Info for running program', help='This info is used to record the running conditions for the current program, which is stored in param.log')
parseArgs.add_argument('--iteration',type=int, default= 1000, help='the num of max iteration')
parseArgs.add_argument('--logfile',type=str, default= './param.log', help='the name of log file')
parseArgs.add_argument('--loss_weights', type=float, nargs='+', default=[1,0,1,0], help='whether to add the position loss')
parseArgs.add_argument('--log_interval',type=int, default= 200, help='the interval of training epoch for logging')
parseArgs.add_argument('--modelname',type=str, default= 'inceptionV3', help='the name of DNN for explaining')
parseArgs.add_argument('--num_classes',type=int, default= 200, help='the num of classes in celebA dataset')
parseArgs.add_argument('--num_workers',type=int, default= 8, help='the num of strides for loading data')
parseArgs.add_argument('--num_experts',type=int, default= 4, help='the num of experts')
parseArgs.add_argument('--pertur',type=str, default= 'line', help='the method to perturbate the image')
parseArgs.add_argument('--path_conflict',type=str, default= '', help='Use for path conflict')
parseArgs.add_argument('--phase',type=str, default= 'test', help='train or test')
# parseArgs.add_argument('--region',type=list, default= [[120,60],[50,50],[120,60],[60,60],[26,26],[26,26],[26,26],[60,60],[160,160],[50,50],[26,26],[60,60],[160,160],[60,60],[26,26]], help='the regions for different positions') # w,h
# parseArgs.add_argument('--region',type=list, default= [[28,10],[10,16],[28,10],[16,10],[36,36]], help='the regions for different positions') #w,h
# parseArgs.add_argument('--region',type=list, default= [[76,28],[28,44],[76,28],[44,28],[98,98]], help='the regions for different positions') #w,h
parseArgs.add_argument('--region',type=list, default= [[76,28],[76,76],[76,28],[44,28],[98,98]], help='the regions for different positions') #w,h the second means nose and mouth
parseArgs.add_argument('--res_dir',type=str, default='./experiments', help='the path for saving results')
parseArgs.add_argument('--restore_file',type=str, default='experiments/pre_trained_models/inceptonV3.pth', help='the path/file for restore models')
parseArgs.add_argument('--seed', type=int, default= 1, help='the seed for random selection')
parseArgs.add_argument('--test_batch',type=int, default= 300, help='Number of test batch size')
parseArgs.add_argument('--v', type=ast.literal_eval, default = False, help='display the debug info or not')
parseArgs.add_argument('--weight_attr', type=str, default = 'continuous', help='The method of using attributes')
#parseArgs.add_argument('--train_num',type=int, default= 0, help='0 means using all the image in train dir')
return parseArgs.parse_args()
def time_stamp():
TIMESTAMP = "{0:%Y-%m-%dT%H-%M-%S}".format(datetime.now())
return TIMESTAMP
def create_name_experiment(parameters, attribute_experiment):
name_experiment = '{}/{}'.format(parameters['dataset'], attribute_experiment)
print('Name experiment: {}'.format(name_experiment))
return name_experiment
def create_folder(folder, force=True):
if not os.path.exists(folder):
os.makedirs(folder)
else:
if force:
folder = folder+str(np.random.randint(100))
os.makedirs(folder)
return folder
def normalize_range(t, range=None):
'''
@Description: Normlize the input t into [0,1]
@param {type} {t: tensor}
@return:
'''
def norm_ip1(img, min, max):
img.clamp_(min=min, max=max)
img.add_(-min).div_(max - min + 1e-5)
return img
if range is not None:
output = norm_ip1(t, range[0], range[1])
else:
output = norm_ip1(t, float(t.min()), float(t.max()))
return output
def loadweights(model, filename_model, gpu_ids=''):
'''
@Description: Load weights for pytorch model in different hardware environments
@param {type} : {model: pytorch model, model that waits for loading weights
filename_model: str, name of pretrained weights
gpu_ids: list, available gpu list}
@return:
'''
if filename_model != '' and os.path.exists(filename_model):
if len(gpu_ids) == 0:
# load weights to cpu
state_dict = torch.load(filename_model, map_location=lambda storage, loc: storage)
# create new OrderedDict that does not contain `module.`
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k.replace('module.','') # remove `module.`
new_state_dict[name] = v
state_dict = new_state_dict
elif len(gpu_ids) == 1:
state_dict = torch.load(filename_model)
# create new OrderedDict that does not contain `module.`
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k.replace('module.','') # remove `module.`
new_state_dict[name] = v
state_dict = new_state_dict
else:
state_dict = torch.load(filename_model)
new_state_dict = OrderedDict()
for k, v in state_dict.items():
if 'module' not in k:
name = ''.join(['module.',k]) # add `module.`
new_state_dict[name] = v
if new_state_dict:
state_dict = new_state_dict
else:
state_dict = ''
if len(gpu_ids) > 1:
model = torch.nn.DataParallel(model,device_ids=gpu_ids)
return model, state_dict
def cover_pytorch_tf(pytorch_weights, tf_model_var, sess, match_dict):
'''
@Description: This function is used to copy trained weights from pytorch to tensorflow.
@param {type} : {pytorch_weights: OrderDict, save the weights of one model
tf_model_var: tf variable list, save the variable list in tf model
sess: tf.Session()
match_dict: dic, the match relationship between pytorch weights and tf weiths}
@return: copied weights file name for tf
'''
import tensorflow as tf
# py_weights_name = ['num_batches_tracked']
tf_py_weights_name = {'kernel':'weight', 'bias':'bias', 'gamma':'weight', 'beta':'bias', 'moving_mean':'running_mean', 'moving_variance':'running_var'}
for tf_v in tf_model_var:
tf_names = tf_v.name.split('/')
tf_layer_name = '/'.join(tf_names[1:3]) # used for confirm the layer relationship
py_weight_name = tf_py_weights_name.get(tf_names[3].split(':')[0]) # used for confirming the weight or bias relationship
py_layer_name = match_dict.get(tf_layer_name)
if py_layer_name == None:
continue
py_name = '.'.join([py_layer_name, py_weight_name])
py_w = pytorch_weights.get(py_name)
if len(py_w.shape) == 4:
py_w = py_w.permute(3,2,1,0) # [64, 3, 3, 3] => [3, 3, 3, 64]
elif py_w.dim() == 2:
py_w = py_w.permute(1,0)
assign_op = tf.assign(tf_v, py_w.cpu().detach().numpy())
sess.run(assign_op)
return tf_model_var
def make_single_grid(tensor, nrow=8, padding=2,
normalize=False, range=None, scale_each=False, pad_value=0):
"""Make a grid of images.
Args:
tensor (Tensor or list): 4D mini-batch Tensor of shape (B x 1 x H x W)
or a list of images all of the same size.
nrow (int, optional): Number of images displayed in each row of the grid.
The Final grid size is (B / nrow, nrow). Default is 8.
padding (int, optional): amount of padding. Default is 2.
normalize (bool, optional): If True, shift the image to the range (0, 1),
by subtracting the minimum and dividing by the maximum pixel value.
range (tuple, optional): tuple (min, max) where min and max are numbers,
then these numbers are used to normalize the image. By default, min and max
are computed from the tensor.
scale_each (bool, optional): If True, scale each image in the batch of
images separately rather than the (min, max) over all images.
pad_value (float, optional): Value for the padded pixels.
"""
if not (torch.is_tensor(tensor) or
(isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))):
raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor)))
# if list of tensors, convert to a 4D mini-batch Tensor
if isinstance(tensor, list):
tensor = torch.stack(tensor, dim=0)
if tensor.dim() == 2: # single image H x W
tensor = tensor.view(1, tensor.size(0), tensor.size(1))
if tensor.dim() == 3: # single image
tensor = tensor.view(1, tensor.size(0), tensor.size(1), tensor.size(2))
if normalize is True:
tensor = tensor.clone() # avoid modifying tensor in-place
if range is not None:
assert isinstance(range, tuple), \
"range has to be a tuple (min, max) if specified. min and max are numbers"
def norm_ip(img, min, max):
img.clamp_(min=min, max=max)
img.add_(-min).div_(max - min + 1e-5)
def norm_range(t, range):
if range is not None:
norm_ip(t, range[0], range[1])
else:
norm_ip(t, float(t.min()), float(t.max()))
if scale_each is True:
for t in tensor: # loop over mini-batch dimension
norm_range(t, range)
else:
norm_range(tensor, range)
if tensor.size(0) == 1:
return tensor.squeeze()
# make the mini-batch of images into a grid
nmaps = tensor.size(0)
xmaps = min(nrow, nmaps)
ymaps = int(math.ceil(float(nmaps) / xmaps))
height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)
grid = tensor.new(tensor.size(1), height * ymaps + padding, width * xmaps + padding).fill_(pad_value)
k = 0
for y in irange(ymaps):
for x in irange(xmaps):
if k >= nmaps:
break
grid.narrow(1, y * height + padding, height - padding)\
.narrow(2, x * width + padding, width - padding)\
.copy_(tensor[k])
k = k + 1
return grid
TOTAL_BAR_LENGTH = 55.
last_time = time.time()
begin_time = last_time
def progress_bar(current, total, msg=None):
_, term_width = os.popen('stty size', 'r').read().split()
term_width = int(term_width)
global last_time, begin_time
if current == 0:
begin_time = time.time() # Reset for new bar.
cur_len = int(TOTAL_BAR_LENGTH*current/total)
rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
sys.stdout.write(' [')
for i in range(cur_len):
sys.stdout.write('=')
sys.stdout.write('>')
for i in range(rest_len):
sys.stdout.write('.')
sys.stdout.write(']')
cur_time = time.time()
step_time = cur_time - last_time
last_time = cur_time
tot_time = cur_time - begin_time
L = []
L.append(' Step: %s' % format_time(step_time))
L.append(' | Tot: %s' % format_time(tot_time))
if msg:
L.append(' | ' + msg)
msg = ''.join(L)
sys.stdout.write(msg)
for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
sys.stdout.write(' ')
# Go back to the center of the bar.
for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):
sys.stdout.write('\b')
sys.stdout.write(' %d/%d ' % (current+1, total))
if current < total-1:
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
sys.stdout.flush()
def format_time(seconds):
days = int(seconds / 3600/24)
seconds = seconds - days*3600*24
hours = int(seconds / 3600)
seconds = seconds - hours*3600
minutes = int(seconds / 60)
seconds = seconds - minutes*60
secondsf = int(seconds)
seconds = seconds - secondsf
millis = int(seconds*1000)
f = ''
i = 1
if days > 0:
f += str(days) + 'D'
i += 1
if hours > 0 and i <= 2:
f += str(hours) + 'h'
i += 1
if minutes > 0 and i <= 2:
f += str(minutes) + 'm'
i += 1
if secondsf > 0 and i <= 2:
f += str(secondsf) + 's'
i += 1
if millis > 0 and i <= 2:
f += str(millis) + 'ms'
i += 1
if f == '':
f = '0ms'
return f
def gauss(x,a,b,c):
return torch.exp(-torch.pow(torch.add(x,-b),2).div(2*c*c)).mul(a)
def colorize(x):
''' Converts a one-channel grayscale image to a color heatmap image '''
if x.dim() == 2:
torch.unsqueeze(x, 0, out=x)
if x.dim() == 3:
cl = torch.zeros([3, x.size(1), x.size(2)])
cl[0] = gauss(x,.5,.6,.2) + gauss(x,1,.8,.3)
cl[1] = gauss(x,1,.5,.3)
cl[2] = gauss(x,1,.2,.3)
cl[cl.gt(1)] = 1
elif x.dim() == 4:
cl = torch.zeros([x.size(0), 3, x.size(2), x.size(3)])
cl[:,0,:,:] = gauss(x,.5,.6,.2) + gauss(x,1,.8,.3)
cl[:,1,:,:] = gauss(x,1,.5,.3)
cl[:,2,:,:] = gauss(x,1,.2,.3)
return cl
def visualize(logpath, d_inputs, c_att):
in_c, in_y, in_x = d_inputs.shape
# for item_img, item_att in zip(d_inputs, c_att):
v_img = d_inputs.transpose(1,2,0)* 255. # change to h*w*c
v_img = v_img[:, :, ::-1] # change to bgr
resize_att = cv2.resize(c_att[0], (in_x, in_y))
resize_att *= 255.
# cv2.imwrite(os.path.join(logpath, 'CV_oriImg.png'), v_img)
# cv2.imwrite(os.path.join(logpath, 'CV_attnImg.png'), resize_att)
# v_img = cv2.imread(os.path.join(logpath, 'CV_oriImg.png'))
# vis_map = cv2.imread(os.path.join(logpath, 'CV_attnImg.png'), 0)
jet_map = cv2.applyColorMap(resize_att.astype(np.uint8), cv2.COLORMAP_JET)
jet_map = cv2.add(0.6*v_img.astype(np.uint8), 0.4*jet_map)
# out_path = os.path.join(logpath, 'attention_combine.png')
# cv2.imwrite(out_path, jet_map)
# out_path = os.path.join(logpath, 'rawImage.png')
# cv2.imwrite(out_path, v_img)
# count += 1
return jet_map
|
{"hexsha": "a12a49d4eec43e7302a7258e334726d9304e57aa", "size": 16402, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/myutils.py", "max_stars_repo_name": "zhouxiaowei1120/practice", "max_stars_repo_head_hexsha": "95dd7ffa65f34a867578bea2f80404677cc5f5e5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-12-17T15:49:37.000Z", "max_stars_repo_stars_event_max_datetime": "2017-12-17T15:49:37.000Z", "max_issues_repo_path": "tools/myutils.py", "max_issues_repo_name": "zhouxiaowei1120/practice", "max_issues_repo_head_hexsha": "95dd7ffa65f34a867578bea2f80404677cc5f5e5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tools/myutils.py", "max_forks_repo_name": "zhouxiaowei1120/practice", "max_forks_repo_head_hexsha": "95dd7ffa65f34a867578bea2f80404677cc5f5e5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.1631578947, "max_line_length": 238, "alphanum_fraction": 0.6216315084, "include": true, "reason": "import numpy,import scipy", "num_tokens": 4305}
|
function [Population,W,B] = WeightUpdate(Population,W,Archive,Z,T,Global)
% Weight Update
%------------------------------- Copyright --------------------------------
% Copyright (c) 2023 BIMK Group. You are free to use the PlatEMO for
% research purposes. All publications which use this platform or any code
% in the platform should acknowledge the use of "PlatEMO" and reference "Ye
% Tian, Ran Cheng, Xingyi Zhang, and Yaochu Jin, PlatEMO: A MATLAB platform
% for evolutionary multi-objective optimization [educational forum], IEEE
% Computational Intelligence Magazine, 2017, 12(4): 73-87".
%--------------------------------------------------------------------------
%% Routine to find undeveloped individuals (correspondingly their weights) in the archive set
% Normalisation
N_arc = length(Archive);
fmin_arc = min(Archive.objs);
fmax_arc = max(Archive.objs);
Archiveobjs = (Archive.objs - repmat(fmin_arc,N_arc,1) )./repmat(fmax_arc - fmin_arc,N_arc,1);
Populaionobjs = (Population.objs - repmat(fmin_arc,Global.N,1) )./repmat(fmax_arc - fmin_arc,Global.N,1);
% Euclidean distance between individuals in the archive set and individuals in the Population
dis1 = pdist2(Archiveobjs,Populaionobjs);
dis1 = sort(dis1,2);
% Euclidean distance between any two individuals in the archive set
dis2 = pdist2(Archiveobjs,Archiveobjs);
dis2 = sort(dis2,2);
% Calculate the niche size(median of the distances from their closest solution in the archive )
niche_size = median(dis2(:,2));
% Find undeveloped
Archive_und = Archive(dis1(:,1) >= niche_size);
N_und = length(Archive_und);
%% If the undeveloped individuals are promising then add them into the evolutionary Population
% Obtain their corresponding weights.
if ~isempty(Archive_und)
W1 = (Archive_und.objs - repmat(Z,N_und,1))./repmat( sum(Archive_und.objs,2)-repmat(sum(Z),N_und,1), 1, Global.M );
for i = 1 : size(W1,1)
W_all = [W;W1(i,:)];
B1 = pdist2(W_all,W_all);
B1(logical(eye(length(B1)))) = inf;
[~,B1] = sort(B1,2);
B1 = B1(:,1:T);
Population1 = [Population,Archive_und(i)];
Population2 = Population1(B1(end,:));
Value_Tche_all = max(abs(Population2.objs-repmat(Z,T,1))./repmat(W1(i,:),T,1),[],2);
Value_Tche = max(abs(Archive_und(i).obj - Z )./W1(i,:),[],2);
index = find(Value_Tche_all<Value_Tche, 1);
if isempty(index)
% Put the wight into the W, as well as the corresponding solution
W = [W;W1(i,:)];
Population = [Population Archive_und(i)];
% Update neighbour solutions after adding a weight
P = B1(end,:);
g_old = max( abs( Population(P).objs - repmat(Z,T,1) )./W(P,:),[],2 );
g_new = max( abs( repmat(Archive_und(i).obj,T,1) - repmat(Z,T,1) )./W(P,:),[],2 );
Population(P(g_old > g_new)) = Archive_und(i);
end
end
end
%% Delet the poorly performed weights until the size of W is reduced to N
% find out the solution that is shared by the most weights in the population
while length(Population) > Global.N
[~,ai,bi] = unique(Population.objs,'rows');
if length(ai) == length(bi) % If every solution in the population corresponds to only one weight
% Normalisation
fmax = max(Population.objs,[],1);
fmin = min(Population.objs,[],1);
PCObj = (Population.objs-repmat(fmin,length(Population),1))./repmat(fmax-fmin,length(Population),1);
% Determine the radius of the niche
d = pdist2(PCObj,PCObj);
d(logical(eye(length(d)))) = inf;
sd = sort(d,2);
num_obj = size(Population.objs,2);
r = median(sd(:,min(num_obj,size(sd,2))));
R = min(d./r,1);
% Delete solution one by one
while length(Population) > Global.N
[~,worst] = max(1-prod(R,2));
Population(worst) = [];
R(worst,:) = [];
R(:,worst) = [];
W(worst,:) = [];
end
else
Index = find(bi==mode(bi));
Value_Tche2 = max(abs(Population(Index).objs-repmat(Z,size(Index,1),1))./W(Index,:),[],2);
Index_max= find(Value_Tche2 == max(Value_Tche2));
Population(Index(Index_max(1)))=[];
W(Index(Index_max(1)),:)=[];
end
end
% Update the neighbours of each weight
B = pdist2(W,W);
[~,B] = sort(B,2);
B = B(:,1:T);
end
|
{"author": "BIMK", "repo": "PlatEMO", "sha": "c5b5b7c37a9bb42689a5ac2a0d638d9c4f5693d5", "save_path": "github-repos/MATLAB/BIMK-PlatEMO", "path": "github-repos/MATLAB/BIMK-PlatEMO/PlatEMO-c5b5b7c37a9bb42689a5ac2a0d638d9c4f5693d5/PlatEMO/Algorithms/Multi-objective optimization/AdaW/WeightUpdate.m"}
|
import yaml
import operator
import random
import os
import math
import numpy
import sys
from collections import OrderedDict
from utils import *
import itertools
import subprocess
import pandas
from pyDOE import *
import warnings
import pickle
import core_count
import json
# Returns an ordered dictionary based on the order in which the parameters were found in the file
#http://stackoverflow.com/questions/5121931/in-python-how-can-you-load-yaml-mappings-as-ordereddicts
def ordered_load(stream, Loader=yaml.Loader, object_pairs_hook=OrderedDict):
class OrderedLoader(Loader):
pass
def construct_mapping(loader, node):
loader.flatten_mapping(node)
return object_pairs_hook(loader.construct_pairs(node))
OrderedLoader.add_constructor(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
construct_mapping)
return yaml.load(stream, OrderedLoader)
# Get the number rounded to multiple of a particular number
def roundMultiple(number, multiple):
num = number + (multiple-1)
return int(num - (num % multiple))
def roundDownMultiple(number,multiple):
num = number + 1
if num % multiple == 0:
return int(num - multiple)
else:
return int(num - (num % multiple))
def put_limits(value,start,end,step):
if value<start:
return start
elif value>end:
return end
else:
return roundMultiple(value,step)
def utility_function(metric,cw,i):
metrics = metric.split(',')
limits = dict()
improve_metric = ''
for m in metrics:
if '=' in m:
limits[m.split('=')[0]] = int(m.split('=')[1])
else:
improve_metric = m
for m in limits.keys():
if cw[m][list(cw['no.']).index(i)]>=limits[m]:
return cw[improve_metric][list(cw['no.']).index(i)]
return sys.maxint
# Write the configuration to the file
# Can write multiple entries based on the start end end entries
def write(design_space, start,end ,basefile):
folder = "config_files"
if not os.path.exists(folder):
os.makedirs(folder)
for i in range(start,end):
fname = folder+"/"+"test"+str(i)+".yaml"
selective_write(design_space[i],fname, basefile)
def dict_product(dic):
product = [x for x in apply(itertools.product, dic.values())]
print product
return [dict(zip(dic.keys(), p)) for p in product]
# Get results for the specific configurations
def get_results(start, end, design_space, basefile, metric):
write(design_space, start,end ,basefile)
for i in range(start, end):
bashCommand = "./onescript.sh " + str(i)
process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
output = process.communicate()[0]
cw = pandas.read_csv("numbers.csv")
metric_values = list()
ret_array = list()
for i in range(start, end):
if i in list(cw['no.']):
ret_array.append(i)
metric_values.append(utility_function(metric,cw,i))
else:
ret_array.append(i)
metric_values.append(sys.maxint)
print metric_values
print ret_array
return metric_values,ret_array
# Get the metric readings for all experiments
def get_metric(metric):
cw = pandas.read_csv("numbers.csv")
metrics = list()
for i in range(0,len(cw)):
metrics.append(utility_function(metric,cw,i))
return metrics
def get_tp(index):
cw = pandas.read_csv("numbers.csv")
metric = cw['throughput'][index]
return metric
# Generates the initial set of exploration points instead of using random points or LHS
def generate_initial(result,start,end,step,typ,relations,p,conf):
cores = int(core_count.get())
threads = np.random.dirichlet([1000,1000],1)
k=0
for c in conf:
if c in typ.keys():
if typ[c] == "boolean":
# FIXME: This shouldn't be a random choice
result[c] = random.choice([True,False])
else:
result[c] = pow(2,int(start[c]))
else:
if "component.spout" in c or "topology.acker" in c:
result[c] = result["topology.workers"]
elif "component" in c:
#Max so that it never gets below the min of one executor per bolt per worker
result[c] = max(result["topology.workers"],roundDownMultiple((cores-1-3*int(result["topology.workers"]))*threads[0][k],result["topology.workers"]))
k+=1
else:
result[c] = roundMultiple(int(start[c]),step[c])
if c in relations:
for e in relations[c]:
result[e] = pow(2,int(math.ceil(math.log(result[c],2))))
if result[c]*1.1>result[e]:
result[e] = pow(2,int(math.ceil(math.log(result[c],2)))+1)
print result
return result
def step_func(result,start,end,step,typ,p,c,relations,change):
if c in typ.keys():
if typ[c] == "boolean":
if result[c]==True: result[c]=False;
else: result[c]=True;
if typ[c] =="exp":
result[c] = pow(2,put_limits(math.log(result[c],2) + ((change)*(step[c]))),start[c],end[c],step[c])
else:
result[c] = put_limits(result[c] + ((change)*(step[c])),start[c],end[c],step[c])
print "New value for " + str(c) + " = " + str(result[c])
print start[c]
print end[c]
if c in relations:
for e in relations[c]:
result[e] = pow(2,int(math.ceil(math.log(result[c],2))))
if result[c]*1.1>result[e]:
result[e] = pow(2,int(math.ceil(math.log(result[c],2)))+1)
return result
def getstats(index):
direc = "json_files/"
tuples = dict()
latency = dict()
processtime = dict()
bolts = dict()
capacity = dict()
with open(direc+'topology'+str(index)+'.json') as data_file:
data = json.load(data_file)
for i in data['bolts']:
tuples[i['boltId']] = int(i['acked'])
latency[i['boltId']] = float(i['processLatency'])
capacity[i['boltId']] = float(i['capacity'])
#for i in tuples.keys():
# processtime[i] = tuples[i]*latency[i]
#print processtime
#bolt_time = max(processtime.iteritems(), key=operator.itemgetter(1))[1]
#print bolt_time
#for i in processtime.keys():
# bolts[i] = float(processtime[i]/bolt_time)
#print bolts
#for i in bolts:
# for k in bolt_ids:
# if i in k:
# result[k] = roundDownMultiple(int(result[k]) * bolts[i], step[k])
return capacity
def adjust(capacity,result,bolt_ids,step):
print step
maxcap_bolt = max(capacity.iteritems(), key=operator.itemgetter(1))[0]
for i in capacity:
for k in bolt_ids:
if i in k:
result[k] = roundDownMultiple(int(result[k]) * (capacity[i]/capacity[maxcap_bolt]), step[k])
print result
return result
def downgrade(capacity,result,bolt_ids,step):
maxcap_bolt = max(capacity.iteritems(), key=operator.itemgetter(1))[0]
#cap = min(capacity[maxcap_bolt]+0.2,1.0)
cap = capacity[maxcap_bolt]
print "Max Capacity is at " + str(cap)
for k in bolt_ids:
if maxcap_bolt in k:
result[k] = roundDownMultiple(int(result[k]) * cap, step[k])
print result
return result
def checkit(c,x0,start,end,change):
if x0[c]==start[c] and change==-1:
return False
elif x0[c]==end[c] and change==1:
return False
else:
return True
def hill_climbing(conf,sample,start,end,step,typ, relations,basefile, metric,lat_p,tp_p,behav_tp,behav_lat):
#steps
#Step 1
#Generate initial configuration
p = []
design_space = list(dict())
config = generate_initial(dict(sample),start,end,step,typ,relations,p,conf)
length = len(design_space)
design_space.append(config)
metric_values,numbers = get_results(length,length+1,design_space, basefile,metric)
fx0 = min(metric_values)
x0 = dict(config)
lastx0 = dict(config)
lastfx0 = fx0
index = 0
f = True
retry = False
while f:
print f
max_capacity = 0
print retry
while max_capacity<0.8:
print "Increasing utilization"
length = len(design_space)
capacity = getstats(index)
max_capacity = max(capacity.iteritems(), key=operator.itemgetter(1))[1]
config = downgrade(capacity,dict(x0),["component.rolling_count_bolt_num","component.split_bolt_num"],step)
design_space.append(config)
metric_values,numbers = get_results(length,length+1,design_space, basefile,metric)
fx_new = min(metric_values)
x_new = dict(config)
if fx_new==sys.maxint:
break
elif not retry:
index +=1
fx0 = fx_new
x0 = dict(x_new)
elif fx_new<fx0:
fx0 = fx_new
x0 = dict(x_new)
print "Max Capacity is at " + str(max_capacity)
print "Adjusting the thread distribution"
length = len(design_space)
print index
capacity = getstats(index)
config = adjust(capacity,dict(x0),["component.rolling_count_bolt_num","component.split_bolt_num"],step)
design_space.append(config)
metric_values,numbers = get_results(length,length+1,design_space, basefile,metric)
fx_new = min(metric_values)
x_new = dict(config)
if not retry or fx_new<fx0:
fx0 = fx_new
x0 = dict(x_new)
met = dict()
if fx0==sys.maxint:
print "Current constraint is throughput"
constraint = "throughput"
met = dict(tp_p)
change = dict(behav_tp)
else:
print "Current constraint is latency"
constraint = "latency"
met = dict(lat_p)
change = dict(behav_lat)
priority = 0
t = 6
print "Best configuration " + str(x0)
tp_old = get_tp(length)
print tp_old
while priority<t:
print met[priority]
print change[met[priority]]
print x0
c = met[priority]
check = checkit(c,x0,start,end,change[met[priority]])
if not check:
priority+=1
if priority == t and constraint=="latency":
f = False
break
continue
x_new = step_func(dict(x0),start,end,step,typ,p,c,relations,change[met[priority]])
length = len(design_space)
design_space.append(x_new)
metric_values,numbers = get_results(length,length+1,design_space, basefile,metric)
fx_new = min(metric_values)
tp = get_tp(length)
if fx_new<fx0:
fx0 = fx_new
x0 = dict(x_new)
continue
elif constraint=="throughput" and tp>tp_old and fx_new<=fx0:
fx0 = fx_new
x0 = dict(x_new)
else:
priority += 1
if (priority == t and constraint=="throughput") or (constraint=="throughput" and fx_new!=sys.maxint):
print "Changing current constraint to latency"
constraint = "latency"
met = dict(lat_p)
change = dict(behav_lat)
priority = 0
retry = True
break
if priority == t and constraint=="latency":
f = False
break
tp_old = tp
print "Best configuration is " + str(x0)
print "Best metric values is " + str(fx0)
def main():
warnings.simplefilter('ignore', numpy.RankWarning)
# python rrs.py conf.yaml rollingtopwords.yaml lat_90 relations.yaml
conf_file = sys.argv[1]
basefile = sys.argv[2]
metric = sys.argv[3]
ref = open(conf_file, "r")
sample = yaml.load(ref)
result = dict(sample)
start = dict(); end = dict(); step = dict(); typ = dict()
ref = open(conf_file, "r")
conf = ordered_load(ref, yaml.SafeLoader).keys()
for k in sample:
vrange = sample[k]
if len(vrange.split(","))==2:
start[k] = int(vrange.split(",")[0])
end[k] = int(vrange.split(",")[1])
if len(vrange.split(","))==3:
start[k] = int(vrange.split(",")[0])
end[k] = int(vrange.split(",")[1])
step[k] = int(vrange.split(",")[2])
if len(vrange.split(","))==4:
typ[k] = vrange.split(",")[3]
if vrange.split(",")[2] != "null":
step[k] = int(vrange.split(",")[2])
start[k] = int(vrange.split(",")[0])
end[k] = int(vrange.split(",")[1])
relation_file = sys.argv[4]
rel = open(relation_file, "r")
rel_dict = dict(yaml.load(rel))
relations = dict()
for r in rel_dict:
split = rel_dict[r].split(",")
relations[r] = list(split[:len(split)-1])
lat_file = sys.argv[5]
ref = open(lat_file, "r")
lat_conf = ordered_load(ref, yaml.SafeLoader)
lat_p = dict()
behav_lat = dict()
i = 0
print lat_conf
for c in lat_conf.keys():
lat_p[i] = c
print lat_conf[c]
behav_lat[c] = int(lat_conf[c])
i +=1
tp_file = sys.argv[6]
ref = open(tp_file, "r")
tp_conf = ordered_load(ref, yaml.SafeLoader)
behav_tp = dict()
tp_p = dict()
i = 0
for c in tp_conf.keys():
tp_p[i] = c
behav_tp[c] = int(tp_conf[c])
i +=1
#print relations
hill_climbing(conf,sample,start,end,step,typ,relations,basefile,metric,lat_p,tp_p,behav_tp,behav_lat)
if __name__ == '__main__':
main()
|
{"hexsha": "88147a2a5e0d14f9d64bcf5e1e2692dbfcadab2c", "size": 13833, "ext": "py", "lang": "Python", "max_stars_repo_path": "tuning/misc/rule_based1.py", "max_stars_repo_name": "MBtech/stormbenchmark", "max_stars_repo_head_hexsha": "16bd8971011ff4ac34b5d457cecb55f5dfc76106", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2017-10-16T15:24:35.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-22T20:34:51.000Z", "max_issues_repo_path": "tuning/misc/rule_based1.py", "max_issues_repo_name": "MBtech/stormbenchmark", "max_issues_repo_head_hexsha": "16bd8971011ff4ac34b5d457cecb55f5dfc76106", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tuning/misc/rule_based1.py", "max_forks_repo_name": "MBtech/stormbenchmark", "max_forks_repo_head_hexsha": "16bd8971011ff4ac34b5d457cecb55f5dfc76106", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2017-11-20T06:14:02.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-07T03:37:38.000Z", "avg_line_length": 34.4104477612, "max_line_length": 163, "alphanum_fraction": 0.5860623148, "include": true, "reason": "import numpy", "num_tokens": 3413}
|
/-
Copyright (c) 2021 OpenAI. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Kunhao Zheng, Stanislas Polu, David Renshaw, OpenAI GPT-f
-/
import mathzoo.imports.miniF2F
open_locale nat rat real big_operators topological_space
theorem mathd_algebra_142
(m b : ℝ)
(h₀ : m * 7 + b = -1)
(h₁ : m * (-1) + b = 7) :
m + b = 5 :=
begin
linarith,
end
|
{"author": "leanprover-community", "repo": "mathzoo", "sha": "87e9b492daeb929838706942aaa2437621b34a0e", "save_path": "github-repos/lean/leanprover-community-mathzoo", "path": "github-repos/lean/leanprover-community-mathzoo/mathzoo-87e9b492daeb929838706942aaa2437621b34a0e/src/mathzoo/olympiads/mathd/algebra/p142.lean"}
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# @author : east
# @time : 2020/7/28 14:39
# @file : basic_func.py
# @project : SpectralMethod
# @software : PyCharm
import numpy as np
# RHS Equation
# -------------
def g(k, t):
return np.exp((k**4 - k**2) * t)
def spec_rhs(t0, vt, kx):
# print("tspan:", tspan)
gg = g(kx, t0)
wt = vt / gg
w = np.fft.ifft(wt).real
w_x = np.fft.ifft(1j * kx * wt).real
rhs = - gg * np.fft.fft(w * w_x)
return rhs
# Exact Solution
# ---------------
def exact_solution(t, x, L):
return np.cos(x/L) * (1 + np.sin(x / L))
|
{"hexsha": "e9ea63f46d0b4caedcaee91f608fba6d0a855e00", "size": 603, "ext": "py", "lang": "Python", "max_stars_repo_path": "example/KS/basic_func.py", "max_stars_repo_name": "EastMagica/SpectralMethod", "max_stars_repo_head_hexsha": "fbed7fa236c26cfe5cc77d65e4309fd33dca3e3b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "example/KS/basic_func.py", "max_issues_repo_name": "EastMagica/SpectralMethod", "max_issues_repo_head_hexsha": "fbed7fa236c26cfe5cc77d65e4309fd33dca3e3b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "example/KS/basic_func.py", "max_forks_repo_name": "EastMagica/SpectralMethod", "max_forks_repo_head_hexsha": "fbed7fa236c26cfe5cc77d65e4309fd33dca3e3b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 16.2972972973, "max_line_length": 44, "alphanum_fraction": 0.5140961857, "include": true, "reason": "import numpy", "num_tokens": 216}
|
-- Vect.idr
--
-- Vector type to demonstrate dependent types
||| Vect data type: A List with defined length
data Vect : Nat -> Type -> Type where
||| Empty vector
Nil : Vect Z a
||| Prepend a new element to vector
(::) : (x : a) -> (xs : Vect k a) -> Vect (S k) a
%name Vect xs, ys, zs
||| appends two vectors
append : Vect n elem -> Vect m elem -> Vect (n + m) elem
append [] ys = ys
append (x :: xs) ys = x :: append xs ys
||| Combine two vectors to a vector of pairs
zip : Vect n a -> Vect n b -> Vect n (a, b)
zip [] ys = []
zip (x :: xs) (y :: ys) = (x, y) :: zip xs ys
|
{"hexsha": "016cc43efeae5a7e7a94543443f9b6741cccd4ac", "size": 591, "ext": "idr", "lang": "Idris", "max_stars_repo_path": "Idris/TDD/Chapter_4/Vect.idr", "max_stars_repo_name": "kkirstein/proglang-playground", "max_stars_repo_head_hexsha": "d00be09ba2bb2351c6f5287cc4d93fcaf21f75fd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Idris/TDD/Chapter_4/Vect.idr", "max_issues_repo_name": "kkirstein/proglang-playground", "max_issues_repo_head_hexsha": "d00be09ba2bb2351c6f5287cc4d93fcaf21f75fd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Idris/TDD/Chapter_4/Vect.idr", "max_forks_repo_name": "kkirstein/proglang-playground", "max_forks_repo_head_hexsha": "d00be09ba2bb2351c6f5287cc4d93fcaf21f75fd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.8888888889, "max_line_length": 56, "alphanum_fraction": 0.5752961083, "num_tokens": 194}
|
!
! Special_Funcitons
!
! Module containing procedures for providing/evaluating various special
! functions.
!
!
! CREATION HISTORY:
! Written by: Paul van Delst, 28-Nov-2001
! paul.vandelst@nooa.gov
!
MODULE Special_Functions
! -----------------
! Environment setup
! -----------------
! Module use
USE Type_Kinds, ONLY: fp, Single
USE Message_Handler, ONLY: SUCCESS, FAILURE, Display_Message
! Disable all implicit typing
IMPLICIT NONE
! ------------
! Visibilities
! ------------
PRIVATE
PUBLIC :: ln_Gamma
PUBLIC :: Factorial
PUBLIC :: Binomial_Coefficient
! -----------------
! Module parameters
! -----------------
! Version Id for the module
CHARACTER(*), PARAMETER :: MODULE_VERSION_ID = &
! Message string length
INTEGER, PARAMETER :: ML = 256
! Literal constants
REAL(fp), PARAMETER :: ZERO = 0.0_fp
REAL(fp), PARAMETER :: ONE = 1.0_fp
REAL(fp), PARAMETER :: TWO = 2.0_fp
CONTAINS
!------------------------------------------------------------------------------
!:sdoc+:
!
! NAME:
! ln_Gamma
!
! PURPOSE:
! Function to return the natural logarithm of the Gamma function.
!
! CALLING SEQUENCE:
! result = ln_Gamma( x )
!
! INPUTS:
! x: Argument of the Gamma function, Gamma(x), for which
! the Gamma function logarithm is required. x > or = 1.
! UNITS: N/A
! TYPE: REAL(fp)
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN)
!
! FUNCTION RESULT:
! result: The return value is LN(GAMMA(x)). If the passed
! value of x is less than 1.0, the returned result
! is -1.0.
! UNITS: N/A
! TYPE: REAL(fp)
! DIMENSION: Scalar
!
! PROCEDURE:
! The algorithm used here is based on the Lanczos approximation detailed in,
!
! [1] Abramowitz, M., and Stegun, I.A. (eds) 1972, "Handbook of Mathematical
! Functions", Applied Mathematics Series, vol.55 (Washington:
! National Bureau of Standards)
!
! and discussed in,
!
! [2] Press, W.H. etal., 1992, "Numerical Recipes in Fortran", 2nd ed.,
! Cambridge University Press, pp206-207
!
! the latter reference from which this code is adapted.
!
! The Gamma function is defined by the integral,
!
! Inf
! /\
! | z-1 -t
! Gamma(z) = | t .e dt
! \/
! 0
!
! When the argument z is an integer,
!
! n! = Gamma(n+1)
!
! The Gamma function satisfies the recurrance relation,
!
! Gamma(z+1) = z.Gamma(z) .....(1)
!
! For z > 0, the Lanczos approximation to the Gamma function
! can be written as,
!
! z+0.5 ___ [ c1 c2 cN ]
! Gamma(z+1) = (z+y+0.5) .exp(-(z+y+0.5)) . \/2pi [ c0 + ----- + ----- + ... + ----- + e ]
! [ z+1 z+2 z+N ]
!
! .....(2)
!
! where e is the error term.
!
! For y = 5, N = 6 and using the coefficients from ref.[2], the
! error is smaller that |e| < 2e-10. For the purposes which this
! function will be used, that is good enough.
!
! The natural log of the Gamma function is obtained simply by taking
! the logarithm of the RHS of eqn(2).
!
!:sdoc-:
!------------------------------------------------------------------------------
FUNCTION ln_Gamma( x )
! Arguments
REAL(fp), INTENT(IN) :: x
! Function result
REAL(fp) :: ln_Gamma
! Local parameters
CHARACTER(*), PARAMETER :: ROUTINE_NAME = 'ln_Gamma'
! ...The series coefficients
INTEGER, PARAMETER :: N_GAMMA_COEFFICIENTS = 6
REAL(fp), PARAMETER :: GAMMA_COEFFICIENT( N_GAMMA_COEFFICIENTS ) = &
[ 76.18009172947146_fp, -86.50532032941677_fp, 24.01409824083091_fp, &
-1.231739572450155_fp, 1.208650973866179e-03_fp, -5.395239384953e-06_fp ]
! ...The other series parameters
REAL(fp), PARAMETER :: Y = 5.0_fp
REAL(fp), PARAMETER :: SQRT_2PI = 2.5066282746310005_fp
REAL(fp), PARAMETER :: C0_AND_ERROR_TERM = 1.000000000190015_fp
! ...Literal constants
REAL(fp), PARAMETER :: POINT5 = 0.5_fp
! Local variables
CHARACTER(ML) :: msg
REAL(fp) :: z
! Set up
IF ( x < ONE ) THEN
ln_Gamma = -ONE
msg = 'Input X argument must be > or = 1.'
CALL Display_Message( ROUTINE_NAME, msg, FAILURE )
RETURN
END IF
! Z is the temporary argument variable
z = x
! Calculate the multipler terms for ln(Gamma(x+1))
ln_Gamma = z + Y + POINT5
ln_Gamma = ( ( z + POINT5 ) * LOG(ln_Gamma) ) - ln_Gamma
! Compute the series approximation term
! ...Initialise the sum to c0 + e.
series_sum = C0_AND_ERROR_TERM
! ...Sum the series.
DO i = 1, N_GAMMA_COEFFICIENTS
series_sum = series_sum + ( GAMMA_COEFFICIENT(i) / ( z + REAL(i,fp) ) )
END DO
! Complete the calculation. Note that the division
! by "z" here is to ensure that ln(Gamma(X)) is
! being calculated, NOT ln(Gamma(x+1))
ln_Gamma = ln_Gamma + LOG(SQRT_2PI * series_sum / z)
END FUNCTION ln_Gamma
!------------------------------------------------------------------------------
!:sdoc+:
!
! NAME:
! Factorial
!
! PURPOSE:
! Function to compute the factorial, n!
!
! CALLING SEQUENCE:
! result = Factorial( n )
!
! INPUT ARGUMENTS:
! n: Value for which n! is required. n > or = 0. The upper
! limit depends on the definition of the floating point
! kind type, fp. See RESTRICTIONS below.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN)
!
! FUNCTION RESULT:
! result: The return value is n!. If the passed value of n is
! less than 0, or if it is so large that a floating point
! variable with the "fp" kind type cannot represent the
! value of n!, the returned result is -1.0.
! UNITS: N/A
! TYPE: REAL(fp)
! DIMENSION: Scalar
!
! RESTRICTIONS:
! Input n argument must be > or = 0. Upper limits are:
! Single precision: n < or = 30.
! Double/Quad precision: n < or = 168.
! Note that these limits are one less than the values that would place the
! result of n! on the bleeding edge of what can be represented.
!
! The type of floating point variables used are determined by the fp
! data type defined in the Type_Kinds module.
!
! PROCEDURE:
! If the input value of n is < or = 30, the returned factorial is obtained from
! a table of values. For n > 30 and <= 168,
!
! n! = EXP(LN(Gamma(n+1)))
!
!:sdoc-:
!------------------------------------------------------------------------------
FUNCTION factorial( n )
! Arguments
INTEGER, INTENT(IN) :: n
! Function result
REAL(fp) :: factorial
! Local parameters
CHARACTER( * ), PARAMETER :: ROUTINE_NAME = 'Factorial'
! ...Maximum value of n allowed.
INTEGER, PARAMETER :: MAX_N = 168
! ...The tabulated values up to n = 30
INTEGER, PARAMETER :: N_TABULATED_VALUES = 30
REAL(fp), PARAMETER :: FACTORIAL_TABLE(0:N_TABULATED_VALUES) = &
! 3 2 1
! 3 21098765432109876543210987654321
[ 1.00000000000000000000000000000000e+00_fp, &
1.00000000000000000000000000000000e+00_fp, &
2.00000000000000000000000000000000e+00_fp, &
6.00000000000000000000000000000000e+00_fp, &
2.40000000000000000000000000000000e+01_fp, &
1.20000000000000000000000000000000e+02_fp, &
7.20000000000000000000000000000000e+02_fp, &
5.04000000000000000000000000000000e+03_fp, &
4.03200000000000000000000000000000e+04_fp, &
3.62880000000000000000000000000000e+05_fp, &
3.62880000000000000000000000000000e+06_fp, &
3.99168000000000000000000000000000e+07_fp, &
4.79001600000000000000000000000000e+08_fp, &
6.22702080000000000000000000000000e+09_fp, &
8.71782912000000000000000000000000e+10_fp, &
1.30767436800000000000000000000000e+12_fp, &
2.09227898880000000000000000000000e+13_fp, &
3.55687428096000000000000000000000e+14_fp, &
6.40237370572800000000000000000000e+15_fp, &
1.21645100408832000000000000000000e+17_fp, &
2.43290200817664000000000000000000e+18_fp, &
5.10909421717094400000000000000000e+19_fp, &
1.12400072777760768000000000000000e+21_fp, &
2.58520167388849766400000000000000e+22_fp, &
6.20448401733239439360000000000000e+23_fp, &
1.55112100433309859840000000000000e+25_fp, &
4.03291461126605635584000000000000e+26_fp, &
1.08888694504183521607680000000000e+28_fp, &
3.04888344611713860501504000000000e+29_fp, &
8.84176199373970195454361600000000e+30_fp, &
2.65252859812191058636308480000000e+32_fp ]
! Local variables
CHARACTER(ML) :: msg
REAL(fp) :: x
! Setup
IF ( n < 0 ) THEN
Factorial = -ONE
msg = 'Input n argument must be > or = 0.'
CALL Display_Message( ROUTINE_NAME, msg, FAILURE )
RETURN
END IF
! Calculate n!
IF ( n <= N_TABULATED_VALUES ) THEN
! ...Get value from table
Factorial = FACTORIAL_TABLE(n)
ELSE
! ...Check if data type can handle the value
IF ( fp == Single ) THEN
Factorial = -ONE
WRITE(msg,'("Floating point type is single precision. "&
&"Cannot represent ",i0,"! accurately")') n
CALL Display_Message( ROUTINE_NAME, msg, FAILURE )
RETURN
ELSE
IF ( n > MAX_N ) THEN
Factorial = -ONE
msg = 'Input value of n is just too big!'
CALL Display_Message( ROUTINE_NAME, msg, FAILURE )
RETURN
END IF
END IF
! ...Calculate factorial using Gamma function.
x = REAL(n, fp)
Factorial = EXP(ln_Gamma( x + ONE ))
END IF
END FUNCTION Factorial
!------------------------------------------------------------------------------
!:sdoc+:
!
! NAME:
! Binomial_Coefficient
!
! PURPOSE:
! Function to compute the binomial coefficient.
!
! CALLING SEQUENCE:
! result = Binomial_Coefficient( n, k )
!
! INPUTS:
! n: Total number of values from which an unordered
! combination is required.
! Must be > or = k.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN)
!
! k: The number of unordered sequences to select from
! n numbers.
! Must be > or = 0.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
! ATTRIBUTES: INTENT(IN)
!
! FUNCTION RESULT:
! result: The return value is the binomial coefficient. If the
! passed arguments are invalid, or the result is not
! representable (too big), the returned value is -1.
! UNITS: N/A
! TYPE: INTEGER
! DIMENSION: Scalar
!
! PROCEDURE:
! The number of ways of picking k unordered sequences from n numbers
! is the binomial coefficient or combinatorial number. It is given by,
!
! n!
! n(C)k = --------------- .....................(1)
! ( n-k )! . k!
!
! The factorial can be written as a Gamma function,
!
! n! = Gamma( n+1 )
!
! thus eqn(1) can be expressed as,
!
! Gamma( n+1 )
! n(C)k = ------------------------------- .....(2)
! Gamma( n-k+1 ) . Gamma( k+1 )
!
! Depending on the values of n and k, the resultant Gamma values can
! be quite large (and unrepresentable) so eqn(2) can be recast to
! provide the logarithm of the binomial coefficient,
!
!
! b = LN(Gamma( n+1 )) - LN(Gamma( n-k+1 )) - LN(Gamma( k+1 ))
!
! from which the binomial coefficient is easily obtained,
!
! n(C)k = EXP( b ) ............................(3)
!
! In this function, before eqn(3) is evaluated, the value of b
! is tested so that the integer form of EXP( b ) is representable.
!
!:sdoc-:
!------------------------------------------------------------------------------
FUNCTION Binomial_Coefficient( n, k )
! Arguments
INTEGER, INTENT(IN) :: n
INTEGER, INTENT(IN) :: k
! Function result
INTEGER :: Binomial_Coefficient
! Local parameters
CHARACTER(*), PARAMETER :: ROUTINE_NAME = 'Binomial_Coefficient'
! Local variables
CHARACTER(ML) :: msg
REAL(fp) :: xn, xk
REAL(fp) :: ln_coeff
! Set up
IF ( n < 0 .OR. k < 0 ) THEN
Binomial_Coefficient = -1
msg = 'Input n, k arguments must be > or = 0.'
CALL Display_Message( ROUTINE_NAME, msg, FAILURE )
RETURN
END IF
IF ( n < k ) THEN
Binomial_Coefficient = 0
msg = 'Input n is < input k. Setting result to 0.'
CALL Display_Message( ROUTINE_NAME, msg, WARNING )
RETURN
END IF
IF ( k == 0 ) THEN
Binomial_Coefficient = 1
RETURN
END IF
! Convert the input integer arguments to floats
xn = REAL(n, fp)
xk = REAL(k, fp)
! Calulate the natural log of the binomial
! coefficient using the ln_Gamma() function
ln_coeff = ln_Gamma( xn+ONE ) - ln_Gamma( xk+ONE ) - ln_Gamma( xn-xk+ONE )
! Determine if the result is representable
IF ( ln_coeff > LOG(TWO**DIGITS(Binomial_Coefficient) - ONE) ) THEN
Binomial_Coefficient = -1
msg = 'Coefficient value too large to represent.'
CALL Display_Message( ROUTINE_NAME, msg, FAILURE )
RETURN
END IF
! Convert the coefficient logarithm to its actual value
Binomial_Coefficient = NINT(EXP(ln_coeff))
END FUNCTION Binomial_Coefficient
END MODULE Special_Functions
|
{"hexsha": "feb228ed6196c479280663ae8494ee95596609c5", "size": 14611, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/Utility/Math_Utility/Special_Functions.f90", "max_stars_repo_name": "hsbadr/crtm", "max_stars_repo_head_hexsha": "bfeb9955637f361fc69fa0b7af0e8d92d40718b1", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2020-11-19T10:00:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T02:42:18.000Z", "max_issues_repo_path": "src/Utility/Math_Utility/Special_Functions.f90", "max_issues_repo_name": "hsbadr/crtm", "max_issues_repo_head_hexsha": "bfeb9955637f361fc69fa0b7af0e8d92d40718b1", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-11-05T21:04:37.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-18T18:23:10.000Z", "max_forks_repo_path": "src/Utility/Math_Utility/Special_Functions.f90", "max_forks_repo_name": "hsbadr/crtm", "max_forks_repo_head_hexsha": "bfeb9955637f361fc69fa0b7af0e8d92d40718b1", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2020-10-29T17:54:31.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-29T08:42:45.000Z", "avg_line_length": 31.6255411255, "max_line_length": 102, "alphanum_fraction": 0.5458216412, "num_tokens": 4038}
|
#include <iostream>
#include <armadillo>
using namespace std;
using namespace arma;
int main()
{
// Constructor
arma::mat x,y;
x << 0.1778 << 0.1203 << -0.2264 << endr
<< 0.0957 << 0.2403 << -0.3400 << endr
<< 0.1397 << 0.1925 << -0.3336 << endr
<< 0.2256 << 0.3144 << -0.8695 << endr;
y << 1 << 1 << -1 << endr
<< 1 << -1 << 1 << endr
<< -1 << 1 << 1 << endr
<< 1 << 1 << 1 << endr;
// Forward
arma::mat loss_none = arma::log(1 + arma::exp(-y % x));
double loss_sum = arma::sum(arma::sum(loss_none));
double loss_mean = loss_sum / x.n_elem;
// Backward
arma::mat output ;
output.set_size(size(x));
arma::mat numerator = -y % arma::exp(-y % x);
arma::mat denominator = 1 + arma::exp(-y % x);
output = numerator / denominator;
// Display
cout << "------------------------------------------------------------------" << endl;
cout << "USER-PROVIDED MATRICES : " << endl;
cout << "------------------------------------------------------------------" << endl;
cout << "Input shape : "<< x.n_rows << " " << x.n_cols << endl;
cout << "Input : " << endl << x << endl;
cout << "Target shape : "<< y.n_rows << " " << y.n_cols << endl;
cout << "Target : " << endl << y << endl;
cout << "------------------------------------------------------------------" << endl;
cout << "SUM " << endl;
cout << "------------------------------------------------------------------" << endl;
cout << "FORWARD : " << endl;
cout << "Loss : \n" << loss_none << '\n';
cout << "Loss (sum):\n" << loss_sum << '\n';
cout << "BACKWARD : " << endl;
cout << "Output shape : "<< output.n_rows << " " << output.n_cols << endl;
cout << "Output (sum) : " << endl << output << endl;
cout << "Sum of all values in this matrix : " << arma::as_scalar(arma::accu(output)) << endl;
cout << "------------------------------------------------------------------" << endl;
cout << "MEAN " << endl;
cout << "------------------------------------------------------------------" << endl;
cout << "FORWARD : " << endl;
cout << "Loss (mean):\n" << loss_mean << '\n';
cout << "BACKWARD : " << endl;
cout << "Output shape : "<< output.n_rows << " " << output.n_cols << endl;
cout << "Output (mean) : " << endl << output / x.n_elem << endl;
cout << "Sum of all values in this matrix : " << arma::as_scalar(arma::accu(output / x.n_elem)) << endl;
cout << "------------------------------------------------------------------" << endl;
return 0;
}
|
{"hexsha": "ba168f0b6926a37fa8e7f3f4238aefa434d5151d", "size": 2517, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "soft_margin_loss/test.cpp", "max_stars_repo_name": "iamshnoo/mlpack-testing", "max_stars_repo_head_hexsha": "43f9fde18afc7f1e6d54c0a2bd59709c103eed55", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "soft_margin_loss/test.cpp", "max_issues_repo_name": "iamshnoo/mlpack-testing", "max_issues_repo_head_hexsha": "43f9fde18afc7f1e6d54c0a2bd59709c103eed55", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "soft_margin_loss/test.cpp", "max_forks_repo_name": "iamshnoo/mlpack-testing", "max_forks_repo_head_hexsha": "43f9fde18afc7f1e6d54c0a2bd59709c103eed55", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.9523809524, "max_line_length": 106, "alphanum_fraction": 0.417560588, "num_tokens": 735}
|
#!/usr/bin/env python
# coding: utf-8
# In[38]:
import os
import time
import csv
import numpy as np
import torch
import torch.nn.parallel
import torch.optim
import models
import utils
from PIL import Image
import matplotlib.pyplot as plt
# In[2]:
checkpoint = torch.load('./mobilenet-nnconv5dw-skipadd-pruned.pth.tar',map_location=torch.device('cpu'))
# In[33]:
if type(checkpoint) is dict:
start_epoch = checkpoint['epoch']
best_result = checkpoint['best_result']
model = checkpoint['model']
else:
start_epoch = 0
model = checkpoint
# In[41]:
def loadimg(filepath):
img = Image.open(filepath).convert('RGB').resize((224,224),Image.NEAREST)
img = np.asarray(img).astype('float')
img /= 255.0
img = np.expand_dims(img,axis=0)
img = np.transpose(img, (0,3, 1, 2))
return torch.from_numpy(img).float().to('cpu')
# In[72]:
img = loadimg('./examples/IMG_2148.png')
# In[73]:
with torch.no_grad():
pred = model(img)
# In[74]:
pred[0][0]
# In[75]:
result = pred[0][0].numpy()
# In[76]:
from mpl_toolkits.mplot3d import Axes3D
# generate some sample data
import scipy.misc
# create the x and y coordinate arrays (here we just use pixel indices)
xx, yy = np.mgrid[0:result.shape[0], 0:result.shape[1]]
# create the figure
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(xx, yy, result ,rstride=1, cstride=1, cmap=plt.cm.gray,
linewidth=0)
# show it
plt.show()
# In[ ]:
|
{"hexsha": "ea97d3c3b2211833ce020361db1602f9cab7509f", "size": 1479, "ext": "py", "lang": "Python", "max_stars_repo_path": "fastdepth.py", "max_stars_repo_name": "tolleybot/fast-depth", "max_stars_repo_head_hexsha": "f5488d8bcfbfc2f50186fb200224f06509c4ef23", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "fastdepth.py", "max_issues_repo_name": "tolleybot/fast-depth", "max_issues_repo_head_hexsha": "f5488d8bcfbfc2f50186fb200224f06509c4ef23", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "fastdepth.py", "max_forks_repo_name": "tolleybot/fast-depth", "max_forks_repo_head_hexsha": "f5488d8bcfbfc2f50186fb200224f06509c4ef23", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 14.5, "max_line_length": 104, "alphanum_fraction": 0.6653144016, "include": true, "reason": "import numpy,import scipy", "num_tokens": 425}
|
import Thermodynamics
import StatsBase
import PrettyTables
import OrderedCollections
const TD = Thermodynamics
const TP = TD.Parameters
using JET
using Test
import UnPack
import BenchmarkTools
import CLIMAParameters
const CP = CLIMAParameters
const FT = Float64
toml_dict = CP.create_toml_dict(FT; dict_type = "alias")
aliases = string.(fieldnames(TP.ThermodynamicsParameters))
param_pairs = CP.get_parameter_values!(toml_dict, aliases, "Thermodynamics")
const param_set = TP.ThermodynamicsParameters{FT}(; param_pairs...)
#####
##### Finding indexes in profiles satisfying certain conditions
#####
function find_freezing_index(profiles)
i = findfirst(T -> T === TP.T_freeze(param_set), profiles.T)
isnothing(i) && error("Freezing index not found")
return i
end
function find_dry(profiles)
i = findfirst(q_tot -> q_tot === 0.0, profiles.q_tot)
isnothing(i) && error("Dry index not found")
return i
end
function find_moist_index(profiles)
i = findfirst(q_tot -> q_tot > 0.01, profiles.q_tot)
isnothing(i) && error("Moist index not found")
return i
end
function find_sat_adjust_index(profiles, f::TDC) where {TDC}
kwargs = get_kwargs(profiles, f)
ts = f.(param_set, values(kwargs)...) # TDC is the thermo constructor
T_sa = TD.air_temperature.(param_set, ts)
Z = zip(T_sa, profiles.T)
i = findfirst(x -> abs(x[1] - x[2]) > 0.0001, collect(Z))
isnothing(i) && error("Saturation adjustment index not found")
# @info "T_error for i_sat_adjust = $(abs(T_sa[i] - T[i]))"
return i
end
#=
conditions_index
Find an index in the given `profiles` that satisfies
some condition. For example, find the index that we're
sure that saturation adjustment is performed for a given
(profiles.p[i], profiles.θ_liq_ice[i], profiles.q_tot[i]).
=#
function conditions_index(profiles, sym, constructor)
i = if sym == :dry
find_dry(profiles)
elseif sym == :freezing
find_freezing_index(profiles)
elseif sym == :sat_adjust
find_sat_adjust_index(profiles, constructor)
elseif sym == :moist
find_moist_index(profiles)
else
error("Bad sym given")
end
return i
end
function sample_args(profiles, sym, constructor)
i = conditions_index(profiles, sym, constructor)
kwargs = get_kwargs(profiles, constructor)
return getindex.(values(kwargs), i)
end
#####
##### BenchmarkTools's trial utils
#####
get_summary(trial) = (;
# Using some BenchmarkTools internals :/
mem = BenchmarkTools.prettymemory(trial.memory),
nalloc = trial.allocs,
t_min = BenchmarkTools.prettytime(minimum(trial.times)),
t_max = BenchmarkTools.prettytime(maximum(trial.times)),
t_mean = BenchmarkTools.prettytime(StatsBase.mean(trial.times)),
t_med = BenchmarkTools.prettytime(StatsBase.median(trial.times)),
n_samples = length(trial),
)
function tabulate_summary(summary)
summary_keys = collect(keys(summary))
mem = map(k -> summary[k].mem, summary_keys)
nalloc = map(k -> summary[k].nalloc, summary_keys)
t_mean = map(k -> summary[k].t_mean, summary_keys)
t_min = map(k -> summary[k].t_min, summary_keys)
t_max = map(k -> summary[k].t_max, summary_keys)
t_med = map(k -> summary[k].t_med, summary_keys)
n_samples = map(k -> summary[k].n_samples, summary_keys)
table_data = hcat(
string.(collect(keys(summary))),
mem,
nalloc,
t_min,
t_max,
t_mean,
t_med,
n_samples,
)
header = (
[
"Constructor",
"Memory",
"allocs",
"Time",
"Time",
"Time",
"Time",
"N-samples",
],
[
"(+conditions)",
"estimate",
"estimate",
"min",
"max",
"mean",
"median",
"",
],
)
println()
PrettyTables.pretty_table(
table_data;
header,
crop = :none,
alignment = vcat(:l, repeat([:r], length(header[1]) - 1)),
)
end
#####
##### Constructor-specific configurations
#####
# unpack variables from profiles into NamedTuple:
up(profiles, syms) = (; zip(syms, getproperty.(Ref(profiles), syms))...)
get_kwargs(p, ::typeof(TD.PhaseEquil_ρeq)) = up(p, :(ρ, e_int, q_tot).args)
get_kwargs(p, ::typeof(TD.PhaseEquil_ρTq)) = up(p, :(p, T, q_tot).args)
get_kwargs(p, ::typeof(TD.PhaseEquil_pTq)) = up(p, :(p, T, q_tot).args)
get_kwargs(p, ::typeof(TD.PhaseEquil_peq)) = up(p, :(p, e_int, q_tot).args)
get_kwargs(p, ::typeof(TD.PhaseEquil_phq)) = up(p, :(p, h, q_tot).args)
get_kwargs(p, ::typeof(TD.PhaseEquil_ρθq)) = up(p, :(ρ, θ_liq_ice, q_tot).args)
get_kwargs(p, ::typeof(TD.PhaseEquil_pθq)) = up(p, :(p, θ_liq_ice, q_tot).args)
get_kwargs(p, ::typeof(TD.PhaseEquil_ρpq)) = up(p, :(ρ, p, q_tot).args)
# Conditions to perform microbenchmarks and JET tests:
# note: No freezing points exist in
# TD.TestedProfiles.PhaseEquilProfiles(param_set, ArrayType)!
# so we're not testing performance of these branches.
conditions(::typeof(TD.PhaseEquil_ρeq)) = (:dry, :sat_adjust)
conditions(::typeof(TD.PhaseEquil_ρTq)) = (:dry, :moist)
conditions(::typeof(TD.PhaseEquil_pTq)) = (:dry, :moist) # no sat adjust exists!
conditions(::typeof(TD.PhaseEquil_peq)) = (:dry, :sat_adjust)
conditions(::typeof(TD.PhaseEquil_phq)) = (:dry, :sat_adjust)
conditions(::typeof(TD.PhaseEquil_ρθq)) = (:dry, :sat_adjust)
conditions(::typeof(TD.PhaseEquil_pθq)) = (:dry, :sat_adjust)
conditions(::typeof(TD.PhaseEquil_ρpq)) = (:dry, :sat_adjust)
|
{"hexsha": "f4caadc78e7b8e7bbe112af30929fceebea2a44d", "size": 5610, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "perf/common_micro_bm.jl", "max_stars_repo_name": "climate-machine/MoistThermodynamics.jl", "max_stars_repo_head_hexsha": "a7cadd68d5241d6059eafc82133da2358a1a4ec9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "perf/common_micro_bm.jl", "max_issues_repo_name": "climate-machine/MoistThermodynamics.jl", "max_issues_repo_head_hexsha": "a7cadd68d5241d6059eafc82133da2358a1a4ec9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-03-27T20:37:12.000Z", "max_issues_repo_issues_event_max_datetime": "2020-05-06T03:45:50.000Z", "max_forks_repo_path": "perf/common_micro_bm.jl", "max_forks_repo_name": "climate-machine/MoistThermodynamics.jl", "max_forks_repo_head_hexsha": "a7cadd68d5241d6059eafc82133da2358a1a4ec9", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.3407821229, "max_line_length": 80, "alphanum_fraction": 0.6536541889, "num_tokens": 1555}
|
# -*- coding: utf-8 -*-
"""customer_Churn_prediction_using_ANN.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/114T0FexoqqbqdxPn9f-FqSy7F0ly6CMY
"""
from google.colab import drive
drive.mount('/content/gdrive/')
# Importing useful Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Importing the datasets
dataset = pd.read_csv("/content/gdrive/My Drive/customer_churn_prediction/Churn_Modelling.csv")
dataset.info()
dataset.shape
dataset.head()
# Since RowNumber,CustomerId And Surname doesn't provide much information on predicting the customer churning behaviour in a bank
# So, we remove those columns in our dataset
X = dataset.iloc[:,3:13]
y = dataset.iloc[:,13]
X.shape
# Printing the feature values of our dataset
X
# Printing the labels of our datasets
y
# By observing the feature values ,we know that country and gender are categorical values in the dataset and
# while building our machine learning models the categorical varaibale and values are not allowed. So we need to encode those categorical data
# Encoding Categorical Data
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
label_X_1 = LabelEncoder()
X['Geography'] = label_X_1.fit_transform(X['Geography'])
label_X_2 = LabelEncoder()
X['Gender'] = label_X_2.fit_transform(X['Gender'])
# 0 stans for France , 2 stands for Spain and 1 stands for germany
# 0 stands for Female and 1 stands for Male
X
# Since we are encoding three different countries France , Spain and germany as 0 , 2 and 1 . However there are not any relationship between these countries
# but encoding them like this shows that Spain is greater than germany and France mathematically. So for this purpose we need to perform one hot encoding.
X.Geography.values.shape
onehotencoder = OneHotEncoder()
ohe = onehotencoder.fit_transform(X.Geography.values.reshape(-1,1)).toarray()
# 0 stans for France , 2 stands for Spain and 1 stands for germany
# 0 stands for Female and 1 stands for Male
X.head(5)
i = 0
for items in ohe:
print(items)
i+= 1
if(i==5):
break
encoded_df = pd.DataFrame(ohe,columns=['France','Germany','Spain'])
encoded_df
import pandas as pd
X = pd.concat([encoded_df,X],axis=1)
X
# Removing one dummy feature / variable / columns.
# Dropping Geography columns and one dummy variable columns i.e. France
#
preprocessed_dataframe = X.drop(['France','Geography'],axis=1)
preprocessed_dataframe.head()
trainable_data = preprocessed_dataframe.iloc[:,:].values
trainable_data[0]
trainable_labels = dataset.iloc[:,13].values
trainable_labels
trainable_data.shape
# Splitting the dataset into Training set and Test set
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(trainable_data,trainable_labels,test_size=0.2,random_state=0)
#Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.fit_transform(X_test)
# Now Data preprocessing step is finished now we must focus on building the architecture of ANN
#Importing the Keras Libraries and Packages
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Activation
import tensorflow as tf
classifier = Sequential()
#Adding the input layer and the first hidden layer
classifier.add(Dense(units=8,kernel_initializer='uniform',input_dim=11))
classifier.add(Activation('relu'))
#Adding the second hidden layer
classifier.add(Dense(units = 8,kernel_initializer='uniform',activation='relu'))
#Adding the output layer
classifier.add(Dense(units=1,kernel_initializer='uniform',activation='sigmoid'))
#Compiling the ANN model
classifier.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01, beta_1=0.9, beta_2=0.999, amsgrad=False),loss='binary_crossentropy',metrics=['accuracy'])
# Fitting the ANN to the trainable splits
history = classifier.fit(X_train,y_train,batch_size=25,epochs=100)
classifier.evaluate(X_test,y_test,verbose=2)
from sklearn.metrics import confusion_matrix
y_pred = classifier.predict(X_test)
y_pred = (y_pred > 0.5)
cn = confusion_matrix(y_test,y_pred)
cn
acc = history.history['accuracy']
# val_acc = history.history['val_accuracy']
loss = history.history['loss']
# val_loss = history.history['val_loss']
plt.figure(figsize=(8, 8))
# plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
# plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()),1])
plt.title('Training Accuracy')
# plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
# plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0,1.0])
plt.title('Training and Loss')
plt.xlabel('epoch')
plt.show()
# Cross validating the model using splitting validation datasets by 20%
train_valid_history =classifier.fit(X_train,y_train,batch_size=25,epochs=100,validation_split=0.2)
acc = train_valid_history.history['accuracy']
val_acc = train_valid_history.history['val_accuracy']
loss = train_valid_history.history['loss']
val_loss = train_valid_history.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()),1])
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0,1.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.show()
#Time for prediction
# Predict a single new observation if the customer with the following information will leave the bank or not
# Geography: France
# Credit Score : 600
# Gender : Male
# Age : 40
# Tenure : 3
# Balance : 60000
# No.of products: 2
# Has credit card : Yes
# Is Active Member : Yes
# Estimated Salary : 50000
preprocessed_dataframe
new_prediction_score = classifier.predict(sc.transform(np.array([[0,0,600,1,40,3,60000,2,1,1,50000]])))
new_prediction = (new_prediction_score > 0.5)
if(new_prediction==False):
print("The customer won't leave the bank")
else:
print("The customer will leave the bank")
# K-Fold Cross Validation Strategy for getting the accurate model accuracy
import sklearn
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
# Building a function to load the keras model
def build_model():
model = Sequential()
model.add(Dense(units=8,kernel_initializer='uniform',input_dim=11))
model.add(Activation('relu'))
model.add(Dense(units = 8,kernel_initializer='uniform',activation='relu'))
model.add(Dense(units=1,kernel_initializer='uniform',activation='sigmoid'))
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01, beta_1=0.9, beta_2=0.999, amsgrad=False),loss='binary_crossentropy',metrics=['accuracy'])
return model
model = KerasClassifier(build_fn=build_model,epochs=100,batch_size=25)
# Performing cross validation technique in all of our datasets
trainable_data.shape
len(trainable_labels)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
k_fold_train_valid = sc.fit_transform(trainable_data)
k_fold_train_valid.shape
cross_val_accuraies = cross_val_score(estimator=model,X=k_fold_train_valid,y=trainable_labels,cv=5,n_jobs=-1)
mean = cross_val_accuraies.mean()
variance = cross_val_accuraies.std()
print("The mean accuracy of our model after using k-fold cross validation is : ",mean)
print("The minimum accuracy of our model after using k-fold cross validation is : ",min(cross_val_accuraies))
print("The maximum accuracy of our model after using k-fold cross validation is : ",max(cross_val_accuraies))
# Tuning the hyperparameters of ANN
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV
from keras.models import Sequential
from keras.layers import Dense
import tensorflow as tf
def build_classifier(units):
tuning_model = Sequential()
tuning_model.add(Dense(units=units,kernel_initializer='glorot_uniform',activation='relu',input_dim=11))
tuning_model.add(Dense(units=units,kernel_initializer='glorot_uniform',activation='relu'))
tuning_model.add(Dense(units=1,kernel_initializer='glorot_uniform',activation='sigmoid'))
tuning_model.compile(optimizer=tf.keras.optimizers.Adam(lr=0.01),loss='binary_crossentropy',metrics=['accuracy'])
return tuning_model
tuning_model = KerasClassifier(build_classifier)
# Choosing the hyperparameters by our ANN automatically for optimizing the accuracy of our models
parameters = {'batch_size':[16,32],
'epochs':[150,200],
'units':[16,32]}
grid_search = GridSearchCV(estimator = tuning_model,param_grid=parameters,scoring='accuracy',cv=5)
grid_search = grid_search.fit(X=X_train,y=y_train)
best_parameters = grid_search.best_params_
best_accuracy = grid_search.best_score_
print(best_parameters)
print(best_accuracy)
# Applying best parameters and checking the model accuracy
best_model = Sequential()
best_model.add(Dense(units=16,kernel_initializer='uniform',input_dim=11))
best_model.add(Activation('relu'))
best_model.add(Dense(units = 16,kernel_initializer='uniform',activation='relu'))
best_model.add(Dense(units=1,kernel_initializer='uniform',activation='sigmoid'))
best_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01, beta_1=0.9, beta_2=0.999, amsgrad=False),loss='binary_crossentropy',metrics=['accuracy'])
best_model.fit(x=X_train,y=y_train,batch_size=16,epochs=200,validation_split=0.2)
best_model.fit(X_test,y_test,verbose=2)
# Since,batch_size of 25 is not included in our parameter tuning but 25 is the perfect batch size for getting good performance of our models. So checking
# the accuracy with other hyperparameters obtained from parameter tuning with batch_size of 25
best_model.fit(x=X_train,y=y_train,batch_size=25,epochs=200,validation_split=0.2)
best_model.evaluate(X_test,y_test)
best_model = Sequential()
best_model.add(Dense(units=16,kernel_initializer='uniform',activation='relu',input_dim=11))
best_model.add(Dense(units = 16,kernel_initializer='uniform',activation='relu'))
best_model.add(Dense(units=1,kernel_initializer='uniform',activation='sigmoid'))
best_model.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])
best_model.fit(x=X_train,y=y_train,batch_size=25,epochs=200,validation_data=(X_test,y_test))
rms_model = Sequential()
rms_model.add(Dense(units=16,kernel_initializer='uniform',activation='relu',input_dim=11))
rms_model.add(Dense(units = 16,kernel_initializer='uniform',activation='relu'))
rms_model.add(Dense(units=1,kernel_initializer='uniform',activation='sigmoid'))
rms_model.compile(optimizer='rmsprop',loss='binary_crossentropy',metrics=['accuracy'])
final_history = rms_model.fit(x=X_train,y=y_train,batch_size=25,epochs=200,validation_data=(X_test,y_test))
# From all the computations we can say that rms prop optimizer perform better than adam optimizer. So we the final model has 16 hidden layers with batch
# size of 25 and optimizer equals adam and the epochs is equal to 200 and the average validation and testing accuracy is 86%
acc = final_history.history['accuracy']
val_acc = final_history.history['val_accuracy']
loss = final_history.history['loss']
val_loss = final_history.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()),1])
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0,1.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.show()
# Saving our model and it's architecture
# Since rms prop is performing well so we serailize rms_prop model to use it further for deployment.
# serialize model to JSON
model_json = rms_model.to_json()
with open("customer_churn_prediction_model.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
rms_model.save_weights("customer_churn_prediction_model.h5")
print("Saved model to disk")
# Saving the model weights in your google drive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
# 1. Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
# get the folder id where you want to save your file
file = drive.CreateFile({'parents':[{u'id': '1WPJKqUwcgnQz6aMP2yZM9l4d-71cUyBa'}]})
file.SetContentFile('/content/customer_churn_prediction_model.h5')
file.Upload()
# Saving your model architecture in your google drive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
# 1. Authenticate and create the PyDrive client.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
# get the folder id where you want to save your file
file = drive.CreateFile({'parents':[{u'id': '1WPJKqUwcgnQz6aMP2yZM9l4d-71cUyBa'}]})
file.SetContentFile('/content/customer_churn_prediction_model.json')
file.Upload()
|
{"hexsha": "e52d661c507de0bad4b8749b44cbe0526c3519fe", "size": 13786, "ext": "py", "lang": "Python", "max_stars_repo_path": "customer_churn_prediction_using_ann.py", "max_stars_repo_name": "prabhat-123/Customer-Churn-Prediction-Deployment-In-Flask", "max_stars_repo_head_hexsha": "5d596f0d5d4b3bf52687ceb14e1053783334ec18", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-04-29T02:22:30.000Z", "max_stars_repo_stars_event_max_datetime": "2020-04-29T02:22:30.000Z", "max_issues_repo_path": "customer_churn_prediction_using_ann.py", "max_issues_repo_name": "prabhat-123/Customer-Churn-Prediction-Deployment-In-Flask", "max_issues_repo_head_hexsha": "5d596f0d5d4b3bf52687ceb14e1053783334ec18", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "customer_churn_prediction_using_ann.py", "max_forks_repo_name": "prabhat-123/Customer-Churn-Prediction-Deployment-In-Flask", "max_forks_repo_head_hexsha": "5d596f0d5d4b3bf52687ceb14e1053783334ec18", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-08-08T19:58:00.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-08T19:58:00.000Z", "avg_line_length": 32.0604651163, "max_line_length": 163, "alphanum_fraction": 0.7817350936, "include": true, "reason": "import numpy", "num_tokens": 3310}
|
import sys
import unittest
import array
import pickle
import operator
import platform
import numpy
from deap import creator
from deap import base
from deap import gp
from deap import tools
def func():
return "True"
class Pickling(unittest.TestCase):
def setUp(self):
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("IndList", list, fitness=creator.FitnessMax)
creator.create("IndArray", array.array, typecode='f', fitness=creator.FitnessMax)
creator.create("IndNDArray", numpy.ndarray, typecode='f', fitness=creator.FitnessMax)
creator.create("IndTree", gp.PrimitiveTree, fitness=creator.FitnessMax)
self.toolbox = base.Toolbox()
self.toolbox.register("func", func)
self.toolbox.register("lambda_func", lambda: "True")
def tearDown(self):
del creator.FitnessMax
del creator.IndList
del creator.IndArray
del creator.IndNDArray
del creator.IndTree
def test_pickle_fitness(self):
fitness = creator.FitnessMax()
fitness.values = (1.0,)
fitness_s = pickle.dumps(fitness)
fitness_l = pickle.loads(fitness_s)
self.assertEqual(fitness, fitness_l, "Unpickled fitness != pickled fitness")
def test_pickle_ind_list(self):
ind = creator.IndList([1.0, 2.0, 3.0])
ind.fitness.values = (4.0,)
ind_s = pickle.dumps(ind)
ind_l = pickle.loads(ind_s)
self.assertEqual(ind, ind_l, "Unpickled individual list != pickled individual list")
self.assertEqual(ind.fitness, ind_l.fitness, "Unpickled individual fitness != pickled individual fitness")
def test_pickle_ind_array(self):
ind = creator.IndArray([1.0, 2.0, 3.0])
ind.fitness.values = (4.0,)
ind_s = pickle.dumps(ind)
ind_l = pickle.loads(ind_s)
self.assertEqual(ind, ind_l, "Unpickled individual array != pickled individual array")
self.assertEqual(ind.fitness, ind_l.fitness, "Unpickled individual fitness != pickled individual fitness")
# @unittest.skipIf(platform.python_implementation() == "PyPy", "PyPy support for pickling ndarrays is very unstable.")
def test_pickle_ind_ndarray(self):
ind = creator.IndNDArray([1.0, 2.0, 3.0])
ind.fitness.values = (4.0,)
ind_s = pickle.dumps(ind)
ind_l = pickle.loads(ind_s)
self.assertTrue(all(ind == ind_l), "Unpickled individual numpy.ndarray != pickled individual numpy.ndarray")
self.assertEqual(ind.fitness, ind_l.fitness, "Unpickled individual fitness != pickled individual fitness")
def test_pickle_tree_input(self):
pset = gp.PrimitiveSetTyped("MAIN", [int], int, "IN")
pset.addPrimitive(operator.add, [int, int], int)
expr = gp.genFull(pset, min_=1, max_=1)
ind = creator.IndTree(expr)
ind.fitness.values = (1.0,)
ind_s = pickle.dumps(ind, pickle.HIGHEST_PROTOCOL)
ind_l = pickle.loads(ind_s)
msg = "Unpickled individual %s != pickled individual %s" % (str(ind), str(ind_l))
self.assertEqual(ind, ind_l, msg)
msg = "Unpickled fitness %s != pickled fitness %s" % (str(ind.fitness), str(ind_l.fitness))
self.assertEqual(ind.fitness, ind_l.fitness, msg)
def test_pickle_tree_term(self):
pset = gp.PrimitiveSetTyped("MAIN", [], int, "IN")
pset.addPrimitive(operator.add, [int, int], int)
pset.addTerminal(1, int)
expr = gp.genFull(pset, min_=1, max_=1)
ind = creator.IndTree(expr)
ind.fitness.values = (1.0,)
ind_s = pickle.dumps(ind, pickle.HIGHEST_PROTOCOL)
ind_l = pickle.loads(ind_s)
msg = "Unpickled individual %s != pickled individual %s" % (str(ind), str(ind_l))
self.assertEqual(ind, ind_l, msg)
msg = "Unpickled fitness %s != pickled fitness %s" % (str(ind.fitness), str(ind_l.fitness))
self.assertEqual(ind.fitness, ind_l.fitness, msg)
def test_pickle_tree_ephemeral(self):
pset = gp.PrimitiveSetTyped("MAIN", [], int, "IN")
pset.addPrimitive(operator.add, [int, int], int)
pset.addEphemeralConstant("E1", lambda: 2, int)
expr = gp.genFull(pset, min_=1, max_=1)
ind = creator.IndTree(expr)
ind.fitness.values = (1.0,)
ind_s = pickle.dumps(ind, pickle.HIGHEST_PROTOCOL)
ind_l = pickle.loads(ind_s)
msg = "Unpickled individual %s != pickled individual %s" % (str(ind), str(ind_l))
self.assertEqual(ind, ind_l, msg)
msg = "Unpickled fitness %s != pickled fitness %s" % (str(ind.fitness), str(ind_l.fitness))
self.assertEqual(ind.fitness, ind_l.fitness, msg)
def test_pickle_population(self):
ind1 = creator.IndList([1.0,2.0,3.0])
ind1.fitness.values = (1.0,)
ind2 = creator.IndList([4.0,5.0,6.0])
ind2.fitness.values = (2.0,)
ind3 = creator.IndList([7.0,8.0,9.0])
ind3.fitness.values = (3.0,)
pop = [ind1, ind2, ind3]
pop_s = pickle.dumps(pop)
pop_l = pickle.loads(pop_s)
self.assertEqual(pop[0], pop_l[0], "Unpickled individual list != pickled individual list")
self.assertEqual(pop[0].fitness, pop_l[0].fitness, "Unpickled individual fitness != pickled individual fitness")
self.assertEqual(pop[1], pop_l[1], "Unpickled individual list != pickled individual list")
self.assertEqual(pop[1].fitness, pop_l[1].fitness, "Unpickled individual fitness != pickled individual fitness")
self.assertEqual(pop[2], pop_l[2], "Unpickled individual list != pickled individual list")
self.assertEqual(pop[2].fitness, pop_l[2].fitness, "Unpickled individual fitness != pickled individual fitness")
# @unittest.skipIf(platform.python_implementation() == "PyPy", "PyPy support for pickling ndarrays (thus stats) is very unstable.")
def test_pickle_logbook(self):
stats = tools.Statistics()
logbook = tools.Logbook()
stats.register("mean", numpy.mean)
record = stats.compile([1,2,3,4,5,6,8,9,10])
logbook.record(**record)
logbook_s = pickle.dumps(logbook)
logbook_r = pickle.loads(logbook_s)
self.assertEqual(logbook, logbook_r, "Unpickled logbook != pickled logbook")
@unittest.skipIf(sys.version_info < (2, 7), "Skipping test because Python version < 2.7 does not pickle partials.")
def test_pickle_partial(self):
func_s = pickle.dumps(self.toolbox.func)
func_l = pickle.loads(func_s)
self.assertEqual(self.toolbox.func(), func_l())
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(Pickling)
unittest.TextTestRunner(verbosity=2).run(suite)
|
{"hexsha": "ed8c327d10123bc18f6ef3045086d9d29c053cd7", "size": 6760, "ext": "py", "lang": "Python", "max_stars_repo_path": "env/lib/python3.9/site-packages/deap/tests/test_pickle.py", "max_stars_repo_name": "wphoong/flappy_doge", "max_stars_repo_head_hexsha": "c778f0e4820c1ed46e50a56f989d57df4f386736", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-08-04T13:12:42.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-16T13:26:19.000Z", "max_issues_repo_path": "env/lib/python3.9/site-packages/deap/tests/test_pickle.py", "max_issues_repo_name": "wphoong/flappy_doge", "max_issues_repo_head_hexsha": "c778f0e4820c1ed46e50a56f989d57df4f386736", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-03-24T17:12:34.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-08T21:09:52.000Z", "max_forks_repo_path": "env/lib/python3.9/site-packages/deap/tests/test_pickle.py", "max_forks_repo_name": "wphoong/flappy_doge", "max_forks_repo_head_hexsha": "c778f0e4820c1ed46e50a56f989d57df4f386736", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-04-23T09:01:13.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-23T09:01:13.000Z", "avg_line_length": 42.25, "max_line_length": 135, "alphanum_fraction": 0.6545857988, "include": true, "reason": "import numpy", "num_tokens": 1756}
|
orth(S::AbstractArray{<:Real}) = Matrix(qr(S).Q)
"""
S_update_a
Random set of orthonormal sample directions.
See: Equation (6.1.a).
"""
function S_update_a(Sₖ, Yₖ, pₖ)
return orth(randn(eltype(Sₖ), size(Sₖ)...))
end
"""
S_update_b
Random set of sample directions orthogonal to the previous sample space given by input Sₖ.
See: Equation (6.1.b).
"""
function S_update_b(Sₖ, Yₖ, pₖ)
M = randn(eltype(Sₖ), size(Sₖ)...)
return orth(M - Sₖ * (Sₖ' * M))
end
"""
S_update_c
Attempts to guide the algorithm in accurately resolving the eigenspace associated with the
larger Hessian eigenvalues.
See: Equation (6.1.c).
"""
function S_update_c(Sₖ, Yₖ, pₖ)
return orth(Yₖ - Sₖ * (Sₖ' * Yₖ))
end
"""
S_update_d
A variant of (6.1.a) that includes approximate curvature information along the previously
chosen step.
See: Equation (6.1.d).
"""
function S_update_d(Sₖ, Yₖ, pₖ)
return orth([orth(randn(eltype(Sₖ), size(Sₖ, 1), size(Sₖ, 2) - 1)) pₖ])
end
"""
S_update_e
A variant of (6.1.b) that includes approximate curvature information along the previously
chosen step.
See: Equation (6.1.e).
"""
function S_update_e(Sₖ, Yₖ, pₖ)
M = randn(size(Sₖ, 1), size(Sₖ, 2) - 1)
return orth([orth(M - Sₖ * (Sₖ' * M)) pₖ])
end
"""
S_update_f
A variant of (6.1c) that includes approximate curvature information along the previously
chosen step.
See: Equation (6.1.f).
"""
function S_update_f(Sₖ, Yₖ, pₖ)
return orth([orth(Yₖ[:, begin:end-1] - Sₖ * (Sₖ' * Yₖ[:, begin:end-1])) pₖ])
end
"""
SR1
Returns the algebraically minimal SR1 inverse Quasi-Newton block update,
where ``δ`` is the Moore-Penrose pseudoinverse relative tolerance used in `pinv`.
See: Algorithm ``4.2.``
"""
function SR1(
H::AbstractArray{<:Real},
U::AbstractArray{<:Real},
V::AbstractArray{<:Real},
δ::Float64,
)
U_minus_HV = U - H * V
if size(U, 2) == 1
return Symmetric(H + ((U_minus_HV) * (U_minus_HV)') / ((U_minus_HV)' * V))
end
return Symmetric(H + U_minus_HV * pinv(U_minus_HV' * V, rtol = δ) * U_minus_HV')
end
"""
PSB
Powell-Symmetric-Broyden generalized Quasi-Newton block update,
where ``δ`` is the Moore-Penrose pseudoinverse relative tolerance used in `pinv`.
See: Algorithm ``4.3.``
"""
function PSB(
H::AbstractArray{<:Real},
U::AbstractArray{<:Real},
V::AbstractArray{<:Real},
δ::Float64,
)
if size(V, 2) == 1
T₁ = 1 / (V' * V)
else
T₁ = pinv(V' * V, rtol = δ)
end
T₂ = V * T₁ * (U - H * V)'
return Symmetric(H + T₂ + T₂' - T₂ * V * T₁ * V')
end
"""
Driver
An immutable structure specifying a simulation's preliminary secant update flag (`pflag`),
the ``Sₖ₊₁`` update formula, and the Quasi-Newton update formula used. If keyword arguments
aren't provided to a drivers' construction, then the default `S_update`, `pflag`, and `QN_updates`
are assigned. A Driver contains a mutable `DriverOptions` instance, which allows for forwarding
of the options interface to the Driver interface.
See `S_update`, `QN_update`, and `pflag` for more on keyword arguments.
"""
mutable struct Driver
S_update::Function
QN_update::Function
pflag::Bool
options::DriverOptions
function Driver(;
S_update = S_update_c,
QN_update = SR1,
pflag = false,
options = DriverOptions(),
)
# TODO: Confrim S_update and QN_update inputs are valid
# Maybe parse symbol and compare to set of strings?
new(S_update, QN_update, pflag, options)
end
end
"""
S_update(d::Driver)
The supplemental sample direction update formula of Driver `d`.
Values: `S_update_a`, `S_update_b`, `S_update_c`, `S_update_d`, `S_update_e`, `S_update_f`
"""
S_update(d::Driver) = getfield(d, :S_update)
"""
QN_update(d::Driver)
The QN update formula of Driver `d`.
Values: `SR1`, `PSB`
"""
QN_update(d::Driver) = getfield(d, :QN_update)
"""
pflag(d::Driver)
The preliminary secant QN update flag of driver `d` wraping a boolean.
Options `true`, `false`
"""
pflag(d::Driver) = getfield(d, :pflag)
options(d::Driver) = getfield(d, :options)
# Inhereted/Forwarded DriverOptions Methods
samples(d::Driver) = samples(options(d))
samples!(d::Driver, samples) = samples!(options(d), samples)
Δ_max(d::Driver) = Δ_max(options(d))
Δ_max!(d::Driver, Δ_max) = Δ_max!(options(d), Δ_max)
δ_tol(d::Driver) = δ_tol(options(d))
δ_tol!(d::Driver, δ_tol) = δ_tol!(options(d), δ_tol)
ϵ_tol(d::Driver) = ϵ_tol(options(d))
ϵ_tol!(d::Driver, ϵ_tol) = ϵ_tol!(options(d), ϵ_tol)
max_iterations(d::Driver) = max_iterations(options(d))
max_iterations!(d::Driver, m) = max_iterations!(options(d), m)
weave_level(d::Driver) = weave_level(options(d))
weave_level!(d::Driver, level) = weave_level!(options(d), level)
log_level(d::Driver) = log_level(options(d))
log_level!(d::Driver, level) = log_level!(options(d), level)
Base.getproperty(d::Driver, s::Symbol) = @restrict Driver
Base.propertynames(d::Driver) = ()
|
{"hexsha": "9982837cfa03ed88ca6c637a977023f86a914a78", "size": 5035, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/driver.jl", "max_stars_repo_name": "danphenderson/BlockOptim.jl", "max_stars_repo_head_hexsha": "c53672e67e8aba4daea2f1b8c7d2effd042d63c3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/driver.jl", "max_issues_repo_name": "danphenderson/BlockOptim.jl", "max_issues_repo_head_hexsha": "c53672e67e8aba4daea2f1b8c7d2effd042d63c3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-11-16T21:04:58.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-16T21:04:58.000Z", "max_forks_repo_path": "src/driver.jl", "max_forks_repo_name": "danphenderson/BlockOptim.jl", "max_forks_repo_head_hexsha": "c53672e67e8aba4daea2f1b8c7d2effd042d63c3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.8921161826, "max_line_length": 98, "alphanum_fraction": 0.6589870904, "num_tokens": 1564}
|
r"""
Contains methods to calculate certain quantities used in thermal states, open systems, and quantum thermodynamics.
TODO update docstring examples and write some tests after writing some tutorials
.. currentmodule:: quanguru.QuantumToolbox.thermodynamics
Functions
---------
.. autosummary::
nBarThermal
qubitPolarisation
HeatCurrent
"""
from numpy import exp, real # type: ignore
from .states import mat2Vec, vec2Mat
from .customTypes import Matrix
def nBarThermal(angFreq: float, temp: float, hbar: float = 1.0, kb: float = 1.0) -> float:
r"""
Calculates average excitation number :math:`\bar{n}(T) := 1/(e^{\hbar \omega / k_{b}T} - 1)` of a bosonic field with
frequeny :math:`\omega` at a temperature T.
Boltzmann and reduced Planck constants are by default :math:`\hbar = k_{B} = 1`.
TODO Physical constants' default values should be connected to simUnits.
Parameters
----------
angFreq : float
(angular) frequency of the bosonic field
temp : float
temperature
hbar : float
reduced Planck's constant
kb : float
Boltzmann constant
Returns
-------
float
Average excitation number
Raises
------
ValueError
If average number is infinite.
Examples
--------
# TODO
"""
if exp((hbar*angFreq) / (temp*kb)) == 1:
raise ValueError('?')
return 1.0 / (exp((hbar*angFreq) / (temp*kb)) - 1)
def qubitPolarisation(freq: float, temp: float) -> float:
r"""
Returns the polarisation :math:`\langle\hat{\sigma}_{z}\rangle := P_{1} - P_{0}` of a qubit with frequency
:math:`\omega`
in a thermal state of temperature T. :math:`P_{1}` and :math:`P_{0}` are excited and ground state populations
satisfying :math:`P_{1} + P_{0} = 1`, and thermal state populations also satisfy
:math:`\frac{P_{1}}{P_{0}} := e^{\omega/T}`.
Parameters
----------
freq : float
frequency of the qubit
temp : float
temperature of the qubit
Returns
-------
float
qubit polarisation, i.e. difference betwennn ground and excited state populations.
Examples
--------
# TODO
"""
populationRatio = exp(-freq/temp)
groundPop = 1/(1+populationRatio)
return 1 - (2*groundPop)
def HeatCurrent(Lindbladian: Matrix, Hamiltonian: Matrix, denMat: Matrix) -> float:
r"""
Calculates the heat current :math:`\mathcal{J}:=Tr(\dot{\rho}\hat{H})` due to given Lindbladian
:math:`\hat{\mathcal{L}}`.
Here, :math:`\hat{H}` is the system Hamiltonian, and the time derivative of density matrix is
:math:`\dot{\rho}mathcal{L}`. It does not strictly speaking have to be
a Lindbladian but any combination of terms from a Liouvillian. Disclaimer: physical meaning of those terms is not
and cannot be interpreted by this function.
TODO Write a bit of the theory here to better explain this function.
Parameters
----------
Lindbladian : Matrix
a Lindbladian or any combination of terms from a Liouvillian
Hamiltonian : Matrix
Hamiltonian of the system
denMat : Matrix
Density matrix (state) of the system
Returns
-------
float
Heat current
Examples
--------
# TODO
"""
full = mat2Vec(Lindbladian * vec2Mat(denMat))
heatCurrent = real((full * Hamiltonian).tr())
return heatCurrent
|
{"hexsha": "f3973b698e44ef5d2b7eb329fbc74defb1ee87d0", "size": 3458, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/quanguru/QuantumToolbox/thermodynamics.py", "max_stars_repo_name": "Qfabiolous/QuanGuru", "max_stars_repo_head_hexsha": "285ca44ae857cc61337f73ea2eb600f485a09e32", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/quanguru/QuantumToolbox/thermodynamics.py", "max_issues_repo_name": "Qfabiolous/QuanGuru", "max_issues_repo_head_hexsha": "285ca44ae857cc61337f73ea2eb600f485a09e32", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/quanguru/QuantumToolbox/thermodynamics.py", "max_forks_repo_name": "Qfabiolous/QuanGuru", "max_forks_repo_head_hexsha": "285ca44ae857cc61337f73ea2eb600f485a09e32", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.8166666667, "max_line_length": 120, "alphanum_fraction": 0.6333140544, "include": true, "reason": "from numpy", "num_tokens": 929}
|
import os
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import requests
import streamlit as st
from azure.storage.blob import BlobServiceClient
from dotenv import load_dotenv
from pandas_profiling import ProfileReport
from plotly.subplots import make_subplots
from streamlit_pandas_profiling import st_profile_report
from blobstorageclient import BlobStorageClient
load_dotenv()
DATASTORAGE = os.getenv("DATASTORAGE")
DATACONTAINER = os.getenv("DATACONTAINER")
try:
blob_service_client = BlobServiceClient.from_connection_string(DATASTORAGE)
container_client = blob_service_client.get_container_client(DATACONTAINER)
blob_client: BlobStorageClient = BlobStorageClient(container_client)
except Exception as e:
st.error(f"EXCEPTION while connecting to Azure Blob Storage {e}")
st.set_page_config(
page_title="Demo app for Azure",
page_icon="https://streamlit.io/favicon.svg",
)
st.title("Self Service Analytics")
st.markdown(
"This is an internal tool for self service data analytics - Contact XYZ for more info"
)
st.sidebar.title("Workflow selector")
st.sidebar.markdown("Select the workflow accordingly:")
@st.cache(allow_output_mutation=True)
def gen_profile_report(df, *report_args, **report_kwargs):
return df.profile_report(*report_args, **report_kwargs)
def check_az_func(url):
JOB_STATUS = "Running"
while JOB_STATUS == "Running" or JOB_STATUS == "Pending":
time.sleep(5)
r = requests.get(url)
# st.write(f"Job Status: {JOB_STATUS}")
st.json(r.json())
JOB_STATUS = r.json()["runtimeStatus"]
if JOB_STATUS == "failed":
st.error(f"Job failed. Please check logs")
break
elif JOB_STATUS == "succeeded":
st.info(f"Job succeeded")
st.json(r.json())
def run_azfunc(rawDataPath="raw"):
header = {"Content-Type": "application/json"}
r = requests.post(
"https://process-files.azurewebsites.net/api/orchestrators/OrchestratorFunc",
json={"rawDataPath": rawDataPath, "numFiles": "1", "numRows": "10"},
headers=header,
)
# st.write(r.json()) # show the response from Azure functions
check_az_func(url=r.json()["statusQueryGetUri"])
def gen_pandas_stats(df):
st.write("Number of columns in the dataset: ", df.shape[1])
st.write("Number of rows in the dataset: ", df.shape[0])
st.write(df.head(5))
pr = gen_profile_report(df, explorative=True)
with st.expander("REPORT", expanded=True):
st_profile_report(pr)
def main():
st.sidebar.markdown("**1.** Select the **Data**:")
data = st.sidebar.selectbox("Data", ["Uploaded Data", "Sample Data"])
st.sidebar.markdown("**2.** Select the **Validate/Clean**:")
validateclean = st.sidebar.selectbox(
"Validate/Clean",
["Clean using Azure Functions", "Clean using Spark"],
)
st.sidebar.markdown("**3.** Select the **Score using ML**:")
score = st.sidebar.selectbox("Train ML", ["Model1", "Model2", "Model3"])
st.sidebar.markdown("**4.** Select the **Save Output**:")
save = st.sidebar.selectbox(
"Save", ["Save results to Azure Blob Storage", "Save to CSV locally"]
)
dataframe = pd.DataFrame()
filename = ""
st.subheader("Step - 1: Upload the data")
if data == "Uploaded Data":
uploaded_file = st.file_uploader("Choose a file")
if uploaded_file is not None:
filename = uploaded_file.name
# Can be used wherever a "file-like" object is accepted:
dataframe = pd.read_csv(uploaded_file)
gen_pandas_stats(dataframe)
elif data == "Sample Data":
st.subheader("Sample Data generated with 100 rows")
filename = "random_data.csv"
dataframe = blob_client.gen_data()
gen_pandas_stats(dataframe)
st.subheader("Step - 2: Clean the Data")
if st.button(label=f"Run - {validateclean}", key="run-azfunc"):
if validateclean == "Clean using Azure Functions":
run_azfunc()
elif validateclean == "Clean using Azure Databricks":
st.write("Clean using Azure Databricks")
st.subheader("Step - 3: Choose the model to apply")
if st.button(label=f"Train ML - {score}", key="apply-ml"):
st.write("Training the model")
# TODO: Add code to call Azure ML to score the data
st.subheader("Step - 4: Choose the output")
if st.button(label=f"Download - {save}", key="download-file"):
if save == "Save results to Azure Blob Storage":
try:
blob_client.upload_pd_dataframe(
dataframe, path=filename, metadata={"type": "csv"}
)
except Exception as e:
st.error(f"EXCEPTION while uploading file {e}")
elif save == "Save to CSV locally":
print(type(dataframe))
dataframe.to_csv(f"/tmp/{filename}", index=False)
try:
st.download_button(
label="File ready. Click to download",
data="trees",
file_name=f"/tmp/{filename}",
mime="text/plain",
)
except Exception as e:
st.error(f"EXCEPTION while saving file {e}")
if __name__ == "__main__":
main()
|
{"hexsha": "be1c60aaaa16a91a7810dedbe47619ae36afb6f8", "size": 5422, "ext": "py", "lang": "Python", "max_stars_repo_path": "ui/app.py", "max_stars_repo_name": "lordlinus/parallel-file-processing-serverless", "max_stars_repo_head_hexsha": "751830c3edfafd935e14fdc2ffbbe28dcb704170", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ui/app.py", "max_issues_repo_name": "lordlinus/parallel-file-processing-serverless", "max_issues_repo_head_hexsha": "751830c3edfafd935e14fdc2ffbbe28dcb704170", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ui/app.py", "max_forks_repo_name": "lordlinus/parallel-file-processing-serverless", "max_forks_repo_head_hexsha": "751830c3edfafd935e14fdc2ffbbe28dcb704170", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.7564102564, "max_line_length": 90, "alphanum_fraction": 0.64459609, "include": true, "reason": "import numpy", "num_tokens": 1239}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.