content stringlengths 6 1.03M | input_ids listlengths 4 535k | ratio_char_token float64 0.68 8.61 | token_count int64 4 535k |
|---|---|---|---|
using ApproxFun, LinearAlgebra, Plots
# In this example, we solve the heat equation with left Neumann and
# right Dirichlet boundary conditions.
# To allow for a solution through matrix exponentiation, we approximate
# the initial condition with a basis that automatically constructs
# even and odd extensions on the left and right boundaries respectively. (Method of images)
n = 100
xlim = 10
xarray = (chebyshevpoints(n) .+ 1) .* xlim/2 # Chebyshev points on 0..xlim
r = 1.0 # right boundary value (left is assumed to be zero)
S = CosSpace(0..4*xlim)
u₀(x) = atan(x)*(π + atan(x-9) + atan(5-x))/π^2 - r # subtract by `r` to homogenise the right BC
plot(xarray, u₀)
v = u₀.(xarray) # values to use in fitting
m = div(n,2) # m << n for regularisation
# Create a Vandermonde matrix by evaluating the basis at the grid:
# We ensure the basis satisfies the boundary conditions by using
# the entries corresponding to {cos(x), cos(3x), cos(5x), ...}.
V = zeros(n, m)
for k = 1:m
V[:,k] = Fun(S,[zeros(2*k-1);1]).(xarray)
end
c₀ = zeros(2*m)
c₀[2:2:end] = V\v # do the fit
f = Fun(S, c₀) # Likely not a good approximation due to boundary discontinuities, but high frequencies will disappear quickly anyway
plot([xarray.+10; xarray; xarray.-10], f, xlims=(-5, 15))
scatter!(xarray, u₀, markersize=0.2)
L = Derivative(S, 2)
L = Matrix(L[1:2m,1:2m])
@manipulate for T=[zeros(5); 0.0:0.01:1]
u_t = Fun(S, exp(T*L)*c₀) + r # calculate solution and add back homogenisation term
plot(-0:0.05:10, u_t, ylims=(0,1), legend=:none)
end
@gif for T=[zeros(5); 0.0:0.5:50]
u_t = Fun(S, exp(T*L)*c₀) + r # calculate solution and add back homogenisation term
plot(-0:0.05:10, u_t, ylims=(0,1), legend=:none)
end
| [
3500,
2034,
13907,
24629,
11,
44800,
2348,
29230,
11,
1345,
1747,
198,
198,
2,
554,
428,
1672,
11,
356,
8494,
262,
4894,
16022,
351,
1364,
3169,
40062,
290,
198,
2,
826,
36202,
488,
1616,
18645,
3403,
13,
198,
2,
1675,
1249,
329,
25... | 2.531479 | 683 |
struct MinDomainVariableSelection{TakeObjective} <: AbstractVariableSelection{TakeObjective} end
MinDomainVariableSelection(;take_objective=true) = MinDomainVariableSelection{take_objective}()
function (::MinDomainVariableSelection{false})(cpmodel::CPModel)
selectedVar = nothing
minSize = typemax(Int)
for (k, x) in branchable_variables(cpmodel)
if x !== cpmodel.objective && !isbound(x) && length(x.domain) < minSize
selectedVar = x
minSize = length(x.domain)
end
end
if isnothing(selectedVar) && !isbound(cpmodel.objective)
return cpmodel.objective
end
return selectedVar
end
function (::MinDomainVariableSelection{true})(cpmodel::CPModel)
selectedVar = nothing
minSize = typemax(Int)
for (k, x) in branchable_variables(cpmodel)
if !isbound(x) && length(x.domain) < minSize
selectedVar = x
minSize = length(x.domain)
end
end
return selectedVar
end
| [
7249,
1855,
43961,
43015,
4653,
1564,
90,
12322,
10267,
425,
92,
1279,
25,
27741,
43015,
4653,
1564,
90,
12322,
10267,
425,
92,
886,
201,
198,
201,
198,
9452,
43961,
43015,
4653,
1564,
7,
26,
20657,
62,
15252,
425,
28,
7942,
8,
796,
... | 2.323462 | 439 |
<reponame>kogpsy/vpompd<filename>src/playground.jl
using StatsFuns: logistic
using Plots
βs = [0, 0.2, 0.5, 0.8, 1.0, 1.2, 1.5, 2, 3, 5, 7, 8, 10, 20]
plot([x -> logistic(β * x) for β in βs],
-2, 2,
alpha = 0.8,
legend = :none,
ylims = [0, 1])
| [
27,
7856,
261,
480,
29,
74,
519,
13764,
14,
36133,
3361,
67,
27,
34345,
29,
10677,
14,
1759,
2833,
13,
20362,
198,
3500,
20595,
37,
13271,
25,
2604,
2569,
198,
3500,
1345,
1747,
198,
198,
26638,
82,
796,
685,
15,
11,
657,
13,
17,
... | 1.782051 | 156 |
<gh_stars>0
function HHG(f::TVF) where {TVF<:AbstractVector{<:AbstractFloat}}
n = length(f)
spec = fft(f)
return abs.(spec) .^ 2 / n^2
end
function HHGfreq(f::TVF, ncyc) where {TVF<:AbstractVector{<:AbstractFloat}}
x = collect(range(1, length(f), length = length(f)))
return x ./ ncyc
end
| [
27,
456,
62,
30783,
29,
15,
198,
8818,
47138,
38,
7,
69,
3712,
6849,
37,
8,
810,
1391,
6849,
37,
27,
25,
23839,
38469,
90,
27,
25,
23839,
43879,
11709,
198,
220,
220,
220,
299,
796,
4129,
7,
69,
8,
198,
220,
220,
220,
1020,
79... | 2.30597 | 134 |
<filename>src/integrators/rk/integrators_iprk.jl
"Parameters for right-hand side function of implicit partitioned Runge-Kutta methods."
mutable struct ParametersIPRK{DT, TT, D, S, ET <: NamedTuple} <: Parameters{DT,TT}
equs::ET
tab::PartitionedTableau{TT}
Δt::TT
t::TT
q::Vector{DT}
p::Vector{DT}
function ParametersIPRK{DT,D}(equs::ET, tab::PartitionedTableau{TT}, Δt::TT) where {D, DT, TT, ET <: NamedTuple}
new{DT, TT, D, tab.s, ET}(equs, tab, Δt, zero(TT), zeros(DT,D), zeros(DT,D))
end
end
@doc raw"""
Implicit partitioned Runge-Kutta integrator cache.
### Fields
* `q̃`: initial guess of q
* `p̃`: initial guess of p
* `ṽ`: initial guess of v
* `f̃`: initial guess of f
* `s̃`: holds shift due to periodicity of solution
* `Q`: internal stages of q
* `P`: internal stages of p
* `V`: internal stages of v
* `F`: internal stages of f
* `Y`: vector field of internal stages of q
* `Z`: vector field of internal stages of p
"""
struct IntegratorCacheIPRK{ST,D,S} <: PODEIntegratorCache{ST,D}
q̃::Vector{ST}
p̃::Vector{ST}
ṽ::Vector{ST}
f̃::Vector{ST}
s̃::Vector{ST}
Q::Vector{Vector{ST}}
P::Vector{Vector{ST}}
V::Vector{Vector{ST}}
F::Vector{Vector{ST}}
Y::Vector{Vector{ST}}
Z::Vector{Vector{ST}}
function IntegratorCacheIPRK{ST,D,S}() where {ST,D,S}
# create temporary vectors
q̃ = zeros(ST,D)
p̃ = zeros(ST,D)
ṽ = zeros(ST,D)
f̃ = zeros(ST,D)
s̃ = zeros(ST,D)
# create internal stage vectors
Q = create_internal_stage_vector(ST, D, S)
P = create_internal_stage_vector(ST, D, S)
V = create_internal_stage_vector(ST, D, S)
F = create_internal_stage_vector(ST, D, S)
Y = create_internal_stage_vector(ST, D, S)
Z = create_internal_stage_vector(ST, D, S)
new(q̃, p̃, ṽ, f̃, s̃, Q, P, V, F, Y, Z)
end
end
function IntegratorCache(params::ParametersIPRK{DT,TT,D,S}; kwargs...) where {DT,TT,D,S}
IntegratorCacheIPRK{DT,D,S}(; kwargs...)
end
function IntegratorCache{ST}(params::ParametersIPRK{DT,TT,D,S}; kwargs...) where {ST,DT,TT,D,S}
IntegratorCacheIPRK{ST,D,S}(; kwargs...)
end
@inline CacheType(ST, params::ParametersIPRK{DT,TT,D,S}) where {DT,TT,D,S} = IntegratorCacheIPRK{ST,D,S}
@doc raw"""
Implicit partitioned Runge-Kutta integrator solving the system
```math
\begin{aligned}
V_{n,i} &= v (Q_{n,i}, P_{n,i}) , &
Q_{n,i} &= q_{n} + h \sum \limits_{j=1}^{s} a_{ij} \, V_{n,j} , &
q_{n+1} &= q_{n} + h \sum \limits_{i=1}^{s} b_{i} \, V_{n,i} , \\
F_{n,i} &= f (Q_{n,i}, P_{n,i}) , &
P_{n,i} &= p_{n} + h \sum \limits_{i=1}^{s} \bar{a}_{ij} \, F_{n,j} , &
p_{n+1} &= p_{n} + h \sum \limits_{i=1}^{s} \bar{b}_{i} \, F_{n,i} ,
\end{aligned}
```
Usually we are interested in Hamiltonian systems, where
```math
\begin{aligned}
V_{n,i} &= \dfrac{\partial H}{\partial p} (Q_{n,i}, P_{n,i}) , &
F_{n,i} &= - \dfrac{\partial H}{\partial q} (Q_{n,i}, P_{n,i}) ,
\end{aligned}
```
and tableaus satisfying the symplecticity conditions
```math
\begin{aligned}
b_{i} \bar{a}_{ij} + \bar{b}_{j} a_{ji} &= b_{i} \bar{b}_{j} , &
\bar{b}_i &= b_i .
\end{aligned}
```
"""
struct IntegratorIPRK{DT, TT, D, S, PT <: ParametersIPRK{DT,TT},
ST <: NonlinearSolver{DT},
IT <: InitialGuessPODE{TT}} <: AbstractIntegratorPRK{DT,TT}
params::PT
solver::ST
iguess::IT
caches::CacheDict{PT}
function IntegratorIPRK(params::ParametersIPRK{DT,TT,D,S}, solver::ST, iguess::IT, caches) where {DT,TT,D,S,ST,IT}
new{DT, TT, D, S, typeof(params), ST, IT}(params, solver, iguess, caches)
end
function IntegratorIPRK{DT,D}(equations::NamedTuple, tableau::PartitionedTableau{TT}, Δt::TT) where {DT,TT,D}
# get number of stages
S = tableau.s
# create params
params = ParametersIPRK{DT,D}(equations, tableau, Δt)
# create cache dict
caches = CacheDict(params)
# create solver
solver = create_nonlinear_solver(DT, 2*D*S, params, caches)
# create initial guess
iguess = InitialGuessPODE(get_config(:ig_extrapolation), equations[:v], equations[:f], Δt)
# create integrator
IntegratorIPRK(params, solver, iguess, caches)
end
function IntegratorIPRK{DT,D}(v::Function, f::Function, tableau::PartitionedTableau{TT}, Δt::TT; kwargs...) where {DT,TT,D}
IntegratorIPRK{DT,D}(NamedTuple{(:v,:f)}((v,f)), tableau, Δt; kwargs...)
end
function IntegratorIPRK{DT,D}(v::Function, f::Function, h::Function, tableau::PartitionedTableau{TT}, Δt::TT; kwargs...) where {DT,TT,D}
IntegratorIPRK{DT,D}(NamedTuple{(:v,:f,:h)}((v,f,h)), tableau, Δt; kwargs...)
end
function IntegratorIPRK(equation::Union{PODE{DT}, HODE{DT}}, tableau::PartitionedTableau{TT}, Δt::TT; kwargs...) where {DT,TT}
IntegratorIPRK{DT, ndims(equation)}(get_functions(equation), tableau, Δt; kwargs...)
end
end
@inline Base.ndims(::IntegratorIPRK{DT,TT,D,S}) where {DT,TT,D,S} = D
function initialize!(int::IntegratorIPRK, sol::AtomicSolutionPODE)
sol.t̄ = sol.t - timestep(int)
equations(int)[:v](sol.t, sol.q, sol.p, sol.v)
equations(int)[:f](sol.t, sol.q, sol.p, sol.f)
initialize!(int.iguess, sol.t, sol.q, sol.p, sol.v, sol.f,
sol.t̄, sol.q̄, sol.p̄, sol.v̄, sol.f̄)
end
function update_params!(int::IntegratorIPRK, sol::AtomicSolutionPODE)
# set time for nonlinear solver and copy previous solution
int.params.t = sol.t
int.params.q .= sol.q
int.params.p .= sol.p
end
function initial_guess!(int::IntegratorIPRK{DT}, sol::AtomicSolutionPODE{DT},
cache::IntegratorCacheIPRK{DT}=int.caches[DT]) where {DT}
for i in eachstage(int)
evaluate!(int.iguess, sol.q̄, sol.p̄, sol.v̄, sol.f̄,
sol.q, sol.p, sol.v, sol.f,
cache.Q[i], cache.P[i], cache.V[i], cache.F[i],
tableau(int).q.c[i], tableau(int).p.c[i])
end
for i in eachstage(int)
for k in eachdim(int)
int.solver.x[2*(ndims(int)*(i-1)+k-1)+1] = 0
int.solver.x[2*(ndims(int)*(i-1)+k-1)+2] = 0
for j in eachstage(int)
int.solver.x[2*(ndims(int)*(i-1)+k-1)+1] += tableau(int).q.a[i,j] * cache.V[j][k]
int.solver.x[2*(ndims(int)*(i-1)+k-1)+2] += tableau(int).p.a[i,j] * cache.F[j][k]
end
end
end
end
function compute_stages!(x::Vector{ST}, Q::Vector{Vector{ST}}, V::Vector{Vector{ST}}, Y::Vector{Vector{ST}},
P::Vector{Vector{ST}}, F::Vector{Vector{ST}}, Z::Vector{Vector{ST}},
params::ParametersIPRK{DT,TT,D,S}) where {ST,DT,TT,D,S}
local tqᵢ::TT
local tpᵢ::TT
for i in 1:S
for k in 1:D
# copy y to Y and Z
Y[i][k] = x[2*(D*(i-1)+k-1)+1]
Z[i][k] = x[2*(D*(i-1)+k-1)+2]
# compute Q and P
Q[i][k] = params.q[k] + params.Δt * Y[i][k]
P[i][k] = params.p[k] + params.Δt * Z[i][k]
end
# compute time of internal stage
tqᵢ = params.t + params.Δt * params.tab.q.c[i]
tpᵢ = params.t + params.Δt * params.tab.p.c[i]
# compute v(Q,P) and f(Q,P)
params.equs[:v](tqᵢ, Q[i], P[i], V[i])
params.equs[:f](tpᵢ, Q[i], P[i], F[i])
end
end
# Compute stages of implicit partitioned Runge-Kutta methods.
function function_stages!(x::Vector{ST}, b::Vector{ST}, params::ParametersIPRK{DT,TT,D,S},
caches::CacheDict) where {ST,DT,TT,D,S}
# get cache for internal stages
cache = caches[ST]
# compute stages from nonlinear solver solution x
compute_stages!(x, cache.Q, cache.V, cache.Y, cache.P, cache.F, cache.Z, params)
# compute b = - [(Y-AV), (Z-AF)]
for i in 1:S
for k in 1:D
b[2*(D*(i-1)+k-1)+1] = - cache.Y[i][k]
b[2*(D*(i-1)+k-1)+2] = - cache.Z[i][k]
for j in 1:S
b[2*(D*(i-1)+k-1)+1] += params.tab.q.a[i,j] * cache.V[j][k]
b[2*(D*(i-1)+k-1)+2] += params.tab.p.a[i,j] * cache.F[j][k]
end
end
end
end
function integrate_step!(int::IntegratorIPRK{DT,TT}, sol::AtomicSolutionPODE{DT,TT},
cache::IntegratorCacheIPRK{DT}=int.caches[DT]) where {DT,TT}
# update nonlinear solver parameters from atomic solution
update_params!(int, sol)
# compute initial guess
initial_guess!(int, sol, cache)
# reset atomic solution
reset!(sol, timestep(int))
# call nonlinear solver
solve!(int.solver)
# print solver status
print_solver_status(int.solver.status, int.solver.params)
# check if solution contains NaNs or error bounds are violated
check_solver_status(int.solver.status, int.solver.params)
# compute vector fields at internal stages
compute_stages!(int.solver.x, cache.Q, cache.V, cache.Y,
cache.P, cache.F, cache.Z, int.params)
# compute final update
update_solution!(sol.q, sol.q̃, cache.V, tableau(int).q.b, tableau(int).q.b̂, timestep(int))
update_solution!(sol.p, sol.p̃, cache.F, tableau(int).p.b, tableau(int).p.b̂, timestep(int))
# copy solution to initial guess
update_vector_fields!(int.iguess, sol.t, sol.q, sol.p, sol.v, sol.f)
end
| [
27,
34345,
29,
10677,
14,
18908,
18942,
14,
81,
74,
14,
18908,
18942,
62,
541,
81,
74,
13,
20362,
198,
198,
1,
48944,
329,
826,
12,
4993,
1735,
2163,
286,
16992,
18398,
276,
5660,
469,
12,
42,
315,
8326,
5050,
526,
198,
76,
18187,... | 1.979626 | 4,810 |
abstract type AbstractTableauSplitting{T <: Real} <: AbstractTableau{T} end
function get_splitting_coefficients(r, a::Vector{T}, b::Vector{T}) where {T}
@assert length(a) == length(b)
s = length(a)
f = zeros(Int, 2r*s)
c = zeros(T, 2r*s)
for i in 1:s
for j in 1:r
f[(2i-2)*r+j] = j
c[(2i-2)*r+j] = a[i]
end
for j in 1:r
f[(2i-1)*r+j] = r-j+1
c[(2i-1)*r+j] = b[i]
end
end
return f, c
end
@doc raw"""
Tableau for general splitting methods for vector fields with two components A and B.
Integrator:
```math
\varphi_{\tau} = \varphi_{b_s \tau}^{B} \circ \varphi_{a_s \tau}^{A} \circ \dotsc \circ \varphi_{b_1 \tau}^{B} \circ \varphi_{a_1 \tau}^{A}
```
"""
struct TableauSplitting{T} <: AbstractTableauSplitting{T}
@HeaderTableau
a::Vector{T}
b::Vector{T}
function TableauSplitting{T}(name, o, s, a, b) where {T}
@assert s == length(a) == length(b)
new(name, o, s, a, b)
end
end
function TableauSplitting(name, o, a::Vector{T}, b::Vector{T}) where {T}
TableauSplitting{T}(name, o, length(a), a, b)
end
function get_splitting_coefficients(nequs, tableau::TableauSplitting{T}) where {T}
@assert nequs == 2
@assert length(tableau.a) == length(tableau.b)
s = length(tableau.a)
f = zeros(Int, 2s)
c = zeros(T, 2s)
for i in eachindex(tableau.a, tableau.b)
f[2*(i-1)+1] = 1
c[2*(i-1)+1] = tableau.a[i]
f[2*(i-1)+2] = 2
c[2*(i-1)+2] = tableau.b[i]
end
# select all entries with non-vanishing coefficients
y = c .!= 0
return f[y], c[y]
end
@doc raw"""
Tableau for non-symmetric splitting methods.
See <NAME>, 2003, Equ. (4.10).
The methods A and B are the composition of all vector fields in the SODE
and its adjoint, respectively.
Basic method: Lie composition
```math
\begin{aligned}
\varphi_{\tau}^{A} &= \varphi_{\tau}^{v_1} \circ \varphi_{\tau}^{v_2} \circ \dotsc \circ \varphi_{\tau}^{v_{r-1}} \circ \varphi_{\tau}^{v_r} \\
\varphi_{\tau}^{B} &= \varphi_{\tau}^{v_r} \circ \varphi_{\tau}^{v_{r-1}} \circ \dotsc \circ \varphi_{\tau}^{v_2} \circ \varphi_{\tau}^{v_1}
\end{aligned}
```
Integrator:
```math
\varphi_{\tau}^{NS} = \varphi_{b_s \tau}^{B} \circ \varphi_{a_s \tau}^{A} \circ \dotsc \circ \varphi_{b_1 \tau}^{B} \circ \varphi_{a_1 \tau}^{A}
```
"""
struct TableauSplittingNS{T} <: AbstractTableauSplitting{T}
@HeaderTableau
a::Vector{T}
b::Vector{T}
function TableauSplittingNS{T}(name, o, s, a, b) where {T}
@assert s == length(a) == length(b)
new(name, o, s, a, b)
end
end
function TableauSplittingNS(name, o, a::Vector{T}, b::Vector{T}) where {T}
TableauSplittingNS{T}(name, o, length(a), a, b)
end
function get_splitting_coefficients(nequs, tableau::TableauSplittingNS)
# R = length(equation.v)
# S = tableau.s
#
# f = zeros(Int, 2R*S)
# c = zeros(TT, 2R*S)
#
# for i in 1:S
# for j in 1:R
# f[(2i-2)*R+j] = j
# c[(2i-2)*R+j] = tableau.a[i]
# end
# for j in 1:R
# f[(2i-1)*R+j] = R-j+1
# c[(2i-1)*R+j] = tableau.b[i]
# end
# end
get_splitting_coefficients(nequs, tableau.a, tableau.b)
end
@doc raw"""
Tableau for symmetric splitting methods with general stages.
See McLachlan, Quispel, 2003, Equ. (4.11).
Basic method: Lie composition
```math
\begin{aligned}
\varphi_{\tau}^{A} &= \varphi_{\tau}^{v_1} \circ \varphi_{\tau}^{v_2} \circ \dotsc \circ \varphi_{\tau}^{v_{r-1}} \circ \varphi_{\tau}^{v_r} \\
\varphi_{\tau}^{B} &= \varphi_{\tau}^{v_r} \circ \varphi_{\tau}^{v_{r-1}} \circ \dotsc \circ \varphi_{\tau}^{v_2} \circ \varphi_{\tau}^{v_1}
\end{aligned}
```
Integrator:
```math
\varphi_{\tau}^{GS} = \varphi_{a_1 \tau}^{A} \circ \varphi_{b_1 \tau}^{B} \circ \dotsc \circ \varphi_{b_1 \tau}^{B} \circ \varphi_{a_1 \tau}^{A}
```
"""
struct TableauSplittingGS{T} <: AbstractTableauSplitting{T}
@HeaderTableau
a::Vector{T}
b::Vector{T}
function TableauSplittingGS{T}(name, o, s, a, b) where {T}
@assert s == length(a) == length(b)
new(name, o, s, a, b)
end
end
function TableauSplittingGS(name, o, a::Vector{T}, b::Vector{T}) where {T}
TableauSplittingGS{T}(name, o, length(a), a, b)
end
function get_splitting_coefficients(nequs, tableau::TableauSplittingGS)
# R = nequs
# S = tableau.s
#
# f = zeros(Int, 2R*S)
# c = zeros(TT, 2R*S)
#
# for i in 1:S
# for j in 1:R
# f[(2i-2)*R+j] = j
# c[(2i-2)*R+j] = tableau.a[i]
# end
# for j in R:-1:1
# f[(2i-1)*R+j] = R-j+1
# c[(2i-1)*R+j] = tableau.b[i]
# end
# end
f, c = get_splitting_coefficients(nequs, tableau.a, tableau.b)
vcat(f, f[end:-1:1]), vcat(c, c[end:-1:1])
end
@doc raw"""
Tableau for symmetric splitting methods with symmetric stages.
See <NAME>, 2003, Equ. (4.6).
Basic method: symmetric Strang composition
```math
\varphi_{\tau}^{A} = \varphi_{\tau/2}^{v_1} \circ \varphi_{\tau/2}^{v_2} \circ \dotsc \circ \varphi_{\tau/2}^{v_{r-1}} \circ \varphi_{\tau/2}^{v_r} \circ \varphi_{\tau/2}^{v_r} \circ \varphi_{\tau/2}^{v_{r-1}} \circ \dotsc \circ \varphi_{\tau/2}^{v_2} \circ \varphi_{\tau/2}^{v_1}
```
Integrator:
```math
\varphi_{\tau}^{SS} = \varphi_{a_1 \tau}^{A} \circ \varphi_{a_2 \tau}^{A} \circ \dotsc \circ \varphi_{a_s \tau}^{A} \circ \dotsc \circ \varphi_{a_2 \tau}^{A} \circ \varphi_{a_1 \tau}^{A}
```
"""
struct TableauSplittingSS{T} <: AbstractTableauSplitting{T}
@HeaderTableau
a::Vector{T}
function TableauSplittingSS{T}(name, o, s, a) where {T}
@assert s == length(a)
new(name, o, s, a)
end
end
function TableauSplittingSS(name, o, a::Vector{T}) where {T}
TableauSplittingSS{T}(name, o, length(a), a)
end
function get_splitting_coefficients(nequs, tableau::TableauSplittingSS{T}) where {T}
r = nequs
a = vcat(tableau.a, tableau.a[end-1:-1:1]) ./ 2
s = length(a)
f = zeros(Int, 2r*s)
c = zeros(T, 2r*s)
for i in 1:s
for j in 1:r
f[(2i-2)*r+j] = j
c[(2i-2)*r+j] = a[i]
f[(2i-1)*r+j] = r-j+1
c[(2i-1)*r+j] = a[i]
end
end
f, c
end
| [
198,
397,
8709,
2099,
27741,
10962,
559,
26568,
2535,
90,
51,
1279,
25,
6416,
92,
1279,
25,
27741,
10962,
559,
90,
51,
92,
886,
198,
198,
8818,
651,
62,
22018,
2535,
62,
1073,
41945,
7,
81,
11,
257,
3712,
38469,
90,
51,
5512,
275,... | 1.933577 | 3,282 |
t = Template(; user=me)
pkg_dir = joinpath(t.dir, test_pkg)
@testset "AppVeyor" begin
@testset "Plugin creation" begin
p = AppVeyor()
@test isempty(p.gitignore)
@test p.src == joinpath(PkgTemplates.DEFAULTS_DIR, "appveyor.yml")
@test p.dest == ".appveyor.yml"
@test p.badges == [
Badge(
"Build Status",
"https://ci.appveyor.com/api/projects/status/github/{{USER}}/{{PKGNAME}}.jl?svg=true",
"https://ci.appveyor.com/project/{{USER}}/{{PKGNAME}}-jl",
),
]
@test isempty(p.view)
p = AppVeyor(; config_file=nothing)
@test p.src === nothing
p = AppVeyor(; config_file=test_file)
@test p.src == test_file
@test_throws ArgumentError AppVeyor(; config_file=fake_path)
end
@testset "Badge generation" begin
p = AppVeyor()
@test badges(p, me, test_pkg) == ["[](https://ci.appveyor.com/project/$me/$test_pkg-jl)"]
end
@testset "File generation" begin
# Without a coverage plugin in the template, there should be no post-test step.
p = AppVeyor()
@test gen_plugin(p, t, test_pkg) == [".appveyor.yml"]
@test isfile(joinpath(pkg_dir, ".appveyor.yml"))
appveyor = read(joinpath(pkg_dir, ".appveyor.yml"), String)
@test !occursin("on_success", appveyor)
@test !occursin("%JL_CODECOV_SCRIPT%", appveyor)
rm(joinpath(pkg_dir, ".appveyor.yml"))
# Generating the plugin with Codecov in the template should create a post-test step.
t.plugins[Codecov] = Codecov()
gen_plugin(p, t, test_pkg)
delete!(t.plugins, Codecov)
appveyor = read(joinpath(pkg_dir, ".appveyor.yml"), String)
@test occursin("on_success", appveyor)
@test occursin("%JL_CODECOV_SCRIPT%", appveyor)
rm(joinpath(pkg_dir, ".appveyor.yml"))
# TODO: Add Coveralls tests when AppVeyor.jl supports it.
p = AppVeyor(; config_file=nothing)
@test isempty(gen_plugin(p, t, test_pkg))
@test !isfile(joinpath(pkg_dir, ".appveyor.yml"))
end
end
rm(pkg_dir; recursive=true)
| [
83,
796,
37350,
7,
26,
2836,
28,
1326,
8,
198,
35339,
62,
15908,
796,
4654,
6978,
7,
83,
13,
15908,
11,
1332,
62,
35339,
8,
198,
198,
31,
9288,
2617,
366,
4677,
53,
2959,
273,
1,
2221,
198,
220,
220,
220,
2488,
9288,
2617,
366,
... | 2.093002 | 1,086 |
<filename>docs/make.jl
using Documenter
using MPIMeasurements
makedocs(
sitename = "MPIMeasurements",
authors = "<NAME> et al.",
format = Documenter.HTML(prettyurls = false),
modules = [MPIMeasurements],
pages = [
"Home" => "index.md",
"Manual" => Any[
"Guide" => "man/guide.md",
"Devices" => "man/devices.md",
"Protocols" => "man/protocols.md",
"Sequences" => "man/sequences.md",
"Examples" => "man/examples.md",
],
"Library" => Any[
"Public" => "lib/public.md",
"Internals" => map(
s -> "lib/internals/$(s)",
sort(readdir(normpath(@__DIR__, "src/lib/internals")))
),
],
],
)
# deploydocs(repo = "github.com/MagneticParticleImaging/MPIMeasurements.jl.git",
# target = "build")
| [
27,
34345,
29,
31628,
14,
15883,
13,
20362,
198,
3500,
16854,
263,
198,
3500,
4904,
3955,
68,
5015,
902,
198,
198,
76,
4335,
420,
82,
7,
198,
220,
220,
220,
1650,
12453,
796,
366,
7378,
3955,
68,
5015,
902,
1600,
198,
220,
220,
22... | 1.947137 | 454 |
<reponame>ericphanson/GuessworkQuantumSideInfo.jl<filename>test/high_precision_tests/tests.jl
using GuessworkQuantumSideInfo
using SDPAFamily
using Test
using LinearAlgebra, GenericLinearAlgebra
# Fix a compatibility problem
LinearAlgebra.eigmin(A::Hermitian{Complex{BigFloat},Array{Complex{BigFloat},2}}) =
minimum(real.(eigvals(A)))
function testPOVM(Es; tol = 1e-25)
dB = size(first(Es), 1)
@test sum(Es) ≈ complex(1.0) * I(dB) atol = tol
for E in Es
@test E ≈ E' atol = tol
end
@test all(isposdef, Hermitian(E) + tol * I(dB) for E in Es)
end
T = BigFloat
default_sdp_solver(T) = SDPAFamily.Optimizer{T}(presolve = true)
@testset "BB84" begin
ρBs = GuessworkQuantumSideInfo.BB84_states(T)
p = ones(T, 4) / 4
output = guesswork(p, ρBs; solver = default_sdp_solver(T))
testPOVM(output.Es)
relaxed_output =
guesswork_upper_bound(p, ρBs; num_constraints = 4, make_solver = () -> default_sdp_solver(T))
true_val = (big(1) / big(4)) * (10 - sqrt(big(10)))
@test output.optval ≈ true_val rtol = 1e-25
@test output.optval ≈ relaxed_output.optval rtol = 1e-4
lb = guesswork_lower_bound(p, ρBs; solver = default_sdp_solver(T)).optval
@test lb <= output.optval + 1e-4
end
| [
27,
7856,
261,
480,
29,
35626,
746,
23103,
14,
8205,
408,
1818,
24915,
388,
24819,
12360,
13,
20362,
27,
34345,
29,
9288,
14,
8929,
62,
3866,
16005,
62,
41989,
14,
41989,
13,
20362,
198,
3500,
37571,
1818,
24915,
388,
24819,
12360,
19... | 2.288321 | 548 |
<reponame>dominikkiese/PFFRGSolver.jl
"""
get_observable_timers() :: Nothing
Test performance of current observable implementation by Fourier transforming test correlations
on a few 2D and 3D lattices with 50 x 50 x 0 (2D) or 50 x 50 x 50 (3D) momenta.
"""
function get_observable_timers() :: Nothing
# init test dummys
l1 = get_lattice("square", 6, verbose = false); r1 = get_reduced_lattice("heisenberg", [[0.0]], l1, verbose = false)
l2 = get_lattice("cubic", 6, verbose = false); r2 = get_reduced_lattice("heisenberg", [[0.0]], l2, verbose = false)
l3 = get_lattice("kagome", 6, verbose = false); r3 = get_reduced_lattice("heisenberg", [[0.0]], l3, verbose = false)
l4 = get_lattice("hyperkagome", 6, verbose = false); r4 = get_reduced_lattice("heisenberg", [[0.0]], l4, verbose = false)
χ1 = Float64[exp(-norm(r1.sites[i].vec)) for i in eachindex(r1.sites)]
χ2 = Float64[exp(-norm(r2.sites[i].vec)) for i in eachindex(r2.sites)]
χ3 = Float64[exp(-norm(r3.sites[i].vec)) for i in eachindex(r3.sites)]
χ4 = Float64[exp(-norm(r4.sites[i].vec)) for i in eachindex(r4.sites)]
k_2D = get_momenta((0.0, 1.0 * pi), (0.0, 1.0 * pi), (0.0, 0.0), (50, 50, 0))
k_3D = get_momenta((0.0, 1.0 * pi), (0.0, 1.0 * pi), (0.0, 1.0 * pi), (50, 50, 50))
# init timer
to = TimerOutput()
@timeit to "=> Fourier transform" begin
for rep in 1 : 10
@timeit to "-> square" compute_structure_factor(χ1, k_2D, l1, r1)
@timeit to "-> cubic" compute_structure_factor(χ2, k_3D, l2, r2)
@timeit to "-> kagome" compute_structure_factor(χ3, k_2D, l3, r3)
@timeit to "-> hyperkagome" compute_structure_factor(χ4, k_3D, l4, r4)
end
end
show(to)
return nothing
end | [
27,
7856,
261,
480,
29,
3438,
259,
36073,
444,
68,
14,
47,
5777,
49,
14313,
14375,
13,
20362,
198,
37811,
198,
220,
220,
220,
651,
62,
672,
3168,
540,
62,
16514,
364,
3419,
7904,
10528,
220,
198,
198,
14402,
2854,
286,
1459,
42550,
... | 2.102535 | 868 |
# FUNCTION recover_primal
"""Computes the Ridge regressor
INPUT
ℓ - LossFunction to use
Y - Vector of observed responses
Z - Matrix of observed features
γ - Regularization parameter
OUTPUT
w - Optimal regressor"""
function recover_primal(ℓ::Regression, Y, Z, γ)
CM = Matrix(I, size(Z,2), size(Z,2))/γ + Z'*Z # The capacitance matrix
α = -Y + Z*(CM\(Z'*Y)) # Matrix Inversion Lemma
return -γ*Z'*α # Regressor
end
using LIBLINEAR
function recover_primal(ℓ::Classification, Y, Z, γ)
solverNumber = LibLinearSolver(ℓ)
if isa(ℓ, SubsetSelection.Classification)
model = LIBLINEAR.linear_train(Y, Z'; verbose=false, C=γ, solver_type=Cint(solverNumber))
return Y[1]*model.w
# else
# model = linear_train(Y, Z'; verbose=false, C=γ, solver_type=Cint(solverNumber), eps = ℓ.ε)
# return model.w
end
end
function LibLinearSolver(ℓ::L1SVR)
return 13
end
function LibLinearSolver(ℓ::L2SVR)
return 12
end
function LibLinearSolver(ℓ::LogReg)
return 7
end
function LibLinearSolver(ℓ::L1SVM)
return 3
end
function LibLinearSolver(ℓ::L2SVM)
return 2
end
| [
2,
29397,
4177,
2849,
8551,
62,
1050,
4402,
198,
37811,
7293,
1769,
262,
20614,
842,
44292,
198,
1268,
30076,
198,
220,
2343,
226,
241,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
532,
22014,
22203,
284,
779,
198,
220,
575,
22... | 2.182156 | 538 |
<filename>src/equations/euler.jl<gh_stars>0
struct Euler <: Equation end
@declare_dofs Euler [:rhou, :rhov, :rho, :e]
struct GaussianWaveeuler <: Scenario end
struct Sod_shock_tube <: Scenario end
function is_periodic_boundary(equation::Euler, scenario::GaussianWaveeuler)
# GaussianWave scenario with periodic boundary conditions
true
end
function is_periodic_boundary(equation::Euler, scenario::Sod_shock_tube)
# sod_shock_tube scenario doesn't require periodic boundary conditions
false
end
# for Sod_shock_tube Scenario
function evaluate_boundary(eq::Euler, scenario::Sod_shock_tube, face, normalidx, dofsface, dofsfaceneigh)
# dofsface and dofsfaceneigh have shape (num_2d_quadpoints, dofs)
# you need to set dofsfaceneigh
dofsfaceneigh .= dofsface
if normalidx == 1
dofsfaceneigh[:, 1] = -dofsface[:, 1]
else
dofsfaceneigh[:, 2] = -dofsface[:, 2]
end
end
function evaluate_energy(eq::Euler, rhou, rhov, rho, p)
# value of gamma is 1.4, hence gamma-1 = 0.4
return ( (p/0.4) + (1/(2*rho)) * (rhou^2 + rhov^2) )
end
function evaluate_pressure(eq::Euler, rhou, rhov, rho, e)
# value of gamma is 1.4, hence gamma-1 = 0.4
return ( (0.4) * ( e - (1/(2*rho)) * (rhou^2 + rhov^2) ))
end
function get_initial_values(eq::Euler, scenario::GaussianWaveeuler, global_position; t=0.0)
x, y = global_position
if is_analytical_solution(eq, scenario)
println("No analytical solution implemented")
else
p = exp(-100 * (x - 0.5)^2 - 100 *(y - 0.5)^2) + 1
[0, 0, 1.0, evaluate_energy(eq, 0, 0, 1.0, p)]
end
end
function get_initial_values(eq::Euler, scenario::Sod_shock_tube, global_position; t=0.0)
x, y = global_position
if is_analytical_solution(eq, scenario)
println("No analytical solution implemented")
else
[ 0, 0, if x<=0.5 0.125 else 1.0 end, if x<= 0.5 evaluate_energy(eq, 0, 0, 0.125, 0.1) else evaluate_energy(eq, 0, 0, 1.0, 1.0) end ]
end
end
function is_analytical_solution(equation::Euler, scenario::GaussianWaveeuler)
false
end
function is_analytical_solution(equation::Euler, scenario::Sod_shock_tube)
false
end
function evaluate_flux(eq::Euler, celldofs, cellflux)
# size(cellflux)=(2*order*order, ndof)=(2,5)
# size(celldofs)=(order*order, ndof)=(1,5)
order_sq, ndof = size(celldofs)
for j=1:order_sq
# explicit names of dofs for clarity
rhou = celldofs[j, 1]
rhov = celldofs[j, 2]
density = celldofs[j, 3]
e = celldofs[j, 4]
p=evaluate_pressure(eq,rhou,rhov,density,e)
# here flux is computed elementwise
cellflux[j,1]= (1/density)*rhou^2 + p
cellflux[j + order_sq, 1] = (1/density)*rhou*rhov
cellflux[j,2] = (1/density)*rhou*rhov
cellflux[j + order_sq, 2] = (1/density)*rhov^2 + p
cellflux[j,3] = rhou
cellflux[j + order_sq, 3] = rhov
cellflux[j,4] = (rhou/density)*(e+p)
cellflux[j + order_sq, 4] = (rhov/density)*(e+p)
end
end
function max_eigenval(eq::Euler, celldata, normalidx)
# v_n + c, where c = sqrt(gamma*p /rho)
maxeigenval = 0.
#@info size(celldata)
for i in 1:size(celldata)[1]
rhou, rhov, density, e = celldata[i, :]
pressure = evaluate_pressure(eq,rhou,rhov,density,e)
#@info i
#@info pressure
#@info density
#@info e
#@info rhou
#@info rhov
c = sqrt(1.4*pressure/density)
if normalidx == 1
vn = rhou / density
else
vn = rhov / density
end
maxeigenval_new = vn + c
if maxeigenval_new > maxeigenval
maxeigenval = maxeigenval_new
end
end
maxeigenval
end
| [
27,
34345,
29,
10677,
14,
4853,
602,
14,
68,
18173,
13,
20362,
27,
456,
62,
30783,
29,
15,
198,
7249,
412,
18173,
1279,
25,
7889,
341,
886,
198,
31,
32446,
533,
62,
67,
1659,
82,
412,
18173,
685,
25,
81,
15710,
11,
1058,
17179,
... | 2.147915 | 1,751 |
# file containing methods for requesting forecast simulations from GEOGloWS
"""
forecast_stats(parameters::AbstractDict{Symbol,<:Any})
returns statistics calculated from 51 forecast ensemble members.
"""
function forecast_stats(parameters::AbstractDict{Symbol,<:Any})
endpoint = "ForecastStats"
r = _make_request(baseurl, endpoint, parameters)
_read_response(r, parameters[:return_format], with_dates = true)
end
"""
forecast_stats(reach_id::Int; return_format::Symbol = :csv)
returns statistics calculated from 51 forecast ensemble members.
"""
function forecast_stats(reach_id::Int; return_format::Symbol = :csv)
parameters = Dict(:reach_id => reach_id, :return_format => return_format)
forecast_stats(parameters)
end
"""
forecast_stats(reach_id::Int,date::AbstractString; return_format::Symbol = :csv)
returns statistics calculated from 51 forecast ensemble members.
"""
function forecast_stats(
reach_id::Int,
date::AbstractString;
return_format::Symbol = :csv,
)
parameters = Dict(
:reach_id => reach_id,
:date => date,
:return_format => return_format,
)
forecast_stats(parameters)
end
"""
forecast_stats(reach_id::Int,date::TimeType; return_format::Symbol = :csv)
returns statistics calculated from 51 forecast ensemble members.
"""
function forecast_stats(
reach_id::Int,
date::TimeType;
return_format::Symbol = :csv,
)
datestr = try
Dates.format(date, "YYYY-m-d")
catch
Dates.format(date, "yyyymmdd")
end
parameters = Dict(
:reach_id => reach_id,
:date => datestr,
:return_format => return_format,
)
forecast_stats(parameters)
end
"""
forecast_stats(lat::Real, lon::Real; return_format::Symbol = :csv)
returns statistics calculated from 51 forecast ensemble members.
"""
function forecast_stats(lat::Real, lon::Real; return_format::Symbol = :csv)
parameters = Dict(:lat => lat, :lon => lon, :return_format => return_format)
forecast_stats(parameters)
end
"""
forecast_stats(lat::Real, lon::Real, date::AbstractString; return_format::Symbol = :csv)
returns statistics calculated from 51 forecast ensemble members.
"""
function forecast_stats(
lat::Real,
lon::Real,
date::AbstractString;
return_format::Symbol = :csv,
)
if occursin("-", date)
date = replace(date, "-" => "")
end
parameters = Dict(
:lat => lat,
:lon => lon,
:date => date,
:return_format => return_format,
)
forecast_stats(parameters)
end
"""
forecast_stats(lat::Real, lon::Real, date::TimeType; return_format::Symbol = :csv)
returns statistics calculated from 51 forecast ensemble members.
"""
function forecast_stats(
lat::Real,
lon::Real,
date::TimeType;
return_format::Symbol = :csv,
)
datestr = Dates.format(date, "yyyymmdd")
parameters = Dict(
:lat => lat,
:lon => lon,
:date => datestr,
:return_format => return_format,
)
forecast_stats(parameters)
end
"""
forecast_ensembles(parameters::AbstractDict{Symbol,<:Any})
returns a timeseries for each of the 51 normal forecast ensemble members and the 52nd higher resolution forecast
"""
function forecast_ensembles(parameters::AbstractDict{Symbol,<:Any})
endpoint = "ForecastEnsembles"
r = _make_request(baseurl, endpoint, parameters)
_read_response(r, parameters[:return_format], with_dates = true)
end
"""
forecast_ensembles(reach_id::Int; return_format::Symbol = :csv)
returns a timeseries for each of the 51 normal forecast ensemble members and the 52nd higher resolution forecast
"""
function forecast_ensembles(reach_id::Int; return_format::Symbol = :csv)
parameters = Dict(:reach_id => reach_id, :return_format => return_format)
forecast_ensembles(parameters)
end
"""
forecast_ensembles(reach_id::Int, date::AbstractString; return_format::Symbol = :csv)
returns a timeseries for each of the 51 normal forecast ensemble members and the 52nd higher resolution forecast
"""
function forecast_ensembles(
reach_id::Int,
date::AbstractString;
return_format::Symbol = :csv,
)
if occursin("-", date)
date = replace(date, "-" => "")
end
parameters = Dict(
:reach_id => reach_id,
:date => date,
:return_format => return_format,
)
forecast_ensembles(parameters)
end
"""
forecast_ensembles(reach_id::Int, date::TimeType; return_format::Symbol = :csv)
returns a timeseries for each of the 51 normal forecast ensemble members and the 52nd higher resolution forecast
"""
function forecast_ensembles(
reach_id::Int,
date::TimeType;
return_format::Symbol = :csv,
)
datestr = Dates.format(date, "yyyymmdd")
parameters = Dict(
:reach_id => reach_id,
:date => datestr,
:return_format => return_format,
)
forecast_ensembles(parameters)
end
"""
forecast_ensembles(lat::Real, lon::Real; return_format::Symbol = :csv)
returns a timeseries for each of the 51 normal forecast ensemble members and the 52nd higher resolution forecast
"""
function forecast_ensembles(lat::Real, lon::Real; return_format::Symbol = :csv)
parameters = Dict(:lat => lat, :lon => lon, :return_format => return_format)
forecast_ensembles(parameters)
end
"""
forecast_ensembles(lat::Real, lon::Real, date::AbstractString; return_format::Symbol = :csv)
returns a timeseries for each of the 51 normal forecast ensemble members and the 52nd higher resolution forecast
"""
function forecast_ensembles(
lat::Real,
lon::Real,
date::AbstractString;
return_format::Symbol = :csv,
)
if occursin("-", date)
date = replace(date, "-" => "")
end
parameters = Dict(
:lat => lat,
:lon => lon,
:date => date,
:return_format => return_format,
)
forecast_ensembles(parameters)
end
"""
forecast_ensembles(lat::Real, lon::Real, date::TimeType; return_format::Symbol = :csv)
returns a timeseries for each of the 51 normal forecast ensemble members and the 52nd higher resolution forecast
"""
function forecast_ensembles(
lat::Real,
lon::Real,
date::TimeType;
return_format::Symbol = :csv,
)
datestr = Dates.format(date, "yyyymmdd")
parameters = Dict(
:lat => lat,
:lon => lon,
:date => datestr,
:return_format => return_format,
)
forecast_ensembles(parameters)
end
"""
forecast_records(parameters::AbstractDict{Symbol,<:Any})
retrieves the rolling record of the mean of the forecasted streamflow during the
first 24 hours of each day's forecast. That is, each day day after the streamflow
forecasts are computed, the average of first 8 of the 3-hour timesteps are recorded to a csv.
"""
function forecast_records(parameters::AbstractDict{Symbol,<:Any})
endpoint = "ForecastRecords"
r = _make_request(baseurl, endpoint, parameters)
_read_response(r, parameters[:return_format], with_dates = true)
end
"""
forecast_records(reach_id::Int; return_format::Symbol = :csv)
retrieves the rolling record of the mean of the forecasted streamflow during the
first 24 hours of each day's forecast. That is, each day day after the streamflow
forecasts are computed, the average of first 8 of the 3-hour timesteps are recorded to a csv.
"""
function forecast_records(reach_id::Int; return_format::Symbol = :csv)
parameters = Dict(:reach_id => reach_id, :return_format => return_format)
forecast_records(parameters)
end
"""
forecast_records(lat::Real, lon::Real; return_format::Symbol = :csv)
retrieves the rolling record of the mean of the forecasted streamflow during the
first 24 hours of each day's forecast. That is, each day day after the streamflow
forecasts are computed, the average of first 8 of the 3-hour timesteps are recorded to a csv.
"""
function forecast_records(lat::Real, lon::Real; return_format::Symbol = :csv)
parameters = Dict(:lat => lat, :lon => lon, :return_format => return_format)
forecast_records(parameters)
end
"""
forecast_records(reach_id::Int, start_date::TimeType, end_date::TimeType; return_format::Symbol = :csv,)
retrieves the rolling record of the mean of the forecasted streamflow during the
first 24 hours of each day's forecast. That is, each day day after the streamflow
forecasts are computed, the average of first 8 of the 3-hour timesteps are recorded to a csv.
"""
function forecast_records(
reach_id::Int,
start_date::TimeType,
end_date::TimeType;
return_format::Symbol = :csv,
)
start_datestr = Dates.format(start_date, "yyyymmdd")
end_datestr = Dates.format(end_date, "yyyymmdd")
parameters = Dict(
:reach_id => reach_id,
:start_date => start_datestr,
:end_date => end_datestr,
:return_format => return_format,
)
forecast_records(parameters)
end
"""
forecast_records(reach_id::Int, start_date::AbstractString, end_date::AbstractString; return_format::Symbol = :csv,)
retrieves the rolling record of the mean of the forecasted streamflow during the
first 24 hours of each day's forecast. That is, each day day after the streamflow
forecasts are computed, the average of first 8 of the 3-hour timesteps are recorded to a csv.
"""
function forecast_records(
reach_id::Int,
start_date::AbstractString,
end_date::AbstractString;
return_format::Symbol = :csv,
)
if occursin("-", start_date)
start_date = replace(start_date, "-" => "")
end
if occursin("-", end_date)
end_date = replace(end_date, "-" => "")
end
parameters = Dict(
:reach_id => reach_id,
:start_date => start_date,
:end_date => end_date,
:return_format => return_format,
)
forecast_records(parameters)
end
"""
forecast_records(lat::Real, lon::Real, start_date::TimeType, end_date::TimeType; return_format::Symbol = :csv,)
retrieves the rolling record of the mean of the forecasted streamflow during the
first 24 hours of each day's forecast. That is, each day day after the streamflow
forecasts are computed, the average of first 8 of the 3-hour timesteps are recorded to a csv.
"""
function forecast_records(
lat::Real,
lon::Real,
start_date::TimeType,
end_date::TimeType;
return_format::Symbol = :csv,
)
start_datestr = Dates.format(start_date, "yyyymmdd")
end_datestr = Dates.format(end_date, "yyyymmdd")
parameters = Dict(
:lat => lat,
:lon => lon,
:start_date => start_datestr,
:end_date => end_datestr,
:return_format => return_format,
)
forecast_records(parameters)
end
"""
forecast_records(lat::Real, lon::Real, start_date::AbstractString, end_date::AbstractString; return_format::Symbol = :csv,)
retrieves the rolling record of the mean of the forecasted streamflow during the
first 24 hours of each day's forecast. That is, each day day after the streamflow
forecasts are computed, the average of first 8 of the 3-hour timesteps are recorded to a csv.
"""
function forecast_records(
lat::Real,
lon::Real,
start_date::AbstractString,
end_date::AbstractString;
return_format::Symbol = :csv,
)
if occursin("-", start_date)
start_date = replace(start_date, "-" => "")
end
if occursin("-", end_date)
end_date = replace(end_date, "-" => "")
end
parameters = Dict(
:lat => lat,
:lon => lon,
:start_date => start_date,
:end_date => end_date,
:return_format => return_format,
)
forecast_records(parameters)
end
"""
forecast_warnings(parameters::AbstractDict{Symbol,<:Any})
returns a csv created to summarize the potential return period level flow events
coming to the reaches in a specified region. The CSV contains a column for the
reach_id, lat of the reach, lon of the reach, maximum forecasted flow in the next
15 day forecast, and a column for each of the return periods (2, 10, 20, 25, 50, 100)
which will contain the date when the forecast is first expected to pass that threshold
"""
function forecast_warnings(parameters::AbstractDict{Symbol,<:Any})
endpoint = "ForecastWarnings"
r = _make_request(baseurl, endpoint, parameters)
_read_response(r, parameters[:return_format])
end
"""
forecast_warnings(; return_format::Symbol = :csv)
returns a csv created to summarize the potential return period level flow events
coming to the reaches in a specified region. The CSV contains a column for the
reach_id, lat of the reach, lon of the reach, maximum forecasted flow in the next
15 day forecast, and a column for each of the return periods (2, 10, 20, 25, 50, 100)
which will contain the date when the forecast is first expected to pass that threshold
"""
function forecast_warnings(; return_format::Symbol = :csv)
parameters = Dict(:return_format => return_format)
forecast_warnings(parameters)
end
"""
forecast_warnings(date::TimeType; return_format::Symbol = :csv)
returns a csv created to summarize the potential return period level flow events
coming to the reaches in a specified region. The CSV contains a column for the
reach_id, lat of the reach, lon of the reach, maximum forecasted flow in the next
15 day forecast, and a column for each of the return periods (2, 10, 20, 25, 50, 100)
which will contain the date when the forecast is first expected to pass that threshold
"""
function forecast_warnings(date::TimeType; return_format::Symbol = :csv)
datestr = Dates.format(date, "yyyymmdd")
parameters = Dict(:date => datestr, :return_format => return_format)
forecast_warnings(parameters)
end
"""
forecast_warnings(region::AbstractString; return_format::Symbol = :csv)
returns a csv created to summarize the potential return period level flow events
coming to the reaches in a specified region. The CSV contains a column for the
reach_id, lat of the reach, lon of the reach, maximum forecasted flow in the next
15 day forecast, and a column for each of the return periods (2, 10, 20, 25, 50, 100)
which will contain the date when the forecast is first expected to pass that threshold
"""
function forecast_warnings(region::AbstractString; return_format::Symbol = :csv)
parameters = Dict(:region => region, :return_format => return_format)
forecast_warnings(parameters)
end
"""
forecast_warnings(region::AbstractString, date::TimeType; return_format::Symbol = :csv)
returns a csv created to summarize the potential return period level flow events
coming to the reaches in a specified region. The CSV contains a column for the
reach_id, lat of the reach, lon of the reach, maximum forecasted flow in the next
15 day forecast, and a column for each of the return periods (2, 10, 20, 25, 50, 100)
which will contain the date when the forecast is first expected to pass that threshold
"""
function forecast_warnings(
region::AbstractString,
date::TimeType;
return_format::Symbol = :csv,
)
datestr = Dates.format(date, "yyyymmdd")
parameters = Dict(
:region => region,
:date => datestr,
:return_format => return_format,
)
forecast_warnings(parameters)
end
"""
forecast_warnings(region::AbstractString, date::AbstractString; return_format::Symbol = :csv)
returns a csv created to summarize the potential return period level flow events
coming to the reaches in a specified region. The CSV contains a column for the
reach_id, lat of the reach, lon of the reach, maximum forecasted flow in the next
15 day forecast, and a column for each of the return periods (2, 10, 20, 25, 50, 100)
which will contain the date when the forecast is first expected to pass that threshold
"""
function forecast_warnings(
region::AbstractString,
date::AbstractString;
return_format::Symbol = :csv,
)
if occursin("-", date)
date = replace(date, "-" => "")
end
parameters = Dict(
region::AbstractString,
:date => date,
:return_format => return_format,
)
forecast_warnings(parameters)
end
| [
2,
2393,
7268,
5050,
329,
20623,
11092,
27785,
422,
402,
4720,
38,
5439,
19416,
198,
37811,
198,
220,
220,
220,
11092,
62,
34242,
7,
17143,
7307,
3712,
23839,
35,
713,
90,
13940,
23650,
11,
27,
25,
7149,
30072,
198,
198,
7783,
82,
7... | 2.89991 | 5,565 |
import Base.LinAlg:
A_mul_B!, At_mul_B, A_ldiv_B!
import Tulip:
Model, PrimalDualPoint
"""
solve(model, tol, verbose)
Solve model m using an infeasible predictor-corrector Interior-Point algorithm.
# Arguments
- `model::Model`: the optimization model
- `tol::Float64`: numerical tolerance
- `verbose::Int`: 0 means no output, 1 displays log at each iteration
"""
function solve!(
model::Model;
tol::Float64 = 10.0^-8,
verbose::Int = 0
)
model.status = :Built
N_ITER_MAX = 100 # maximum number of IP iterations
niter = 0 # number of IP iterations
# TODO: pre-optimization stuff
F = symbolic_cholesky(model.A) # Symbolic factorization
θ = zeros(model.sol.x)
# X = Array{PrimalDualPoint, 1}()
# TODO: check stopping criterion for possible early termination
# compute starting point
# TODO: decide which starting point
compute_starting_point!(
model.A,
F,
model.sol,
model.b, model.c, model.uind, model.uval
)
# IPM log
if verbose == 1
println(" Itn Primal Obj Dual Obj Prim Inf Dual Inf UBnd Inf\n")
end
# main loop
# push!(X, copy(model.sol))
while niter < N_ITER_MAX
# I. Form and factor Newton System
compute_newton!(
model.A,
model.sol.x,
model.sol.s,
model.sol.w,
model.sol.z,
model.uind,
θ,
F
)
# II. Compute and take step
compute_next_iterate!(model, F)
# III. Book-keeping + display log
# compute residuals
rb = model.A * model.sol.x - model.b
rc = At_mul_B(model.A, model.sol.y) + model.sol.s - model.c
spxpay!(-1.0, rc, model.uind, model.sol.z)
ru = model.sol.x[model.uind] + model.sol.w - model.uval
obj_primal = dot(model.sol.x, model.c)
obj_dual = dot(model.b, model.sol.y) - dot(model.uval, model.sol.z)
niter += 1
# check stopping criterion
eps_p = (norm(rb)) / (1.0 + norm(model.b))
eps_d = (norm(rc)) / (1.0 + norm(model.c))
eps_u = (norm(ru)) / (1.0 + norm(model.uval))
eps_g = abs(obj_primal - obj_dual) / (1.0 + abs(obj_primal))
# if verbose == 1
# print("\teps_p=")
# print(@sprintf("%9.2e", eps_p))
# print("\teps_d=")
# print(@sprintf("%9.2e", eps_d))
# print("\teps_u=")
# print(@sprintf("%9.2e", eps_u))
# print("\teps_g=")
# print(@sprintf("%9.2e", eps_g))
# println("\n")
# end
if verbose == 1
print(@sprintf("%4d", niter)) # iteration count
print(@sprintf("%+18.7e", obj_primal)) # primal objective
print(@sprintf("%+16.7e", obj_dual)) # dual objective
print(@sprintf("%10.2e", norm(rb, Inf))) # primal infeas
print(@sprintf("%9.2e", norm(rc, Inf))) # dual infeas
print(@sprintf("%9.2e", norm(ru, Inf))) # upper bound infeas
print(@sprintf("%9.2e", abs(obj_primal - obj_dual) / (model.nvars + size(model.uind, 1))))
print("\n")
end
if (eps_p < tol) && (eps_u < tol) && (eps_d < tol) && (eps_g < tol)
model.status = :Optimal
end
# push!(X, copy(model.sol))
# check status
if model.status == :Optimal
if verbose == 1
println()
println("Optimal solution found.")
end
return model.status
end
end
#
return model.status
end
"""
compute_starting_point!
Compute a starting point
# Arguments
-`model::Model`
-`F::Factorization`: Cholesky factor of A*A', where A is the constraint matrix
of the optimization problem.
"""
function compute_starting_point!(
A::AbstractMatrix{Tv},
F::Factorization{Tv},
Λ::Tulip.PrimalDualPoint{Tv},
b::StridedVector{Tv},
c::StridedVector{Tv},
uind::StridedVector{Ti},
uval::StridedVector{Tv}
) where{Tv<:Real, Ti<:Integer}
(m, n) = size(A)
p = size(uind, 1)
rhs = - 2 * b
u_ = zeros(n)
spxpay!(1.0, u_, uind, uval)
rhs += A* u_
#==============================#
#
# I. Solve two QPs
#
#==============================#
# Compute x0
v = F \ rhs
copy!(Λ.x, -0.5 * At_mul_B(A, v))
spxpay!(0.5, Λ.x, uind, uval)
# Compute w0
@inbounds for i in 1:p
j = uind[i]
Λ.w[i] = uval[i] - Λ.x[j]
end
# Compute y0
copy!(Λ.y, F \ (A*c))
# Compute s0
copy!(Λ.s, 0.5 * (At_mul_B(A, Λ.y) - c))
# Compute z0
@inbounds for i in 1:p
j = uind[i]
Λ.z[i] = - Λ.s[j]
end
#==============================#
#
# II. Compute correction
#
#==============================#
dp = zero(Tv)
dd = zero(Tv)
@inbounds for i in 1:n
tmp = -1.5 * Λ.x[i]
if tmp > dp
dp = tmp
end
end
@inbounds for i in 1:p
tmp = -1.5 * Λ.w[i]
if tmp > dp
dp = tmp
end
end
@inbounds for i in 1:n
tmp = -1.5 * Λ.s[i]
if tmp > dd
dd = tmp
end
end
@inbounds for i in 1:p
tmp = -1.5 * Λ.z[i]
if tmp > dd
dd = tmp
end
end
tmp = dot(Λ.x + dp, Λ.s + dd) + dot(Λ.w + dp, Λ.z + dd)
dp += 0.5 * tmp / (sum(Λ.s + dd) + sum(Λ.z + dd))
dd += 0.5 * tmp / (sum(Λ.x + dp) + sum(Λ.w + dp))
#==============================#
#
# III. Apply correction
#
#==============================#
@inbounds for i in 1:n
Λ.x[i] += dp
end
@inbounds for i in 1:n
Λ.s[i] += dd
end
@inbounds for i in 1:p
Λ.w[i] += dp
end
@inbounds for i in 1:p
Λ.z[i] += dd
end
# Done
return Λ
end
"""
    compute_next_iterate!(model::Model, F::Factorization)

Perform one Mehrotra predictor-corrector step: solve for the affine
(predictor) direction with the current factorization `F`, choose the
centering parameter σ from the affine step, solve for the corrector
direction, then update `model.sol` in place with damped step lengths.
Returns the updated `model.sol`.
"""
function compute_next_iterate!(model::Model, F::Factorization)
    (x, y, s, w, z) = (model.sol.x, model.sol.y, model.sol.s, model.sol.w, model.sol.z)
    (m, n, p) = model.nconstr, model.nvars, size(model.uind, 1)
    d_aff = copy(model.sol) # affine-scaling (predictor) direction
    d_cc = copy(model.sol) # centrality-corrector direction
    # compute residuals
    μ = (
        (dot(x, s) + dot(w, z))
        / (n + p)
    ) # duality measure
    rb = (model.A * x) - model.b
    rc = At_mul_B(model.A, y) + s - model.c
    spxpay!(-1.0, rc, model.uind, model.sol.z) # subtract z on bounded columns
    ru = x[model.uind] + w - model.uval
    rxs = x .* s # complementarity residuals
    rwz = w .* z
    θ = x ./ s
    update_theta!(θ, x, s, z, w, model.uind) # overwrite bounded entries of θ
    # compute predictor
    solve_newton!(
        model.A,
        θ,
        F,
        model.sol,
        d_aff,
        model.uind,
        -rb,
        -rc,
        -ru,
        -rxs,
        -rwz
    )
    # compute step length
    (α_pa, α_da) = compute_stepsize(model.sol, d_aff)
    # update centrality parameter
    μ_aff = (
        (
            dot(x + α_pa * d_aff.x, s + α_da * d_aff.s)
            + dot(w + α_pa * d_aff.w, z + α_da * d_aff.z)
        ) / (n + p)
    )
    σ = clamp((μ_aff / μ)^3, 10.0^-12, 1.0 - 10.0^-12) # clamped for numerical stability
    # compute corrector (zero residual RHS, centering + second-order terms)
    solve_newton!(
        model.A,
        θ,
        F,
        model.sol,
        d_cc,
        model.uind,
        zeros(m),
        zeros(n),
        zeros(p),
        σ*μ*ones(n) - d_aff.x .* d_aff.s,
        σ*μ*ones(p) - d_aff.w .* d_aff.z
    )
    # final step size
    d = d_aff + d_cc # total direction = predictor + corrector
    (α_p, α_d) = compute_stepsize(model.sol, d, damp=0.99995)
    # take step (primal variables use α_p, dual variables use α_d)
    model.sol.x += α_p * d.x
    model.sol.y += α_d * d.y
    model.sol.s += α_d * d.s
    model.sol.w += α_p * d.w
    model.sol.z += α_d * d.z
    return model.sol
end
"""
symbolic_cholesky
Compute Cholesky factorization of A*A'
"""
function symbolic_cholesky(A::AbstractMatrix{T}) where {T<:Real}
F = Cholesky.cholesky(A, ones(A.n))
return F
end
"""
compute_newton!
Form and factorize the Newton system, using the normal equations.
"""
function compute_newton!(
A::AbstractMatrix{Ta},
x::AbstractVector{Tx},
s::AbstractVector{Ts},
w::AbstractVector{Tw},
z::AbstractVector{Tz},
uind::AbstractVector{Ti},
θ::AbstractVector{T},
F::Factorization{Ta}
) where {Ta<:Real, Tx<:Real, Ts<:Real, Tw<:Real, Tz<:Real, Ti<:Integer, T<:Real}
# Compute Θ = (X^{-1} S + W^{-1} Z)^{-1}
θ = x ./ s
for i in 1:size(uind, 1)
j = uind[i]
θ[j] = 1.0 / (s[j] / x[j] + z[i] / w[i])
end
# Form the normal equations matrix and compute its factorization
Cholesky.cholesky!(A, θ, F)
return θ
end
"""
solve_newton
Solve Newton system with the given right-hand side.
Overwrites the input d
"""
function solve_newton!(
A::AbstractMatrix{Ta},
θ::AbstractVector{T1},
F::Factorization{Ta},
Λ::PrimalDualPoint,
d::PrimalDualPoint,
uind::AbstractVector{Ti},
ξ_b::AbstractVector{T2},
ξ_c::AbstractVector{T3},
ξ_u::AbstractVector{T4},
ξ_xs::AbstractVector{T5},
ξ_wz::AbstractVector{T6},
) where {Ta<:Real, T1<:Real, T2<:Real, T3<:Real, T4<:Real, T5<:Real, T6<:Real, Ti<:Integer}
ξ_tmp = ξ_c - (ξ_xs ./ Λ.x)
ξ_tmp[uind] += (ξ_wz - (Λ.z .* ξ_u)) ./ Λ.w
d.y = F \ (ξ_b + A * (θ .* ξ_tmp))
d.x = θ .* (At_mul_B(A, d.y) - ξ_tmp)
d.z = (Λ.z .* (-ξ_u + d.x[uind]) + ξ_wz) ./ Λ.w
d.s = (ξ_xs - Λ.s .* d.x) ./ Λ.x
d.w = (ξ_wz - Λ.w .* d.z) ./ Λ.z
# # check if system is solved correctly
# rb = ξ_b - A*d.x
# rc = ξ_c - (A'*d.y + d.s)
# rc[uind] += d.z
# ru = ξ_u - (d.x[uind] + d.w)
# rxs = ξ_xs - (Λ.s .* d.x + Λ.x .* d.s)
# rwz = ξ_wz - (Λ.z .* d.w + Λ.w .* d.z)
# println("Residuals\t(normal eqs)")
# println("||rb|| \t", @sprintf("%.6e", maximum(abs.(rb))))
# println("||rc|| \t", @sprintf("%.6e", maximum(abs.(rc))))
# println("||ru|| \t", @sprintf("%.6e", maximum(abs.(ru))))
# println("||rxs|| \t", @sprintf("%.6e", maximum(abs.(rxs))))
# println("||rwz|| \t", @sprintf("%.6e", maximum(abs.(rwz))))
# println()
return d
end
"""
    compute_stepsize(tx, tw, ts, tz, dx, dw, ds, dz; damp=1.0)

Compute the largest damped primal and dual step sizes (capped at `damp`) that
keep `(tx, tw)` and `(ts, tz)` componentwise nonnegative along the directions
`(dx, dw)` and `(ds, dz)` respectively. Returns `(α_primal, α_dual)`.
"""
function compute_stepsize(
    tx::AbstractVector{T}, tw::AbstractVector{T}, ts::AbstractVector{T}, tz::AbstractVector{T},
    dx::AbstractVector{T}, dw::AbstractVector{T}, ds::AbstractVector{T}, dz::AbstractVector{T};
    damp=1.0
) where T<:Real
    n = size(tx, 1)
    p = size(tw, 1)
    n == size(ts, 1) || throw(DimensionMismatch("t.s is wrong size"))
    p == size(tz, 1) || throw(DimensionMismatch("t.z is wrong size"))
    n == size(dx, 1) || throw(DimensionMismatch("d.x is wrong size"))
    n == size(ds, 1) || throw(DimensionMismatch("d.s is wrong size"))
    p == size(dw, 1) || throw(DimensionMismatch("d.w is wrong size"))
    p == size(dz, 1) || throw(DimensionMismatch("d.z is wrong size"))
    # Track the maximum of t[i]/d[i] over decreasing components; the -1.0 seed
    # caps the (undamped) step at 1 when no component blocks.
    function maxratio(acc, t, d)
        @inbounds for i in eachindex(d)
            if d[i] < 0.0
                r = t[i] / d[i]
                if r > acc
                    acc = r
                end
            end
        end
        return acc
    end
    ap = maxratio(maxratio(-1.0, tx, dx), tw, dw)
    ad = maxratio(maxratio(-1.0, ts, ds), tz, dz)
    return (-damp * ap, -damp * ad)
end
"""
    compute_stepsize(t::PrimalDualPoint, d::PrimalDualPoint; damp=1.0)

Convenience wrapper: unpack the primal-dual point and direction and delegate
to the vector method. Returns `(α_primal, α_dual)`.
"""
compute_stepsize(t::PrimalDualPoint{T}, d::PrimalDualPoint{T}; damp=1.0) where T<:Real =
    compute_stepsize(t.x, t.w, t.s, t.z, d.x, d.w, d.s, d.z, damp=damp)
"""
    update_theta!(θ, x, s, z, w, colind)

Overwrite `θ[j]` with `1 / (s[j]/x[j] + z[i]/w[i])` for every upper-bounded
column `j = colind[i]`; all other entries of `θ` are left untouched.
"""
function update_theta!(θ, x, s, z, w, colind)
    # only called from within the optimization, so bounds were checked before
    for (i, j) in enumerate(colind)
        θ[j] = 1.0 / (s[j] / x[j] + z[i] / w[i])
    end
    return nothing
end
"""
spxpay!(α, x, y_ind, y_val)
In-place computation of x += α * y, where y = sparse(y_ind, y_val)
# Arguments
"""
function spxpay!(α::Tv, x::AbstractVector{Tv}, y_ind::AbstractVector{Ti}, y_val::AbstractVector{Tv}) where{Ti<:Integer, Tv<:Real}
for i in 1:size(y_ind, 1)
j = y_ind[i]
x[j] = x[j] + α * y_val[i]
end
return nothing
end
| [
11748,
7308,
13,
14993,
2348,
70,
25,
198,
220,
220,
220,
317,
62,
76,
377,
62,
33,
28265,
1629,
62,
76,
377,
62,
33,
11,
317,
62,
335,
452,
62,
33,
0,
198,
198,
11748,
30941,
541,
25,
198,
220,
220,
220,
9104,
11,
37712,
3624... | 1.872774 | 6,681 |
<reponame>simonsobs/ps_py
# # [Utilities (util.jl)](@id util)
# Unlike every other file in the pipeline, this file is not intended to be run directly.
# Instead, include this in other files. These utilities provide an interface to the Planck
# data products, namely
# 1. binning matrix
# 2. beam ``W_{\ell}^{XY} = B_{\ell}^X B_{\ell}^{Y}``
# 3. foreground model cross-spectra
# 4. ``\texttt{plic}`` reference covariance matrix and reference spectra, for comparison plots
using PowerSpectra
using DataFrames, CSV
using DelimitedFiles
using LinearAlgebra
using FITSIO
# ## Planck Binning
# Planck bins the spectra at the very end, and applies an ``\ell (\ell+1)`` relative
# weighting inside the bin. This utility function generates the binning operator
# ``P_{b\ell}`` such that ``C_b = P_{b \ell} C_{\ell}``. It also returns the mean of the
# left and right bin edges, which is what is used when plotting the Planck spectra.
"""
util_planck_binning(binfile; lmax=6143)
Obtain the Planck binning scheme.
### Arguments:
- `binfile::String`: filename of the Planck binning, containing left/right bin edges
### Keywords
- `lmax::Int=6143`: maximum multipole for one dimension of the binning matrix
### Returns:
- `Tuple{Matrix{Float64}, Vector{Float64}`: returns (binning matrix, bin centers)
"""
function util_planck_binning(binfile; lmax=6143)
bin_df = DataFrame(CSV.File(binfile;
header=false, delim=" ", ignorerepeated=true))
lb = (bin_df[:,1] .+ bin_df[:,2]) ./ 2
P = binning_matrix(bin_df[:,1], bin_df[:,2], ℓ -> ℓ*(ℓ+1) / (2π); lmax=lmax)
return P, lb[1:size(P,1)]
end
# ## Planck Beam
# The Planck effective beams are azimuthally-averaged window functions induced by the
# instrumental optics. This utility function reads the Planck beams from the RIMO, which
# are of the form `TT_2_TT`, `TT_2_EE` etc. Conventionally, the Planck spectra are stored
# with the diagonal of the beam-mixing matrix applied.
#
# ```math
# C_{\ell} = W^{-1}_{\ell} \hat{C}_{\ell}
# ```
"""
util_planck_beam_Wl([T::Type=Float64], freq1, split1, freq2, split2, spec1_, spec2_;
lmax=4000, beamdir=nothing)
Returns the Planck beam transfer of [spec1]_to_[spec2], in Wl form.
### Arguments:
- `T::Type=Float64`: optional first parameter specifying numerical type
- `freq1::String`: frequency of first field
- `split1::String`: split of first field (i.e. hm1)
- `freq2::String`: frequency of first field
- `split2::String`: split of second field (i.e. hm2)
- `spec1_`: converted to string, source spectrum like TT
- `spec2_`: converted to string, destination spectrum
### Keywords
- `lmax=4000`: maximum multipole
- `beamdir=nothing`: directory containing beam FITS files. if nothing, will fall back to
the PowerSpectra.jl beam files.
### Returns:
- `SpectralVector`: the beam Wl, indexed 0:lmax
"""
function util_planck_beam_Wl(T::Type, freq1, split1, freq2, split2, spec1_, spec2_;
lmax=4000, beamdir=nothing)
if isnothing(beamdir)
@warn "beam directory not specified. switching to PowerSpectra.jl fallback"
beamdir = PowerSpectra.planck256_beamdir()
end
spec1 = String(spec1_)
spec2 = String(spec2_)
if parse(Int, freq1) > parse(Int, freq2)
freq1, freq2 = freq2, freq1
split1, split2 = split2, split1
end
if (freq1 == freq2) && ((split1 == "hm2") && (split2 == "hm1"))
split1, split2 = split2, split1
end
fname = "Wl_R3.01_plikmask_$(freq1)$(split1)x$(freq2)$(split2).fits"
f = FITS(joinpath(beamdir, "BeamWf_HFI_R3.01", fname))
Wl = convert(Vector{T}, read(f[spec1], "$(spec1)_2_$(spec2)")[:,1])
if lmax < 4000
Wl = Wl[1:lmax+1]
else
Wl = vcat(Wl, last(Wl) * ones(T, lmax - 4000))
end
return SpectralVector(Wl)
end
# single-spectrum convenience: the [spec1]_2_[spec1] beam
util_planck_beam_Wl(T::Type, freq1, split1, freq2, split2, spec1; kwargs...) =
    util_planck_beam_Wl(T, freq1, split1, freq2, split2, spec1, spec1; kwargs...)
# default the numeric type to Float64 when the leading type parameter is omitted
util_planck_beam_Wl(freq1::String, split1, freq2, split2, spec1, spec2; kwargs...) =
    util_planck_beam_Wl(Float64, freq1, split1, freq2, split2, spec1, spec2; kwargs...)
# ## Planck Likelihood Specifics
# The Planck likelihood uses a specific choice of spectra and multipole ranges for those
# spectra. We provide some utility functions to retrieve a copy of the spectra order and
# the multipole minimum and maximum for those spectra.
"""
    plic_order()

Return the ordered tuple of `(spectrum, freq1, freq2)` triples used by the
Planck plic likelihood (4 TT, 6 EE, and 6 TE crosses).
"""
function plic_order()
    return (
        (:TT,"100","100"), (:TT,"143","143"), (:TT,"143","217"), (:TT,"217","217"),
        (:EE,"100","100"), (:EE,"100","143"), (:EE,"100","217"), (:EE,"143","143"),
        (:EE,"143","217"), (:EE,"217","217"),
        (:TE,"100","100"), (:TE,"100","143"), (:TE,"100","217"), (:TE,"143","143"),
        (:TE,"143","217"), (:TE,"217","217"),
    )
end
# (lmin, lmax) multipole ranges adopted by the plic likelihood for each
# (spectrum, freq1, freq2) cross; frequencies are stored in ascending order.
const plic_ellranges = Dict(
    (:TT, "100", "100") => (30, 1197),
    (:TT, "143", "143") => (30, 1996),
    (:TT, "143", "217") => (30, 2508),
    (:TT, "217", "217") => (30, 2508),
    (:EE, "100", "100") => (30, 999),
    (:EE, "100", "143") => (30, 999),
    (:EE, "100", "217") => (505, 999),
    (:EE, "143", "143") => (30, 1996),
    (:EE, "143", "217") => (505, 1996),
    (:EE, "217", "217") => (505, 1996),
    (:TE, "100", "100") => (30, 999),
    (:TE, "100", "143") => (30, 999),
    (:TE, "100", "217") => (505, 999),
    (:TE, "143", "143") => (30, 1996),
    (:TE, "143", "217") => (505, 1996),
    (:TE, "217", "217") => (505, 1996),
    (:TT, "100", "143") => (30, 999), # not used
    (:TT, "100", "217") => (505, 999), # not used
)
"""
    get_plic_ellrange(spec::Symbol, freq1, freq2)

Look up the `(lmin, lmax)` multipole range used by plic for a spectrum and
frequency pair. `:ET` is mapped onto the stored `:TE` entry, with the
frequencies put in ascending order first.
"""
function get_plic_ellrange(spec::Symbol, freq1, freq2)
    if (spec === :TE) || (spec === :ET)
        if parse(Float64, freq1) > parse(Float64, freq2)
            freq1, freq2 = freq2, freq1
        end
        return plic_ellranges[:TE, freq1, freq2]
    end
    return plic_ellranges[spec, freq1, freq2]
end
# ## Signal Spectra
# The covariance matrix calculation and and signal simulations require an assumed signal
# spectra. We use the same foreground spectra as used in the ``\text{plic}`` likelihood.
# This returns dictionaries for signal and theory ``C_{\ell}`` in ``\mu\mathrm{K}``
# between two frequencies. The data is stored in the `plicref` directory in the config.
"""
    signal_and_theory(freq1, freq2, config::Dict)

Return `(signal_dict, theory_dict)` mapping spectrum names ("TT", "TE", "ET",
"EE") to Cℓ vectors for the `freq1` × `freq2` cross: the theory CMB alone,
and theory plus the plic best-fit foreground model. Vectors are prepended
with zeros for ℓ = 0, 1. Data files are read from the `plicref` subdirectory
of `config["scratch"]`.
"""
function signal_and_theory(freq1, freq2, config::Dict)
    likelihood_data_dir = joinpath(config["scratch"], "plicref")
    th = read_commented_header(joinpath(likelihood_data_dir,"theory_cl.txt"))
    fg = read_commented_header(joinpath(likelihood_data_dir,
        "base_plikHM_TTTEEE_lowl_lowE_lensing.minimum.plik_foregrounds"))
    for (spec, f1, f2) in plic_order()
        lmin, lmax = get_plic_ellrange(spec, f1, f2)
        const_val = fg[lmax-1,"$(spec)$(f1)X$(f2)"]
        ## constant foreground level after lmax -- there are fitting artifacts otherwise
        fg[(lmax-1):end, "$(spec)$(f1)X$(f2)"] .= const_val
    end
    ## loop over spectra and also fill in the flipped name
    freqs = ("100", "143", "217")
    specs = ("TT", "TE", "ET", "EE")
    for f1 in freqs, f2 in freqs, spec in specs
        if "$(spec)$(f1)X$(f2)" ∉ names(fg)
            if "$(reverse(spec))$(f2)X$(f1)" ∈ names(fg)
                fg[!, "$(spec)$(f1)X$(f2)"] = fg[!, "$(reverse(spec))$(f2)X$(f1)"]
            else
                # crosses absent from the foreground table get zero foregrounds
                fg[!, "$(spec)$(f1)X$(f2)"] = zeros(nrow(fg))
            end
        end
    end
    ap(v) = vcat([0., 0.], v) # prepend ℓ = 0, 1 entries
    ell_fac = fg[!, "l"] .* (fg[!, "l"] .+ 1) ./ (2π); # NOTE(review): appears unused below
    signal_dict = Dict{String,Vector{Float64}}()
    theory_dict = Dict{String,Vector{Float64}}()
    for XY₀ in specs ## XY₀ is the spectrum to store
        f₁, f₂ = parse(Int, freq1), parse(Int, freq2)
        if f₁ <= f₂
            XY = XY₀
        else ## swap what we're looking for, as fg data only has those cross-spectra
            XY = XY₀[2] * XY₀[1]
            f₁, f₂ = f₂, f₁
        end
        # the theory table has no "ET" column; it is identical to "TE",
        # and the Dℓ = ℓ(ℓ+1)Cℓ/2π weighting is undone here
        if XY == "ET"
            theory_cl_XY = th[!, "TE"] ./ (th[!, "L"] .* (th[!, "L"] .+ 1) ./ (2π))
        else
            theory_cl_XY = th[!, XY] ./ (th[!, "L"] .* (th[!, "L"] .+ 1) ./ (2π))
        end
        # undo the same weighting on the foreground column
        fg_cl_XY = fg[!, "$(XY)$(f₁)X$(f₂)"] ./ (fg[!, "l"] .* (fg[!, "l"] .+ 1) ./ (2π))
        signal_dict[XY₀] = ap(theory_cl_XY .+ fg_cl_XY[1:2507])
        theory_dict[XY₀] = ap(theory_cl_XY)
    end
    return signal_dict, theory_dict
end
# ## Planck ``\texttt{plic}`` Reference
# In various parts of the pipeline, we want to compare our results to the official 2018
# data release. These routines load them from disk. They're automatically downloaded to the
# `plicref` directory specified in the configuration TOML.
"""
PlanckReferenceCov(plicrefpath::String)
Stores the spectra and covariances of the reference plic analysis.
"""
PlanckReferenceCov
struct PlanckReferenceCov{T}
cov::Array{T,2}
ells::Vector{Int}
cls::Vector{T}
keys::Vector{String}
sub_indices::Vector{Int}
key_index_dict::Dict{String,Int}
end
"""
    PlanckReferenceCov(plicrefpath)

Read the plic reference spectra and covariance from `plicrefpath` and build a
`PlanckReferenceCov{Float64}`. Expects `vec_all_spectra.dat` (multipoles),
`data_extracted.dat` (Cℓ), and `covmat.dat` in that directory.
"""
function PlanckReferenceCov(plicrefpath)
    ellspath = joinpath(plicrefpath, "vec_all_spectra.dat")
    clpath = joinpath(plicrefpath, "data_extracted.dat")
    covpath = joinpath(plicrefpath, "covmat.dat")
    # NOTE(review): the first label looks like a corrupted placeholder ("<KEY>");
    # presumably it should read "TT_100x100" — confirm against the data release.
    keys = ["<KEY>", "TT_143x143", "TT_143x217", "TT_217x217", "EE_100x100",
        "EE_100x143", "EE_100x217", "EE_143x143", "EE_143x217", "EE_217x217", "TE_100x100",
        "TE_100x143", "TE_100x217", "TE_143x143", "TE_143x217", "TE_217x217"]
    # NOTE(review): covmat.dat is inverted here, so the file presumably stores
    # the inverse covariance — confirm.
    cov = inv(readdlm(covpath))
    ells = readdlm(ellspath)[:,1]
    cls = readdlm(clpath)[:,2]
    # a new spectrum block starts wherever the ell sequence resets (diff < 0)
    subarray_indices = collect(0:(size(cov,1)-2))[findall(diff(ells) .< 0) .+ 1] .+ 1
    sub_indices = [1, subarray_indices..., length(cls)+1]
    key_ind = 1:length(keys)
    key_index_dict = Dict(keys .=> key_ind)
    return PlanckReferenceCov{Float64}(cov, ells, cls, keys, sub_indices, key_index_dict)
end
"""
get_subcov(pl::PlanckReferenceCov, spec1, spec2)
Extract the sub-covariance matrix corresponding to spec1 × spec2.
### Arguments:
- `pl::PlanckReferenceCov`: data structure storing reference covmat and spectra
- `spec1::String`: spectrum of form i.e. "TT_100x100"
- `spec2::String`: spectrum of form i.e. "TT_100x100"
### Returns:
- `Matrix{Float64}`: subcovariance matrix
"""
function get_subcov(pl::PlanckReferenceCov, spec1, spec2)
i = pl.key_index_dict[spec1]
j = pl.key_index_dict[spec2]
return pl.cov[
pl.sub_indices[i] : (pl.sub_indices[i + 1] - 1),
pl.sub_indices[j] : (pl.sub_indices[j + 1] - 1),
]
end
"""
extract_spec_and_cov(pl::PlanckReferenceCov, spec1)
Extract the reference ells, cl, errorbar, and sub-covariance block for a spectrum × itself.
### Arguments:
- `pl::PlanckReferenceCov`: data structure storing reference covmat and spectra
- `spec1::String`: spectrum of form i.e. "TT_100x100"
### Returns:
- `(ells, cl, err, this_subcov)`
"""
function extract_spec_and_cov(pl::PlanckReferenceCov, spec1)
i = pl.key_index_dict[spec1]
this_subcov = get_subcov(pl, spec1, spec1)
ells = pl.ells[pl.sub_indices[i]:(pl.sub_indices[i + 1] - 1)]
cl = pl.cls[pl.sub_indices[i]:(pl.sub_indices[i + 1] - 1)]
err = sqrt.(diag(this_subcov))
return ells, cl, err, this_subcov
end
| [
27,
7856,
261,
480,
29,
14323,
26666,
1443,
14,
862,
62,
9078,
198,
2,
1303,
685,
18274,
2410,
357,
22602,
13,
20362,
15437,
7,
31,
312,
7736,
8,
198,
2,
12101,
790,
584,
2393,
287,
262,
11523,
11,
428,
2393,
318,
407,
5292,
284,
... | 2.280418 | 4,882 |
using Revise
using AdFem
using PyCall
using LinearAlgebra
using PyPlot
using SparseArrays
using MAT
np = pyimport("numpy")
λ = 2.0
μ = 0.5
η = 1/12
β = 1/4; γ = 1/2
a = b = 0.1
m = 10
n = 5
h = 0.01
NT = 20
Δt = 1/NT
bdedge = []
for j = 1:n
push!(bdedge, [(j-1)*(m+1)+m+1 j*(m+1)+m+1])
end
bdedge = vcat(bdedge...)
bdnode = Int64[]
for j = 1:n+1
push!(bdnode, (j-1)*(m+1)+1)
end
G = [1/Δt+μ/η -μ/3η 0.0
-μ/3η 1/Δt+μ/η-μ/3η 0.0
0.0 0.0 1/Δt+μ/η]
S = [2μ/Δt+λ/Δt λ/Δt 0.0
λ/Δt 2μ/Δt+λ/Δt 0.0
0.0 0.0 μ/Δt]
invG = inv(G)
H = invG*S
M = compute_fem_mass_matrix1(m, n, h)
Zero = spzeros((m+1)*(n+1), (m+1)*(n+1))
M = [M Zero;Zero M]
K = compute_fem_stiffness_matrix(H, m, n, h)
C = a*M + b*K # damping matrix
L = M + γ*Δt*C + β*Δt^2*K
L, Lbd = fem_impose_Dirichlet_boundary_condition(L, bdnode, m, n, h)
Forces = zeros(NT, 2(m+1)*(n+1))
for i = 1:NT
T = eval_f_on_boundary_edge((x,y)->0.1, bdedge, m, n, h)
T = [T zeros(length(T))]
rhs = compute_fem_traction_term(T, bdedge, m, n, h)
if i*Δt>0.5
rhs = zero(rhs)
end
Forces[i, :] = rhs
end
a = zeros(2(m+1)*(n+1))
v = zeros(2(m+1)*(n+1))
d = zeros(2(m+1)*(n+1))
U = zeros(2(m+1)*(n+1),NT+1)
Sigma = zeros(NT+1, 4m*n, 3)
Varepsilon = zeros(NT+1, 4m*n, 3)
for i = 1:NT
@info i
global a, v, d
F = compute_strain_energy_term(Sigma[i,:,:]*invG/Δt, m, n, h) - K * U[:,i]
# @show norm(compute_strain_energy_term(Sigma[i,:,:]*invG/Δt, m, n, h)), norm(K * U[:,i])
rhs = Forces[i,:] - Δt^2 * F
td = d + Δt*v + Δt^2/2*(1-2β)*a
tv = v + (1-γ)*Δt*a
rhs = rhs - C*tv - K*td
rhs[[bdnode; bdnode.+(m+1)*(n+1)]] .= 0.0
a = L\rhs
d = td + β*Δt^2*a
v = tv + γ*Δt*a
U[:,i+1] = d
Varepsilon[i+1,:,:] = eval_strain_on_gauss_pts(U[:,i+1], m, n, h)
Sigma[i+1,:,:] = Sigma[i,:,:]*invG/Δt + (Varepsilon[i+1,:,:]-Varepsilon[i,:,:])*(invG*S)
end
matwrite("U.mat", Dict("U"=>U'|>Array))
# visualize_displacement(U, m, n, h; name = "_eta$η", xlim_=[-0.01,0.5], ylim_=[-0.05,0.22])
# visualize_displacement(U, m, n, h; name = "_viscoelasticity")
# visualize_stress(H, U, m, n, h; name = "_viscoelasticity")
close("all")
figure(figsize=(15,5))
subplot(1,3,1)
idx = div(n,2)*(m+1) + m+1
plot((0:NT)*Δt, U[idx,:])
xlabel("time")
ylabel("\$u_x\$")
subplot(1,3,2)
idx = 4*(div(n,2)*m + m)
plot((0:NT)*Δt, Sigma[:,idx,1])
xlabel("time")
ylabel("\$\\sigma_{xx}\$")
subplot(1,3,3)
idx = 4*(div(n,2)*m + m)
plot((0:NT)*Δt, Varepsilon[:,idx,1])
xlabel("time")
ylabel("\$\\varepsilon_{xx}\$")
savefig("visco_eta$η.png")
| [
3500,
5416,
786,
198,
3500,
1215,
37,
368,
198,
3500,
9485,
14134,
198,
3500,
44800,
2348,
29230,
198,
3500,
9485,
43328,
198,
3500,
1338,
17208,
3163,
20477,
198,
3500,
36775,
198,
37659,
796,
12972,
11748,
7203,
77,
32152,
4943,
198,
... | 1.679868 | 1,515 |
<reponame>itsdfish/Minesweeper.jl<filename>src/Game.jl
"""
Cell(;has_mine=false, flagged=false, revealed=false, mine_count=0, idx=(0,0))
Cell represents the state of a cell in Minesweeper
- `has_mine`: true if cell contains a mine
- `flagged`: true if cell has been flagged as having a mine
- `revealed`: true if the cell has been revealed
- `mine_count`: number of mines in adjecent cells
- `idx`: Cartesian indices of cell
"""
mutable struct Cell
has_mine::Bool
flagged::Bool
revealed::Bool
mine_count::Int
idx::CartesianIndex
end
function Cell(;has_mine=false, flagged=false, revealed=false, mine_count=0, idx=(0,0))
coords = CartesianIndex(idx)
return Cell(has_mine, flagged, revealed, mine_count, coords)
end
"""
Game(;dims=(12,12), n_mines=40, mines_flagged=0, mine_detonated=false, trials=0)
Minesweeper game object
- `cells`: an array of cells
- `dims`: a Tuple indicating dimensions of cell array
- `n_mines`: number of mines in the game
- `mines_flagged`: number of mines flagged
- `mine_detonated`: indicates whether a mine has been detonated
- `score`: score for game, which includes hits, misses, false alarms and correct rejections
- `trials`: the number of trials or moves
"""
mutable struct Game{T}
cells::Array{Cell,2}
dims::Tuple{Int,Int}
n_mines::Int
mines_flagged::Int
mine_detonated::Bool
score::T
trials::Int
end
function Game(;dims=(12,12), n_mines=40, mines_flagged=0,
mine_detonated=false, trials=0)
score = (hits=0.0,false_alarms=0.0,misses=0.0,correct_rejections=0.0)
cells = initilize_cells(dims)
add_mines!(cells, n_mines)
mine_count!(cells)
return Game(cells, dims, n_mines, mines_flagged, mine_detonated,
score, trials)
end
function initilize_cells(dims)
return [Cell(idx=(r,c)) for r in 1:dims[1], c in 1:dims[2]]
end
function add_mines!(cells, n_mines)
mines = sample(cells, n_mines, replace=false)
for m in mines
m.has_mine = true
end
return nothing
end
"""
mine_count!(cells)
For each cell, omputes the number of neighboring cells containing a mine.
"""
function mine_count!(cells)
for cell in cells
neighbors = get_neighbors(cells, cell.idx)
cell.mine_count = count(x->x.has_mine, neighbors)
end
return nothing
end
get_neighbors(game::Game, idx) = get_neighbors(game.cells, idx.I...)
get_neighbors(game::Game, cell::Cell) = get_neighbors(game.cells, cell.idx.I...)
get_neighbors(game::Game, x, y) = get_neighbors(game.cells, x, y)
get_neighbors(cells, idx) = get_neighbors(cells, idx.I...)
function get_neighbors(cells, xr, yr)
v = [-1,0,1]
X = xr .+ v; Y = yr .+ v
neighbors = Vector{Cell}()
for y in Y, x in X
(x == xr) && (y == yr) ? (continue) : nothing
in_bounds(cells, x, y) ? push!(neighbors, cells[x,y]) : nothing
end
return neighbors
end
get_neighbor_indices(game::Game, idx) = get_neighbor_indices(game.cells, idx.I...)
get_neighbor_indices(game::Game, x, y) = get_neighbor_indices(game.cells, x, y)
get_neighbor_indices(cells, idx) = get_neighbor_indices(cells, idx.I...)
function get_neighbor_indices(cells, xr, yr)
v = [-1,0,1]
X = xr .+ v; Y = yr .+ v
neighbors = Vector{CartesianIndex}()
for y in Y, x in X
(x == xr) && (y == yr) ? (continue) : nothing
in_bounds(cells, x, y) ? push!(neighbors, CartesianIndex(x,y)) : nothing
end
return neighbors
end
function in_bounds(cells, x, y)
if (x < 1) || (y < 1)
return false
elseif (x > size(cells, 1)) || (y > size(cells, 2))
return false
else
return true
end
end
reveal_zeros!(game::Game, x, y) = reveal_zeros!(game.cells, CartesianIndex(x, y))
reveal_zeros!(game::Game, idx) = reveal_zeros!(game.cells, idx)
function reveal_zeros!(cells, idx)
cells[idx].mine_count ≠ 0 ? (return nothing) : nothing
indices = get_neighbor_indices(cells, idx)
for i in indices
c = cells[i]
if !c.has_mine && !c.revealed
c.revealed = true
c.mine_count == 0 ? reveal_zeros!(cells, i) : nothing
end
end
return nothing
end
reveal(game::Game) = reveal(game.cells::Array{Cell,2})
"""
reveal(cells::Array{Cell,2})
Reveals game state in REPL
"""
function reveal(cells::Array{Cell,2})
s = ""
for x in 1:size(cells,1)
for y in 1:size(cells,2)
c = cells[x,y]
if c.has_mine
s *= string(" ", "💣", " ")
else
s *= string(" ", c.mine_count, " ")
end
end
s *= "\r\n"
end
println(s)
end
Base.show(io::IO, game::Game) = Base.show(io::IO, game.cells)
function Base.show(io::IO, cells::Array{Cell,2})
s = ""
for x in 1:size(cells,1)
for y in 1:size(cells,2)
c = cells[x,y]
if c.flagged
s *= string(" ", "🚩", " ")
continue
end
if c.revealed
if c.has_mine
s *= string(" ", "💣", " ")
continue
end
if c.mine_count == 0
s *= string(" ", "∘", " ")
else
s *= string(" ", c.mine_count, " ")
end
else
s *= string(" ", "■", " ")
end
end
s *= "\r\n"
end
println(s)
end
"""
game_over(game)
Terminates game if mine is detonated or all cells have been revealed or flagged.
"""
function game_over(game)
if game.mine_detonated
return true
end
return all(x->x.revealed || x.flagged, game.cells)
end
"""
compute_score!(game)
Computes hits, false alarms, misses and correct rejections
"""
function compute_score!(game)
cells = game.cells
n_mines = game.n_mines
no_mines = prod(game.dims) - n_mines
hits = count(x->x.flagged && x.has_mine, cells)
misses = n_mines - hits
false_alarms = count(x->x.flagged && !x.has_mine, cells)
cr = no_mines - false_alarms
game.score = (hits=hits,false_alarms=false_alarms,misses=misses,correct_rejections=cr)
return nothing
end
"""
select_cell!(game, x, y)
Select cell given a game and row and column indices.
"""
select_cell!(game, x, y) = select_cell!(game, CartesianIndex(x, y))
"""
select_cell!(game, x, y)
Select cell given a game and cell object.
"""
select_cell!(game, cell::Cell) = select_cell!(game, cell.idx)
"""
select_cell!(game, idx)
Select cell given a game and Cartesian index.
"""
function select_cell!(game, idx)
cell = game.cells[idx]
cell.revealed = true
reveal_zeros!(game, idx)
game.mine_detonated = cell.has_mine
return nothing
end
function flag_cell!(cell, game, gui::Nothing)
cell.flagged = true
game.mines_flagged += 1
return nothing
end
function setup()
println("Select Difficulty")
waiting = true
levels = [20,50,70]
i = 0
while waiting
d = input("1: easy 2: medium, 3: difficult")
i = parse(Int, d)
if i ∉ 1:3
println("Please select 1, 2, or 3")
else
waiting = false
end
end
return Game(;dims=(15,15), n_mines=levels[i])
end
function play()
game = setup()
play(game)
end
"""
play(game)
Play Minesweeper in REPL
Run play() to use with default easy, medium and difficult settings
Pass game object to play custom game
Enter exit to quit
Enter row_index column_index to select cell (e.g. 1 2)
"""
function play(game)
playing = true
coords = fill(0, 2)
println(game)
while playing
str = input("select coordinates")
str == "exit" ? (return) : nothing
try
coords = parse.(Int, split(str, " "))
catch
error("Please give x and y coordinates e.g. 3 2")
end
select_cell!(game, coords...)
if game.mine_detonated
reveal(game)
println("Game Over")
println(" ")
d = input("Press y to play again")
d == "y" ? (game = setup()) : (playing = false)
else
println(game)
end
end
end
function input(prompt)
println(prompt)
return readline()
end
"""
update!(game, gui::Nothing)
`update!` is used to display game state during simulations.
`update!` can be used with REPL or Gtk GUI if imported via import_gui()
"""
function update!(game, gui::Nothing)
println(game)
end
"""
imports Gtk related code for the GUI
"""
function import_gui()
path = @__DIR__
include(path*"/Gtk_Gui.jl")
end
| [
27,
7856,
261,
480,
29,
896,
7568,
680,
14,
44,
1127,
732,
5723,
13,
20362,
27,
34345,
29,
10677,
14,
8777,
13,
20362,
198,
37811,
198,
220,
220,
220,
12440,
7,
26,
10134,
62,
3810,
28,
9562,
11,
34060,
28,
9562,
11,
4602,
28,
9... | 2.217022 | 3,889 |
<filename>bin/run_equilibration.jl
#!/usr/bin/env julia
using DelimitedFiles
using WaterMeanMeanForce
using ArgParse
s = ArgParseSettings()
s.autofix_names = true
models = join(list_models(), ", ")
integrators = join(list_integrators(), ", ")
@add_arg_table! s begin
"--model"
metavar = "M"
range_tester = in(list_models())
help = "water model ($(models))"
required = true
"--integrator"
metavar = "I"
range_tester = in(list_integrators())
help = "molecular dynamics integrator ($(integrators))"
required = true
"-T"
metavar = "T"
help = "temperature (K)"
arg_type = Float64
required = true
"-R"
metavar = "R"
help = "constraint/restraint distance (nm)"
arg_type = Float64
required = true
"--num-links"
metavar = "P"
help = "number of Trotter links"
arg_type = Int
required = true
"--time-step"
metavar = "T"
help = "time step (ps)"
arg_type = Float64
required = true
"--centroid-friction"
metavar = "G"
help = "centroid friction (1 / ps)"
arg_type = Float64
required = true
"--equil-duration"
metavar = "D"
help = "duration of equilibration (ps)"
arg_type = Float64
required = true
"--umbrella-k"
metavar = "K"
help = "umbrella sampling force constant (kJ / nm^2 mol)"
arg_type = Float64
"--initial-configuration"
metavar = "FILE"
help = "path to initial configuration input file"
"--final-configuration"
metavar = "FILE"
help = "path to final configuration output file"
end
c = parse_args(ARGS, s, as_symbols=true)
if !isnothing(c[:initial_configuration])
qs_init = dropdims(readdlm(c[:initial_configuration]); dims=2)
else
qs_init = nothing
end
mdp_equil = MolecularDynamicsParameters(c[:centroid_friction], c[:time_step],
c[:equil_duration])
if isnothing(c[:umbrella_k])
s = run_constrained_equilibration(get_model(c[:model]),
get_integrator(c[:integrator]), 2, c[:T],
c[:R], c[:num_links], mdp_equil; qs_init)
else
s = run_restrained_equilibration(get_model(c[:model]),
get_integrator(c[:integrator]), 2, c[:T],
c[:R], c[:num_links], mdp_equil,
c[:umbrella_k]; qs_init)
end
if !isnothing(c[:final_configuration])
writedlm(c[:final_configuration], s.qs)
end
| [
27,
34345,
29,
8800,
14,
5143,
62,
4853,
22282,
1358,
13,
20362,
198,
2,
48443,
14629,
14,
8800,
14,
24330,
474,
43640,
198,
198,
3500,
4216,
320,
863,
25876,
198,
198,
3500,
5638,
5308,
272,
5308,
272,
10292,
198,
198,
3500,
20559,
... | 1.981495 | 1,351 |
<reponame>langfzac/Limbdark.jl<filename>src/deprecated/integrate_transit_cubature.jl
# This is code for computing a transit model and derivatives integrated over
# a time step, giving the fluence in units of time (since
# flux is normalized to unity).
using Cubature
include("transit_poly_struct.jl")
# Now the version with derivatives:
function integrate_timestep_gradient!(param::Array{T,1},trans::Transit_Struct{T},t1::T,t2::T,tol::T,maxdepth::Int64) where {T <: Real}
function fill_flux!(tmid::T,fmid0::T,trans_mid::Transit_Struct{T}) where {T <: Real}
fmid = Array{T}(6+trans_mid.n)
fmid[1] = fmid0 # flux
fmid[2] = trans_mid.dfdrb[1] # r derivative
fmid[3] = trans_mid.dfdrb[2]/trans_mid.b*param[2]^2*(param[1]-tmid) # t0 derivative
fmid[4] = trans_mid.dfdrb[2]*param[2]/trans_mid.b*(tmid-param[1])^2 # v derivative
fmid[5] = trans_mid.dfdrb[2]*param[3]/trans_mid.b # b0 derivative
@inbounds for i=1:trans_mid.n+1
fmid[5+i] = trans_mid.dfdg[i] # d_i derivatives
end
return fmid
end
function solver(time::T) where {T <: Real}
# This predicts the impact parameter as a function of time in
# terms of the transit_struct.
# For now, transit is approximated as a straight line, where params are given as:
# param[1] = t0 (mid-transit time; units of days/JD)
# param[2] = v/R_* (sky velocity in terms of radius of the star - units are day^{-1}
# param[3] = b (impact parameter at time t0)
return sqrt(param[3]^2+(param[2]*(time-param[1]))^2)
end
# Function to define the vector integration in cubature:
function transit_flux_derivative(tmid::T,fmid::Array{T,1}) where {T <: Real}
trans.b = solver(tmid)
fmid0 = transit_poly_d!(trans)
fmid .= fill_flux!(tmid,fmid0,trans)
# println("b: ",trans.b," f/df: ",fmid)
return
end
fint,ferr = hquadrature(trans.n+6,transit_flux_derivative,t1,t2,abstol=tol)
return fint
end
| [
27,
7856,
261,
480,
29,
17204,
69,
49897,
14,
43,
14107,
21953,
13,
20362,
27,
34345,
29,
10677,
14,
10378,
31023,
14,
18908,
4873,
62,
7645,
270,
62,
66,
549,
1300,
13,
20362,
198,
2,
770,
318,
2438,
329,
14492,
257,
11168,
2746,
... | 2.473203 | 765 |
<gh_stars>1-10
function update_gam!(i::Int, n::Int, j::Int, s::State, c::Constants, d::Data)
k = s.lam[i][n]
if k > 0
z = s.Z[j, k]
logpriorVec = log.(s.eta[z][i, j, :])
loglikeVec = logpdf.(Normal.(mus(z, s, c, d), sqrt(s.sig2[i])), s.y_imputed[i][n, j])
logPostVec = logpriorVec .+ loglikeVec
s.gam[i][n, j] = MCMC.wsample_logprob(logPostVec)
else
s.gam[i][n, j] = 0
end
end
function update_gam!(s::State, c::Constants, d::Data)
for i in 1:d.I
for j in 1:d.J
for n in 1:d.N[i]
update_gam!(i, n, j, s, c, d)
end
end
end
end
| [
27,
456,
62,
30783,
29,
16,
12,
940,
198,
8818,
4296,
62,
28483,
0,
7,
72,
3712,
5317,
11,
299,
3712,
5317,
11,
474,
3712,
5317,
11,
264,
3712,
9012,
11,
269,
3712,
34184,
1187,
11,
288,
3712,
6601,
8,
198,
220,
479,
796,
264,
... | 1.835404 | 322 |
using Documenter, Jets, LinearAlgebra, Random
makedocs(
sitename="Jets.jl",
modules=[Jets],
pages = [ "index.md", "reference.md" ]
)
deploydocs(
repo = "github.com/ChevronETC/Jets.jl.git"
)
| [
3500,
16854,
263,
11,
14728,
11,
44800,
2348,
29230,
11,
14534,
198,
198,
76,
4335,
420,
82,
7,
198,
220,
220,
220,
1650,
12453,
2625,
41,
1039,
13,
20362,
1600,
198,
220,
220,
220,
13103,
41888,
41,
1039,
4357,
198,
220,
220,
220,
... | 2.26087 | 92 |
module test_stdlib_statistics
using Statistics, Test
X = rand(5)
@test cor(X, 2X) == 1.0
@test cov(X, 2X) - cov(X, X) == var(X)
end # module test_stdlib_statistics
| [
21412,
1332,
62,
19282,
8019,
62,
14269,
3969,
198,
198,
3500,
14370,
11,
6208,
198,
198,
55,
796,
43720,
7,
20,
8,
198,
198,
31,
9288,
1162,
7,
55,
11,
362,
55,
8,
6624,
352,
13,
15,
198,
31,
9288,
39849,
7,
55,
11,
362,
55,
... | 2.333333 | 72 |
# Dispatchable and non-dispatchable generators
## Expressions
"Curtailed power of a non-dispatchable generator as the difference between its reference power and the generated power."
function expression_gen_curtailment(pm::_PM.AbstractPowerModel; nw::Int=_PM.nw_id_default, report::Bool=true)
pgcurt = _PM.var(pm, nw)[:pgcurt] = Dict{Int,Any}(
i => ndgen["pmax"] - _PM.var(pm,nw,:pg,i) for (i,ndgen) in _PM.ref(pm,nw,:ndgen)
)
if report
_IM.sol_component_fixed(pm, _PM.pm_it_sym, nw, :gen, :pgcurt, _PM.ids(pm, nw, :dgen), 0.0)
_PM.sol_component_value(pm, nw, :gen, :pgcurt, _PM.ids(pm, nw, :ndgen), pgcurt)
end
end
| [
2,
35934,
540,
290,
1729,
12,
6381,
17147,
540,
27298,
628,
198,
2235,
10604,
507,
198,
198,
1,
34,
3325,
6255,
1176,
286,
257,
1729,
12,
6381,
17147,
540,
17301,
355,
262,
3580,
1022,
663,
4941,
1176,
290,
262,
7560,
1176,
526,
198... | 2.256849 | 292 |
# ---
# title: 1545. Find Kth Bit in Nth Binary String
# id: problem1545
# author: <NAME>
# date: 2020-10-31
# difficulty: Medium
# categories: String
# link: <https://leetcode.com/problems/find-kth-bit-in-nth-binary-string/description/>
# hidden: true
# ---
#
# Given two positive integers `n` and `k`, the binary string `Sn` is formed as
# follows:
#
# * `S1 = "0"`
# * `Si = Si-1 + "1" + reverse(invert(Si-1))` for `i > 1`
#
# Where `+` denotes the concatenation operation, `reverse(x)` returns the
# reversed string x, and `invert(x)` inverts all the bits in x (0 changes to 1
# and 1 changes to 0).
#
# For example, the first 4 strings in the above sequence are:
#
# * `S1 = "0"`
# * `S2 = "0 **1** 1"`
# * `S3 = "011 **1** 001"`
# * `S4 = "0111001 **1** 0110001"`
#
# Return _the_ `kth` _bit_ _in_ `Sn`. It is guaranteed that `k` is valid for
# the given `n`.
#
#
#
# **Example 1:**
#
#
#
# Input: n = 3, k = 1
# Output: "0"
# Explanation: S3 is " ** _0_** 111001". The first bit is "0".
#
#
# **Example 2:**
#
#
#
# Input: n = 4, k = 11
# Output: "1"
# Explanation: S4 is "0111001101 ** _1_** 0001". The 11th bit is "1".
#
#
# **Example 3:**
#
#
#
# Input: n = 1, k = 1
# Output: "0"
#
#
# **Example 4:**
#
#
#
# Input: n = 2, k = 3
# Output: "1"
#
#
#
#
# **Constraints:**
#
# * `1 <= n <= 20`
# * `1 <= k <= 2n - 1`
#
#
## @lc code=start
using LeetCode
## add your code here:
## @lc code=end
| [
2,
11420,
198,
2,
3670,
25,
1315,
2231,
13,
9938,
509,
400,
4722,
287,
399,
400,
45755,
10903,
198,
2,
4686,
25,
1917,
1314,
2231,
198,
2,
1772,
25,
1279,
20608,
29,
198,
2,
3128,
25,
12131,
12,
940,
12,
3132,
198,
2,
8722,
25,
... | 2.064257 | 747 |
<filename>src/Helpers.jl
"""copy a FEFunction on a FE Space"""
clone_fe_function(space,f)=FEFunction(space,copy(get_free_dof_values(f)))
"""
Given an arbitrary number of FEFunction arguments,
returns a tuple with their corresponding free DoF Values
"""
function Gridap.get_free_dof_values(functions...)
map(get_free_dof_values,functions)
end
function setup_mixed_spaces(model, order)
reffe_rt = ReferenceFE(raviart_thomas, Float64, order)
V = FESpace(model, reffe_rt ; conformity=:HDiv)
U = TrialFESpace(V)
reffe_lgn = ReferenceFE(lagrangian, Float64, order)
Q = FESpace(model, reffe_lgn; conformity=:L2)
P = TrialFESpace(Q)
reffe_lgn = ReferenceFE(lagrangian, Float64, order+1)
S = FESpace(model, reffe_lgn; conformity=:H1)
R = TrialFESpace(S)
R, S, U, V, P, Q
end
function setup_and_factorize_mass_matrices(dΩ,
R, S, U, V, P, Q;
mass_matrix_solver=BackslashSolver())
amm(a,b) = ∫(a⋅b)dΩ
H1MM = assemble_matrix(amm, R, S)
RTMM = assemble_matrix(amm, U, V)
L2MM = assemble_matrix(amm, P, Q)
ssH1MM = symbolic_setup(mass_matrix_solver,H1MM)
nsH1MM = numerical_setup(ssH1MM,H1MM)
ssRTMM = symbolic_setup(mass_matrix_solver,RTMM)
nsRTMM = numerical_setup(ssRTMM,RTMM)
ssL2MM = symbolic_setup(mass_matrix_solver,L2MM)
nsL2MM = numerical_setup(ssL2MM,L2MM)
H1MM, RTMM, L2MM, nsH1MM, nsRTMM, nsL2MM
end
function new_vtk_step(Ω,file,_cellfields)
n = size(_cellfields)[1]
createvtk(Ω,
file,
cellfields=_cellfields,
nsubcells=n)
end
| [
27,
34345,
29,
10677,
14,
12621,
19276,
13,
20362,
198,
37811,
30073,
257,
18630,
22203,
319,
257,
18630,
4687,
37811,
198,
21018,
62,
5036,
62,
8818,
7,
13200,
11,
69,
47505,
15112,
22203,
7,
13200,
11,
30073,
7,
1136,
62,
5787,
62,
... | 2.129458 | 757 |
<reponame>kdyrhage/GenomicAnnotations.jl
using GenomicAnnotations
using BioSequences
using Test
@testset "GenomicAnnotations" begin
@testset "GenBank parsing" begin
s = " gene 1..1"
@test GenBank.parseposition(s) == (:gene, Locus(1:1, '+'))
s = " gene complement(order(3300..4037,4047..4052))"
@test GenBank.parseposition(s) == (:gene, Locus(3300:4052, '-', true, true, UnitRange{Int}[3300:4037, 4047:4052], false))
chrs = collect(open(GenBank.Reader, "example.gbk"))
@test length(chrs) == 2
@test chrs[2].name == "plasmid1"
end
chr = collect(open(GenBank.Reader, "example.gbk"))[1]
@testset "GFF parsing" begin
open(GFF.Writer, "example.gff") do w
write(w, chr)
end
gff = collect(open(GFF.Reader, "example.gff"))[1]
@test begin
gbkbuf = IOBuffer()
gffbuf = IOBuffer()
print(gbkbuf, chr.genes[2:4])
print(gffbuf, gff.genes[2:4])
String(take!(gbkbuf)) == String(take!(gffbuf))
end
end
@testset "Extended methods" begin
@test length(chr.genes[1]) == length(sequence(chr.genes[1]))
@test length(chr.sequence) == length(sequence(chr))
end
@testset "Gene properties" begin
@test length(propertynames(chr.genes[1])) == 12
@test chr.genes[2].locus_tag == "tag01"
@test (chr.genes[2].locus_tag = "tag01") == "tag01"
@test begin
chr.genes[2].test = "New column"
chr.genes[2].test == "New column" && all(ismissing, chr.genes[[1,3,4,5,6]].test)
end
@test get(chr.genes[1], :locus_tag, "") == ""
@test get(chr.genes[2], :locus_tag, "") == "tag01"
@test get(chr.genes[2], :locustag, "") == ""
@test begin
GenomicAnnotations.pushproperty!(chr.genes[2], :db_xref, "GI:123")
chr.genes[2].db_xref == ["GI:1293614", "GI:123"]
end
@test GenomicAnnotations.vectorise(Union{Missing,Int}[1,1,1]) == [[1],[1],[1]]
end
@testset "Iteration" begin
@test length([g.locus_tag for g in chr.genes]) == 7
end
@testset "Adding/removing genes" begin
addgene!(chr, :CDS, Locus(300:390, '+'), locus_tag = "tag04")
@test chr.genes[end].locus_tag == "tag04"
delete!(chr.genes[end])
@test chr.genes[end].locus_tag == "reg01"
end
@testset "@genes" begin
@test length(union(@genes(chr, CDS), @genes(chr, !CDS))) == length(chr.genes)
@test length(intersect(@genes(chr, CDS), @genes(chr, !CDS))) == 0
@test length(union(@genes(chr, gene), @genes(chr, !gene))) == length(chr.genes)
@test length(intersect(@genes(chr, gene), @genes(chr, !gene))) == 0
@test length(@genes(chr)) == length(chr.genes)
@test @genes(chr, feature(gene) == $:CDS) == chr.genes[[2,4,6]]
@test @genes(chr, feature(gene) == $:CDS) == @genes(chr, CDS)
@test @genes(chr, iscomplement(gene)) == chr.genes[[5,6,7]]
@test @genes(chr, feature(gene) == $:CDS, !iscomplement(gene)) == chr.genes[[2,4]]
@test @genes(chr, length(gene) < 300)[1] == chr.genes[2]
@test length(@genes(chr, get(gene, :locus_tag, "") == "")) == 3
gene = chr.genes[3]
@test @genes(chr, gene == $gene)[1] == chr.genes[3]
d = Dict(:a => "tag01")
@test @genes(chr, :locus_tag == d[$:a]) == @genes(chr, :locus_tag == "tag01")
end
@testset "Broadcast" begin
# Broadcasted assignment on existing property
chr.genes[3:4].gene .= "AXL2P"
@test all(chr.genes[3:4].gene .== "AXL2P")
# Broadcasted assignment on previously missing property
chr.genes[3:4].newproperty .= true
@test all(chr.genes[3:4].newproperty .== true)
# Broadcasted assignment with @genes
@genes(chr, gene).newproperty .= false
@test all(chr.genes[[3,5]].newproperty .== false)
end
@testset "Locus" begin
loc = Locus(1:1, '.', true, true, UnitRange{Int}[], false)
@test Locus() == loc
@test locus(chr.genes[2]) < locus(chr.genes[4])
@test locus(chr.genes[2]) == locus(chr.genes[2])
@test iscomplement(chr.genes[2]) == false
@test iscomplement(chr.genes[5]) == true
end
seq = dna"atgtccatatacaacggtatctccacctcaggtttagatctcaacaacggaaccattgccgacatgagacagttaggtatcgtcgagagttacaagctaaaacgagcagtagtcagctctgcatctgaagccgctgaagttctactaagggtggataacatcatccgtgcaagaccaagaaccgccaatagacaacatatgtaa"
@test sequence(chr.genes[2]) == seq
@test length(chr.genes[2]) == length(seq)
@testset "Empty Record" begin
chr = GenBank.Record()
@test chr.name == ""
@test chr.sequence == dna""
@test chr.header == ""
@test chr.genes == Gene[]
@test names(chr.genedata) == []
end
end
| [
27,
7856,
261,
480,
29,
74,
67,
2417,
71,
496,
14,
13746,
10179,
2025,
30078,
13,
20362,
198,
3500,
5215,
10179,
2025,
30078,
198,
3500,
16024,
44015,
3007,
198,
3500,
6208,
198,
198,
31,
9288,
2617,
366,
13746,
10179,
2025,
30078,
1,... | 1.961924 | 2,495 |
"""
CHELSA{BioClim} <: RasterDataSource
Data from CHELSA, currently only the `BioClim` layer is implemented.
See: [chelsa-climate.org](https://chelsa-climate.org/)
"""
struct CHELSA{X} <: RasterDataSource end
layers(::Type{CHELSA{BioClim}}) = 1:19
"""
getraster(T::Type{CHELSA{BioClim}}, [layer::Integer]) => String
Download CHELSA BioClim data, choosing layers from: `$(layers(CHELSA{BioClim}))`.
Without a layer argument, all layers will be downloaded, and a tuple of paths is returned.
If the data is already downloaded the path will be returned.
"""
function getraster(T::Type{CHELSA{BioClim}}, layer::Integer)
_check_layer(T, layer)
path = rasterpath(T, layer)
url = rasterurl(T, layer)
return _maybe_download(url, path)
end
rastername(::Type{CHELSA{BioClim}}, layer::Integer) = "CHELSA_bio10_$(lpad(layer, 2, "0")).tif"
rasterpath(::Type{CHELSA}) = joinpath(rasterpath(), "CHELSA")
rasterpath(::Type{CHELSA{BioClim}}) = joinpath(rasterpath(CHELSA), "BioClim")
rasterpath(T::Type{CHELSA{BioClim}}, layer::Integer) = joinpath(rasterpath(T), rastername(T, layer))
# https://os.zhdk.cloud.switch.ch/envicloud/chelsa/chelsa_V1/climatologies/bio/CHELSA_prec_01_V1.2_land.tif
rasterurl(::Type{CHELSA}) = URI(scheme="https", host="os.zhdk.cloud.switch.ch", path="/envicloud/chelsa/chelsa_V1/")
rasterurl(::Type{CHELSA{BioClim}}) = joinpath(rasterurl(CHELSA), "climatologies/bio/")
rasterurl(T::Type{CHELSA{BioClim}}, layer::Integer) = joinpath(rasterurl(T), rastername(T, layer))
| [
37811,
198,
220,
220,
220,
5870,
3698,
4090,
90,
42787,
34,
2475,
92,
1279,
25,
371,
1603,
6601,
7416,
198,
198,
6601,
422,
5870,
3698,
4090,
11,
3058,
691,
262,
4600,
42787,
34,
2475,
63,
7679,
318,
9177,
13,
198,
198,
6214,
25,
... | 2.458537 | 615 |
if basename(pwd()) == "aoc"
cd("2018/10")
end
using OffsetArrays
mutable struct LightPoint
position::CartesianIndex{2}
velocity::CartesianIndex{2}
end
struct LightField
points
end
function Base.size(lightfield::LightField)
Tuple(-(reverse(extrema(getfield.(lightfield.points, :position)))...))
end
Base.length(lightfield::LightField) = prod(size(lightfield))
function Base.in(position::CartesianIndex, lightfield::LightField)
any(point.position == position for point in lightfield.points)
end
function loadpoints(filename::AbstractString)
LightField([
begin
a, b, c, d = parse.(Int, m.match for m in eachmatch(r"-?\d+", line))
LightPoint(CartesianIndex(a, b), CartesianIndex(c, d))
end
for line in eachline(filename)
])
end
function step!(lightfield::LightField; rev = false)
foreach(
if rev
point -> point.position -= point.velocity
else
point -> point.position += point.velocity
end,
lightfield.points
)
lightfield
end
function Base.show(io::IO, lightfield::LightField)
println("LightField with ", length(lightfield.points), " points:")
for col in eachcol(range(extrema(getfield.(lightfield.points, :position))...))
println(io, " ", (p in lightfield ? '#' : ' ' for p in col)...)
end
end
part1(filename::AbstractString) = part1(loadpoints(filename))
function part1(lightfield::LightField)
len = length(lightfield)
minlen = len + 1
steps = -1
while minlen > len
minlen = len
len = length(step!(lightfield))
steps += 1
end
step!(lightfield, rev = true), steps
end
| [
361,
1615,
12453,
7,
79,
16993,
28955,
6624,
366,
64,
420,
1,
198,
220,
220,
220,
22927,
7203,
7908,
14,
940,
4943,
198,
437,
198,
3500,
3242,
2617,
3163,
20477,
198,
198,
76,
18187,
2878,
4401,
12727,
198,
220,
220,
220,
2292,
3712... | 2.526158 | 669 |
<filename>docs/make.jl
using memoJuliaClassJa
using Documenter
## Adopted from docs/make.jl in Documenter.jl
# The DOCSARGS environment variable can be used to pass additional arguments to make.jl.
# This is useful on CI, if you need to change the behavior of the build slightly but you
# can not change the .travis.yml or make.jl scripts any more (e.g. for a tag build).
if haskey(ENV, "DOCSARGS")
for arg in split(ENV["DOCSARGS"])
(arg in ARGS) || push!(ARGS, arg)
end
end
makedocs(
modules=[memoJuliaClassJa],
authors="<NAME> <<EMAIL>>",
clean = false,
sitename="memoJuliaClassJa.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://hsugawa8651.github.io/memoJuliaClassJa.jl/stable/",
assets=String[],
),
linkcheck = !("skiplinks" in ARGS),
linkcheck_ignore = [
# timelessrepo.com seems to 404 on any CURL request...
"http://timelessrepo.com/json-isnt-a-javascript-subset",
# We'll ignore links that point to GitHub's edit pages, as they redirect to the
# login screen and cause a warning:
r"https://github.com/([A-Za-z0-9_.-]+)/([A-Za-z0-9_.-]+)/edit(.*)",
] ∪ (get(ENV, "GITHUB_ACTIONS", nothing) == "true" ? [
# Extra ones we ignore only on CI.
#
# It seems that CTAN blocks GitHub Actions?
"https://ctan.org/pkg/minted",
] : []),
pages= [
"Home" => "index.md",
"LICENSE.md",
"LICENSEja.md",
"ch00.md",
"ch01.md",
"ch02.md",
"ch03.md",
"ch04.md",
"ch05.md",
"ch06.md",
"ch07.md",
"ch08.md",
"ch09.md",
"ch10.md",
"ch11.md",
"ch12.md",
"ch13.md",
"porting.md"
],
# strict = !("strict=false" in ARGS),
doctest = ("doctest=only" in ARGS) ? :only : true,
)
deploydocs(
repo="github.com/hsugawa8651/memoJuliaClassJa.jl.git",
devbranch="develop",
push_preview = true,
)
| [
27,
34345,
29,
31628,
14,
15883,
13,
20362,
198,
3500,
16155,
16980,
544,
9487,
33186,
198,
3500,
16854,
263,
198,
198,
2235,
1215,
45256,
422,
34165,
14,
15883,
13,
20362,
287,
16854,
263,
13,
20362,
198,
2,
383,
37760,
50,
1503,
143... | 2.22659 | 865 |
<filename>test/solutions/test_functions.jl<gh_stars>1-10
function fx(t, x, fx)
fx .= x
end
function fq(t, q, p, fq)
fq .= q
end
function fp(t, q, p, fp)
fp .= q.^2
end
function fϕ(t, x, fϕ)
fϕ .= 0
end
function gx(t, x, λ, fx)
fx .= x
end
function gq(t, q, p, λ, fλ)
fλ .= q
end
function gp(t, q, p, λ, fλ)
fλ .= q.^2
end
"""
    gϕ(t, q, p, gϕ)

In-place algebraic constraint for the test problem: `gϕ = p - q.^2`.
"""
function gϕ(t, q, p, gϕ)
    # Fully fused broadcast: the original `p - q.^2` used an unfused `-`,
    # allocating a temporary array on every call.
    gϕ .= p .- q.^2
end
| [
27,
34345,
29,
9288,
14,
82,
14191,
14,
9288,
62,
12543,
2733,
13,
20362,
27,
456,
62,
30783,
29,
16,
12,
940,
198,
198,
8818,
277,
87,
7,
83,
11,
2124,
11,
277,
87,
8,
198,
220,
220,
220,
277,
87,
764,
28,
2124,
198,
437,
1... | 1.624 | 250 |
using CompilerOptions
# NOTE(review): `Base.Test` is the pre-0.7 name of the stdlib `Test` module;
# this script appears to target an old Julia version — confirm before renaming.
using Base.Test
# Smoke check: display the command-line options of the running julia process.
JLOptions()
| [
3500,
3082,
5329,
29046,
198,
3500,
7308,
13,
14402,
198,
198,
41,
21982,
8544,
3419,
198
] | 3.1875 | 16 |
<filename>src/datasets/ucr_datasets.jl<gh_stars>10-100
# using DelimitedFiles
# const UCR_DATASETS = """
# 50words,
# Adiac,
# ArrowHead,
# Beef,
# BeetleFly,
# BirdChicken,
# CBF,
# Car,
# ChlorineConcentration,
# CinC_ECG_torso,
# Coffee,
# Computers,
# Cricket_X,
# Cricket_Y,
# Cricket_Z,
# DiatomSizeReduction,
# DistalPhalanxOutlineAgeGroup,
# DistalPhalanxOutlineCorrect,
# DistalPhalanxTW,
# ECG200,
# ECG5000,
# ECGFiveDays,
# Earthquakes,
# ElectricDevices,
# FISH,
# FaceAll,
# FaceFour,
# FacesUCR,
# FordA,
# FordB,
# Gun_Point,
# Ham,
# HandOutlines,
# Haptics,
# Herring,
# InlineSkate,
# InsectWingbeatSound,
# ItalyPowerDemand,
# LargeKitchenAppliances,
# Lighting2,
# Lighting7,
# MALLAT,
# Meat,
# MedicalImages,
# MiddlePhalanxOutlineAgeGroup,
# MiddlePhalanxOutlineCorrect,
# MiddlePhalanxTW,
# MoteStrain,
# NonInvasiveFatalECG_Thorax1,
# NonInvasiveFatalECG_Thorax2,
# OSULeaf,
# OliveOil,
# PhalangesOutlinesCorrect,
# Phoneme,
# Plane,
# ProximalPhalanxOutlineAgeGroup,
# ProximalPhalanxOutlineCorrect,
# ProximalPhalanxTW,
# RefrigerationDevices,
# ScreenType,
# ShapeletSim,
# ShapesAll,
# SmallKitchenAppliances,
# SonyAIBORobotSurface,
# SonyAIBORobotSurfaceII,
# StarLightCurves,
# Strawberry,
# SwedishLeaf,
# Symbols,
# ToeSegmentation1,
# ToeSegmentation2,
# Trace,
# TwoLeadECG,
# Two_Patterns,
# UWaveGestureLibraryAll,
# Wine,
# WordsSynonyms,
# Worms,
# WormsTwoClass,
# synthetic_control,
# uWaveGestureLibrary_X,
# uWaveGestureLibrary_Y,
# uWaveGestureLibrary_Z,
# wafer,
# yoga
# """
#
# """
# DynamicAxisWarping.download_data([path="/DynamicAxisWarping/data/"])
#
# Downloads the UC Riverside Time Series Classification Archive to
# the specified path.
# """
# function download_ucr()
#
# UCR_URL = "http://www.cs.ucr.edu/~eamonn/time_series_data/UCR_TS_Archive_2015.zip"
#
# # the password is "<PASSWORD>"
# @info(
# """
# A simple password is required to extract the UCR Time Series Classification. While the
# data is downloading, please read the following paper
#
# <NAME>, <NAME>, <NAME>: Time Series Classification under More Realistic
# Assumptions. SDM 2013: 578-586.
# http://dx.doi.org/10.1137/1.9781611972832.64
#
# Find the following sentence (it's near the end of the first page):
#
# “Every item that we ******* ## @@@@@@@ belongs to exactly one of our well defined classes”
#
# Enter the three redacted words (without spaces) to extract the data. The purpose of this
# exercise is to encourage you to read the associated paper, see:
#
# http://www.cs.ucr.edu/~eamonn/time_series_data/
#
# """
# )
#
# download(UCR_URL, "$DATAPATH/UCR.zip")
#
# # should unzip the file correctly in platform-specific manner
# run(unpack_cmd("UCR", DATAPATH , ".zip", ""))
#
# # delete the zip file when we're done
# rm("$DATAPATH/UCR.zip")
#
# @info("Download and extraction successful!")
#
# end
#
# """
# data,labels = DynamicAxisWarping.traindata(name)
#
# Loads the training set of the specified dataset. Returns a matrix `data` where each column
# holds a 1-dimensional time series. The class label for each column is held `labels`
# which is a vector of length `size(data,2)`.
#
# Available datasets:
#
# $UCR_DATASETS
# """
# function ucr_traindata(name::AbstractString)
# try
# Y = readdlm(DATAPATH*"/UCR_TS_Archive_2015/"*name*"/"*name*"_TRAIN", ',')
# labels = round(Int,vec(Y[:,1]))
# data = transpose(Y[:,2:end])
# return data,labels
# catch err
# showerror(stdout, err, backtrace());println()
# @info("You may have recieved this error because you haven't downloaded the database yet.")
# @info("Try running download_ucr() first.")
# @info("You may also have mispelled the name of the dataset.")
# end
# end
#
# """
# data,labels = DynamicAxisWarping.traindata(name)
#
# Loads the test set of the specified dataset. Returns a matrix `data` where each column
# holds a 1-dimensional time series. The class label for each column is held `labels`
# which is a vector of length `size(data,2)`.
#
# Available datasets:
#
# $UCR_DATASETS
# """
# function ucr_testdata(name::AbstractString)
# try
# Y = return readcsv(DATAPATH*"/UCR_TS_Archive_2015/"*name*"/"*name*"_TEST")
# labels = round(Int,vec(Y[:,1]))
# data = transpose(Y[:,2:end])
# return data,labels
# catch err
# showerror(stdout, err, backtrace());println()
# @info("You may have recieved this error because you haven't downloaded the database yet.")
# @info("Try running download_ucr() first.")
# @info("You may also have mispelled the name of the dataset.")
# end
# end
| [
27,
34345,
29,
10677,
14,
19608,
292,
1039,
14,
1229,
81,
62,
19608,
292,
1039,
13,
20362,
27,
456,
62,
30783,
29,
940,
12,
3064,
198,
2,
1262,
4216,
320,
863,
25876,
198,
2,
1500,
471,
9419,
62,
35,
1404,
1921,
32716,
796,
37227,... | 2.472567 | 1,932 |
<reponame>mateuszbaran/FunManifolds.jl<gh_stars>1-10
"""
CurveWarpingSRSFSpace(knots, quadweights = get_quad_weights(knots))
Space of SRSFs of curve warpings on knots `knots`. Points are represented by vectors
in ambient space (ℝ^(N+1) if there are `N` knots). Given quadrature weights are used
to compute inner products.
"""
struct CurveWarpingSRSFSpace{TK<:AbstractVector,TW<:AbstractVector} <: AbstractManifold{ℝ}
    knots::TK # grid points on which warpings are sampled
    quadweights::TW # quadrature weights matching `knots`, used by `inner_ambient`
end
# Convenience constructor: derive the quadrature weights from the knots.
function CurveWarpingSRSFSpace(knots::AbstractVector)
    return CurveWarpingSRSFSpace(knots, get_quad_weights(knots))
end
# The manifold dimension equals the number of knots.
manifold_dimension(M::CurveWarpingSRSFSpace) = length(M.knots)
# Points are stored as ambient vectors with one more entry than there are knots.
representation_size(M::CurveWarpingSRSFSpace) = (length(M.knots) + 1,)
# Two points are approximately equal when their geodesic distance is ≈ 0.
function isapprox(M::CurveWarpingSRSFSpace, p, q; kwargs...)
    return isapprox(distance(M, p, q), 0; kwargs...)
end
# Tangent vectors are compared directly in their ambient representation;
# the base point `p` does not enter the comparison.
function isapprox(M::CurveWarpingSRSFSpace, p, X, Y; kwargs...)
    return isapprox(X, Y; kwargs...)
end
"""
inner_ambient(M::CurveWarpingSRSFSpace, p, q)
Inner product in ambient space of points `p` and `q` on `M`. Takes into account quadrature
weights.
"""
function inner_ambient(M::CurveWarpingSRSFSpace, p, q)
    # Quadrature-weighted dot product; the broadcast fuses into a single pass.
    return sum(M.quadweights .* p .* q)
end
# Geodesic distance on the (weighted) unit sphere of SRSFs, computed with the
# numerically stable two-argument atan form instead of acos of the inner product.
function distance(M::CurveWarpingSRSFSpace, p, q)
    diff = p - q
    tot = p + q
    return 2 * atan(sqrt(inner_ambient(M, diff, diff)), sqrt(inner_ambient(M, tot, tot)))
end
# Embedding into ambient space is the identity; `copyto!` returns `q`.
embed!(::CurveWarpingSRSFSpace, q, p) = copyto!(q, p)
# Tangent vectors embed unchanged; `copyto!` returns `Y`.
embed!(M::CurveWarpingSRSFSpace, Y, p, X) = copyto!(Y, X)
# Project an ambient vector onto the manifold by normalising it to unit norm
# with respect to the quadrature-weighted inner product.
function project!(M::CurveWarpingSRSFSpace, q, p)
    q .= p ./ sqrt(inner_ambient(M, p, p))
    return q
end
# Project an ambient vector onto the tangent space at `p` by removing its
# component along `p`.
function project!(M::CurveWarpingSRSFSpace, Y, p, X)
    coeff = inner_ambient(M, p, X)
    Y .= X .- coeff .* p
    return Y
end
"""
    get_quad_weights(nodes::AbstractRange)

Trapezoidal-rule quadrature weights for equispaced `nodes`:
`dt/2 .* [1, 2, 2, …, 2, 1]` with `dt = 1/(n-1)` for `n = length(nodes)`.

Note: the spacing is derived from `length(nodes)` only, i.e. the nodes are
assumed to cover the unit interval; `step(nodes)` itself is not used.
"""
function get_quad_weights(nodes::AbstractRange)
    n = length(nodes)
    dt = 1 / (n - 1)
    # Trapezoidal rule: endpoints weighted dt/2, interior points dt.
    # (Simpson-type variants were tried historically but were less precise here.)
    return dt / 2 .* [1; fill(2, n - 2); 1]
end
# Geodesic exponential on the (weighted) unit sphere of SRSFs:
# q = cos(θ)·p + (sin(θ)/θ)·X with θ = ‖X‖ at p.
function exp!(M::CurveWarpingSRSFSpace, q, p, X)
    θ = norm(M, p, X)
    # Manifolds.usinc handles the θ → 0 limit, so X = 0 yields q = p.
    q .= cos(θ) .* p .+ Manifolds.usinc(θ) .* X
    return q
end
# Geodesic logarithm on the (weighted) unit sphere of SRSFs.
function log!(M::CurveWarpingSRSFSpace, X, p, q)
    # Clamp guards acos against round-off pushing the inner product outside [-1, 1].
    cosθ = clamp(inner_ambient(M, p, q), -1, 1)
    θ = acos(cosθ)
    X .= (q .- cosθ .* p) ./ Manifolds.usinc(θ)
    # Re-project onto the tangent space at p to remove numerical drift.
    return project!(M, X, p, X)
end
# The Riemannian metric coincides with the quadrature-weighted ambient
# inner product; the base point `p` is not needed.
inner(M::CurveWarpingSRSFSpace, p, X, Y) = inner_ambient(M, X, Y)
# Parallel transport of X from the tangent space at p to the one at q along
# the minimizing geodesic, using a closed-form sphere formula via p + q.
function vector_transport_to!(M::CurveWarpingSRSFSpace, Y, p, X, q, ::ParallelTransport)
    copyto!(Y, X)
    X_pq = log(M, p, q)
    X1 = norm(M, p, X_pq)
    # If p and q coincide (zero log), transport is the identity.
    if X1 > 0
        sum_pq = p + q
        factor = 2 * inner_ambient(M, X, q) / inner_ambient(M, sum_pq, sum_pq)
        Y .-= factor .* sum_pq
    end
    return Y
end
# The zero tangent vector is the zero function at every knot; `fill!` returns `X`.
zero_vector!(M::CurveWarpingSRSFSpace, X, p) = fill!(X, 0)
const CurveWarpingSRSFGroup{TCWS<:CurveWarpingSRSFSpace} =
GroupManifold{ℝ,TCWS,WarpingCompositionOperation}
function CurveWarpingSRSFGroup(cws::CurveWarpingSRSFSpace)
return GroupManifold(cws, WarpingCompositionOperation())
end
function identity_element(cwg::CurveWarpingSRSFGroup)
return ones(length(cwg.manifold.knots))
end
function identity_element(cwg::CurveWarpingSRSFGroup, a)
return ones(eltype(a), length(cwg.manifold.knots))
end
function inv(cwg::CurveWarpingSRSFGroup, p)
cws = CurveWarpingSpace(cwg.manifold.knots)
pinv = inv(CurveWarpingGroup(cws), reverse_srsf(cws, p))
return srsf(cws, pinv)
end
inv(::CurveWarpingSRSFGroup, p::Identity{WarpingCompositionOperation}) = p
function compose(cwg::CurveWarpingSRSFGroup, p1, p2)
knots = cwg.manifold.knots
cws = CurveWarpingSpace(knots)
p1inv = reverse_srsf(cws, p1)
p2w = make_warping(cws, p2)
p2warped = map(t -> p2w(p1inv(t)), knots)
return p2warped .* p1
end
function compose(cwg::CurveWarpingSRSFGroup, p1::Identity{WarpingCompositionOperation}, p2)
return p2
end
function compose(cwg::CurveWarpingSRSFGroup, p1, p2::Identity{WarpingCompositionOperation})
return p1
end
function compose(
cwg::CurveWarpingSRSFGroup,
p1::Identity{WarpingCompositionOperation},
p2::Identity{WarpingCompositionOperation},
)
return p1
end
"""
CurveWarpingSRSFAction(M::AbstractManifold, p, cwg::CurveWarpingSRSFGroup)
Space of left actions of the group of SRSFs of curve warpings `cwg` on the manifold `M`
of TSRVFs of curves at point `p`.
"""
struct CurveWarpingSRSFAction{TM<:AbstractManifold,TP,TCWG<:CurveWarpingSRSFGroup} <:
AbstractGroupAction{LeftAction}
manifold::TM
point::TP
cwg::TCWG
end
function Manifolds.g_manifold(A::CurveWarpingSRSFAction)
return A.manifold
end
function Manifolds.base_group(A::CurveWarpingSRSFAction)
return A.cwg
end
function apply!(A::CurveWarpingSRSFAction{<:DiscretizedCurves}, q, a, p)
itp = make_interpolant(A.manifold, p)
a_rev = reverse_srsf(CurveWarpingSpace(A.cwg.manifold.knots), a)
ts = map(a_rev, A.cwg.manifold.knots)
rep_size = representation_size(A.manifold.manifold)
for (i, t) in zip(get_iterator(A.manifold), ts)
copyto!(_write(A.manifold, rep_size, q, i), itp(t) * a[i])
end
return q
end
function apply!(
A::CurveWarpingSRSFAction{<:DiscretizedCurves},
q,
::Identity{WarpingCompositionOperation},
p,
)
copyto!(q, p)
return q
end
function Manifolds.inverse_apply(A::CurveWarpingSRSFAction, a, p)
inva = inv(base_group(A), a)
return apply(A, inva, p)
end
function optimal_alignment(A::CurveWarpingSRSFAction, p, q)
M = A.manifold
return pairwise_optimal_warping(M, M, p, q, A.point)[1]
end
"""
karcher_mean_amplitude(A::CurveWarpingSRSFAction, ps::Vector)
Calculate the Karcher mean of amplitudes of given functions `ps` in SRVF form under
the action `A`.
Roughly follows Algorithm 2 from https://arxiv.org/abs/1103.3817.
"""
function karcher_mean_amplitude(
A::CurveWarpingSRSFAction,
ps::Vector;
throw_on_divergence=false,
progress_update=(x...) -> nothing,
max_iter=100,
)
M = A.manifold
N = length(ps)
cur_ps = ps
cur_increment = 42.0
prev_increment = 42.0
num_iterations = 0
meanp = project(M, (1.0 / N) * (sum(embed(M, p) for p in cur_ps)))
# find intial candidate for the mean
min_i = 1
norm_min_i = Inf
for i in 1:N
cur_norm = distance(M, meanp, cur_ps[i])
if cur_norm < norm_min_i
min_i = i
norm_min_i = cur_norm
end
end
curμp = cur_ps[min_i]
initial_increment = cur_increment
while cur_increment > 1e-6 && num_iterations < max_iter
warpings = [pairwise_optimal_warping(M, M, curμp, p, A.point) for p in cur_ps]
aligned_ps = [apply(A, warpings[i][1], cur_ps[i]) for i in 1:N]
mean_aligned_ps = project(M, (1.0 / N) * (sum(embed(M, p) for p in aligned_ps)))
prev_increment = cur_increment
cur_increment = distance(M, mean_aligned_ps, curμp)
progress_update(cur_increment, num_iterations, max_iter)
if num_iterations == 0
initial_increment = cur_increment
elseif cur_increment > 1.5 * prev_increment
if throw_on_divergence
error("Divergent mean amplitude calculation.")
else
break
end
end
cur_ps = aligned_ps
curμp = mean_aligned_ps
num_iterations += 1
end
progress_update(:finish)
return curμp
end
"""
phase_amplitude_separation(A::CurveWarpingSRSFAction, ps::Vector)
Perform phase-amplitude separation of given functions in SRVF form.
Returns mean amplitude, phases and amplitudes.
Implements alignment from Section 3.4 of https://arxiv.org/abs/1103.3817.
"""
function phase_amplitude_separation(A::CurveWarpingSRSFAction, ps::Vector)
μp = karcher_mean_amplitude(A, ps)
# TODO: use other mean functions?
a = center_of_orbit(A, ps, μp)
M = A.manifold
p̃ = apply(A, a, μp)
γs = [pairwise_optimal_warping(M, M, p̃, p, A.point)[1] for p in ps]
aligned_ps = [apply(A, γs[i], ps[i]) for i in 1:length(ps)]
return (p̃, γs, aligned_ps)
end
| [
27,
7856,
261,
480,
29,
9830,
385,
89,
5657,
272,
14,
24629,
5124,
361,
10119,
13,
20362,
27,
456,
62,
30783,
29,
16,
12,
940,
198,
37811,
198,
220,
220,
220,
46300,
54,
5117,
278,
50,
6998,
37,
14106,
7,
74,
1662,
82,
11,
15094... | 2.167429 | 3,936 |
<gh_stars>1-10
module CanvasWebIO
using WebIO, JSExpr, Observables
export Canvas, addmovable!, addclickable!, addstatic!
# Interactive SVG canvas backed by a WebIO scope. Objects are identified by
# DOM ids, and their positions are exposed to julia as Observables.
mutable struct Canvas
    w::WebIO.Scope # underlying WebIO scope that owns the <svg> DOM node
    size::Array{Int64, 1} # canvas size in pixels, [height, width]
    objects::Array{WebIO.Node, 1} # svg children rendered inside the canvas
    getter::Dict # object id (String) => position Observable
    id::String # DOM id of the <svg> element
    handler::Observables.Observable # receives [id, x, y] updates from the JS side
    selection::Observables.Observable # id of the currently selected object
    synced::Bool # synced=true => julia listeners called on mousemove, not just drop
end
function Canvas(size::Array{Int64,1}, synced=false)
w = Scope(imports=["/pkg/CanvasWebIO/helpers.js"])
handler = Observable(w, "handler", ["id", 0, 0])
selection = Observable(w, "selection", "id")
getter = Dict()
id = WebIO.newid("canvas")
on(selection) do val
val
end
on(handler) do val
selection[] = val[1]
if val[1] in keys(getter)
getter[val[1]][] = Int.(floor.(val[2:3]))
else
println("Failed to assign value $(val[2:3]) to $(val[1])")
end
end
Canvas(w, size, Array{WebIO.Node,1}(), getter, id, handler, selection, synced)
end
# Default canvas: 800×800 pixels, unsynced.
Canvas() = Canvas([800, 800])
# 800×800 canvas with an explicit sync flag.
Canvas(synced::Bool) = Canvas([800, 800], synced)
# `canvas[id]` returns the position Observable registered for object `id`.
Base.getindex(canvas::Canvas, i) = canvas.getter[i]
function (canvas::Canvas)()
# js function setp sets the position of the object named name to the position of the mouse
# returns the [draggable, xpos, ypos] where draggable is whether the object was movable,
# and xpos,ypos the new position of the object
#
# Transform parser from https://stackoverflow.com/a/17838403
canvas_events = Dict()
handler = canvas.handler
synced = canvas.synced
canvas_events["mousemove"] = @js function(event)
event.preventDefault()
event.stopPropagation()
@var name = document.getElementById($(canvas.id)).getAttribute("data-selected")
@var pos
if name!=""
pos = setp(event, name)
if(pos[0]) #is dragged
document.getElementById($(canvas.id)).setAttribute("is-dragged", true)
if($synced)
$handler[] = [name, Math.round(pos[1]), Math.round(pos[2])]
end
end
end
end
canvas_events["mouseup"] = @js function(event)
event.preventDefault()
event.stopPropagation()
console.log("canvas click")
@var name = document.getElementById($(canvas.id)).getAttribute("data-selected")
@var pos
if name!=""
pos = setp(event, name)
if document.getElementById($(canvas.id)).getAttribute("is-dragged")=="true"
$handler[] = [name, parseFloat(pos[1]), parseFloat(pos[2])]
document.getElementById(name).style.stroke = "none"
document.getElementById($(canvas.id)).setAttribute("data-selected", "")
document.getElementById($(canvas.id)).setAttribute("is-dragged", false)
end
end
end
canvas.w(dom"svg:svg[id = $(canvas.id),
height = $(canvas.size[1]),
width = $(canvas.size[2])]"(
canvas.objects...,
attributes = Dict("data-selected" => "",
"is-dragged" => false),
events = canvas_events))
end
"""
addclickable!(canvas::Canvas, svg::WebIO.Node)
Adds a clickable (as in, can be clicked but not moved) object to the canvas based on the svg template. If the template has an id, this will be given to the canvas object, and the object will be associated with the id as a string (canvas[id] accesses the associated observable etc). If the template has no id, one will be generated. Note that the stroke property will be overwritten.
"""
function addclickable!(canvas::Canvas, svg::WebIO.Node)
attr = svg.props[:attributes]
children = svg.children
if "id" in keys(attr)
id = attr["id"]
else
id = WebIO.newid("svg")
end
selection = canvas.selection
clickable_events = Dict()
clickable_events["click"] = @js function(event)
name = document.getElementById($(canvas.id)).getAttribute("data-selected")
#selected_obj
if name == this.id
this.style.stroke = "none"
document.getElementById($(canvas.id)).setAttribute("data-selected", "")
else
if name != ""
selected_obj = document.getElementById(name)
selected_obj.style.stroke = "none"
end
this.style.stroke = "green" #Change this later
this.style.strokeWidth = 2 #Change this later
document.getElementById($(canvas.id)).setAttribute("data-selected", this.id)
$selection[] = this.id
end
end
push!(canvas.objects,
Node(svg.instanceof, children..., attributes=attr, events=clickable_events))
end
"""
addmovable!(canvas::Canvas, svg::WebIO.Node, lock=" ")
Adds a movable object to the canvas based on the svg template. If the template has an id, this will be given to the canvas object, and the object will be associated with the id as a string (canvas[id] accesses the associated observable etc). If the template has no id, one will be generated. Note that the stroke property will be overwritten.
The optional lock argument allows locking of an axis. Setting lock="x" will lock the movable's x value, so it can only be moved up and down. Similarly, lock="y" will only permit movements to the left and right.
"""
function addmovable!(canvas::Canvas, svg::WebIO.Node, lock=" ")
attr = svg.props[:attributes]
if svg.instanceof.tag!=:g
newattr = Dict()
if "id" in keys(attr)
newattr["id"] = attr["id"]
else
newattr["id"] = WebIO.newid("svg")
end
attr["id"] = WebIO.newid("subsvg")
if haskey(attr, "x") && haskey(attr, "y") #Rectangle etc
newattr["transform"] = "translate($(attr["x"]),$(attr["y"]))"
attr["x"] = "$(-parse(attr["width"])/2)"
attr["y"] = "$(-parse(attr["height"])/2)"
elseif haskey(attr, "cx") && haskey(attr, "cy") #Circle
newattr["transform"] = "translate($(attr["cx"]),$(attr["cy"]))"
attr["cx"] = "0.0"
attr["cy"] = "0.0"
else
newattr["transform"] = "translate($(attr["cx"]),$(attr["cy"]))" #undefined object
end
return addmovable!(canvas, dom"svg:g"(svg, attributes=newattr), lock)
end
if :style in keys(svg.props)
style = svg.props[:style]
else
style = Dict()
end
children = svg.children
if "id" in keys(attr)
id = attr["id"]
else
id = WebIO.newid("svg")
attr["id"] = id
end
attr["data-lock"] = lock
if svg.instanceof.tag==:g
coo = [0.0, 0.0]
try
coo .= parse.(Float64, match(r"translate\((.*?),(.*?)\)",
attr["transform"]).captures)
catch
println("Failed to get position of $id, setting default")
end
pos = Observable(canvas.w, id, coo)
else
error("Only <g> objects allowed")
end
push!(pos.listeners, (x)->(x))
canvas.getter[id] = pos
handler = canvas.handler
attr["draggable"] = "true"
style[:cursor] = "move"
movable_events = Dict()
movable_events["mousedown"] = @js function(event)
event.stopPropagation()
event.preventDefault()
console.log("clicking", this.id)
@var name = document.getElementById($(canvas.id)).getAttribute("data-selected")
@var pos
if name == ""
this.style.stroke = "red" #Change this later
this.style.strokeWidth = 2 #Change this later
document.getElementById($(canvas.id)).setAttribute("data-selected", this.id)
else
selected_obj = document.getElementById(name)
selected_obj.style.stroke = "none"
pos = setp(event,name)
if(pos[0]) #is dragged
$handler[] = [name, pos[1], pos[2]]
end
document.getElementById($(canvas.id)).setAttribute("data-selected", "")
document.getElementById($(canvas.id)).setAttribute("is-dragged", false)
end
end
node = Node(svg.instanceof, children..., attributes=attr, style=style, events=movable_events)
push!(canvas.objects, node)
node
end
"""
addstatic!(canvas::Canvas, svg::WebIO.Node)
Adds the svg object directly to the canvas.
"""
# Append the svg node to the canvas without any interactivity.
addstatic!(canvas::Canvas, svg::WebIO.Node) = push!(canvas.objects, svg)
"""
setindex_(canvas::Canvas, pos, i::String)
Sets the position of the object i to pos on the javascript side.
"""
function setindex_(canvas::Canvas, pos, i::String)
    # JS-side-only position update; does not touch the julia-side observable.
    evaljs(canvas.w, js""" setp_nonevent($pos, $i)""")
end
# `canvas[id] = pos` moves the object on the JS side, then mirrors the new
# position into the julia-side observable.
function Base.setindex!(canvas::Canvas, val, i::String)
    setindex_(canvas, val, i)
    canvas[i][] = val
end
end
| [
27,
456,
62,
30783,
29,
16,
12,
940,
198,
21412,
1680,
11017,
13908,
9399,
198,
198,
3500,
5313,
9399,
11,
26755,
3109,
1050,
11,
19243,
2977,
198,
198,
39344,
1680,
11017,
11,
751,
76,
21985,
28265,
751,
12976,
540,
28265,
751,
12708... | 2.246895 | 4,026 |
<gh_stars>0
module Fatou
using SyntaxTree,Reduce,LaTeXStrings,Requires,Base.Threads
# This file is part of Fatou.jl. It is licensed under the MIT license
# Copyright (C) 2017 <NAME>
export fatou, juliafill, mandelbrot, newton, basin, plot
"""
Fatou.Define(E::Any; # primary map, (z, c) -> F
Q::Expr = :(abs2(z)), # escape criterion, (z, c) -> Q
C::Expr = :((angle(z)/(2π))*n^p)# coloring, (z, n=iter., p=exp.) -> C
∂ = π/2, # Array{Float64,1} # Bounds, [x(a),x(b),y(a),y(b)]
n::Integer = 176, # horizontal grid points
N::Integer = 35, # max. iterations
ϵ::Number = 4, # basin ϵ-Limit criterion
iter::Bool = false, # toggle iteration mode
p::Number = 0, # iteration color exponent
newt::Bool = false, # toggle Newton mode
m::Number = 0, # Newton multiplicity factor
O::String = F, # original Newton map
mandel::Bool= false, # toggle Mandelbrot mode
seed::Number= 0.0+0.0im, # Mandelbrot seed value
x0 = nothing, # orbit starting point
orbit::Int = 0, # orbit cobweb depth
depth::Int = 1, # depth of function composition
cmap::String= "", # imshow color map
plane::Bool = false, # convert input disk to half-plane
disk::Bool = false) # convert output half-plane to disk
`Define` the metadata for a `Fatou.FilledSet`.
"""
struct Define{FT<:Function,QT<:Function,CT<:Function,M,N,P,D}
E::Any # input expression
F::FT # primary map
Q::QT # escape criterion
C::CT # complex fixed point coloring
∂::Array{Float64,1} # bounds
n::UInt16 # number of grid points
N::UInt8 # number of iterations
ϵ::Float64 # epsilon Limit criterion
iter::Bool # toggle iteration mode
p::Float64 # iteration color exponent
newt::Bool # toggle Newton mode
m::Number # newton multiplicity factor
mandel::Bool # toggle Mandelbrot mode
seed::Number # Mandelbrot seed value
x0 # orbit starting point
orbit::Int # orbit cobweb depth
depth::Int # depth of function composition
cmap::String # imshow color map
plane::Bool # convert input disk to half-plane
disk::Bool # convert output half-plane to disk
function Define(E;
Q=:(abs2(z)),
C=:((angle(z)/(2π))*n^p),
∂=π/2,
n::Integer=176,
N::Integer=35,
ϵ::Number=4,
iter::Bool=false,
p::Number=0,
newt::Bool=false,
m::Number=0,
mandel::Bool=false,
seed::Number=0.0+0.0im,
x0=nothing,
orbit::Int=0,
depth::Int=1,
cmap::String="",
plane::Bool=false,
disk::Bool=false)
!(typeof(∂) <: Array) && (∂ = [-float(∂),∂,-∂,∂])
length(∂) == 2 && (∂ = [∂[1],∂[2],∂[1],∂[2]])
!newt ? (f = genfun(E,[:z,:c]); q = genfun(Q,[:z,:c])) :
(f = genfun(newton_raphson(E,m),[:z,:c]); q = genfun(Expr(:call,:abs,E),[:z,:c]))
c = genfun(C,[:z,:n,:p])
e = typeof(E) == String ? parse(E) : E
return new{typeof(f),typeof(q),typeof(c),mandel,newt,plane,disk}(e,f,q,c,convert(Array{Float64,1},∂),UInt16(n),UInt8(N),float(ϵ),iter,float(p),newt,m,mandel,seed,x0,orbit,depth,cmap,plane,disk)
end
end
"""
Fatou.FilledSet(::Fatou.Define)
Compute the `Fatou.FilledSet` set using `Fatou.Define`.
"""
struct FilledSet{FT,QT,CT,M,N,P,D}
meta::Define{FT,QT,CT,M,N,P,D}
set::Matrix{Complex{Float64}}
iter::Matrix{UInt8}
mix::Matrix{Float64}
function FilledSet{FT,QT,CT,M,N,P,D}(K::Define{FT,QT,CT,M,N,P,D}) where {FT,QT,CT,M,N,P,D}
(i,s) = Compute(K)
return new{FT,QT,CT,M,N,P,D}(K,s,i,broadcast(K.C,s,broadcast(float,i./K.N),K.p))
end
end
"""
fatou(::Fatou.Define)
Compute the `Fatou.FilledSet` set using `Fatou.Define`.
# Examples
```Julia
julia> fatou(K)
```
"""
fatou(K::Define{FT,QT,CT,M,N,P,D}) where {FT,QT,CT,M,N,P,D} = FilledSet{FT,QT,CT,M,N,P,D}(K)
"""
juliafill(::Expr; # primary map, (z, c) -> F
Q::Expr = :(abs2(z)), # escape criterion, (z, c) -> Q
C::Expr = :((angle(z)/(2π))*n^p)# coloring, (z, n=iter., p=exp.) -> C
∂ = π/2, # Array{Float64,1} # Bounds, [x(a),x(b),y(a),y(b)]
n::Integer = 176, # horizontal grid points
N::Integer = 35, # max. iterations
ϵ::Number = 4, # basin ϵ-Limit criterion
iter::Bool = false, # toggle iteration mode
p::Number = 0, # iteration color exponent
x0 = nothing, # orbit starting point
orbit::Int = 0, # orbit cobweb depth
depth::Int = 1, # depth of function composition
cmap::String= "", # imshow color map
plane::Bool = false, # convert input disk to half-plane
disk::Bool = false) # convert output half-plane to disk
`Define` filled Julia basin in `Fatou`
# Exmaples
```Julia
julia> juliafill("z^2-0.06+0.67im",∂=[-1.5,1.5,-1,1],N=80,n=1501,cmap="RdGy")
```
"""
function juliafill(E;
Q=:(abs2(z)),
C=:((angle(z)/(2π))*n^p),
∂=π/2,
n::Integer=176,
N::Integer=35,
ϵ::Number=4,
iter::Bool=false,
p::Number=0,
newt::Bool=false,
m::Number=0,
x0=nothing,
orbit::Int=0,
depth::Int=1,
cmap::String="",
plane::Bool=false,
disk::Bool=false)
return Define(E,Q=Q,C=C,∂=∂,n=n,N=N,ϵ=ϵ,iter=iter,p=p,newt=newt,m=m,x0=x0,orbit=orbit,depth=depth,cmap=cmap,plane=plane,disk=disk)
end
"""
mandelbrot(::Expr; # primary map, (z, c) -> F
Q::Expr = :(abs2(z)), # escape criterion, (z, c) -> Q
C::Expr = :(exp(-abs(z))*n^p), # coloring, (z, n=iter., p=exp.) -> C
∂ = π/2, # Array{Float64,1} # Bounds, [x(a),x(b),y(a),y(b)]
n::Integer = 176, # horizontal grid points
N::Integer = 35, # max. iterations
ϵ::Number = 4, # basin ϵ-Limit criterion
iter::Bool = false, # toggle iteration mode
p::Number = 0, # iteration color exponent
m::Number = 0, # Newton multiplicity factor
seed::Number= 0.0+0.0im, # Mandelbrot seed value
x0 = nothing, # orbit starting point
orbit::Int = 0, # orbit cobweb depth
depth::Int = 1, # depth of function composition
cmap::String= "", # imshow color map
plane::Bool = false, # convert input disk to half-plane
disk::Bool = false) # convert output half-plane to disk
`Define` Mandelbrot basin in `Fatou`
# Examples
```Julia
mandelbrot(:(z^2+c),n=800,N=20,∂=[-1.91,0.51,-1.21,1.21],cmap="nipy_spectral")
```
"""
function mandelbrot(E;
Q=:(abs2(z)),
∂=π/2,
C=:(exp(-abs(z))*n^p),
n::Integer=176,
N::Integer=35,
ϵ::Number=4,
iter::Bool=false,
p::Number=0,
newt::Bool=false,
m::Number=0,
seed::Number=0.0+0.0im,
x0=nothing,
orbit::Int=0,
depth::Int=1,
cmap::String="",
plane::Bool=false,
disk::Bool=false)
m ≠ 0 && (newt = true)
return Define(E,Q=Q,C=C,∂=∂,n=n,N=N,ϵ=ϵ,iter=iter,p=p,newt=newt,m=m,mandel=true,seed=seed,x0=x0,orbit=orbit,depth=depth,cmap=cmap,plane=plane,disk=disk)
end
"""
newton(::Expr; # primary map, (z, c) -> F
C::Expr = :((angle(z)/(2π))*n^p)# coloring, (z, n=iter., p=exp.) -> C
∂ = π/2, # Array{Float64,1} # Bounds, [x(a),x(b),y(a),y(b)]
n::Integer = 176, # horizontal grid points
N::Integer = 35, # max. iterations
ϵ::Number = 4, # basin ϵ-Limit criterion
iter::Bool = false, # toggle iteration mode
p::Number = 0, # iteration color exponent
m::Number = 0, # Newton multiplicity factor
mandel::Bool= false, # toggle Mandelbrot mode
seed::Number= 0.0+0.0im, # Mandelbrot seed value
x0 = nothing, # orbit starting point
orbit::Int = 0, # orbit cobweb depth
depth::Int = 1, # depth of function composition
cmap::String= "", # imshow color map
plane::Bool = false, # convert input disk to half-plane
disk::Bool = false) # convert output half-plane to disk
`Define` Newton basin in `Fatou`
# Examples
```Julia
julia> newton("z^3-1",n=800,cmap="brg")
```
"""
function newton(E;
C=:((angle(z)/(2π))*n^p),
∂=π/2,
n::Integer=176,
N::Integer=35,
ϵ::Number=0.01,
iter::Bool=false,
p::Number=0,
m::Number=1,
mandel::Bool=false,
seed::Number=0.0+0.0im,
x0=nothing,
orbit::Int=0,
depth::Int=1,
cmap::String="",
plane::Bool=false,
disk::Bool=false)
return Define(E,C=C,∂=∂,n=n,N=N,ϵ=ϵ,iter=iter,p=p,newt=true,m=m,mandel=mandel,seed=seed,x0=x0,orbit=orbit,depth=depth,cmap=cmap,plane=plane,disk=disk)
end
# load additional functionality
include("internals.jl"); include("orbitplot.jl")
"""
basin(::Fatou.Define, ::Integer)
Output the `j`-th basin of `Fatou.Define` as LaTeX.
Each subsequent iteration of the Newton-Raphson method will yield a more complicated set.
# Examples
```Julia
julia> basin(newton("z^3-1"),2)
L"\$\\displaystyle D_2(\\epsilon) = \\left\\{z\\in\\mathbb{C}:\\left|\\,z - \\frac{\\left(z - \\frac{z^{3} - 1}{3 z^{2}}\\right)^{3} - 1}{3 \\left(z - \\frac{z^{3} - 1}{3 z^{2}}\\right)^{2}} - \\frac{z^{3} - 1}{3 z^{2}} - r_i\\,\\right|<\\epsilon,\\,\\forall r_i(\\,f(r_i)=0 )\\right\\}\$"
```
"""
basin(K::Define,j) = K.newt ? nrset(K.E,K.m,j) : jset(K.E,j)
# Möbius-type transform mapping the unit disk to a half-plane.
function plane(z::Complex)
    d = z.re^2 + (1 - z.im)^2 # squared distance from z to i
    return (2z.re / d) + im * (1 - z.re^2 - z.im^2) / d
end
# Inverse Möbius-type transform mapping the half-plane back to the unit disk.
function disk(z::Complex)
    d = z.re^2 + (1 + z.im)^2 # squared distance from z to -i
    return (2z.re / d) + im * (z.re^2 + z.im^2 - 1) / d
end
# define function for computing orbit of a z0 input
# Iterate the map K.F from z0 (or from K.seed in Mandelbrot mode) until the
# test K.Q crosses K.ϵ or K.N iterations elapse. Type parameters: M = mandel
# mode, N = newton mode, P = disk→half-plane input, D = half-plane→disk output.
function orbit(K::Define{FT,QT,CT,M,N,P,D},z0::Complex{Float64}) where {FT,QT,CT,M,N,P,D}
    # Mandelbrot mode iterates from the seed; otherwise optionally remap z0.
    M ? (z = K.seed) : (z = P ? plane(z0) : z0)
    zn = 0x00
    # Newton mode inverts the test: iterate while Q > ϵ (until convergence),
    # otherwise iterate while Q < ϵ (until escape).
    while (N ? (K.Q(z,z0)::Float64>K.ϵ)::Bool : (K.Q(z,z0)::Float64<K.ϵ))::Bool && K.N>zn
        z = K.F(z,z0)::Complex{Float64}
        zn+=0x01
    end; #end
    # return the normalized argument of z or iteration count
    return (zn::UInt8,(D ? disk(z) : z)::Complex{Float64})
end
"""
Compute(::Fatou.Define)::Union{Matrix{UInt8},Matrix{Float64}}
`Compute` the `Array` for `Fatou.FilledSet` as specefied by `Fatou.Define`.
"""
function Compute(K::Define{FT,QT,CT,M,N,D})::Tuple{Matrix{UInt8},Matrix{Complex{Float64}}} where {FT,QT,CT,M,N,D}
# generate coordinate grid
Kyn = round(UInt16,(K.∂[4]-K.∂[3])/(K.∂[2]-K.∂[1])*K.n)
x = range(K.∂[1]+0.0001,stop=K.∂[2],length=K.n)
y = range(K.∂[4],stop=K.∂[3],length=Kyn)
Z = x' .+ im*y # apply Newton-Orbit function element-wise to coordinate grid
(matU,matF) = (Array{UInt8,2}(undef,Kyn,K.n),Array{Complex{Float64},2}(undef,Kyn,K.n))
@time @threads for j = 1:length(y); for k = 1:length(x);
(matU[j,k],matF[j,k]) = orbit(K,Z[j,k])::Tuple{UInt8,Complex{Float64}}
end; end
return (matU,matF)
end
function __init__()
println("Fatou detected $(Threads.nthreads()) julia threads.")
@require PyPlot="d330b81b-6aea-500a-939a-2ce795aea3ee" include("pyplot.jl")
@require ImageInTerminal="d8c32880-2388-543b-8c61-d9f865259254" include("term.jl")
@require UnicodePlots="b8865327-cd53-5732-bb35-84acbb429228" include("uniplots.jl")
end
end # module
| [
27,
456,
62,
30783,
29,
15,
198,
21412,
12301,
280,
198,
3500,
26375,
897,
27660,
11,
7738,
7234,
11,
14772,
49568,
13290,
654,
11,
39618,
11,
14881,
13,
16818,
82,
198,
198,
2,
220,
220,
770,
2393,
318,
636,
286,
12301,
280,
13,
... | 1.843708 | 6,699 |
<gh_stars>1-10
using Requests
export
PastebinClient,
PastebinResponse,
PasteKey,
NEVER, TEN_M, HOUR, DAY, WEEK, TWO_WEEKS, MONTH,
PUBLIC, UNLISTED, PRIVATE
@enum Expiration NEVER=1 TEN_M=2 HOUR=3 DAY=4 WEEK=5 TWO_WEEKS=6 MONTH=7
@enum Access PUBLIC=0 UNLISTED=1 PRIVATE=2
# Client for the pastebin.com HTTP API. `immutable` is pre-1.0 Julia syntax
# (now `struct`); this file targets an old Julia version.
immutable PastebinClient
    devKey::AbstractString # api_dev_key issued by pastebin.com
    userKey::Nullable{AbstractString} # api_user_key; null for anonymous use
    PastebinClient(devKey::AbstractString) = new(devKey, Nullable{AbstractString}())
    PastebinClient(devKey::AbstractString, userKey::AbstractString) = new(devKey, userKey)
    # function to auto generate the user key
    function PastebinClient(devKey::AbstractString, username::AbstractString, password::AbstractString)
        data = Dict(
            "api_dev_key" => devKey,
            "api_user_name" => username,
            "api_user_password" => password,
        )
        # NOTE(review): the raw HTTP response body is stored as the user key; on a
        # failed login pastebin returns an error string instead — confirm callers
        # handle that case.
        response = readall(Requests.post("http://pastebin.com/api/api_login.php", data=data))
        new(devKey, response)
    end
end
immutable PastebinResponse
successful::Bool
response::AbstractString
PastebinResponse(successful::Bool, response::AbstractString) = new(successful, response)
end
immutable PasteKey
key::AbstractString
PasteKey(key::AbstractString) = new(key)
end | [
27,
456,
62,
30783,
29,
16,
12,
940,
198,
3500,
9394,
3558,
198,
198,
39344,
198,
220,
220,
220,
29582,
11792,
11,
198,
220,
220,
220,
29582,
31077,
11,
198,
220,
220,
220,
23517,
9218,
11,
198,
220,
220,
220,
33602,
11,
309,
1677... | 2.668763 | 477 |
using Weave
using Flux
using DataFrames, CSV
using Binning
using Random:shuffle!
using BSON: @save, @load
using Plots
include("./trainModel.jl")
filename = "./evalTemplate.jl"
weave(filename; doctype="md2html", out_path=:pwd)
weave(filename; doctype="github", out_path=:pwd)
| [
3500,
775,
1015,
198,
3500,
1610,
2821,
198,
3500,
6060,
35439,
11,
44189,
198,
3500,
20828,
768,
198,
3500,
14534,
25,
1477,
18137,
0,
198,
3500,
347,
11782,
25,
2488,
21928,
11,
2488,
2220,
198,
3500,
1345,
1747,
198,
17256,
7,
1911... | 2.742574 | 101 |
<reponame>SamuelWiqvist/efficient_SDEMEM
# load packages
using CSV
using Random
"""
set_up(;μ_θ_1::Real = -0.7, τ_θ_1::Real = 1.5, μ_θ_2::Real = 1.5, τ_θ_2::Real = 1.2, μ_θ_3::Real = -0.9, τ_θ_3::Real = 1.5, σ_ϵ::Real = 0.3, dt::Real=0.1, M::Int=5, N::Int=100)
Returns ground-truth parameter values and simulated data from the OU SDEMEM model.
"""
function set_up(;μ_θ_1::Real = -0.7, τ_θ_1::Real = 4,
μ_θ_2::Real = 2.3, τ_θ_2::Real = 10,
μ_θ_3::Real = -0.9, τ_θ_3::Real = 4,
σ_ϵ::Real = 0.3, dt::Real=0.05, M::Int=5, N::Int=100,seed::Int)
# sample random effects
Random.seed!(seed)
ϕ = zeros(M,3)
for i = 1:M
ϕ[i,1] = μ_θ_1 + sqrt(1/τ_θ_1)*randn()
ϕ[i,2] = μ_θ_2 + sqrt(1/τ_θ_2)*randn()
ϕ[i,3] = μ_θ_3 + sqrt(1/τ_θ_3)*randn()
end
# model paramters
η = [μ_θ_1; μ_θ_2; μ_θ_3; τ_θ_1; τ_θ_2; τ_θ_3]
# define priors
μ_0_1 = 0
M_0_1 = 1
α_1 = 2
β_1 = 1
μ_0_2 = 1
M_0_2 = 1
α_2 = 2
β_2 = 1/2
μ_0_3 = 0
M_0_3 = 1
α_3 = 2
β_3 = 1
prior_parameters_η = [μ_0_1 M_0_1 α_1 β_1;μ_0_2 M_0_2 α_2 β_2;μ_0_3 M_0_3 α_3 β_3]
prior_parameters_σ_ϵ = [1; 1/0.4] # Gamma with scale parameter α = 1 and rate parameter β = 1/0.4
# generate data
Random.seed!(seed)
y,x,t_vec = generate_data(N, M, η, ϕ, σ_ϵ,dt)
# return: data, model parameteres, and parameteres for priors
return y,x,t_vec,dt,η,σ_ϵ,ϕ,prior_parameters_η,prior_parameters_σ_ϵ
end
"""
generate_data(N::Int, M::Int, η::Array, ϕ::Array, σ_ϵ::Real,dt::Real)
Generate data from the OU SDEMEM model.
"""
function generate_data(N::Int, M::Int, η::Array, ϕ::Array, σ_ϵ::Real,dt::Real)
x_0 = zeros(M) # pre-allocate matriceis
x = zeros(M,N)
y = zeros(M,N)
t_vec = zeros(N)
for m = 1:M
x[m,1] = x_0[m] # set start value
y[m,1] = x[m,1] + σ_ϵ*randn()
θ_1 = exp(ϕ[m,1]) # set parameters for subject m
θ_2 = exp(ϕ[m,2])
θ_3 = exp(ϕ[m,3])
σ_cond = sqrt((θ_3^2/(2*θ_1))*(1-exp(-2*θ_1*dt))) # set latent process std for subject m
# simulate process for subject m
for t = 2:N
t_vec[t] = t_vec[t-1] + dt
μ_cond = θ_2 + (x[m,t-1]-θ_2)*exp(-θ_1*dt)
x[m,t] = μ_cond + σ_cond*randn()
y[m,t] = x[m,t] + σ_ϵ*randn()
end
end
# return data
return y,x,t_vec
end
| [
27,
7856,
261,
480,
29,
16305,
2731,
54,
25011,
85,
396,
14,
16814,
62,
10305,
3620,
3620,
198,
2,
3440,
10392,
198,
3500,
44189,
198,
3500,
14534,
198,
198,
37811,
198,
220,
220,
220,
900,
62,
929,
7,
26,
34703,
62,
138,
116,
62,... | 1.664622 | 1,467 |
<filename>src/module.jl
using Tokenize
# –––––––––––––––
# Some file utils
# –––––––––––––––
function readdir′(dir)
try
readdir(dir)
catch e
String[]
end
end
isdir′(f) = try isdir(f) catch e false end
isfile′(f) = try isfile(f) catch e false end
files(dir) =
@as x dir readdir′(x) map!(f->joinpath(dir, f), x ,x) filter!(isfile′, x)
dirs(dir) =
@as x dir readdir′(x) filter!(f->!startswith(f, "."), x) map!(f->joinpath(dir, f), x, x) filter!(isdir′, x)
jl_files(dir::AbstractString) = @>> dir files filter!(f->endswith(f, ".jl"))
function jl_files(set)
files = Set{String}()
for dir in set, file in jl_files(dir)
push!(files, file)
end
return files
end
# Recursion + Mutable State = Job Security
"""
Takes a start directory and returns a set of nearby directories.
"""
function dirsnearby(dir; descend = 1, ascend = 5, set = Set{String}())
push!(set, dir)
if descend > 0
for down in dirs(dir)
if !(down in set)
push!(set, down)
descend > 1 && dirsnearby(down, descend = descend-1, ascend = 0, set = set)
end
end
end
ascend > 0 && dirname(dir) !== dir && dirsnearby(dirname(dir), descend = descend, ascend = ascend-1, set = set)
return set
end
# ––––––––––––––
# The Good Stuff
# ––––––––––––––
const SCOPE_STARTERS = [Tokens.BEGIN,
Tokens.WHILE,
Tokens.IF,
Tokens.FOR,
Tokens.TRY,
Tokens.FUNCTION,
Tokens.MACRO,
Tokens.LET,
Tokens.TYPE,
Tokens.DO,
Tokens.QUOTE,
Tokens.STRUCT]
const MODULE_STARTERS = [Tokens.MODULE, Tokens.BAREMODULE]
"""
Takes Julia source code and a line number, gives back the string name
of the module at that line.
"""
function codemodule(code, line)
stack = String[]
# count all unterminated block openers, brackets, and parens
openers = [[0,0,0]]
n_openers = 0
n_brackets = 0
n_parens = 0
# index of next modulename token
next_modulename = -1
ts = tokenize(code)
last_token = nothing
for (i, t) in enumerate(ts)
Tokens.startpos(t)[1] > line && break
# Ignore everything in brackets or parnetheses, because any scope started in
# them also needs to be closed in them. That way, we don't need special
# handling for comprehensions and `end`-indexing.
if Tokens.kind(t) == Tokens.LSQUARE
openers[length(stack)+1][2] += 1
elseif openers[length(stack)+1][2] > 0
if Tokens.kind(t) == Tokens.RSQUARE
openers[length(stack)+1][2] -= 1
end
elseif Tokens.kind(t) == Tokens.LPAREN
openers[length(stack)+1][3] += 1
elseif openers[length(stack)+1][3] > 0
if Tokens.kind(t) == Tokens.RPAREN
openers[length(stack)+1][3] -= 1
end
elseif Tokens.exactkind(t) in MODULE_STARTERS && (last_token == nothing || Tokens.kind(last_token) == Tokens.WHITESPACE) # new module
next_modulename = i + 2
elseif i == next_modulename && Tokens.kind(t) == Tokens.IDENTIFIER && Tokens.kind(last_token) == Tokens.WHITESPACE
push!(stack, Tokens.untokenize(t))
push!(openers, [0,0,0])
elseif Tokens.exactkind(t) in SCOPE_STARTERS # new non-module scope
openers[length(stack)+1][1] += 1
elseif Tokens.exactkind(t) == Tokens.END # scope ended
if openers[length(stack)+1][1] == 0
!isempty(stack) && pop!(stack)
length(openers) > 1 && pop!(openers)
else
openers[length(stack)+1][1] -= 1
end
end
last_token = t
end
return join(stack, ".")
end
codemodule(code, pos::Cursor) = codemodule(code, pos.line)
"""
Takes a given Julia source file and another (absolute) path, gives the
line on which the path is included in the file or 0.
"""
function includeline(file::AbstractString, included_file::AbstractString)
# check for erroneous self includes, doesn't detect more complex cycles though
file == included_file && return 0
line = 1
tokens = Tokenize.tokenize(read(file, String))
t, state = iterate(tokens)
while true
if Tokens.kind(t) == Tokens.WHITESPACE
line += count(x -> x == '\n', t.val)
elseif Tokens.kind(t) == Tokens.IDENTIFIER && t.val == "include"
t, state = iterate(tokens, state)
if Tokens.kind(t) == Tokens.LPAREN
t, state = iterate(tokens, state)
if Tokens.kind(t) == Tokens.STRING
if normpath(joinpath(dirname(file), chop(t.val, head=1, tail=1))) == included_file
return line
end
end
end
elseif Tokens.kind(t) == Tokens.ENDMARKER
break
end
t, state = iterate(tokens, state)
end
return 0
end
"""
Takes an absolute path to a file and returns the (file, line) where that
file is included or nothing.
"""
function find_include(path::AbstractString)
for file in @> path dirname dirsnearby jl_files
line = -1
try
line = includeline(file, path)
catch err
return nothing
end
line > 0 && (return file, line)
end
end
"""
Takes an absolute path to a file and returns a string
representing the module it belongs to.
"""
function filemodule_(path::AbstractString)
loc = find_include(path)
if loc != nothing
file, line = loc
mod = codemodule(read(file, String), line)
super = filemodule(file)
if super != "" && mod != ""
return "$super.$mod"
else
return super == "" ? mod : super
end
end
return ""
end
const filemodule = memoize(filemodule_)
# Get all modules
function children(m::Module)
return @>> [moduleusings(m); getmodule.(Ref(m), string.(_names(m, all=true, imported=true)))] filter(x->isa(x, Module) && x ≠ m) unique
end
function allchildren(m::Module, cs = Set{Module}())
for c in children(m)
c in cs || (push!(cs, c); allchildren(c, cs))
end
return cs
end
| [
27,
34345,
29,
10677,
14,
21412,
13,
20362,
198,
3500,
29130,
1096,
198,
198,
2,
784,
25608,
25608,
25608,
25608,
25608,
25608,
25608,
198,
2,
2773,
2393,
3384,
4487,
198,
2,
784,
25608,
25608,
25608,
25608,
25608,
25608,
25608,
198,
19... | 2.377056 | 2,493 |
# This file is auto-generated by AWSMetadata.jl
using AWS
using AWS.AWSServices: fis
using AWS.Compat
using AWS.UUIDs
"""
create_experiment_template(actions, client_token, description, role_arn, stop_conditions)
create_experiment_template(actions, client_token, description, role_arn, stop_conditions, params::Dict{String,<:Any})
Creates an experiment template. To create a template, specify the following information:
Targets: A target can be a specific resource in your AWS environment, or one or more
resources that match criteria that you specify, for example, resources that have specific
tags. Actions: The actions to carry out on the target. You can specify multiple actions,
the duration of each action, and when to start each action during an experiment. Stop
conditions: If a stop condition is triggered while an experiment is running, the experiment
is automatically stopped. You can define a stop condition as a CloudWatch alarm. For more
information, see the AWS Fault Injection Simulator User Guide.
# Arguments
- `actions`: The actions for the experiment.
- `client_token`: Unique, case-sensitive identifier that you provide to ensure the
idempotency of the request.
- `description`: A description for the experiment template. Can contain up to 64 letters
(A-Z and a-z).
- `role_arn`: The Amazon Resource Name (ARN) of an IAM role that grants the AWS FIS service
permission to perform service actions on your behalf.
- `stop_conditions`: The stop conditions.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"tags"`: The tags to apply to the experiment template.
- `"targets"`: The targets for the experiment.
"""
function create_experiment_template(
actions,
clientToken,
description,
roleArn,
stopConditions;
aws_config::AbstractAWSConfig=global_aws_config(),
)
return fis(
"POST",
"/experimentTemplates",
Dict{String,Any}(
"actions" => actions,
"clientToken" => clientToken,
"description" => description,
"roleArn" => roleArn,
"stopConditions" => stopConditions,
);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
function create_experiment_template(
actions,
clientToken,
description,
roleArn,
stopConditions,
params::AbstractDict{String};
aws_config::AbstractAWSConfig=global_aws_config(),
)
return fis(
"POST",
"/experimentTemplates",
Dict{String,Any}(
mergewith(
_merge,
Dict{String,Any}(
"actions" => actions,
"clientToken" => clientToken,
"description" => description,
"roleArn" => roleArn,
"stopConditions" => stopConditions,
),
params,
),
);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
"""
delete_experiment_template(id)
delete_experiment_template(id, params::Dict{String,<:Any})
Deletes the specified experiment template.
# Arguments
- `id`: The ID of the experiment template.
"""
function delete_experiment_template(id; aws_config::AbstractAWSConfig=global_aws_config())
return fis(
"DELETE",
"/experimentTemplates/$(id)";
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
function delete_experiment_template(
id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
)
return fis(
"DELETE",
"/experimentTemplates/$(id)",
params;
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
"""
get_action(id)
get_action(id, params::Dict{String,<:Any})
Gets information about the specified AWS FIS action.
# Arguments
- `id`: The ID of the action.
"""
function get_action(id; aws_config::AbstractAWSConfig=global_aws_config())
return fis(
"GET", "/actions/$(id)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
)
end
function get_action(
id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
)
return fis(
"GET",
"/actions/$(id)",
params;
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
"""
get_experiment(id)
get_experiment(id, params::Dict{String,<:Any})
Gets information about the specified experiment.
# Arguments
- `id`: The ID of the experiment.
"""
function get_experiment(id; aws_config::AbstractAWSConfig=global_aws_config())
return fis(
"GET", "/experiments/$(id)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
)
end
function get_experiment(
id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
)
return fis(
"GET",
"/experiments/$(id)",
params;
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
"""
get_experiment_template(id)
get_experiment_template(id, params::Dict{String,<:Any})
Gets information about the specified experiment template.
# Arguments
- `id`: The ID of the experiment template.
"""
function get_experiment_template(id; aws_config::AbstractAWSConfig=global_aws_config())
return fis(
"GET",
"/experimentTemplates/$(id)";
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
function get_experiment_template(
id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
)
return fis(
"GET",
"/experimentTemplates/$(id)",
params;
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
"""
list_actions()
list_actions(params::Dict{String,<:Any})
Lists the available AWS FIS actions.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"maxResults"`: The maximum number of results to return with a single call. To retrieve
the remaining results, make another call with the returned nextToken value.
- `"nextToken"`: The token for the next page of results.
"""
function list_actions(; aws_config::AbstractAWSConfig=global_aws_config())
return fis("GET", "/actions"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET)
end
function list_actions(
params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
)
return fis(
"GET", "/actions", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
)
end
"""
list_experiment_templates()
list_experiment_templates(params::Dict{String,<:Any})
Lists your experiment templates.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"maxResults"`: The maximum number of results to return with a single call. To retrieve
the remaining results, make another call with the returned nextToken value.
- `"nextToken"`: The token for the next page of results.
"""
function list_experiment_templates(; aws_config::AbstractAWSConfig=global_aws_config())
return fis(
"GET",
"/experimentTemplates";
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
function list_experiment_templates(
params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
)
return fis(
"GET",
"/experimentTemplates",
params;
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
"""
list_experiments()
list_experiments(params::Dict{String,<:Any})
Lists your experiments.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"maxResults"`: The maximum number of results to return with a single call. To retrieve
the remaining results, make another call with the returned nextToken value.
- `"nextToken"`: The token for the next page of results.
"""
function list_experiments(; aws_config::AbstractAWSConfig=global_aws_config())
return fis(
"GET", "/experiments"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
)
end
function list_experiments(
params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
)
return fis(
"GET",
"/experiments",
params;
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
"""
list_tags_for_resource(resource_arn)
list_tags_for_resource(resource_arn, params::Dict{String,<:Any})
Lists the tags for the specified resource.
# Arguments
- `resource_arn`: The Amazon Resource Name (ARN) of the resource.
"""
function list_tags_for_resource(
resourceArn; aws_config::AbstractAWSConfig=global_aws_config()
)
return fis(
"GET",
"/tags/$(resourceArn)";
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
function list_tags_for_resource(
resourceArn,
params::AbstractDict{String};
aws_config::AbstractAWSConfig=global_aws_config(),
)
return fis(
"GET",
"/tags/$(resourceArn)",
params;
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
"""
start_experiment(client_token, experiment_template_id)
start_experiment(client_token, experiment_template_id, params::Dict{String,<:Any})
Starts running an experiment from the specified experiment template.
# Arguments
- `client_token`: Unique, case-sensitive identifier that you provide to ensure the
idempotency of the request.
- `experiment_template_id`: The ID of the experiment template.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"tags"`: The tags to apply to the experiment.
"""
function start_experiment(
clientToken, experimentTemplateId; aws_config::AbstractAWSConfig=global_aws_config()
)
return fis(
"POST",
"/experiments",
Dict{String,Any}(
"clientToken" => clientToken, "experimentTemplateId" => experimentTemplateId
);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
function start_experiment(
clientToken,
experimentTemplateId,
params::AbstractDict{String};
aws_config::AbstractAWSConfig=global_aws_config(),
)
return fis(
"POST",
"/experiments",
Dict{String,Any}(
mergewith(
_merge,
Dict{String,Any}(
"clientToken" => clientToken,
"experimentTemplateId" => experimentTemplateId,
),
params,
),
);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
"""
stop_experiment(id)
stop_experiment(id, params::Dict{String,<:Any})
Stops the specified experiment.
# Arguments
- `id`: The ID of the experiment.
"""
function stop_experiment(id; aws_config::AbstractAWSConfig=global_aws_config())
return fis(
"DELETE",
"/experiments/$(id)";
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
function stop_experiment(
id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
)
return fis(
"DELETE",
"/experiments/$(id)",
params;
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
"""
tag_resource(resource_arn, tags)
tag_resource(resource_arn, tags, params::Dict{String,<:Any})
Applies the specified tags to the specified resource.
# Arguments
- `resource_arn`: The Amazon Resource Name (ARN) of the resource.
- `tags`: The tags for the resource.
"""
function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config())
return fis(
"POST",
"/tags/$(resourceArn)",
Dict{String,Any}("tags" => tags);
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
function tag_resource(
resourceArn,
tags,
params::AbstractDict{String};
aws_config::AbstractAWSConfig=global_aws_config(),
)
return fis(
"POST",
"/tags/$(resourceArn)",
Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tags" => tags), params));
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
"""
untag_resource(resource_arn)
untag_resource(resource_arn, params::Dict{String,<:Any})
Removes the specified tags from the specified resource.
# Arguments
- `resource_arn`: The Amazon Resource Name (ARN) of the resource.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"tagKeys"`: The tag keys to remove.
"""
function untag_resource(resourceArn; aws_config::AbstractAWSConfig=global_aws_config())
return fis(
"DELETE",
"/tags/$(resourceArn)";
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
function untag_resource(
resourceArn,
params::AbstractDict{String};
aws_config::AbstractAWSConfig=global_aws_config(),
)
return fis(
"DELETE",
"/tags/$(resourceArn)",
params;
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
"""
update_experiment_template(id)
update_experiment_template(id, params::Dict{String,<:Any})
Updates the specified experiment template.
# Arguments
- `id`: The ID of the experiment template.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"actions"`: The actions for the experiment.
- `"description"`: A description for the template.
- `"roleArn"`: The Amazon Resource Name (ARN) of an IAM role that grants the AWS FIS
service permission to perform service actions on your behalf.
- `"stopConditions"`: The stop conditions for the experiment.
- `"targets"`: The targets for the experiment.
"""
function update_experiment_template(id; aws_config::AbstractAWSConfig=global_aws_config())
return fis(
"PATCH",
"/experimentTemplates/$(id)";
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
function update_experiment_template(
id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
)
return fis(
"PATCH",
"/experimentTemplates/$(id)",
params;
aws_config=aws_config,
feature_set=SERVICE_FEATURE_SET,
)
end
| [
2,
770,
2393,
318,
8295,
12,
27568,
416,
30865,
9171,
14706,
13,
20362,
198,
3500,
30865,
198,
3500,
30865,
13,
12298,
5432,
712,
1063,
25,
277,
271,
198,
3500,
30865,
13,
40073,
198,
3500,
30865,
13,
52,
27586,
82,
198,
198,
37811,
... | 2.525315 | 5,787 |
<reponame>jpjones76/SeisIO.jl
export fill_pbo!
# https://www.unavco.org/data/strain-seismic/bsm-data/lib/docs/bottle_format.pdf
bottle_chans = Dict{String, Tuple{String,String}}(
"BatteryVolts" => ("ABV", "V"),
"CalOffsetCH0G3" => ("AO0", "{unknown}"),
"CalOffsetCH1G3" => ("AO1", "{unknown}"),
"CalOffsetCH2G3" => ("AO2", "{unknown}"),
"CalOffsetCH3G3" => ("AO3", "{unknown}"),
"CalStepCH0G2" => ("A02", "{unknown}"),
"CalStepCH0G3" => ("A03", "{unknown}"),
"CalStepCH1G2" => ("A12", "{unknown}"),
"CalStepCH1G3" => ("A13", "{unknown}"),
"CalStepCH2G2" => ("A22", "{unknown}"),
"CalStepCH2G3" => ("A23", "{unknown}"),
"CalStepCH3G2" => ("A32", "{unknown}"),
"CalStepCH3G3" => ("A33", "{unknown}"),
"DownholeDegC" => ("KD", "Cel"),
"LoggerDegC" => ("K1", "Cel"),
"PowerBoxDegC" => ("K2", "Cel"),
"PressureKPa" => ("DI", "kPa"),
"Rainfallmm" => ("R0", "mm"),
"RTSettingCH0" => ("AR0", "{unknown}"),
"RTSettingCH1" => ("AR1", "{unknown}"),
"RTSettingCH2" => ("AR2", "{unknown}"),
"RTSettingCH3" => ("AR3", "{unknown}"),
"SolarAmps" => ("ASO", "A"),
"SystemAmps" => ("ASY", "A")
)
bottle_nets = Dict{String, String}(
"AIRS" => "MC",
"TEPE" => "GF",
"BUY1" => "GF",
"ESN1" => "GF",
"TRNT" => "MC",
"HALK" => "GF",
"B948" => "ARRA",
"OLV1" => "MC",
"OLV2" => "MC",
"GERD" => "MC",
"SIV1" => "GF",
"BOZ1" => "GF",
"B947" => "ARRA"
)
function check_bads!(x::AbstractArray, nv::T) where T
# Check for bad samples
@inbounds for i in x
if i == nv
return true
end
end
return false
end
function channel_guess(str::AbstractString, fs::Float64)
si = fs >= 1.0 ? 14 : 10
ei = length(str) - (endswith(str, "_20") ? 3 : 0)
# name, id, units
str = str[si:ei]
if length(str) == 3
units = "m/m"
else
(str, units) = get(bottle_chans, str, ("YY", "{unknown}"))
end
# form channel string
if length(str) == 2
str = (fs > 1.0 ? "B" : fs > 0.1 ? "L" : "R")*str
end
return (str, units)
end
function read_bottle!(S::GphysData, fstr::String, nx_new::Int64, nx_add::Int64, memmap::Bool, strict::Bool, v::Integer)
buf = BUF.buf
files = ls(fstr)
for file in files
io = memmap ? IOBuffer(Mmap.mmap(file)) : open(file, "r")
# Read header ============================================================
fastskip(io, 8)
t0 = round(Int64, fastread(io, Float64)*sμ)
dt = fastread(io, Float32)
nx = fastread(io, Int32)
ty = fastread(io, Int32)
nv = fastread(io, Int32)
fastskip(io, 8)
fs = 1.0/dt
v > 2 && println("t0 = ", t0, ", fs = ", fs, ", nx = ", nx, ", ty = ", ty, ", nv = ", nv)
# Read data ==============================================================
T = ty == 0 ? Int16 : ty == 1 ? Int32 : Float32
nb = nx*sizeof(T)
checkbuf_8!(buf, nb)
fast_readbytes!(io, buf, nb)
close(io)
# Try to create an ID from the file name =================================
# Assumes fname SSSSyyJJJ... (SSSS = station, yy = year, JJJ = Julian day)
fname = splitdir(file)[2]
sta = fname[1:4]
(cha, units) = channel_guess(fname, fs)
# find relevant entry in station data
net = get(bottle_nets, sta, "PB")
id = net * "." * sta * ".." * cha
# Load into S ============================================================
i = findid(id, S.id)
if strict
i = channel_match(S, i, fs)
end
if i == 0
# Create C.x
x = Array{Float64,1}(undef, max(nx_new, nx))
os = 1
C = SeisChannel()
setfield!(C, :id, id)
setfield!(C, :fs, fs)
setfield!(C, :units, units)
mk_t!(C, nx, t0)
setfield!(C, :x, x)
push!(S, C)
else
xi = S.t[i][end,1]
x = getindex(getfield(S, :x), i)
check_for_gap!(S, i, t0, nx, v)
Lx = length(x)
if xi + nx > Lx
resize!(x, Lx + max(nx_add, nx))
end
os = xi + 1
end
# Check for null values
nv = T(nv)
y = reinterpret(T, buf)
b = T == Int16 ? false : check_bads!(y, nv)
if b
j = os
@inbounds for i = 1:nx
if y[i] == nv
x[j] = NaN
else
x[j] = y[i]
end
j += 1
end
else
copyto!(x, os, y, 1, nx)
end
end
trunc_x!(S)
resize!(buf, 65535)
return nothing
end
function read_bottle(fstr::String, nx_new::Int64, nx_add::Int64, memmap::Bool, strict::Bool, v::Integer)
S = SeisData()
read_bottle!(S, fstr, nx_new, nx_add, memmap, strict, v)
return S
end
"""
fill_pbo!(S)
Attempt to fill `:name` and `:loc` fields of S using station names (second field of S.id) cross-referenced against a PBO station info file.
"""
function fill_pbo!(S::GphysData)
sta_data = readdlm(path * "/Formats/PBO_bsm_coords.txt", ',', comments=false)
sta_data[:,2] .= strip.(sta_data[:,2])
sta_data[:,6] .= strip.(sta_data[:,6])
n_sta = size(sta_data, 1)
for i = 1:S.n
sta = split(S.id[i], '.')[2]
for j = 1:n_sta
if sta_data[j, 1] == sta
S.name[i] = String(sta_data[j,2])
lat = sta_data[j,3]
lon = sta_data[j,4]
el = sta_data[j,5]
S.loc[i] = GeoLoc(lat = lat, lon = lon, el = el)
break
end
end
end
return nothing
end
| [
27,
7856,
261,
480,
29,
34523,
73,
1952,
4304,
14,
4653,
271,
9399,
13,
20362,
198,
39344,
6070,
62,
79,
2127,
0,
198,
198,
2,
3740,
1378,
2503,
13,
403,
615,
1073,
13,
2398,
14,
7890,
14,
2536,
391,
12,
325,
1042,
291,
14,
1443... | 2.09189 | 2,503 |
module FisherPlot
using LaTeXStrings
using CairoMakie
using Makie
using Base: @kwdef
function ellipseparameterization(a::Float64, b::Float64, θ::Float64)
t = LinRange(0,2π, 200)
x = Array(a .* sin.(θ) .* cos.(t) + b .* cos.(θ) .* sin.(t))
y = Array(a .* cos.(θ) .* cos.(t) - b .* sin.(θ) .* sin.(t))
return x, y
end
function gaussian(μ::Float64, σ::Float64, x)
return 1/(sqrt(2π*σ^2))*exp(-0.5*(x-μ)^2/σ^2)
end
function ellipseparameters(covmatrix::Matrix{Float64}, i::Int64, j::Int64)
σi = sqrt(covmatrix[i,i])
σj = sqrt(covmatrix[j,j])
σij = covmatrix[i,j]
θ = (atan(2σij,(σi^2-σj^2)))/2
a = sqrt((σi^2+σj^2)/2+sqrt(((σi^2-σj^2)^2)/4+σij^2))
if i == j
b = 0.
else
b = sqrt((σi^2+σj^2)/2-sqrt(((σi^2-σj^2)^2)/4+σij^2))
end
return σi, σj, a, b, θ
end
function preparecanvas(LaTeX_array, limits, ticks, probes, colors, PlotPars::Dict)
matrix_dim = length(LaTeX_array)
#TODO: add the textsize to PlotPars
figure = Makie.Figure(textsize = 40, font = PlotPars["font"])
for i in 1:matrix_dim
for j in 1:i
if i == j
ax = Axis(figure[i,i],
width = PlotPars["sidesquare"], height = PlotPars["sidesquare"],
xticklabelsize = PlotPars["dimticklabel"],
yticklabelsize = PlotPars["dimticklabel"], yaxisposition = (:right),
xlabel = L"%$((LaTeX_array)[i])", xlabelsize = PlotPars["parslabelsize"],
ylabel = L"P/P_\mathrm{max}",ylabelsize = PlotPars["PPmaxlabelsize"], yticks = [0,1],
xticklabelrotation = PlotPars["xticklabelrotation"],
xticks = ([ticks[i,1], 0.5*(ticks[i,1]+ticks[i,2]), ticks[i,2]],
[string(myi) for myi in round.([ticks[i,1], 0.5*(ticks[i,1]+ticks[i,2]), ticks[i,2]], sigdigits = 3)]))
Makie.ylims!(ax, (-0.0,1.05))
Makie.xlims!(ax, (limits[i,1],limits[i,2]))
Makie.hideydecorations!(ax, ticks = false, ticklabels = false, label = false)
if i != matrix_dim
ax.alignmode = Mixed(right = MakieLayout.Protrusion(0), bottom = MakieLayout.Protrusion(0), top= MakieLayout.Protrusion(0))
hidexdecorations!(ax, ticks = true, ticklabels = true, label = true)
else
hidexdecorations!(ax, ticks = false, ticklabels = false, label = false)
end
else
ax = Axis(figure[i,j], width = PlotPars["sidesquare"], height = PlotPars["sidesquare"],
xticklabelsize = PlotPars["dimticklabel"], yticklabelsize = PlotPars["dimticklabel"],
ylabel = L"%$(LaTeX_array[i])", xlabel = L"%$((LaTeX_array)[j])",
ylabelsize = PlotPars["parslabelsize"], xlabelsize = PlotPars["parslabelsize"], xticklabelrotation = PlotPars["xticklabelrotation"],
yticks = ([ticks[i,1], 0.5*(ticks[i,1]+ticks[i,2]), ticks[i,2]],
[string(myi) for myi in round.([ticks[i,1], 0.5*(ticks[i,1]+ticks[i,2]), ticks[i,2]], sigdigits = 3)]),
xticks = ([ticks[j,1], 0.5*(ticks[j,1]+ticks[j,2]), ticks[j,2]],
[string(myi) for myi in round.([ticks[j,1], 0.5*(ticks[j,1]+ticks[j,2]), ticks[j,2]], sigdigits = 3)]),
yticklabelpad=8)
Makie.ylims!(ax, (limits[i,1],limits[i,2]))
Makie.xlims!(ax, (limits[j,1],limits[j,2]))
if i == matrix_dim
hidexdecorations!(ax, ticks = false, ticklabels = false, label = false)
else
hidexdecorations!(ax, ticks = true, ticklabels = true, label = true)
end
if j == 1
hideydecorations!(ax, ticks = false, ticklabels = false, label = false)
Legend(figure[1,matrix_dim],
[PolyElement(color = color, strokecolor = color, strokewidth = 1) for color in colors],
probes,
tellheight = false, tellwidth = false, rowgap = 10,
halign = :right, valign = :top, framecolor = :black, labelsize =55, patchsize = (70, 40), framevisible = true)
else
hideydecorations!(ax, ticks = true, ticklabels = true, label = true)
ax.alignmode = Mixed(right = MakieLayout.Protrusion(0), bottom = MakieLayout.Protrusion(0), top = MakieLayout.Protrusion(0))
end
end
end
end
Makie.resize!(figure.scene, figure.layout.layoutobservables.reportedsize[]...)
return figure
end
function drawgaussian!(canvas, σ, i, central, color)
ax = canvas[i,i]
x = Array(LinRange(-4σ+central,4σ+central, 200))
Makie.lines!(ax, x, gaussian.(central, σ, x)./gaussian.(central, σ, central), color = color, linewidth = 4)
Makie.band!(ax, x, 0, gaussian.(central, σ, x)./gaussian.(central, σ, central) , color=(color, 0.2))
x = Array(LinRange(-σ+central,σ+central, 200))
Makie.band!(ax, x, 0, gaussian.(central, σ, x)./gaussian.(central, σ, central) , color=(color, 0.4))
end
function drawellipse!(canvas, i, j, x, y, central_values, color)
ax = canvas[i,j]
Makie.lines!(ax, x .+ central_values[j], y .+ central_values[i], color = color, linewidth = 4)
Makie.lines!(ax, 3x .+ central_values[j], 3y .+ central_values[i], color = color, linewidth = 4)
Makie.band!(ax, x .+ central_values[j], 0, y .+ central_values[i], color=(color, 0.4))
Makie.band!(ax, 3x .+ central_values[j], 0, 3y .+ central_values[i], color=(color, 0.2))
end
function paintcorrmatrix!(canvas, central_values, corr_matrix, color)
for i in 1:length(central_values)
for j in 1:i
if i == j
drawgaussian!(canvas, sqrt(corr_matrix[i,i]), i, central_values[i], color)
else
σi, σj, a, b, θ = ellipseparameters(corr_matrix, i,j)
x,y = ellipseparameterization(a, b, θ)
drawellipse!(canvas, i, j, x, y, central_values, color)
end
end
end
end
function save(filename, obj)
Makie.save(filename, obj)
end
end # module
| [
21412,
14388,
43328,
198,
198,
3500,
4689,
49568,
13290,
654,
198,
3500,
23732,
44,
461,
494,
198,
3500,
15841,
494,
198,
3500,
7308,
25,
2488,
46265,
4299,
198,
198,
8818,
30004,
541,
325,
17143,
2357,
1634,
7,
64,
3712,
43879,
2414,
... | 1.913136 | 3,281 |
<reponame>Wimmerer/SatisfiabilityInterface.jl
struct BinaryRelation{O, S, T}
x::S
y::T
end
BinaryRelation{O}(x::X, y::Y) where {O,X,Y} = BinaryRelation{O,X,Y}(x, y)
x = DiscreteVariable(:x, 1:5)
y = DiscreteVariable(:y, 2:6)
# BinaryRelation(==, x, y)
# BinaryRelation(==, x, 1)
# Use NodeVariables to break up binary relations like x + y ~ 3 and ternary such as x + y <= z
struct NodeVariable <: Var
name
op
x
y
domain
booleans
varmap
end
Base.show(io::IO, var::NodeVariable) = print(io, "$(var.name) = $(var.op)($(var.x), $(var.y))")
find_domain(op, x, y) = sort(collect(Set([op(i, j) for i in domain(x) for j in domain(y)])))
domain(v::Var) = v.domain
booleans(v::Var) = v.booleans
domain(x::Real) = x:x
"All booleans inside an expression that must be added to the encoding"
recursive_booleans(v::NodeVariable) = v.booleans ∪ booleans(v.x) ∪ booleans(v.y)
function NodeVariable(op, x, y, name=gensym())
domain = find_domain(op, x, y)
booleans = [Variable(name, i) for i ∈ indices(domain)]
varmap = Dict(i => v for (i, v) in zip(domain, booleans))
NodeVariable(name, op, x, y, domain, booleans, varmap)
end
Base.:+(x::Var, y) = NodeVariable(+, x, y)
Base.:+(x, y::Var) = NodeVariable(+, x, y)
Base.:*(x::Var, y) = NodeVariable(*, x, y)
Base.:*(x, y::Var) = NodeVariable(*, x, y)
Base.:^(x::Var, y) = NodeVariable(^, x, y)
Base.:^(x, y::Var) = NodeVariable(^, x, y)
function clauses(var::NodeVariable)
op = var.op
x = var.x
y = var.y
clauses = exactly_one(var.booleans)
# deal with 2 + X
if x isa Real
for j in domain(var.y)
value = op(x, j)
# y == j => var == op(x, j)
push!(clauses, ¬(y.varmap[j]) ∨ var.varmap[value])
end
# deal with x + 2
elseif y isa Real
for i in domain(var.x)
value = op(i, y)
# x == i => var == op(i, y)
push!(clauses, ¬(x.varmap[i]) ∨ var.varmap[value])
end
else
# neither x nor y is real
for i in domain(var.x)
for j in domain(var.y)
value = op(i, j)
# x == i && y == j => var == op(i, j)
push!(clauses, ¬(x.varmap[i]) ∨ ¬(y.varmap[j]) ∨ var.varmap[value])
end
end
end
return clauses
end
Base.:(==)(v::Var, w) = BinaryRelation{==}(v, w)
Base.:(<=)(v::Var, w) = BinaryRelation{<=}(v, w)
"Encode relation like x == 1"
function encode(rel::BinaryRelation{==, <:Var, <:Real})
x = rel.x
y = rel.y
if y ∉ domain(x)
error("$y is not in the domain of $x")
end
boolean = x.varmap[y]
return [boolean]
end
"Encode relation like x != 1"
function encode(rel::BinaryRelation{!=, <:Var, <:Real})
x = rel.x
y = rel.y
if y ∈ domain(x)
boolean = ¬(x.varmap[y])
end
return [boolean]
end
"Encode relation like x == y"
function encode(rel::BinaryRelation{==, <:Var, <:Var})
x = rel.x
y = rel.y
clauses = []
for i in domain(x)
if i in domain(y)
# (x == i) => (y == i)
push!(clauses, ¬(x.varmap[i]) ∨ (y.varmap[i]))
else
push!(clauses, ¬(x.varmap[i]))
end
end
for i in domain(y)
if i in domain(x)
# (y == i) => (x == i)
push!(clauses, ¬(y.varmap[i]) ∨ (x.varmap[i]))
else
push!(clauses, ¬(y.varmap[i]))
end
end
return clauses
end
function encode(rel::BinaryRelation{!=, <:Var, <:Var})
x = rel.x
y = rel.y
clauses = []
for i in domain(x)
if i in domain(y)
# (x == i) => (y != i)
push!(clauses, ¬(x.varmap[i]) ∨ ¬(y.varmap[i]))
end
end
return clauses
end
function parse_expression(varmap, ex)
op = operation(ex)
args = arguments(ex)
new_args = parse_expression.(Ref(varmap), args)
return op, new_args
end
parse_expression(varmap, ex::Term) = varmap[ex]
parse_expression(varmap, ex::Sym) = varmap[ex]
parse_expression(varmap, ex::Num) = parse_expression(varmap, value(ex))
parse_expression(varmap, ex::Real) = ex
"Parse a symbolic expression into a relation"
function parse_relation(varmap, ex)
op = operation(ex)
args = arguments(ex)
lhs = args[1]
rhs = args[2]
return BinaryRelation{op}(parse_expression(varmap, lhs), parse_expression(varmap, rhs))
end
# function parse_relation!(varmap, ex::Equation)
# varmap[ex.lhs] = parse_expression(varmap, ex.rhs)
# end
is_variable(ex) = !istree(ex) || (operation(ex) == getindex)
function process(constraint)
new_constraints = []
constraint2 = value(constraint)
op = operation(constraint2)
args = arguments(constraint2)
lhs = args[1]
rhs = args[2]
intermediates_generated = false
if !is_variable(lhs)
lhs = ReversePropagation.cse_equations(lhs)
append!(new_constraints, lhs[1])
lhs = lhs[2]
end
if !is_variable(rhs)
rhs = ReversePropagation.cse_equations(rhs)
append!(new_constraints, rhs[1])
rhs = rhs[2]
end
# @show new_constraints
# @show op, lhs, rhs
push!(new_constraints, op(lhs, rhs))
return new_constraints
end
process(constraints::Vector) = reduce(vcat, process(constraint) for constraint in constraints)
function parse_constraint!(domains, ex)
expr = value(ex)
op = operation(expr)
new_constraints = []
if op == ∈ # assumes right-hand side is an explicit set specifying the doomain
args = arguments(expr)
var = args[1]
domain = args[2]
domains[var] = domain
else
new_constraints = process(expr)
println()
# @show expr
# @show new_constraints
end
return domains, new_constraints
end
function parse_constraints(constraints)
# domains = Dict(var => -Inf..Inf for var in vars)
additional_vars = []
domains = Dict()
all_new_constraints = [] # excluding domain constraints
for constraint in constraints
# binarize constraints:
domains, new_constraints = parse_constraint!(domains, value(constraint))
for statement in new_constraints
if statement isa Assignment
push!(additional_vars, statement.lhs)
end
end
append!(all_new_constraints, new_constraints)
end
# append!(additional_vars, keys(domains))
return domains, all_new_constraints, additional_vars
end
Base.isless(x::Sym, y::Sym) = isless(x.name, y.name)
Base.isless(x::Term, y::Term) = isless(string(x), string(y))
Base.isless(x::Term, y::Sym) = isless(string(x), string(y))
Base.isless(x::Sym, y::Term) = isless(string(x), string(y))
struct ConstraintSatisfactionProblem
original_vars
additional_vars # from binarizing
domains
constraints
end
function ConstraintSatisfactionProblem(constraints)
domains, new_constraints, additional_vars = parse_constraints(constraints)
@show keys(domains)
vars = sort(identity.(keys(domains)))
additional_vars = sort(identity.(additional_vars))
return ConstraintSatisfactionProblem(vars, additional_vars, domains, new_constraints)
end
struct DiscreteCSP
original_vars
additional_vars
constraints
varmap # maps symbolic variables to the corresponding DiscreteVariable
end
DiscreteCSP(constraints) = DiscreteCSP(ConstraintSatisfactionProblem(constraints))
function DiscreteCSP(prob::ConstraintSatisfactionProblem)
variables = []
varmap = Dict()
new_constraints = []
for (var, domain) in prob.domains
variable = DiscreteVariable(var, domain)
push!(variables, variable)
push!(varmap, var => variable)
end
for constraint in prob.constraints
constraint = value(constraint)
if constraint isa Assignment
lhs = constraint.lhs
op, new_args = parse_expression(varmap, constraint.rhs) # makes a NodeVariable
# @show op, new_args
variable = NodeVariable(op, new_args[1], new_args[2], lhs)
push!(variables, variable)
push!(varmap, lhs => variable)
else
# @show constraint
push!(new_constraints, parse_relation(varmap, constraint))
end
end
original_vars = [varmap[var] for var in prob.original_vars]
additional_vars = [varmap[var] for var in prob.additional_vars]
return DiscreteCSP(original_vars, additional_vars, new_constraints, varmap)
end
function encode(prob::DiscreteCSP)
all_variables = []
all_clauses = []
variables = Any[prob.original_vars; prob.additional_vars]
domains = Dict(v.name => v.domain for v in variables)
for var in variables
append!(all_variables, var.booleans)
append!(all_clauses, clauses(var))
end
# @show prob.constraints
for constraint in prob.constraints
# @show constraint
append!(all_clauses, encode(constraint))
end
# @show all_variables
# @show all_clauses
return SymbolicSATProblem(identity.(all_variables), identity.(all_clauses))
end
function solve(prob::DiscreteCSP)
symbolic_sat_problem = encode(prob)
status, result_dict = solve(symbolic_sat_problem)
(status == :unsat) && return status, missing
# variables = Any[prob.original_vars; prob.additional_vars]
variables = prob.original_vars
return status, decode(prob, result_dict)
end
decode(prob::DiscreteCSP, result_dict) = Dict(v.name => decode(result_dict, v) for v in prob.original_vars)
function all_solutions(prob::DiscreteCSP)
sat_problem = encode(prob)
# variables = Any[prob.original_vars; prob.additional_vars]
variables = prob.original_vars
prob2 = encode(prob)
prob3 = prob2.p # SATProblem
sat_solutions = all_solutions(prob3)
if isempty(sat_solutions)
return sat_solutions
end
solutions = []
for solution in sat_solutions
push!(solutions, decode(prob, decode(prob2, solution)))
end
return solutions
end
| [
27,
7856,
261,
480,
29,
54,
8608,
11882,
14,
50,
17403,
12455,
39317,
13,
20362,
628,
628,
628,
198,
7249,
45755,
6892,
341,
90,
46,
11,
311,
11,
309,
92,
198,
220,
220,
220,
2124,
3712,
50,
198,
220,
220,
220,
331,
3712,
51,
22... | 2.208495 | 4,638 |
<reponame>moeinkh88/FdeSolver.jl
using Test
using FdeSolver
@testset "FdeSolver.jl" begin
b = FdeSolver.greet()
@test @isdefined(b)
end
| [
27,
7856,
261,
480,
29,
76,
2577,
676,
71,
3459,
14,
37,
2934,
50,
14375,
13,
20362,
198,
3500,
6208,
198,
3500,
376,
2934,
50,
14375,
198,
198,
31,
9288,
2617,
366,
37,
2934,
50,
14375,
13,
20362,
1,
2221,
198,
220,
220,
220,
2... | 2.101449 | 69 |
<reponame>clintonTE/CCA<gh_stars>0
struct Characteristic <: AbstractCharacteristic
ws::WeightSpec
Fξ::Symbol
X::Dict{Symbol, Symbol}
end
#builds a characteristic object, which includes cross-sectional weights and
#weight coefficients
function Characteristic(ws::WeightSpec, Fξ::Symbol)
Fw = Symbol(:w, ws.weightprefix, Fξ)
#a couple of helper funtions for consistent renaming
lagname(s::Symbol) = Symbol(:L, s)
retname(s::Symbol) = Symbol(:R, s)
rettname(s::Symbol) = Symbol(:Rt, s)
#WARNING: HARDCODE ALERT!!!
X = Dict{Symbol, Symbol}()
X[:Fw] = Fw
X[:FLw] = lagname(X[:Fw])
X[:FLLw] = lagname(X[:FLw])
X[:FLLLw] = lagname(X[:FLLw])
#not used for now
X[:FRLw] = retname(X[:FLw])
X[:FRLLw] = retname(X[:FLLw])
X[:FRtw] = rettname(X[:Fw])
return Characteristic(ws, Fξ, X)
end
weightfields(ξ::Characteristic) = collect(values(ξ.X))
Characteristic(ξ::Characteristic, Fξ::Symbol, X::Dict
) = Characteristic(ξ.ws, Fξ, X)
demeanξ(ξraw::AbstractVector{<:Union{Real, Missing}}) = ξraw .- mean(skipmissing(ξraw))
#this gives the quantile fo each value in an array
#would have preferred the ecdf function, but it doesn't play nice with
#containers that can hold missing values, even if none are actually held
#=function quantiles(ξraw::AbstractVector{<:Union{Real,Missing}})
(length(ξraw) .- competerank(ξraw,rev=true) .+ 1)./length(ξraw)
end=#
function quantiles(ξraw::AbstractVector{<:Union{Real,Missing}})
(length(ξraw) .- competerank(ξraw,rev=true) .+ 1)./length(ξraw)
end
#alternate procedure
#=function quantiles(ξraw::AbstractVector{<:Union{Real,Missing}})
F = ecdf(ξraw |> Vector{Float64})
return map(F, ξraw)
end=#
#conditions each slice of the panel for a particular characteristic
function conditionξ!(F::Function, panel::AbstractDataFrame, Fξraw::Symbol, Fξ::Symbol)::Nothing
(sum((!ismissing).(panel[!, Fξ])) > 0) && error("$Fξ in panel already contains data")
spanels::GroupedDataFrame = groupby(view(panel, (!ismissing).(panel[!,Fξraw]), :), :date)
Threads.@threads for i ∈ 1:length(spanels)
spanel::SubDataFrame = spanels[i]
Fξsubcol::SubArray = spanel[!, Fξ]
Fξsubcol .= F(spanel[!, Fξraw])
end
return nothing
end
#=standard conditioning of a characteristic column
function conditionξ!(panel::AbstractDataFrame, Fξraw::Symbol;
prefix::Symbol = PARAM[:charprefix],
Fξ::Symbol = Symbol(prefix, Fξraw))::Nothing
#allocate the data
ξtype::Type = Union{eltype(panel[!,Fξraw]), Missing}
panel[!, Fξ] = missings(ξtype, size(panel,1))
conditionξ!(demeanξ, panel, Fξraw, Fξ)
return nothing
end
#apply a quantile transformation before demeaning
function conditionξquantile!(panel::AbstractDataFrame, Fξraw::Symbol;
prefix::Symbol = PARAM[:charprefix],
Fξ::Symbol = Symbol(prefix, Fξraw))::Nothing
#allocate the data
panel[!, Fξ] = Vector{Union{eltype(panel[!,Fξraw])}}(undef, size(panel,1))
conditionξ!(demeanξ ∘ quantiles, panel, Fξraw, Fξ)
return nothing
end=#
| [
27,
7856,
261,
480,
29,
37821,
9328,
14,
4093,
32,
27,
456,
62,
30783,
29,
15,
198,
198,
7249,
15684,
2569,
1279,
25,
27741,
27275,
2569,
198,
220,
266,
82,
3712,
25844,
22882,
198,
220,
376,
138,
122,
3712,
13940,
23650,
198,
220,
... | 2.51602 | 1,186 |
# licor_split.jl
#
# Split Li-7200 TXT files into GHG archives
#
# <NAME>
# Thünen Institut
# Institut für Agrarklimaschutz
# Junior Research Group NITROSPHERE
# Julia 0.7
# 20.05.2014
# Last Edit: 12.04.2019
"# licor_split(source::String,destination::String;verbose::Bool=false)
Split an Li-7200 text file into small files in the standard GHG format. If possible, files will start on the usual four hour marks (0000, 0400, 0800, etc.).
`licor_split(source,destination)` Split Li7200 files in the source directors\n
* **source**::String = Directory of Li-7200 text files with names similar to 2016-03-17T104500.txt
* **destination**::String = Destination directory\n\n
---\n
#### Keywords:\n
* verbose::Bool = Display information as the function runs, TRUE is default
\n\n"
function licor_split(Dr::String,Dest::String;verbose::Bool=true)
###################
## Definitions ##
###################
# Dr = Source Directory
# Dest = Destination Directory
####################
## Parse Inputs ##
####################
# Check Directory Validity
if ~isdir(Dr) | ~isdir(Dest)
error("Source or destination directory does not exist")
end
(files,folders) = dirlist(Dr,regex=r"\d{4}-\d{2}-\d{2}T\d{6}\.txt$",recur=1) # List TXT files
#####################
## Process Files ##
#####################
if isempty(files)
println("No Metdata files to split")
else
if verbose
println("=== Splitting Metdata Files ===")
end
end
time = DateTime[]
starttime = DateTime[]
endtime = DateTime[]
fname = String[]
header = Array{String}(undef,0) # Initialized header array
sentinel = 20; # Sentinel value for header while loop
for i=1:1:length(files)
if verbose
println(" " * files[i])
end
s = stat(files[i]) # Get file info
fid = open(files[i],"r")
# Load Header
j = 1;
l = "Bierchen" # Needs any string at least 5 characters long
while l[1:5] != "DATAH" && j < sentinel
l = readline(fid,keep=true)
header = [header;l]
j += 1
end
data_pos = position(fid) # Position of where the data starts
#Get Start Time
l = readline(fid,keep=true)
ft = findall(x -> x == '\t',l)
starttime = DateTime(l[ft[6]+1:ft[8]-1],"yyyy-mm-dd\tHH:MM:SS:sss")
# Get End Time
endline = true
lastpos = 4 # Characters from the end of the file where the last line begins
while endline == true
seek(fid,s.size-lastpos)
l = readline(fid,keep=true)
if eof(fid) == false
endline = false
else
lastpos += 2
end
end
l = readline(fid,keep=true)
ft = findall(x -> x == '\t',l)
endtime = DateTime(l[ft[6]+1:ft[8]-1],"yyyy-mm-dd\tHH:MM:SS:sss")
# Split the File
seek(fid,data_pos) # Move the cursor to the start of the data
fid2 = open(files[i]);close(fid2)
next_start = []
first_file = true
while eof(fid) == false
if isopen(fid2) == false
# No output stream available
# Open new output file from previous line
if first_file == true
# There is not previously loaded line, load one now
l = readline(fid,keep=true)
first_file = false
end
# Calculate last value of file
ft = findall(x -> x == '\t',l)
temp_start = DateTime(l[ft[6]+1:ft[8]-1],"yyyy-mm-dd\tHH:MM:SS:sss")
HH2 = Dates.Hour[]
DD2 = Dates.Day(temp_start)
if Dates.Hour(0) <= Dates.Hour(temp_start) < Dates.Hour(4)
HH2 = Dates.Hour(4)
elseif Dates.Hour(4) <= Dates.Hour(temp_start) < Dates.Hour(8)
HH2 = Dates.Hour(8)
elseif Dates.Hour(8) <= Dates.Hour(temp_start) < Dates.Hour(12)
HH2 = Dates.Hour(12)
elseif Dates.Hour(12) <= Dates.Hour(temp_start) < Dates.Hour(16)
HH2 = Dates.Hour(16)
elseif Dates.Hour(16) <= Dates.Hour(temp_start) < Dates.Hour(20)
HH2 = Dates.Hour(20)
elseif Dates.Hour(20) <= Dates.Hour(temp_start) < Dates.Hour(24)
HH2 = Dates.Hour(0)
DD2 = DD2 + Dates.Day(1)
end
next_start = DateTime(Dates.Year(temp_start),Dates.Month(temp_start),DD2,HH2)
# Generate File Name
for j=1:1:length(header)
if occursin(r"^Instrument:",header[j])
fname = joinpath(Dest,Dates.format(temp_start,"yyyy-mm-ddTHHMMSS") * "_" * header[j][13:end-1] * ".data")
end
end
if verbose
println("\t\t- " * fname)
end
fid2 = open(fname,"w+")
# Find and replace the Timestamp header line
for j=1:1:length(header);
if occursin(r"^Timestamp",header[j])
header[j] = "Timestamp: " * Dates.format(temp_start,"yyyy-mm-dd HH:MM:SS:sss") * "\n"
end
end
# Iteratively write the header
for j=1:1:length(header)
write(fid2,header[j])
end
write(fid2,l) # Write the line used to create the file name
else
# An output stream is available
l = readline(fid,keep=true) # Load in another line
# Write or Close
ft = findall(x -> x == '\t',l)
temp_end = DateTime(l[ft[6]+1:ft[8]-1],"yyyy-mm-dd\tHH:MM:SS:sss")
if temp_end >= next_start
# The current line is newer than the start of the next file, close the current file
close(fid2)
# Zipped File Name (minus extension)
fzip = splitext(fname)[1]
# Zip File
if verbose
println("\t\t\tCompressing...")
end
fzip = fzip * ".ghg"
ziptextfiles(fzip,fname) # Zip file
rm(fname)
else
# Still within range, write the current line
write(fid2,l)
end
end
end
close(fid2)
close(fid)
# Zipped File Name (minus extension)
fzip = splitext(fname)[1]
# Zip the final file
if verbose
println("\t\t\tCompressing...")
end
fzip = fzip * ".ghg"
fzip2 = fzip * ".ghg"
ziptextfiles(fzip,fname) # Zip file
rm(fname)
end
if verbose
println("Complete")
end
end
| [
2,
3476,
273,
62,
35312,
13,
20362,
201,
198,
2,
201,
198,
2,
220,
220,
27758,
7455,
12,
22,
2167,
15326,
51,
3696,
656,
24739,
38,
22415,
201,
198,
2,
201,
198,
2,
1279,
20608,
29,
201,
198,
2,
536,
9116,
38572,
37931,
315,
201... | 2.098745 | 3,028 |
function CircularFieldOfView(arg0::Vector3D, arg1::jdouble, arg2::jdouble)
return CircularFieldOfView((Vector3D, jdouble, jdouble), arg0, arg1, arg2)
end
function get_center(obj::SmoothFieldOfView)
return jcall(obj, "getCenter", Vector3D, ())
end
function get_footprint(obj::SmoothFieldOfView, arg0::Transform, arg1::OneAxisEllipsoid, arg2::jdouble)
return jcall(obj, "getFootprint", List, (Transform, OneAxisEllipsoid, jdouble), arg0, arg1, arg2)
end
function get_half_aperture(obj::CircularFieldOfView)
return jcall(obj, "getHalfAperture", jdouble, ())
end
function get_x(obj::SmoothFieldOfView)
return jcall(obj, "getX", Vector3D, ())
end
function get_y(obj::SmoothFieldOfView)
return jcall(obj, "getY", Vector3D, ())
end
function get_z(obj::SmoothFieldOfView)
return jcall(obj, "getZ", Vector3D, ())
end
function offset_from_boundary(obj::CircularFieldOfView, arg0::Vector3D, arg1::jdouble, arg2::VisibilityTrigger)
return jcall(obj, "offsetFromBoundary", jdouble, (Vector3D, jdouble, VisibilityTrigger), arg0, arg1, arg2)
end
function project_to_boundary(obj::CircularFieldOfView, arg0::Vector3D)
return jcall(obj, "projectToBoundary", Vector3D, (Vector3D,), arg0)
end
| [
8818,
7672,
934,
15878,
5189,
7680,
7,
853,
15,
3712,
38469,
18,
35,
11,
1822,
16,
3712,
73,
23352,
11,
1822,
17,
3712,
73,
23352,
8,
198,
220,
220,
220,
1441,
7672,
934,
15878,
5189,
7680,
19510,
38469,
18,
35,
11,
474,
23352,
11... | 2.65 | 460 |
<reponame>rawrgrr/LSystems.jl<filename>examples/draw_dragon.jl
using Cairo
using LSystems
c = CairoRGBSurface(485, 720);
cr = CairoContext(c);
save(cr);
set_source_rgb(cr, 1.0, 1.0, 1.0);
rectangle(cr, 0.0, 0.0, 485.0, 720.0);
fill(cr);
restore(cr);
save(cr);
x, y, a, d = 310., 560., 90, 15
set_line_width(cr, 1);
move_to(cr, x, y);
dragon_start = "A"
dragon_trans = Dict([
('A', "A+BF"),
('B', "FA-B")
])
function determine_new_position(x, y, a, d)
x -= sind(a) * d
y -= cosd(a) * d
return x, y
end
level = 10
drawing_instructions = @task dol_system(dragon_start, dragon_trans, level)
for v in drawing_instructions
if v == 'F'
x, y = determine_new_position(x, y, a, d)
line_to(cr, x, y)
elseif v == '+'
a -= 90
elseif v == '-'
a += 90
elseif v == '['
push!(s, [x, y, a])
elseif v == ']'
x, y, a = pop!(s)
move_to(cr, x, y)
end
end
stroke(cr);
write_to_png(c, "dragon_$level.png");
| [
27,
7856,
261,
480,
29,
1831,
81,
2164,
81,
14,
6561,
6781,
82,
13,
20362,
27,
34345,
29,
1069,
12629,
14,
19334,
62,
14844,
13,
20362,
198,
3500,
23732,
198,
3500,
406,
11964,
82,
198,
198,
66,
796,
23732,
48192,
4462,
333,
2550,
... | 1.973948 | 499 |
<filename>src/convert.jl<gh_stars>1-10
###########
## asArr ##
###########
## asArr: try conversion to Array, implicitly dealing with NAs
##
## using DataFrames, columns may be of different type and hence are
## possibly not compatible with each other as columns of an array
function asArr{T}(da::DataArray{T, 1}, typ::Type=Any,
replaceNA=NA)
## convert DataArray to typ and replace NAs with replaceNA
vals = convert(Array{typ, 1}, da.data)
vals[da.na] = replaceNA
return vals
end
function asArr(df::DataFrame, typ::Type=Any,
replaceNA=NA)
nObs, nAss = size(df)
vals = Array(typ, nObs, nAss)
## copying into this array implicitly forces conversion to this
## type afterwards
for ii=1:nAss
if isa(df.columns[ii], DataArray) # deal with NAs
if eltype(df.columns[ii]) == NAtype # column of NAs only
vals[:, ii] = replaceNA
else
vals[:, ii] = df.columns[ii].data
vals[df.columns[ii].na, ii] = replaceNA
end
else # no NAs in simple Array
vals[:, ii] = df.columns[ii]
end
end
return vals
end
function asArr(dfr::DataFrameRow, typ::Type=Any,
replaceNA=NA)
nObs = length(dfr)
res = Array(typ, 1, nObs)
for ii=1:nObs
if isna(dfr[ii])
res[1, ii] = replaceNA
else
res[1, ii] = dfr[ii]
end
end
return res
end
function asArr(tn::AbstractTimedata, typ::Type=Any,
replaceNA=NA)
return asArr(tn.vals, typ, replaceNA)
end
## problem: columns with NAs only
## problem: NaN in boolean context will be true
## problem: DataArray{Any, 1} also exist: da.data then contains NAs
#############
## Timenum ##
#############
import Base.convert
## to Timedata: conversion upwards - always works
function convert(::Type{Timedata}, tn::Timenum)
return Timedata(tn.vals, tn.idx)
end
## to Timematr: conversion downwards - fails for NAs
function convert(::Type{Timematr}, tn::Timenum, rmNA = false)
if rmNA
tm = narm(tn)
tm = Timematr(tm.vals, tm.idx)
else
tm = Timematr(tn.vals, tn.idx)
end
return tm
end
##############
## Timematr ##
##############
## to Timedata: conversion upwards - always works
function convert(::Type{Timedata}, tm::Timematr)
Timedata(tm.vals, tm.idx)
end
## to Timenum: conversion upwards - always works
function convert(::Type{Timenum}, tm::Timematr)
Timenum(tm.vals, tm.idx)
end
##############
## Timedata ##
##############
## to Timenum: conversion downwards - fails for non-numeric values
function convert(::Type{Timenum}, td::Timedata)
Timenum(td.vals, td.idx)
end
## to Timematr: conversion downwards - fails for NAs
function convert(::Type{Timematr}, td::Timedata)
Timematr(td.vals, td.idx)
end
################
## DataFrames ##
################
## convert DataFrame with dates column (as String) to TimeData object
function convert(::Type{AbstractTimedata}, df::DataFrame)
## test if some column already is of type Date
if any(eltypes(df) .== Date)
## take first occuring column
dateCol = find(eltypes(df) .== Date)[1]
idx = convert(Array, df[dateCol])
delete!(df, [dateCol])
else ## find column that contain dates as String
# find columns that have been parsed as Strings by readtable
col_to_test = Array(Symbol, 0)
nCols = size(df, 2)
for ii=1:nCols
isa(df[1, ii], String)?
push!(col_to_test, names(df)[ii]):
nothing
end
# test each column's data to see if Datetime will parse it
col_that_pass = Array(Symbol, 0)
for colname in col_to_test
d = match(r"[-|\s|\/|.]", df[1, colname])
d !== nothing? (bar = split(df[1, colname], d.match)): (bar = [])
if length(bar) == 3
push!(col_that_pass, colname)
end
end
# parse first column that passes the Datetime regex test
idx = Date[Date(d) for d in df[col_that_pass[1]]] # without
# Date it would fail chkIdx
# in constructor
delete!(df, [col_that_pass[1]])
end
## try whether DataFrame fits subtypes
try
td = Timematr(df, idx)
return td
catch
try
td = Timenum(df, idx)
return td
catch
td = Timedata(df, idx)
return td
end
end
return td
end
################
## TimeArrays ##
################
function convert(::Type{AbstractTimedata}, ta::TimeArray)
namesAsSymbols = [DataFrames.identifier(nam) for nam in ta.colnames]
df = composeDataFrame(ta.values, namesAsSymbols)
idx = ta.timestamp
try
td = Timematr(df, idx)
return td
catch
try
td = Timenum(df, idx)
return td
catch
td = Timedata(df, idx)
return td
end
end
td
end
function convert(::Type{TimeArray}, tn::AbstractTimenum)
dats = idx(tn)
nams = UTF8String[string(names(tn)[ii]) for ii = 1:size(tn, 2)]
return TimeArray(dats, asArr(tn, Float64, NaN), nams)
end
| [
27,
34345,
29,
10677,
14,
1102,
1851,
13,
20362,
27,
456,
62,
30783,
29,
16,
12,
940,
198,
7804,
21017,
198,
2235,
355,
3163,
81,
22492,
198,
7804,
21017,
198,
198,
2235,
355,
3163,
81,
25,
1949,
11315,
284,
15690,
11,
31821,
7219,
... | 2.176447 | 2,454 |
# SINGLE NODE/CORE VERSION
import Pkg; Pkg.instantiate()
include("/users/yh31/scratch/projects/gigaword_64k/src/gigaword_64k.jl")
const path_apw_dir = "/users/yh31/scratch/datasets/entity_linking/raw_data/gigaword/giga5/data/apw_eng"
const path_intermed_data = "/users/yh31/scratch/datasets/entity_linking/raw_data/gigaword/giga_topk/intermediate_files"
const path_intermed_apw = joinpath(path_intermed_data, "apw")
process_part_of_tree(path_apw_dir, path_intermed_apw, 4)
| [
2,
311,
2751,
2538,
399,
16820,
14,
34,
6965,
44156,
2849,
198,
198,
11748,
350,
10025,
26,
350,
10025,
13,
8625,
415,
9386,
3419,
198,
198,
17256,
7203,
14,
18417,
14,
88,
71,
3132,
14,
1416,
36722,
14,
42068,
14,
70,
328,
707,
5... | 2.274882 | 211 |
<gh_stars>1-10
# fundamental types (expanded below)
# nodes in the AST that describe the grammar. all Matcher instances must be
# mutable and have an attribute
# name::Symbol
# which is set automatically to the matcher type by the constructor.
# (re-set to a more useful type inside with_names() - see names.jl)
abstract type Matcher end
abstract type Message end # data sent between trampoline and methods
abstract type State end # state associated with Matchers during evaluation
# used to configure the parser. all Config subtypes must have associated
# dispatch functions (see parser.jl), a parent() function, and have a
# constructor that takes the source as first argument and additional arguments
# as keywords. the type of the source is exposed and if it's a subclass of
# string then the iterator is assumed to be a simple integer index.
abstract type Config{S,I} end
# important notes on mutability / hash / equality
# 1 - immutable types in julia are not "just" immutable. they are
# effectively values - they are passed by value. so do not use
# "immutable" just because the data should not change. think about
# details.
# 2 - immutable types have an automatic equality and hash based on
# content (which is copied). mutable types have an automatic equality
# and hash based on address. so default hash and equality for
# immutable types that contain mutable types, and for mutable types,
# may not be what is required.
# 3 - caching within the parser REQUIRES that both Matcher and State
# instances have 'useful' equality and hash values.
# 4 - for matchers, which are created when the grammar is defined, and
# then unchanged, the default hash and equality are likely just fine,
# even for mutable objects (in fact, mutable may be slightly faster
# since equality is just a comparison of an Int64 address,
# presumably).
# 5 - for states, which often includes mutable result objects, more
# care is needed:
# 5a - whether or not State instances are mutable or immutable, they,
# and their contents, must not change during matching. so all arrays,
# for example, must be copied when new instances are created with
# different values.
# 5b - structurally identical states must be equal, and hash equally.
# this is critical for efficient caching. so it it likely that
# custom hash and equality methods will be needed (see above and
# auto.jl).
# defaults for mismatching types and types with no content
==(a::Matcher, b::Matcher) = false
==(a::T, b::T) where {T<:Matcher} = true
==(a::State, b::State) = false
==(a::T, b::T) where {T<:State} = true
# use an array to handle empty values in a natural way
const Value = Vector{Any}
const EMPTY = Any[]
function flatten(x::Array{Value,1}) where {Value}
y::Value = vcat(x...)
return y
end
# Message sub-types
# use mutable types here since they are packed and unpacked often
# parent and parent_state are popped from the stack. a call is made to
# success(config, parent, parent_state, child_state, iter, result)
struct Success{CS<:State,I}<:Message
child_state::CS # parent to store, passed in next call for backtracking
iter::I # advanced as appropriate
result::Value # possibly empty
end
# parent and parent_state are popped from the stack. a call is made to
# failure(config, parent, parent_state)
struct Failure<:Message end
const FAILURE = Failure()
# parent and parent_state are pushed to the stack. a call is made to
# execute(config, child, child_state, iter)
struct Execute{I}<:Message
parent::Matcher # stored by trampoline, added to response
parent_state::State # stored by trampoline, added to response
child::Matcher # the matcher to evaluate
child_state::State # needed by for evaluation (was stored by parent)
iter::I
end
# State sub-types
# use immutable types because these are simple, magic values
# the state used on first call
struct Clean<:State end
const CLEAN = Clean()
# the state used when no further calls should be made
struct Dirty<:State end
const DIRTY = Dirty()
# other stuff
# user-generated errors (ie bad input, etc).
# internal errors in the library (bugs) may raise Error
struct ParserException<:Exception
msg
end
# this cannot be cached (thrown by hash())
struct CacheException<:Exception end
# this is equivalent to a matcher returning Failure. used when source
# information is not available.
abstract type FailureException<:Exception end
const Applicable = Union{Function, DataType}
| [
27,
456,
62,
30783,
29,
16,
12,
940,
628,
198,
2,
7531,
3858,
357,
11201,
12249,
2174,
8,
198,
198,
2,
13760,
287,
262,
29273,
326,
6901,
262,
23491,
13,
220,
477,
6550,
2044,
10245,
1276,
307,
198,
2,
4517,
540,
290,
423,
281,
... | 3.573007 | 1,267 |
<reponame>tomerarnon/julia-1
import Base: ==
mutable struct Point{T<:Number}
x::T
y::T
end
==(p::Point, q::Point) = p.x == q.x && p.y == q.y
@enum Heading NORTH=1 EAST=2 SOUTH=3 WEST=4
mutable struct Robot
position::Point
heading::Heading
end
function Robot(position::Tuple{T, T}, heading::Heading) where T<:Number
Robot(Point(position...), heading)
end
heading(r::Robot) = r.heading
position(r::Robot) = r.position
function turn_right!(r::Robot)
r.heading = Heading(mod1(Int(r.heading) + 1, 4))
r
end
function turn_left!(r::Robot)
r.heading = Heading(mod1(Int(r.heading) - 1, 4))
r
end
function advance!(r::Robot)
if heading(r) == NORTH
r.position.y += 1
elseif heading(r) == SOUTH
r.position.y -= 1
elseif heading(r) == EAST
r.position.x += 1
else
r.position.x -= 1
end
r
end
function move!(r::Robot, instructions::AbstractString)
moves = Dict('A' => advance!, 'R' => turn_right!, 'L' => turn_left!)
for move in instructions
move in keys(moves) && moves[move](r)
end
r
end
| [
27,
7856,
261,
480,
29,
83,
12057,
1501,
261,
14,
73,
43640,
12,
16,
198,
11748,
7308,
25,
6624,
198,
198,
76,
18187,
2878,
6252,
90,
51,
27,
25,
15057,
92,
198,
220,
220,
220,
2124,
3712,
51,
198,
220,
220,
220,
331,
3712,
51,
... | 2.251534 | 489 |
# ================================================================================================
# Main API
using FixedEffectModels
@reexport using FixedEffectModels
import StatsModels: width, apply_schema
# Entry point: dispatch an ANOVA over one or more fitted `FixedEffectModel`s
# to the requested goodness-of-fit test (defaults to the F test).
anova(trms::Vararg{TableRegressionModel{<: FixedEffectModel}};
        test::Type{<: GoodnessOfFit} = FTest,
        kwargs...) =
    anova(test, trms...; kwargs...)
# ================================================================================================
# ANOVA by F test
"""
    anova(::Type{FTest}, trm::TableRegressionModel{<: FixedEffectModel}; type = 1)

ANOVA (F test) for a single fitted fixed-effect model.

Supports type 1 (sequential) and type 3 (marginal) tests; type 2 is rejected
with an `ArgumentError`. Returns an `AnovaResult{FTest}` carrying the per-term
degrees of freedom, deviances, F statistics and p-values (the residual entry
gets `NaN` for the F statistic and p-value).
"""
function anova(::Type{FTest},
            trm::TableRegressionModel{<: FixedEffectModel};
            type::Int = 1, kwargs...)
    type == 2 && throw(ArgumentError("Type 2 anova is not implemented"))
    type in [1, 2, 3] || throw(ArgumentError("Invalid type"))
    # Per-term degrees of freedom (zero-dof terms dropped), with the residual
    # dof appended as the last entry.
    assign = trm.mm.assign
    df = dof(assign)
    filter!(>(0), df)
    # May exist some floating point error from dof_residual
    push!(df, round(Int, dof_residual(trm)))
    df = tuple(df...)
    if type in [1, 3]
        # vcov methods
        varβ = vcov(trm)
        β = trm.model.coef
        if type == 1
            # Sequential F: square the whitened coefficients (upper Cholesky
            # factor of the inverse covariance times β) and sum them per term.
            fs = abs2.(cholesky(Hermitian(inv(varβ))).U * β)
            offset = first(assign) - 1
            fstat = ntuple(last(assign) - offset) do fix
                sum(fs[findall(==(fix + offset), assign)]) / df[fix]
            end
        else
            # calculate block by block: marginal F via the quadratic form
            # β' Σ⁻¹ β restricted to each term's coefficient block.
            offset = first(assign) - 1
            fstat = ntuple(last(assign) - offset) do fix
                select = findall(==(fix + offset), assign)
                β[select]' * (varβ[select, select] \ β[select]) / df[fix]
            end
        end
        # Residual variance, then deviances (last entry is the residual's).
        σ² = rss(trm.model) / last(df)
        devs = (fstat .* σ²..., σ²) .* df
    end
    # p-values from the F distribution; the residual row gets NaN.
    pvalue = (ccdf.(FDist.(df[1:end - 1], last(df)), abs.(fstat))..., NaN)
    AnovaResult{FTest}(trm, type, df, devs, (fstat..., NaN), pvalue, NamedTuple())
end
# =================================================================================================================
# Nested models
"""
    anova(::Type{FTest}, trms::Vararg{TableRegressionModel{<: FixedEffectModel}};
          check = true, isnested = false)

F test comparing a sequence of (assumed nested) fixed-effect models.

Models are sorted by degrees of freedom before comparison. Nestedness is NOT
actually verified — a warning is emitted when `check = true` — so results are
only meaningful for genuinely nested models.
"""
function anova(::Type{FTest},
        trms::Vararg{TableRegressionModel{<: FixedEffectModel}};
        check::Bool = true,
        isnested::Bool = false)
    # NOTE(review): `isnested` is currently unused — presumably reserved for a
    # future real nestedness check; confirm against the package API.
    # Order the models from fewest to most degrees of freedom.
    df = dof.(trms)
    ord = sortperm(collect(df))
    df = df[ord]
    trms = trms[ord]
    # check comparable and nested
    check && @warn "Could not check whether models are nested: results may not be meaningful"
    Δdf = _diff(df)
    # May exist some floating point error from dof_residual
    dfr = round.(Int, dof_residual.(trms))
    # Residual sum of squares of each model.
    dev = ntuple(length(trms)) do i
        trms[i].model.rss
    end
    # Mean square of each successive RSS reduction, scaled by the largest
    # model's residual variance, gives the F statistics.
    msr = _diffn(dev) ./Δdf
    σ² = last(dev) / last(dfr)
    fstat = msr ./ σ²
    pval = map(zip(Δdf, dfr[2:end], fstat)) do (dof, dofr, fs)
        # A non-positive F can arise from numerical noise; report NaN then.
        fs > 0 ? ccdf(FDist(dof, dofr), fs) : NaN
    end
    AnovaResult{FTest}(trms, 1, df, dev, (NaN, fstat...), (NaN, pval...), NamedTuple())
end
"""
lfe(formula::FormulaTerm, df, vcov::CovarianceEstimator = Vcov.simple(); kwargs...)
Fit a `FixedEffectModel` and wrap it into `TableRegressionModel`.
!!! warn
This function currently does not perform well. It re-compiles everytime; may be due to `@nonspecialize` for parameters of `reg`.
"""
lfe(formula::FormulaTerm, df, vcov::CovarianceEstimator = Vcov.simple(); kwargs...) =
to_trm(reg(df, formula, vcov; kwargs...), df)
"""
to_trm(model, df)
Wrap fitted `FixedEffectModel` into `TableRegressionModel`.
"""
function to_trm(model::FixedEffectModel, df)
    f = model.formula
    has_fe_intercept = any(fe_intercept(f))
    # Ensure the RHS starts with an explicit intercept term unless one is
    # already present.
    rhs = vectorize(f.rhs)
    f = isa(first(rhs), ConstantTerm) ? f : FormulaTerm(f.lhs, (ConstantTerm(1), rhs...))
    # Rebuild schema and ModelFrame using the model's own contrasts.
    s = schema(f, df, model.contrasts)
    f = apply_schema(f, s, FixedEffectModel, has_fe_intercept)
    mf = ModelFrame(f, s, Tables.columntable(df[!, getproperty.(keys(s), :sym)]), FixedEffectModel)
    # Fake modelmatrix: downstream anova only reads `mm.assign` (the term
    # assignment vector), so a 1×1 placeholder replaces the real design matrix.
    assign = asgn(f)
    has_fe_intercept && popfirst!(assign)
    mm = ModelMatrix(ones(Float64, 1, 1), assign)
    TableRegressionModel(model, mf, mm)
end
# =================================================================================================================================
# Fit new models
"""
anova_lfe(f::FormulaTerm, df, vcov::CovarianceEstimator = Vcov.simple();
test::Type{<: GoodnessOfFit} = FTest, <keyword arguments>)
anova_lfe(test::Type{<: GoodnessOfFit}, f::FormulaTerm, df, vcov::CovarianceEstimator = Vcov.simple(); <keyword arguments>)
ANOVA for fixed-effect linear regression.
* `vcov`: estimator of covariance matrix.
* `type`: type of anova.
`anova_lfe` generate a `TableRegressionModel{<: FixedEffectModel}`.
"""
anova_lfe(f::FormulaTerm, df, vcov::CovarianceEstimator = Vcov.simple();
test::Type{<: GoodnessOfFit} = FTest,
kwargs...)=
anova(test, FixedEffectModel, f, df, vcov; kwargs...)
anova_lfe(test::Type{<: GoodnessOfFit}, f::FormulaTerm, df, vcov::CovarianceEstimator = Vcov.simple(); kwargs...) =
anova(test, FixedEffectModel, f, df, vcov; kwargs...)
# Fit-then-test entry point: run the regression with `reg`, wrap the result,
# and delegate to the single-model `anova` method.
function anova(test::Type{<: GoodnessOfFit}, ::Type{FixedEffectModel}, f::FormulaTerm, df, vcov::CovarianceEstimator = Vcov.simple();
        type::Int = 1,
        kwargs...)
    trm = to_trm(reg(df, f, vcov; kwargs...), df)
    # `; type` is keyword shorthand for `type = type`.
    anova(test, trm; type)
end
| [
2,
38093,
4770,
25609,
18604,
198,
2,
8774,
7824,
198,
198,
3500,
10832,
18610,
5841,
1424,
198,
31,
631,
87,
634,
1262,
10832,
18610,
5841,
1424,
198,
11748,
20595,
5841,
1424,
25,
9647,
11,
4174,
62,
15952,
2611,
198,
198,
40993,
7,... | 2.387068 | 2,227 |
<reponame>ericphanson/Diagonalizations.jl
using Diagonalizations, LinearAlgebra, PosDefManifold, Test
# Method (1) real
t, n, k=10, 20, 10
Xset = [genDataMatrix(t, n) for i = 1:k]
Xfixed=randn(t, n)./1
for i=1:length(Xset) Xset[i]+=Xfixed end
C1=Hermitian( mean((X'*X)/t for X∈Xset) )
C2=Hermitian( mean((X*X')/n for X∈Xset) )
Xbar=mean(Xset)
c=cstp(Xbar, C1, C2; simple=true)
@test c.F[1]'*C2*c.F[1]≈I
@test c.F[2]'*C1*c.F[2]≈I
Z=c.F[1]'*Xbar*c.F[2]
n=minimum(size(Z))
@test norm(Z[1:n, 1:n]-Diagonal(Z[1:n, 1:n]))+1. ≈ 1.
cX=cstp(Xset; simple=true)
@test c==cX
# Method (1) complex
t, n, k=10, 20, 10
Xcset = [genDataMatrix(ComplexF64, t, n) for i = 1:k]
Xcfixed=randn(ComplexF64, t, n)./1
for i=1:length(Xcset) Xcset[i]+=Xcfixed end
C1c=Hermitian( mean((Xc'*Xc)/t for Xc∈Xcset) )
C2c=Hermitian( mean((Xc*Xc')/n for Xc∈Xcset) )
Xcbar=mean(Xcset)
cc=cstp(Xcbar, C1c, C2c; simple=true)
@test cc.F[1]'*C2c*cc.F[1]≈I
@test cc.F[2]'*C1c*cc.F[2]≈I
Zc=cc.F[1]'*Xcbar*cc.F[2]
n=minimum(size(Zc))
@test norm(Zc[1:n, 1:n]-Diagonal(Zc[1:n, 1:n]))+1. ≈ 1.
cXc=cstp(Xcset; simple=true)
@test cc==cXc
# Method (2) real
c=cstp(Xset)
# ... selecting subspace dimension allowing an explained variance = 0.9
c=cstp(Xset; eVar=0.9)
# ... giving weights `w` to the covariance matrices
c=cstp(Xset; w=abs2.(randn(k)), eVar=0.9)
# ... subtracting the means
c=cstp(Xset; meanXd₁=nothing, meanXd₂=nothing, w=abs2.(randn(k)), eVar=0.9)
# explained variance
c.eVar
# name of the filter
c.name
using Plots
# plot the original covariance matrices and the transformed counterpart
c=cstp(Xset)
C1Max=maximum(abs.(C1));
h1 = heatmap(C1, clim=(-C1Max, C1Max), title="C1", yflip=true, c=:bluesreds);
D1=c.F[1]'*C2*c.F[1];
D1Max=maximum(abs.(D1));
h2 = heatmap(D1, clim=(0, D1Max), title="F[1]'*C2*F[1]", yflip=true, c=:amp);
C2Max=maximum(abs.(C2));
h3 = heatmap(C2, clim=(-C2Max, C2Max), title="C2", yflip=true, c=:bluesreds);
D2=c.F[2]'*C1*c.F[2];
D2Max=maximum(abs.(D2));
h4 = heatmap(D2, clim=(0, D2Max), title="F[2]'*C1*F[2]", yflip=true, c=:amp);
XbarMax=maximum(abs.(Xbar));
h5 = heatmap(Xbar, clim=(-XbarMax, XbarMax), title="Xbar", yflip=true, c=:bluesreds);
DX=c.F[1]'*Xbar*c.F[2];
DXMax=maximum(abs.(DX));
h6 = heatmap(DX, clim=(0, DXMax), title="F[1]'*Xbar*F[2]", yflip=true, c=:amp);
📈=plot(h1, h3, h5, h2, h4, h6, size=(800,400))
# savefig(📈, homedir()*"\\Documents\\Code\\julia\\Diagonalizations\\docs\\src\\assets\\FigCSTP.png")
# Method (2) complex
cc=cstp(Xcset)
# ... selecting subspace dimension allowing an explained variance = 0.9
cc=cstp(Xcset; eVar=0.9)
# ... giving weights `w` to the covariance matrices
cc=cstp(Xcset; w=abs2.(randn(k)), eVar=0.9)
# ... subtracting the mean
cc=cstp(Xcset; meanXd₁=nothing, meanXd₂=nothing,
w=abs2.(randn(k)), eVar=0.9)
# explained variance
c.eVar
# name of the filter
c.name
| [
27,
7856,
261,
480,
29,
35626,
746,
23103,
14,
18683,
27923,
4582,
13,
20362,
198,
3500,
6031,
27923,
4582,
11,
44800,
2348,
29230,
11,
18574,
7469,
5124,
361,
727,
11,
6208,
198,
198,
2,
11789,
357,
16,
8,
1103,
198,
83,
11,
299,
... | 1.949931 | 1,458 |
<reponame>lindahua/SparseExtensions.jl
using SparseVectors
using Base.Test
import SparseVectors: exact_equal
### Data
rnd_x0 = sprand(50, 0.6)
rnd_x0f = full(rnd_x0)
rnd_x1 = sprand(50, 0.7) * 4.0
rnd_x1f = full(rnd_x1)
spv_x1 = SparseVector(8, [2, 5, 6], [1.25, -0.75, 3.5])
_x2 = SparseVector(8, [1, 2, 6, 7], [3.25, 4.0, -5.5, -6.0])
spv_x2 = view(_x2)
### Arithmetic operations
let x = spv_x1, x2 = spv_x2
# negate
@test exact_equal(-x, SparseVector(8, [2, 5, 6], [-1.25, 0.75, -3.5]))
# abs and abs2
@test exact_equal(abs(x), SparseVector(8, [2, 5, 6], abs([1.25, -0.75, 3.5])))
@test exact_equal(abs2(x), SparseVector(8, [2, 5, 6], abs2([1.25, -0.75, 3.5])))
# plus and minus
xa = SparseVector(8, [1,2,5,6,7], [3.25,5.25,-0.75,-2.0,-6.0])
@test exact_equal(x + x, x * 2)
@test exact_equal(x + x2, xa)
@test exact_equal(x2 + x, xa)
xb = SparseVector(8, [1,2,5,6,7], [-3.25,-2.75,-0.75,9.0,6.0])
@test exact_equal(x - x, SparseVector(8, Int[], Float64[]))
@test exact_equal(x - x2, xb)
@test exact_equal(x2 - x, -xb)
@test full(x) + x2 == full(xa)
@test full(x) - x2 == full(xb)
@test x + full(x2) == full(xa)
@test x - full(x2) == full(xb)
# multiplies
xm = SparseVector(8, [2, 6], [5.0, -19.25])
@test exact_equal(x .* x, abs2(x))
@test exact_equal(x .* x2, xm)
@test exact_equal(x2 .* x, xm)
@test full(x) .* x2 == full(xm)
@test x .* full(x2) == full(xm)
# max & min
@test exact_equal(max(x, x), x)
@test exact_equal(min(x, x), x)
@test exact_equal(max(x, x2),
SparseVector(8, Int[1, 2, 6], Float64[3.25, 4.0, 3.5]))
@test exact_equal(min(x, x2),
SparseVector(8, Int[2, 5, 6, 7], Float64[1.25, -0.75, -5.5, -6.0]))
end
### Complex
let x = spv_x1, x2 = spv_x2
# complex
@test exact_equal(complex(x, x),
SparseVector(8, [2,5,6], [1.25+1.25im, -0.75-0.75im, 3.5+3.5im]))
@test exact_equal(complex(x, x2),
SparseVector(8, [1,2,5,6,7], [3.25im, 1.25+4.0im, -0.75+0.im, 3.5-5.5im, -6.0im]))
@test exact_equal(complex(x2, x),
SparseVector(8, [1,2,5,6,7], [3.25+0.im, 4.0+1.25im, -0.75im, -5.5+3.5im, -6.0+0.im]))
# real & imag
@test is(real(x), x)
@test exact_equal(imag(x), sparsevector(Float64, length(x)))
xcp = complex(x, x2)
@test exact_equal(real(xcp), x)
@test exact_equal(imag(xcp), x2)
end
### Zero-preserving math functions: sparse -> sparse
# Check a zero-preserving elementwise function (f(0) == 0): applying `f` to a
# sparse vector must yield a sparse vector whose stored entries are all
# nonzero and whose dense form agrees with applying `f` to the dense copy.
# (Julia 0.x parametric-method syntax, kept to match the rest of this file.)
function check_nz2z_z2z{T}(f::Function, x::SparseVector{T}, xf::Vector{T})
    R = typeof(f(zero(T)))   # expected result eltype
    r = f(x)
    isa(r, AbstractSparseVector) || error("$f(x) is not a sparse vector.")
    eltype(r) == R || error("$f(x) results in eltype = $(eltype(r)), expect $R")
    all(r.nzval .!= 0) || error("$f(x) contains zeros in nzval.")
    full(r) == f(xf) || error("Incorrect results found in $f(x).")
end
for f in [floor, ceil, trunc, round]
check_nz2z_z2z(f, rnd_x1, rnd_x1f)
end
for f in [log1p, expm1,
sin, tan, sinpi, sind, tand,
asin, atan, asind, atand,
sinh, tanh, asinh, atanh]
check_nz2z_z2z(f, rnd_x0, rnd_x0f)
end
### Non-zero-preserving math functions: sparse -> dense
# Check a non-zero-preserving elementwise function (f(0) != 0): applying `f`
# to a sparse vector must produce a DENSE vector of the expected eltype that
# agrees with applying `f` to the dense copy.
function check_z2nz{T}(f::Function, x::SparseVector{T}, xf::Vector{T})
    R = typeof(f(zero(T)))   # expected result eltype
    r = f(x)
    isa(r, Vector) || error("$f(x) is not a dense vector.")
    eltype(r) == R || error("$f(x) results in eltype = $(eltype(r)), expect $R")
    r == f(xf) || error("Incorrect results found in $f(x).")
end
for f in [exp, exp2, exp10, log, log2, log10,
cos, csc, cot, sec, cospi,
cosd, cscd, cotd, secd,
acos, acot, acosd, acotd,
cosh, csch, coth, sech, acsch, asech]
check_z2nz(f, rnd_x0, rnd_x0f)
end
### Reduction
# sum, sumabs, sumabs2, vecnorm
let x = spv_x1
@test sum(x) == 4.0
@test sumabs(x) == 5.5
@test sumabs2(x) == 14.375
@test vecnorm(x) == sqrt(14.375)
@test vecnorm(x, 1) == 5.5
@test vecnorm(x, 2) == sqrt(14.375)
@test vecnorm(x, Inf) == 3.5
end
# maximum, minimum, maxabs, minabs
let x = spv_x1
@test maximum(x) == 3.5
@test minimum(x) == -0.75
@test maxabs(x) == 3.5
@test minabs(x) == 0.0
end
let x = abs(spv_x1)
@test maximum(x) == 3.5
@test minimum(x) == 0.0
end
let x = -abs(spv_x1)
@test maximum(x) == 0.0
@test minimum(x) == -3.5
end
let x = SparseVector(3, [1, 2, 3], [-4.5, 2.5, 3.5])
@test maximum(x) == 3.5
@test minimum(x) == -4.5
@test maxabs(x) == 4.5
@test minabs(x) == 2.5
end
let x = sparsevector(Float64, 8)
@test maximum(x) == 0.0
@test minimum(x) == 0.0
@test maxabs(x) == 0.0
@test minabs(x) == 0.0
end
# Issue https://github.com/JuliaLang/julia/issues/14046
let s14046 = sprand(5, 1.0)
z = sparsevector(Float64, 5)
@test z + s14046 == s14046
@test 2*z == z + z == z
end
| [
27,
7856,
261,
480,
29,
75,
521,
993,
6413,
14,
50,
29572,
11627,
5736,
13,
20362,
198,
3500,
1338,
17208,
53,
478,
669,
198,
3500,
7308,
13,
14402,
198,
198,
11748,
1338,
17208,
53,
478,
669,
25,
2748,
62,
40496,
198,
198,
21017,
... | 1.919418 | 2,544 |
import accrue.cryptoerase.runtime.Condition;

// Test fixture for the accrue "cryptoerase" information-flow system.
// NOTE(review): the brace annotations (`int{L}`, `({L /c T})`) are NOT
// standard Java — this compiles only under the extended accrue compiler.
class C {
    int{L} x = 7;                 // field labeled with static security level L
    final int z = m();            // initializer runs m() during construction
    static final Condition c = new Condition();

    int m() {
        // The cast attaches the erasure policy {L /c T} to the value 42:
        // presumably "level L, erased to T when condition c fires" — confirm
        // against the cryptoerase language definition.
        int y = ({L /c T}) 42;
        return y;
    }

    public static void main(String[] args) {
        C obj = new C();
        obj.c.set();              // fires the condition guarding y's policy
    }
}
| [
11748,
697,
24508,
13,
29609,
78,
263,
589,
13,
43282,
13,
48362,
26,
198,
198,
4871,
327,
1391,
198,
220,
220,
220,
493,
90,
43,
92,
2124,
796,
767,
26,
198,
220,
220,
220,
2457,
493,
1976,
796,
285,
9783,
198,
220,
220,
220,
9... | 2.309524 | 126 |
<gh_stars>10-100
using Static, Aqua
using Test
@testset "Static.jl" begin
Aqua.test_all(Static)
@testset "StaticInt" begin
@test static(UInt(8)) === StaticInt(UInt(8)) === StaticInt{8}()
@test iszero(StaticInt(0))
@test !iszero(StaticInt(1))
@test !isone(StaticInt(0))
@test isone(StaticInt(1))
@test @inferred(one(StaticInt(1))) === StaticInt(1)
@test @inferred(zero(StaticInt(1))) === StaticInt(0)
@test @inferred(one(StaticInt)) === StaticInt(1)
@test @inferred(zero(StaticInt)) === StaticInt(0) === StaticInt(StaticInt(Val(0)))
@test eltype(one(StaticInt)) <: Int
x = StaticInt(1)
@test @inferred(Bool(x)) isa Bool
@test @inferred(BigInt(x)) isa BigInt
@test @inferred(Integer(x)) === x
@test @inferred(%(x, Integer)) === 1
# test for ambiguities and correctness
for i ∈ Any[StaticInt(0), StaticInt(1), StaticInt(2), 3]
for j ∈ Any[StaticInt(0), StaticInt(1), StaticInt(2), 3]
i === j === 3 && continue
for f ∈ [+, -, *, ÷, %, <<, >>, >>>, &, |, ⊻, ==, ≤, ≥]
(iszero(j) && ((f === ÷) || (f === %))) && continue # integer division error
@test convert(Int, @inferred(f(i,j))) == f(convert(Int, i), convert(Int, j))
end
end
i == 3 && break
for f ∈ [+, -, *, /, ÷, %, ==, ≤, ≥]
w = f(convert(Int, i), 1.4)
x = f(1.4, convert(Int, i))
@test convert(typeof(w), @inferred(f(i, 1.4))) === w
@test convert(typeof(x), @inferred(f(1.4, i))) === x # if f is division and i === StaticInt(0), returns `NaN`; hence use of ==== in check.
(((f === ÷) || (f === %)) && (i === StaticInt(0))) && continue
y = f(convert(Int, i), 2 // 7)
z = f(2 // 7, convert(Int, i))
@test convert(typeof(y), @inferred(f(i, 2 // 7))) === y
@test convert(typeof(z), @inferred(f(2 // 7, i))) === z
end
end
@test UnitRange{Int16}(StaticInt(-9), 17) === Int16(-9):Int16(17)
@test UnitRange{Int16}(-7, StaticInt(19)) === Int16(-7):Int16(19)
@test UnitRange(-11, StaticInt(15)) === -11:15
@test UnitRange(StaticInt(-11), 15) === -11:15
@test UnitRange{Int}(StaticInt(-11), StaticInt(15)) === -11:15
@test UnitRange(StaticInt(-11), StaticInt(15)) === -11:15
@test float(StaticInt(8)) === static(8.0)
# test specific promote rules to ensure we don't cause ambiguities
SI = StaticInt{1}
IR = typeof(1//1)
PI = typeof(pi)
@test @inferred(convert(SI, SI())) === SI()
@test @inferred(promote_rule(SI, PI)) <: promote_type(Int, PI)
@test @inferred(promote_rule(SI, IR)) <: promote_type(Int, IR)
@test @inferred(promote_rule(SI, SI)) <: Int
@test @inferred(promote_rule(Missing, SI)) <: promote_type(Missing, Int)
@test @inferred(promote_rule(Nothing, SI)) <: promote_type(Nothing, Int)
@test @inferred(promote_rule(SI, Missing)) <: promote_type(Int, Missing)
@test @inferred(promote_rule(SI, Nothing)) <: promote_type(Int, Nothing)
@test @inferred(promote_rule(Union{Missing,Int}, SI)) <: promote_type(Union{Missing,Int}, Int)
@test @inferred(promote_rule(Union{Nothing,Int}, SI)) <: promote_type(Union{Nothing,Int}, Int)
@test @inferred(promote_rule(Union{Nothing,Missing,Int}, SI)) <: Union{Nothing,Missing,Int}
@test @inferred(promote_rule(Union{Nothing,Missing}, SI)) <: promote_type(Union{Nothing,Missing}, Int)
@test @inferred(promote_rule(SI, Missing)) <: promote_type(Int, Missing)
@test @inferred(promote_rule(Base.TwicePrecision{Int}, StaticInt{1})) <: Base.TwicePrecision{Int}
@test static(Int8(-18)) === static(-18)
@test static(0xef) === static(239)
@test static(Int16(-18)) === static(-18)
@test static(0xffef) === static(65519)
if sizeof(Int) == 8
@test static(Int32(-18)) === static(-18)
@test static(0xffffffef) === static(4294967279)
end
end
@testset "StaticBool" begin
t = static(static(true))
f = StaticBool(static(false))
@test StaticBool{true}() === t
@test StaticBool{false}() === f
@test @inferred(StaticInt(t)) === StaticInt(1)
@test @inferred(StaticInt(f)) === StaticInt(0)
@test @inferred(~t) === f
@test @inferred(~f) === t
@test @inferred(!t) === f
@test @inferred(!f) === t
@test @inferred(+t) === StaticInt(1)
@test @inferred(+f) === StaticInt(0)
@test @inferred(-t) === StaticInt(-1)
@test @inferred(-f) === StaticInt(0)
@test @inferred(sign(t)) === t
@test @inferred(abs(t)) === t
@test @inferred(abs2(t)) === t
@test @inferred(iszero(t)) === f
@test @inferred(isone(t)) === t
@test @inferred(iszero(f)) === t
@test @inferred(isone(f)) === f
@test @inferred(xor(true, f))
@test @inferred(xor(f, true))
@test @inferred(xor(f, f)) === f
@test @inferred(xor(f, t)) === t
@test @inferred(xor(t, f)) === t
@test @inferred(xor(t, t)) === f
@test @inferred(|(true, f))
@test @inferred(|(true, t)) === t
@test @inferred(|(f, true))
@test @inferred(|(t, true)) === t
@test @inferred(|(f, f)) === f
@test @inferred(|(f, t)) === t
@test @inferred(|(t, f)) === t
@test @inferred(|(t, t)) === t
@test @inferred(Base.:(&)(true, f)) === f
@test @inferred(Base.:(&)(true, t))
@test @inferred(Base.:(&)(f, true)) === f
@test @inferred(Base.:(&)(t, true))
@test @inferred(Base.:(&)(f, f)) === f
@test @inferred(Base.:(&)(f, t)) === f
@test @inferred(Base.:(&)(t, f)) === f
@test @inferred(Base.:(&)(t, t)) === t
@test @inferred(<(f, f)) === false
@test @inferred(<(f, t)) === true
@test @inferred(<(t, f)) === false
@test @inferred(<(t, t)) === false
@test @inferred(<=(f, f)) === true
@test @inferred(<=(f, t)) === true
@test @inferred(<=(t, f)) === false
@test @inferred(<=(t, t)) === true
@test @inferred(==(f, f)) === true
@test @inferred(==(f, t)) === false
@test @inferred(==(t, f)) === false
@test @inferred(==(t, t)) === true
@test @inferred(*(f, t)) === t & f
@test @inferred(-(f, t)) === StaticInt(f) - StaticInt(t)
@test @inferred(+(f, t)) === StaticInt(f) + StaticInt(t)
@test @inferred(^(t, f)) == ^(true, false)
@test @inferred(^(t, t)) == ^(true, true)
@test @inferred(^(2, f)) == 1
@test @inferred(^(2, t)) == 2
@test @inferred(^(BigInt(2), f)) == 1
@test @inferred(^(BigInt(2), t)) == 2
@test @inferred(div(t, t)) === t
@test_throws DivideError div(t, f)
@test @inferred(rem(t, t)) === f
@test_throws DivideError rem(t, f)
@test @inferred(mod(t, t)) === f
@test @inferred(all((t, t, t)))
@test !@inferred(all((t, f, t)))
@test !@inferred(all((f, f, f)))
@test @inferred(any((t, t, t)))
@test @inferred(any((t, f, t)))
@test !@inferred(any((f, f, f)))
x = StaticInt(1)
y = StaticInt(0)
z = StaticInt(-1)
@test @inferred(Static.eq(y)(x)) === f
@test @inferred(Static.eq(x, x)) === t
@test @inferred(Static.ne(y)(x)) === t
@test @inferred(Static.ne(x, x)) === f
@test @inferred(Static.gt(y)(x)) === t
@test @inferred(Static.gt(y, x)) === f
@test @inferred(Static.ge(y)(x)) === t
@test @inferred(Static.ge(y, x)) === f
@test @inferred(Static.lt(x)(y)) === t
@test @inferred(Static.lt(x, y)) === f
@test @inferred(Static.le(x)(y)) === t
@test @inferred(Static.le(x, y)) === f
@test @inferred(Static.ifelse(t, x, y)) === x
@test @inferred(Static.ifelse(f, x, y)) === y
@test @inferred(promote_rule(True, True)) <: StaticBool
@test @inferred(promote_rule(True, Bool)) <: Bool
@test @inferred(promote_rule(Bool, True)) <: Bool
end
@testset "StaticSymbol" begin
x = StaticSymbol(:x)
y = StaticSymbol("y")
z = StaticSymbol(1)
@test y === StaticSymbol(:y)
@test z === StaticSymbol(Symbol(1))
@test @inferred(StaticSymbol(x)) === x
@test @inferred(StaticSymbol(x, y)) === StaticSymbol(:x, :y)
@test @inferred(StaticSymbol(x, y, z)) === static(:xy1)
end
@testset "static interface" begin
v = Val((:a, 1, true))
@test static(1) === StaticInt(1)
@test static(true) === True()
@test static(:a) === StaticSymbol{:a}()
@test Symbol(static(:a)) === :a
@test static((:a, 1, true)) === (static(:a), static(1), static(true))
@test @inferred(static(v)) === (static(:a), static(1), static(true))
@test_throws ErrorException static("a")
@test @inferred(Static.is_static(v)) === True()
@test @inferred(Static.is_static(typeof(v))) === True()
@test @inferred(Static.is_static(typeof(static(true)))) === True()
@test @inferred(Static.is_static(typeof(static(1)))) === True()
@test @inferred(Static.is_static(typeof(static(:x)))) === True()
@test @inferred(Static.is_static(typeof(1))) === False()
@test @inferred(Static.is_static(typeof((static(:x),static(:x))))) === True()
@test @inferred(Static.is_static(typeof((static(:x),:x)))) === False()
@test @inferred(Static.is_static(typeof(static(1.0)))) === True()
@test @inferred(Static.known(v)) === (:a, 1, true)
@test @inferred(Static.known(typeof(v))) === (:a, 1, true)
@test @inferred(Static.known(typeof(static(true))))
@test @inferred(Static.known(typeof(static(false)))) === false
@test @inferred(Static.known(typeof(static(1.0)))) === 1.0
@test @inferred(Static.known(typeof(static(1)))) === 1
@test @inferred(Static.known(typeof(static(:x)))) === :x
@test @inferred(Static.known(typeof(1))) === missing
@test @inferred(Static.known(typeof((static(:x),static(:x))))) === (:x, :x)
@test @inferred(Static.known(typeof((static(:x),:x)))) === (:x, missing)
@test @inferred(Static.dynamic((static(:a), static(1), true))) === (:a, 1, true)
end
@testset "tuple utilities" begin
x = (static(1), static(2), static(3))
y = (static(3), static(2), static(1))
z = (static(1), static(2), static(3), static(4))
T = Tuple{Int,Float64,String}
@test @inferred(Static.invariant_permutation(x, x)) === True()
@test @inferred(Static.invariant_permutation(x, y)) === False()
@test @inferred(Static.invariant_permutation(x, z)) === False()
@test @inferred(Static.permute(x, Val(x))) === x
@test @inferred(Static.permute(x, (static(1), static(2)))) === (static(1), static(2))
@test @inferred(Static.permute(x, x)) === x
@test @inferred(Static.permute(x, y)) === y
@test @inferred(Static.eachop(getindex, x)) === x
get_tuple_add(::Type{T}, ::Type{X}, dim::StaticInt) where {T,X} = Tuple{Static._get_tuple(T, dim),X}
@test @inferred(Static.eachop_tuple(Static._get_tuple, y, T)) === Tuple{String,Float64,Int}
@test @inferred(Static.eachop_tuple(get_tuple_add, y, T, String)) === Tuple{Tuple{String,String},Tuple{Float64,String},Tuple{Int,String}}
@test @inferred(Static.find_first_eq(static(1), y)) === static(3)
# inferred is Union{Int,Nothing}
@test Static.find_first_eq(1, map(Int, y)) === 3
end
@testset "NDIndex" begin
x = NDIndex((1,2,3))
y = NDIndex((1,static(2),3))
z = NDIndex(static(3), static(3), static(3))
@testset "constructors" begin
@test static(CartesianIndex(3, 3, 3)) === z == Base.setindex(Base.setindex(x, 3, 1), 3, 2)
@test @inferred(CartesianIndex(z)) === @inferred(Static.dynamic(z)) === CartesianIndex(3, 3, 3)
@test @inferred(Static.known(z)) === (3, 3, 3)
@test Tuple(@inferred(NDIndex{0}())) === ()
@test @inferred(NDIndex{3}(1, static(2), 3)) === y
@test @inferred(NDIndex{3}((1, static(2), 3))) === y
@test @inferred(NDIndex{3}((1, static(2)))) === NDIndex(1, static(2), static(1))
@test @inferred(NDIndex(x, y)) === NDIndex(1, 2, 3, 1, static(2), 3)
end
@test @inferred(Base.IteratorsMD.split(x, Val(2))) === (NDIndex(1, 2), NDIndex(3,))
@test @inferred(length(x)) === 3
@test @inferred(length(typeof(x))) === 3
@test @inferred(y[2]) === 2
@test @inferred(y[static(2)]) === static(2)
@test @inferred(-y) === NDIndex((-1,-static(2),-3))
@test @inferred(y + y) === NDIndex((2,static(4),6))
@test @inferred(y - y) === NDIndex((0,static(0),0))
@test @inferred(zero(x)) === NDIndex(static(0),static(0),static(0))
@test @inferred(oneunit(x)) === NDIndex(static(1),static(1),static(1))
@test @inferred(x * 3) === NDIndex((3,6,9))
@test @inferred(3 * x) === NDIndex((3,6,9))
@test @inferred(min(x, z)) === x
@test @inferred(max(x, z)) === NDIndex(3, 3, 3)
@test !@inferred(isless(y, x))
@test @inferred(isless(x, z))
@test @inferred(Static.lt(oneunit(z), z)) === static(true)
A = rand(3,3,3);
@test @inferred(to_indices(A, axes(A), (x,))) === (1, 2, 3)
@test @inferred(to_indices(A, axes(A), ([y,y],))) == ([y, y],)
end
@test repr(static(float(1))) == "static($(float(1)))"
@test repr(static(1)) == "static(1)"
@test repr(static(:x)) == "static(:x)"
@test repr(static(true)) == "static(true)"
@test repr(static(CartesianIndex(1,1))) == "NDIndex(static(1), static(1))"
end
# for some reason this can't be inferred when in the "Static.jl" test set
known_length(x) = known_length(typeof(x))
known_length(::Type{T}) where {N,T<:Tuple{Vararg{Any,N}}} = N
known_length(::Type{T}) where {T} = missing
maybe_static_length(x) = Static.maybe_static(known_length, length, x)
x = ntuple(+, 10)
y = 1:10
@test @inferred(maybe_static_length(x)) === StaticInt(10)
@test @inferred(maybe_static_length(y)) === 10
include("float.jl")
#=
A = rand(3,4);
offset1(x::Base.OneTo) = static(1)
offset1(x::AbstractUnitRange) = first(x)
offsets(x) = Static._eachop(offset1, (axes(x),), Static.nstatic(Val(ndims(x))))
=#
| [
27,
456,
62,
30783,
29,
940,
12,
3064,
198,
3500,
36125,
11,
24838,
198,
3500,
6208,
198,
198,
31,
9288,
2617,
366,
45442,
13,
20362,
1,
2221,
198,
220,
220,
220,
24838,
13,
9288,
62,
439,
7,
45442,
8,
628,
220,
220,
220,
2488,
... | 2.07922 | 7,132 |
<filename>BuddhaV2/genimg.jl<gh_stars>0
using JLD, Images
# Render a Buddhabrot-style image: for each stored starting value `c`, trace
# the orbit z ← z² + c and deposit a small red increment into every pixel the
# orbit visits while |z| < 2. The accumulated image is clamped to [0, 1].
function main(dim)
    points = load("./out/20k.jld", "points")  # assumes pre-computed escaping points — TODO confirm
    img = zeros(RGB,dim,dim)
    for c in points
        z = c
        # NOTE(review): no iteration cap — this loop terminates only if the
        # orbit actually escapes (|z| ≥ 2); a non-escaping point would hang
        # forever. Verify the generator of 20k.jld only stores escaping points.
        while abs(z) < 2
            # Map the window re ∈ [-2, 1], im ∈ [-1.5, 1.5] to pixel indices.
            x = trunc(Int,(real(z) + 2)*dim/3); y = trunc(Int,(imag(z) + 1.5)*dim/3)
            # Chained comparison: 0 < x, x < dim, dim > y, y > 0 — i.e. both
            # indices strictly inside (0, dim); out-of-range hits are skipped.
            0<x<dim>y>0 && (img[x,y] += RGB(0.0005,0,0))
            z = z^2 + c
        end
    end
    return map(clamp01nan, img)  # clamp to [0,1] and squash NaNs for saving
end

@time save("./images/t1.png", main(1500))
27,
34345,
29,
33,
4185,
3099,
53,
17,
14,
5235,
9600,
13,
20362,
27,
456,
62,
30783,
29,
15,
198,
3500,
449,
11163,
11,
5382,
201,
198,
201,
198,
8818,
1388,
7,
27740,
8,
201,
198,
220,
220,
220,
2173,
796,
3440,
7,
1911,
14,
... | 1.769784 | 278 |
<reponame>sonosole/Delta
clear()
@testset "check pooling op's gradient at single dim" begin
for d in [1 2 3]
for pool in [maximum minimum sum mean linearpool exppool]
@testset "check $pool op's gradient at dim = $d" begin
DIMS = d
TYPE = Array{Float64};
# [1] prepare input data and its label
inputdims = 64;
timeSteps = 16;
batchsize = 32;
x = Variable(rand(inputdims, timeSteps, batchsize); type=TYPE, keepsgrad=true,);
if d==1;l = Variable(rand(1, timeSteps, batchsize); type=TYPE);end
if d==2;l = Variable(rand(inputdims, 1, batchsize); type=TYPE);end
if d==3;l = Variable(rand(inputdims, timeSteps, 1); type=TYPE);end
# [2] forward and backward propagation
COST1 = cost(mseLoss(pool(x; dims=DIMS), l));
backward();
# [3] with a samll change of a weight
GRAD = x.delta[1];
DELTA = 1e-6;
x.value[1] += DELTA;
# [4] forward and backward propagation
COST2 = cost(mseLoss(pool(x; dims=DIMS), l));
backward();
# [5] check if the auto-grad is true or not
dLdW = (COST2 - COST1)/DELTA; # numerical gradient
err = abs((dLdW-GRAD)/(GRAD+eps(Float32)))*100; # relative error in %
err = err < 1e-1 ? 0.0 : err;
@test err<1.0
end
end
end
end
@testset "check pooling op's gradient at mutiple dims" begin
for pool in [maximum minimum sum mean linearpool exppool]
@testset "check $pool op's gradient" begin
DIMS = (1,2)
TYPE = Array{Float64};
# [1] prepare input data and its label
inputdims = 64;
timeSteps = 16;
batchsize = 32;
x = Variable(rand(inputdims, timeSteps, batchsize); type=TYPE,keepsgrad=true);
l = Variable(rand(1, 1, batchsize); type=TYPE);
# [2] forward and backward propagation
COST1 = cost(mseLoss(pool(x; dims=DIMS), l));
backward();
# [3] with a samll change of a weight
GRAD = x.delta[1];
DELTA = 1e-6;
x.value[1] += DELTA;
# [4] forward and backward propagation with a samll change of a weight
COST2 = cost(mseLoss(pool(x; dims=DIMS), l));
backward();
# [5] check if the auto-grad is true or not
dLdW = (COST2 - COST1)/DELTA; # numerical gradient
err = abs((dLdW-GRAD)/(GRAD+eps(Float64)))*100; # relative error in %
err = err < 1e-1 ? 0.0 : err;
@test err<1.0
end
end
end
@testset "check maxmin and minmax op's gradient at mutiple dims" begin
for pool in [maxmin minmax]
@testset "check $pool op's gradient" begin
DIM1 = 1
DIM2 = 2
TYPE = Array{Float64};
# [1] prepare input data and its label
inputdims = 64;
timeSteps = 16;
batchsize = 32;
x = Variable(rand(inputdims, timeSteps, batchsize); type=TYPE,keepsgrad=true);
l = Variable(rand(1, 1, batchsize); type=TYPE);
# [2] forward and backward propagation
COST1 = mseLoss(pool(x; dims1=DIM1, dims2=DIM2), l) |> cost
backward();
# [3] with a samll change of a weight
GRAD = x.delta[1];
DELTA = 1e-6;
x.value[1] += DELTA;
# [4] forward and backward propagation with a samll change of a weight
COST2 = mseLoss(pool(x; dims1=DIM1, dims2=DIM2), l) |> cost
backward();
# [5] check if the auto-grad is true or not
dLdW = (COST2 - COST1)/DELTA; # numerical gradient
err = abs((dLdW-GRAD)/(GRAD+eps(Float64)))*100; # relative error in %
err = err < 1e-1 ? 0.0 : err;
@test err<1.0
end
end
end
| [
27,
7856,
261,
480,
29,
1559,
418,
2305,
14,
42430,
198,
20063,
3419,
198,
31,
9288,
2617,
366,
9122,
5933,
278,
1034,
338,
31312,
379,
2060,
5391,
1,
2221,
198,
220,
220,
220,
329,
288,
287,
685,
16,
362,
513,
60,
198,
220,
220,
... | 1.889543 | 2,209 |
# ====================================================================
# Load Different Logger
# ====================================================================
"""
load_logger_from_config(file_path::AbstractString)::TeeLogger
Create a combined logger from a config file path.
### Note
A combined logger is a `TeeLogger` struct that allows to send the same log message to
all loggers included in the combined logger at once.
"""
function load_logger_from_config(file_path::AbstractString)
json_string = read(file_path, String)
json3_dict = JSON3.read(json_string) #, Dict
return load_logger_from_config(json3_dict)
end
"""
load_logger_from_config(config::Dict)::TeeLogger
Create a `TeeLoger`, a collection of logger, from a configuration `Dict`.
### Note
- A `TeeLogger` struct allows to send the same log message to all loggers included in the `TeeLoger` at once.
- The configuration `Dict` requires as `"logging"` field, see example for more details.
### Example
```julia
config = Dict(
...,
"logging" => [
# logger_type => logger_config_dict,
"SeqLogger" => Dict(...),
...
],
...
)
```
### Returns
`TeeLogger` as defined in `config`
"""
# Build one sub-logger per entry of `config["logging"]` (logger-type name =>
# logger-specific config) and combine them into a single TeeLogger so every
# log record is fanned out to all of them.
function load_logger_from_config(config::AbstractDict)
    loggers = [
        get_logger(string(logger_type), logger_specs) for (logger_type, logger_specs) in config["logging"]
    ]
    return TeeLogger(loggers...)
end
# ====================================================================
# Load Single Logger
# ====================================================================
"""
load_seqlogger(logger_config::AbstractDict)::Union{SeqLogger, TransformerLogger}
Return a `SeqLogger` or `TransformerLogger` according to `logger_config`.
### Config Parameters
- `"server_url"` -- required
- `"min_level"` -- required (`"DEBUG", "INFO", "WARN", "ERROR"`)
- `"transformation"` -- optional, default `identity`
- `"api_key"` -- optional, default `""`
- `"batch_size"` -- optional, default `10`
All other config parameters are used as global event properties.
### Example
```julia
log_dict = Dict(
"server_url" => "http://subdn215:5341/",
"min_level" => "INFO",
"batch_size" => 12,
"App" => "SeqLoggers_Test",
"Env" => "Test"
)
seq_logger = SeqLoggers.load_seqlogger(log_dict)
```
"""
function load_seqlogger(logger_config::AbstractDict)
    # Required parameters.
    server_url = logger_config["server_url"]
    min_level = get_log_level(logger_config["min_level"])
    # Optional logger transformation (defaults to the identity function).
    transformation = get_transformation_function(
        get(logger_config, "transformation", "identity")
    )
    # Every remaining config entry becomes a global event property,
    # forwarded to `SeqLogger` as a keyword argument.
    reserved = ("server_url", "min_level", "transformation")
    extra_keys = filter(key -> string(key) ∉ reserved, keys(logger_config))
    property_names = Tuple(Symbol(key) for key in extra_keys)
    property_values = [logger_config[key] for key in extra_keys]
    event_properties = NamedTuple{property_names}(property_values)
    seq_logger = SeqLogger(server_url; min_level=min_level, event_properties...)
    return transformation(seq_logger)
end
"""
load_consolelogger(logger_config::AbstractDict)::Union{ConsoleLogger, TransformerLogger}
Return a `ConsoleLogger` or `TransformerLogger` according to `logger_config`.
### Config Parameters
- `"min_level"` -- required (`"DEBUG", "INFO", "WARN", "ERROR"`)
- `"transformation"` -- optional, default `identity`
### Example
```julia
logging_config = Dict(
"min_level" => "ERROR",
"transformation" => "add_timestamp",
)
seq_logger = SeqLoggers.load_consolelogger(log_dict)
```
"""
function load_consolelogger(logger_config::AbstractDict)
    # Required minimum log level.
    level = get_log_level(logger_config["min_level"])
    # Optional transformation wrapper (defaults to the identity function).
    wrap = get_transformation_function(get(logger_config, "transformation", "identity"))
    return wrap(ConsoleLogger(stderr, level))
end
"""
load_consolelogger(logger_config::AbstractDict)::AbstractLogger
Return a `MinLevelLogger{FileLogger}` or `TransformerLogger` according to `logger_config`.
### Config Parameters
- `"file_path"` -- required
- `"min_level"` -- required (`"DEBUG", "INFO", "WARN", "ERROR"`)
- `"append"` -- optional, default `true`, append to file if `true`, otherwise truncate file. (See [`LoggingExtras.FileLogger`](@ref) for more information.)
- `"transformation"` -- optional, default `identity`
### Example
```julia
logging_config = Dict(
"file_path" => "C:/Temp/test.log",
"min_level" => "ERROR",
"append" => true,
"transformation" => "add_timestamp",
)
seq_logger = SeqLoggers.load_filelogger(log_dict)
```
"""
function load_filelogger(logger_config::AbstractDict)
min_level = logger_config["min_level"] |> get_log_level
file_path = logger_config["file_path"]
append = get(logger_config, "append", true)
transformation_str = get(logger_config, "transformation", "identity")
transformation = transformation_str |> get_transformation_function
return MinLevelLogger(FileLogger(file_path; append=append), min_level) |> transformation
end
"""
load_advanced_filelogger(logger_config::AbstractDict)::AbstractLogger
Return a `DatetimeRotatingFileLogger` or `TransformerLogger` according to `logger_config`.
### Config Parameters
- `"dir_path"` -- required
- `"min_level"` -- required (`"DEBUG", "INFO", "WARN", "ERROR"`)
- `"file_name_pattern"` -- required e.g. `"\\a\\c\\c\\e\\s\\s-YYYY-mm-dd-HH-MM.\\l\\o\\g"`
- `"transformation"` -- optional, default `identity`
### Example
```julia
logging_config = Dict(
"dir_path" => "C:/Temp",
"min_level" => "ERROR",
"file_name_pattern" => "\\a\\c\\c\\e\\s\\s-YYYY-mm-dd-HH-MM.\\l\\o\\g",
"transformation" => "add_timestamp",
)
seq_logger = SeqLoggers.load_advanced_filelogger(log_dict)
```
"""
function load_advanced_filelogger(logger_config::AbstractDict)
    # Required parameters.
    level = get_log_level(logger_config["min_level"])
    dir_path = logger_config["dir_path"]
    file_name_pattern = logger_config["file_name_pattern"]
    # Optional transformation wrapper (defaults to the identity function).
    wrap = get_transformation_function(get(logger_config, "transformation", "identity"))
    rotating_logger = AdvancedFileLogger(
        dir_path,
        file_name_pattern;
        log_format_function=print_standard_format,
        min_level=level
    )
    return wrap(rotating_logger)
end
# ====================================================================
# Logger Type Mapping and Register
# ====================================================================
"""
get_logger(logger_type::AbstractString, logger_config::AbstractDict)::AbstractLogger
Create logger struct from logger type name and `Dict` with required parameters.
By default, the following logger types are supported:
- `"SeqLogger"` → [`SeqLogger`](@ref)
- `"ConsoleLogger"` → [`ConsoleLogger`](@ref)
- `"FileLogger"` → [`FileLogger`](@ref)
Use [`register_logger!`](@ref) to add custom `AbstractLogger`s.
"""
function get_logger(logger_type::AbstractString, logger_config::AbstractDict)
    # Reject unknown logger types with an actionable error message.
    haskey(LOGGER_TYPE_MAPPING, logger_type) || throw(
        ArgumentError(
            "There is no logger corresponding to the key `$logger_type`. " *
            "Available options are $(collect(keys(LOGGER_TYPE_MAPPING))). " *
            "Use `register_logger!` to add new logger types."
        )
    )
    return LOGGER_TYPE_MAPPING[logger_type](logger_config)
end
"""
register_logger!(logger_type::AbstractString, logger_constructor::Function)
Register a new logger type.
Registering enables the user to use custom `AbstractLogger` struct, defined outside of `SeqLoggers`,
to be used with [`load_logger_from_config`](@ref).
"""
function register_logger!(logger_type::AbstractString, logger_constructor::Function)
    # Overwriting an existing registration is allowed, but warn about it.
    haskey(LOGGER_TYPE_MAPPING, logger_type) &&
        @warn "Logger type `$logger_type` already exists and will be overwritten"
    LOGGER_TYPE_MAPPING[logger_type] = logger_constructor
    return nothing
end
# Maps config logger-type names to their loader functions.
# Extend at runtime via `register_logger!`.
const LOGGER_TYPE_MAPPING = Dict(
    "SeqLogger" => load_seqlogger,
    "ConsoleLogger" => load_consolelogger,
    "FileLogger" => load_filelogger,
    "AdvancedFileLogger" => load_advanced_filelogger,
)
# ====================================================================
# Transformation Function and Register
# ====================================================================
"""
get_transformation_function(key::String)
Convert a string (from config) into a transformation function.
By default, the following transformation functions are supported:
- `"identity"` → [`identity`](@ref): no transformation
- `"add_timestamp"` → [`add_timestamp`](@ref): add timestamp at the beginning of log message
Use [`register_transformation_function!`](@ref) to add custom transformation functions.
"""
function get_transformation_function(key::String)
    # Fail with the same style of actionable `ArgumentError` as `get_logger`
    # and `get_log_level`, instead of an uninformative bare `KeyError`.
    transformation = get(LOGGER_TRANSFORMATION_MAPPING, key, nothing)
    if isnothing(transformation)
        throw(
            ArgumentError(
                "There is no transformation function corresponding to the key `$key`. " *
                "Available options are $(collect(keys(LOGGER_TRANSFORMATION_MAPPING))). " *
                "Use `register_transformation_function!` to add new transformation functions."
            )
        )
    end
    return transformation
end
"""
register_transformation_function!(key::AbstractString, transformation_function::Function)
Register new transformation function.
Registering enables the user to use custom transformation functions, defined outside of `SeqLoggers`,
to be used with [`load_logger_from_config`](@ref).
"""
function register_transformation_function!(key::AbstractString, transformation_function::Function)
    # Consistency with `register_logger!`: overwriting an existing
    # registration is allowed, but the user is warned about it.
    if haskey(LOGGER_TRANSFORMATION_MAPPING, key)
        @warn "Transformation function `$key` already exists and will be overwritten"
    end
    LOGGER_TRANSFORMATION_MAPPING[key] = transformation_function
    return nothing
end
# Timestamp format used by `add_timestamp`.
const STANDARD_DATETIME_FORMAT = "yyyy-mm-dd HH:MM:SS"

"""
    add_timestamp(logger::AbstractLogger)

Logger transformation function that prepends a timestamp to a logging message.
"""
add_timestamp(logger) = TransformerLogger(logger) do log
    merge(log, (; message="$(Dates.format(now(), STANDARD_DATETIME_FORMAT)): $(log.message)"))
end

# Maps config strings to logger transformation functions.
# Extend at runtime via `register_transformation_function!`.
const LOGGER_TRANSFORMATION_MAPPING = Dict(
    "identity" => identity,
    "add_timestamp" => add_timestamp,
)
# ====================================================================
# Log Level
# ====================================================================
"""
get_log_level(key::String)::Logging.LogLevel
Return the `Loggin.LogLevel` corresponding to the input string.
"""
function get_log_level(key::String)
log_level = get(LOG_LEVEL_MAPPING, uppercase(key), nothing)
if isnothing(key)
throw(
ArgumentError(
"There is no log level corresponding to the key $key." *
"Available options are $(collect(keys(LOG_LEVEL_MAPPING)))"
)
)
end
return log_level
end
# Maps (upper-cased) config strings to `Logging.LogLevel` values.
# "INFORMATION" is the correct spelling; the historical misspelling
# "INFORMATOIN" is kept so existing configs keep working.
const LOG_LEVEL_MAPPING = Dict(
    "INFO" => Logging.Info,
    "INFORMATION" => Logging.Info,
    "INFORMATOIN" => Logging.Info,
    "DEBUG" => Logging.Debug,
    "WARN" => Logging.Warn,
    "WARNING" => Logging.Warn,
    "ERROR" => Logging.Error,
)
| [
2,
38093,
18604,
198,
2,
8778,
20615,
5972,
1362,
198,
2,
38093,
18604,
198,
37811,
198,
220,
220,
220,
3440,
62,
6404,
1362,
62,
6738,
62,
11250,
7,
7753,
62,
6978,
3712,
23839,
10100,
2599,
25,
51,
1453,
11187,
1362,
198,
198,
164... | 2.877757 | 3,763 |
<filename>julia/make_regex.jl
#=
A port of my Perl/Java/Python MakeRegex in Julia.
make_regex(words)
generates a regex for words. It use a simple approach which
combined common prefixes in generating the regex.
Some examples:
* words = ["a", "al", "all", "alla", "an", "ann", "anna", "annas", "ananas"]
regex: a(l(la?)?|n(anas|n(as?)?)?)?
* words: ["and", "at", "do", "end", "for", "in", "is", "not", "of", "or", "use"]
regex: (a(nd|t)|do|end|for|i[ns]|not|o[fr]|use)
There is a simple way of handling character classes
* words: ["price1", "price2", "price3", "price4"]
regex: price[1234]
If there is no common prefix then it just put '|' between the words
* words: ["this", "is", "a", "very", "boring", "example", "with", "no", "common", "prefix"]
regex: (a|boring|common|example|is|no|prefix|this|very|with)
Also, see the (very old) page for my Perl package MakeRegex: http://hakank.org/makeregex/index.html
The README file in that package states:
"""
The Perl package MakeRegex composes a regex-expression from a list of
words. It had been inspired by the emacs elisp module make-regex.el,
by Simon Marshall.
"""
This Julia program was created by <NAME>, <EMAIL>
See also my Julia page: http://www.hakank.org/julia/
=#
#
# common_prefix(p, list)
#
# Here is where the main work is done. It's somewhat magically ported from my
# Perl/Java/Python versions...
#
# common_prefix(p, list)
#
# Recursively build a regex for `list`, all of whose entries share the
# already-consumed prefix `p`. Groups the words by their first character,
# recurses on each group's suffixes, then joins the results either as a
# character class `[...]` (all single chars) or an alternation `(...|...)`.
# An empty suffix in a group is encoded as the sentinel " " and becomes a
# trailing `?` on the group's regex.
function common_prefix(p, list)
    list_len = length(list)
    if list_len == 0
        return ""
    end
    if list_len == 1
        return p * join(list,"")
    end
    #
    # fix for some - as of now - unknown bug. To fix in later version!
    #
    if p == "" && list[1] == "" && list[2] == ""
        return ""
    end
    #
    # * Collect all the strings with the same prefix-char
    #
    hash = Dict()
    for word in sort(list)
        prefix = suffixed_word = ""
        if length(word) > 0
            # slurp: first char becomes `prefix`, the rest `suffixed_word`
            prefix, suffixed_word... = word
        end
        # put the suffix in the list of other suffixes for
        # this prefix
        hash[prefix] = push!(get(hash,prefix,[]),suffixed_word)
    end
    #
    # And recurse this list
    #
    all = []
    for key in keys(hash)
        comm = ""
        values = hash[key]
        if length(key) > 0
            sort!(values)
            comm = common_prefix(key, values)
        end
        # hack to be able to use the '?' char . Should be re-written!
        if comm == ""
            comm = " "
        end
        push!(all,comm)
    end
    sort!(all)
    # paren: what to put in parenthesis ('()' or '[]') if anything
    paren = ""
    all_len = length(all)
    if all_len == 1
        paren = join(all,"")
    else
        len = maximum(length.(all))
        # only alternate with '|' when entries are longer than one char;
        # single chars are joined directly into a character class below
        joinChar = len != 1 ? '|' : ""
        # joins all entries except for " "
        join_str = mark = ""
        count = 0
        for w in all
            got_hack_mark = w == " " ? true : false # This is a hack for handling '?'
            if length(w) > 0 && w != " "
                join_str *= w
                if count < all_len-1
                    join_str *= joinChar
                end
            end
            if got_hack_mark
                mark = '?'
            end
            count = count + 1
        end
        paren = ""
        if length(join_str) === 1
            paren = join_str * mark
        else
            if len == 1
                # all alternatives are single characters -> character class
                paren = '[' * join_str * ']' * mark
            else
                paren = '(' * join_str * ')' * mark
            end
        end
    end
    return p * paren
end
# make_regex(words)
#
# Build a single regex matching exactly the given `words`, factoring out
# common prefixes via `common_prefix`.
function make_regex(words)
    # Bug fix: the result of `replace.` was previously discarded, so regex
    # metacharacters in the input words were never actually escaped.
    escaped = replace.(words, r"([*?+])" => s"\\\1") # escape meta characters
    # We sort the words to induce more common prefixes
    return common_prefix("", sort(escaped))
end
#
# check_regex(regex, words)
#
# Checks the regex againts a list of words.
#
#
# check_regex(regex, words)
#
# Checks the regex against a list of words: prints one line per word,
# flagging those that do NOT match.
#
function check_regex(regex, words)
    # compile once, reuse for every word
    p = Regex(regex)
    for word in words
        println(word, " matches", !occursin(p,word) ? " NOT!" : "")
    end
end
# Test word lists exercising the different regex structures:
# shared prefixes, character classes, and no common prefix at all.
tests = [
    ["all","alla"],

    # A lot of Swedish words
    [ "all", "alla", "alle", "alls", "palle", "palla", "pelle", "perkele",
      "ann", "anna", "annas", "anders", "håkan", "ångest", "ärlig",
      "solsken", "sture", "stina", "hörapparat", "hörsel", "hårig"],

    ["alla", "palla", "balla", "kalla", "all", "pall", "ball", "kall"],

    # "ananas" is the Swedish word for pineapple
    ["a", "al", "all", "alla", "an", "ann", "anna", "annas", "ananas"],

    ["a", "an", "ann", "anna", "annan", "annas", "annans", "ananas", "ananasens"],

    ["a", "ab", "abc", "abcd", "abcde", "abcdef", "b", "bc", "bcd", "bcde", "bcdef",
     "bcdefg", "abb", "abbc", "abbcc", "abbccdd"],

    ["this", "is", "a", "very", "boring", "example", "with", "no", "common", "prefix"],

    ["price1","price2","price3","price4"],

    # This is from Marshall's make-regex.el
    ["and", "at", "do", "end", "for", "in", "is", "not", "of", "or", "use"],

    # This is from Marshall's make-regex.el
    ["cond", "if", "while", "let*?", "prog1", "prog2", "progn",
     "catch", "throw", "save-restriction", "save-excursion",
     "save-window-excursion", "save-match-data", "unwind-protect",
     "condition-case", "track-mouse"],

    # This is from Marshall's make-regex.el
    ["abort", "abs", "accept", "access", "array",
     "begin", "body", "case", "constant", "declare",
     "delay", "delta", "digits", "else", "elsif", "entry",
     "exception", "exit", "function", "generic", "goto",
     "if", "others", "limited", "loop", "mod", "new",
     "null", "out", "subtype", "package", "pragma",
     "private", "procedure", "raise", "range", "record",
     "rem", "renames", "return", "reverse", "select",
     "separate", "task", "terminate", "then", "type",
     "when", "while", "with", "xor"]
]
# Run `make_regex` on every test word list and print the result.
for t in tests
    println("testing $t")
    println(make_regex(t))
    println()
end
#=
words = last(tests)
rx = make_regex(words)
println(words)
println("regex:$rx")
check_regex(rx,words)
=# | [
27,
34345,
29,
73,
43640,
14,
15883,
62,
260,
25636,
13,
20362,
198,
2,
28,
198,
220,
220,
220,
317,
2493,
286,
616,
24316,
14,
29584,
14,
37906,
6889,
3041,
25636,
287,
22300,
13,
628,
220,
220,
220,
787,
62,
260,
25636,
7,
10879... | 2.197158 | 2,815 |
<gh_stars>0
const olm_pomc_c = 1e5 # UCB exploration constant used in simulate_pomc
const olm_pomc_iterations = 300 # base iteration budget (scaled by search depth in plan_pomc)
const olm_pomc_dt = 0.5 # simulation time step per tree expansion
# Node value stored in the POMC (partially observable Monte Carlo) search tree.
mutable struct PomcNode
    x::AbstractArray{Float64, 1}      # ego agent state
    u::AbstractArray{Float64, 1}      # control applied to reach this node
    b::AbstractArray{Float64, 1}      # belief over adversary driver types
    adv_x::AbstractArray{Float64, 1}  # adversary agent state
    c::pomdp.Collision                # predicted collision information
    q::Float64                        # action-value estimate
    N::Int                            # visit count (UCB denominator)
    Ns::Int                           # parent-state visit count (UCB numerator)
    # default: empty states, fresh collision, zero value, counts start at 1
    PomcNode() = new(Float64[], Float64[], Float64[], Float64[],
                     pomdp.Collision(), 0.0, 1, 1)
end
# Plan a control for `agent` against `adv_agent` via Monte Carlo tree search
# over the POMDP: builds a tree rooted at state `x` with belief `b`, then
# returns (best control, root node). Temporarily overrides the adversary's
# controller during the search and restores it afterwards.
function plan_pomc(x::AbstractArray{Float64, 1}, b::AbstractArray{Float64, 1},
                   agent::Agent, adv_agent::Agent, world::World,
                   reward::Function, ctrl_d::Discretization, depth::Int)
    # backup variables that are mutated during the search
    adv_agent_controller! = adv_agent.controller!
    adv_agent_custom = adv_agent.custom

    x = copy(x)
    # wrap position onto the (circular) road path
    x[1] = mod(x[1], world.road.path.S[end])

    # build the root node
    value = PomcNode()
    value.x = x
    value.b = b
    value.adv_x = adv_agent.x
    adv_agent.controller! = pomdp.adv_controller!
    (c, _) = adv.predict_collision(adv_agent.x, agent.x, world)
    value.c = c
    node = GroupTree(value)

    # iteration budget scales with the requested search depth
    num_iterations = round(Int, olm_pomc_iterations / 4 * depth)
    visited = Set{PomcNode}()
    for i in 1:num_iterations
        simulate_pomc(node, agent, adv_agent, world, reward,
                      ctrl_d, visited, depth)
    end

    # pick the root action with the highest observation-weighted value
    us = Float64[]
    qs = -Inf
    ret = nothing
    (adv_c, _) = predict_collision(adv_agent.x, x, world)
    # P[o] = probability of adversary observation o under the root belief
    P = map(o -> reduce(+, map(i -> node.value.b[i] *
                               pomdp.P_adv(o, adv_c, pomdp.DRIVERS[i]),
                               1:length(pomdp.DRIVERS))), pomdp.ACTIONS)
    for a in 1:length(node.next)
        q = reduce(+, map(o -> node.next[a][o].value.q * P[o],
                          1:length(pomdp.ACTIONS)))
        if q > qs
            qs = q
            us = node.next[a][1].value.u
            ret = node
        end
    end

    # revert variables mutated above
    adv_agent.controller! = adv_agent_controller!
    adv_agent.custom = adv_agent_custom

    return (us, node)
end
# One MCTS simulation pass: expand the node on first visit, otherwise select
# an action by UCB, sample an adversary observation from the belief, recurse,
# and back up the discounted return into the running-mean action values.
function simulate_pomc(node::GroupTree, agent::Agent, adv_agent::Agent,
                       world::World, reward::Function, ctrl_d::Discretization,
                       visited::Set{PomcNode}, depth::Int)
    if depth <= 0
        return 0.0
    end

    node.value.x[1] = mod(node.value.x[1], world.road.path.S[end])
    (adv_c, _) = predict_collision(node.value.adv_x, node.value.x, world)

    # expansion: first visit to this node
    if !(node.value in visited)
        push!(visited, node.value)

        # predict adv agent doing three possible actions
        adv_NX = Array{Array{Float64, 1}, 1}(undef, length(pomdp.ACTIONS))
        for o in pomdp.ACTIONS
            oidx = Int(o)
            adv_nx = copy(node.value.adv_x)
            adv_agent.custom[3] = o
            sim.advance!(sim.default_dynamics!, adv_nx, Pair(adv_agent, world), 0.0,
                         olm_pomc_dt, olm_h)
            adv_nx[1] = mod(adv_nx[1], world.road.path.S[end])
            adv_NX[oidx] = adv_nx
        end

        # allocate next level of the tree
        la_len = ctrl_d.thr[end] * ctrl_d.pt[end] # number of actions to survey
        node.next = Array{Array{GroupTree, 1}, 1}(undef, la_len)
        for aidx in 1:la_len
            node.next[aidx] = Array{GroupTree, 1}(undef, length(pomdp.ACTIONS))
        end
        for la in 0:(la_len - 1)
            u = dis.ls2x(ctrl_d, la)
            agent.custom = u
            nx = copy(node.value.x)
            sim.advance!(sim.default_dynamics!, nx, Pair(agent, world), 0.0,
                         olm_pomc_dt, olm_h)
            nx[1] = mod(nx[1], world.road.path.S[end])

            ra = 0.0
            for o in pomdp.ACTIONS
                oidx = Int(o)
                adv_nx = adv_NX[oidx]

                # evaluate reward with the predicted collision attached to
                # the agent (agent.custom is restored right after)
                (nc, _) = adv.predict_collision(nx, adv_nx, world)
                custom = agent.custom
                agent.custom = nc
                r = reward(node.value.x, u, nx, agent, world)
                agent.custom = custom

                # belief update given observation o
                bp = pomdp.update_belief(node.value.b, o, adv_c)

                # child node for (action la, observation o)
                value = PomcNode()
                value.x = nx
                value.adv_x = adv_nx
                value.u = u
                value.b = bp
                value.c = nc
                value.q = r
                next_node = GroupTree(value)
                node.next[la + 1][oidx] = next_node
            end
        end
        return 0.0
    end

    # probability of each observation
    P = map(o -> reduce(+, map(i -> node.value.b[i] *
                               pomdp.P_adv(o, adv_c, pomdp.DRIVERS[i]),
                               1:length(pomdp.DRIVERS))), pomdp.ACTIONS)

    # find the best action for value and exploration (UCB)
    as = -1
    vs = -Inf
    for a in 1:length(node.next)
        # expected reward
        ra = reduce(+, map(o -> node.next[a][o].value.q * P[o],
                           1:length(pomdp.ACTIONS)))
        v = ra + olm_pomc_c * sqrt(log(node.value.Ns) /
                                   reduce(+, map(o -> node.next[a][o].value.N,
                                                 1:length(pomdp.ACTIONS))))
        if v > vs
            vs = v
            as = a
        end
    end

    # sample a transition (observation drawn from P)
    cP = cumsum(P)
    rnd = rand()
    oidx = findfirst(i -> cP[i] >= rnd, 1:length(cP))
    x = node.value.x
    adv_nx = node.next[as][oidx].value.adv_x
    u = node.next[as][oidx].value.u
    nx = node.next[as][oidx].value.x
    bp = node.next[as][oidx].value.b

    (nc, _) = adv.predict_collision(nx, adv_nx, world)
    custom = agent.custom
    agent.custom = nc
    r = reward(node.value.x, u, nx, agent, world)
    agent.custom = custom

    # discounted return of the sampled trajectory
    q = r + olm_gamma * simulate_pomc(node.next[as][oidx], agent, adv_agent,
                                      world, reward, ctrl_d, visited, depth - 1)

    # update the running-mean estimate of Q(s, a) ----------------------------- #
    dq = (q - node.next[as][oidx].value.q)
    node.next[as][oidx].value.q += dq / node.next[as][oidx].value.N
    # this works best BY FAR
    #node.next[as].value.q = max(q, node.next[as].value.q)
    #alpha = 0.5
    #Qsa = node.next[as].value.q
    #Qsa = Qsa * (1 - alpha) + q * alpha
    #node.next[as].value.q = Qsa
    # ------------------------------------------------------------------------- #
    node.next[as][oidx].value.N += 1
    node.value.Ns += 1
    return q
end
# Rollout policy stub: currently always returns 0.0 (no rollout value is
# estimated beyond the tree). Kept for interface completeness.
function rollout(x::AbstractArray{Float64, 1},
                 adv_x::AbstractArray{Float64, 1},
                 b::AbstractArray{Float64, 1},
                 agent::Agent, adv_agent::Agent,
                 world::World, reward::Function,
                 ctrl_d::Discretization, depth::Int)
    if depth <= 0
        return 0.0
    end
    return 0.0
    # choose middle action, generally reasonable
    # NOTE(review): the line above makes the rest unreachable — the intended
    # "middle action" rollout appears to be unimplemented.
end
# Controller that replays the control previously stored on the agent by the
# POMC planner (first two entries of `agent.custom`).
function controller_pomc!(u::AbstractArray{Float64},
                          x::AbstractArray{Float64},
                          dx::AbstractArray{Float64},
                          agent_world::Pair{Agent, World}, t::Float64)
    planned = agent_world.first.custom
    u[1] = planned[1]
    u[2] = planned[2]
    return
end
| [
27,
456,
62,
30783,
29,
15,
198,
9979,
25776,
76,
62,
79,
296,
66,
62,
66,
796,
352,
68,
20,
1303,
13936,
11507,
329,
337,
4177,
50,
198,
9979,
25776,
76,
62,
79,
296,
66,
62,
2676,
602,
796,
5867,
1303,
13936,
11507,
329,
337,
... | 2.059293 | 3,255 |
<filename>O9/O9-praksis.jl
# Question 1
# Find the representative (root) of the set containing `x`,
# compressing the path so every visited node points at the root.
function findset(x::DisjointSetNode)
    x != x.p && (x.p = findset(x.p))
    return x.p
end
# Question 2
# Union by rank: attach the root of lower rank under the root of higher
# rank; on a tie, `y` becomes the root and its rank grows by one.
function link!(x, y)
    if x.rank > y.rank
        y.p = x
    else
        x.p = y
        if x.rank == y.rank
            y.rank = y.rank + 1
        end
    end
end
# Merge the sets containing `x` and `y` by linking their roots.
function union!(x::DisjointSetNode,y::DisjointSetNode)
    link!(findset(x),findset(y))
end
# Question 3
# Question 3
#
# Count the positions at which `s1` and `s2` differ.
# Fixes: the original indexed with `s1[i]` for `i = 1:length(s1)`, which uses
# byte indices and throws on multi-byte UTF-8 characters; it also silently
# ignored a longer `s2` and crashed on a shorter one. We now iterate over
# characters in lockstep and validate that the lengths match.
function hammingdistance(s1::String,s2::String)
    length(s1) == length(s2) ||
        throw(ArgumentError("hammingdistance requires strings of equal length"))
    teller = 0
    for (c1, c2) in zip(s1, s2)
        if c1 != c2
            teller += 1
        end
    end
    teller
end
27,
34345,
29,
46,
24,
14,
46,
24,
12,
79,
430,
591,
271,
13,
20362,
198,
2,
18233,
352,
198,
8818,
1064,
2617,
7,
87,
3712,
7279,
73,
1563,
7248,
19667,
8,
198,
220,
220,
220,
611,
357,
87,
14512,
87,
13,
79,
1267,
198,
220,
... | 1.817073 | 328 |
<reponame>astrieanna/GitHub.jl
# GitHub API client module: defines the shared abstract types, the public
# API surface (types and methods), and pulls in the implementation files.
# NOTE(review): `abstract X` (without `type`) is pre-Julia-0.6 syntax.
module GitHub

import Base.show

using Requests
import JSON
using HttpCommon

# Root abstract type for all GitHub API entities.
abstract GitHubType
# An owner of repositories (a user or an organization).
abstract Owner <: GitHubType

# types
export User,
       Organization,
       Repo,
       Issue,
       HttpError,
       AuthError,
       StatsError

# methods
export authenticate,
       set_api_endpoint,
       set_web_endpoint,
       user,
       star,
       unstar,
       stargazers,
       starred,
       forks,
       fork,
       contributors,
       commit_activity,
       code_frequency,
       participation,
       punch_card,
       collaborators,
       iscollaborator,
       add_collaborator,
       remove_collaborator,
       watchers,
       watched,
       watching,
       watch,
       unwatch,
       followers,
       following,
       org,
       orgs,
       repo,
       issue,
       create_issue,
       edit_issue,
       issues

# implementation files
include("utils.jl")
include("endpoint.jl")
include("error.jl")
include("auth.jl")
include("users.jl")
include("organizations.jl")
include("repos.jl")
include("issues.jl")
include("starring.jl")
include("forks.jl")
include("statistics.jl")
include("collaborators.jl")
include("watching.jl")

end
| [
27,
7856,
261,
480,
29,
459,
5034,
7697,
14,
38,
270,
16066,
13,
20362,
198,
198,
21412,
21722,
198,
198,
11748,
7308,
13,
12860,
198,
198,
3500,
9394,
3558,
198,
11748,
19449,
198,
3500,
367,
29281,
17227,
628,
198,
397,
8709,
21722,... | 2.158559 | 555 |
<filename>src/gridmodule/gridreader.jl<gh_stars>0
export readgrid
# Compatibility shim: `int`/`float` conversions were replaced by `parse`
# starting with Julia 0.4, so choose the parser at load time.
if VERSION < v"0.4.0-dev+1419"
    stringtoint(s) = int(s);
    stringtofloat(s) = float(s);
else
    stringtoint(s) = parse(Int64,s);
    stringtofloat(s) = parse(Float64,s);
end
"""
**Summary:**
```
xyz,vert,pattr,ele,eattr,nd=function readgrid(fname::String)
```
**Description:**
Reads a *1D,2D,3D* topology grid from the file `fname` and stores data in
intermediate structure consisting of
- `xyz ::Array{Float64,2}` vertices positions
- `vert ::Array{Int64,2}` vertex numbers
- `pattr::Array{Int64,2}` vertex attributes
- `ele ::Array{Int64,2}` element connectivity
- `eattr::Array{Int64,2}` element attributes
- `nd::Int64` spatial dimension (<=topological dimension)
xyz,vert,pattr,ele,eattr,nd
as read from node and ele files. Can handle meshes generated with *triangle*,
*tetgen*, and the corresponding *1D* equivalent. The type of mesh (intervals,
triangles, tetraeders etc.) should be known by the caller of readgrid, when
creating the proper derived objects, i.e. topologies.
"""
function readgrid(fname::String)
# read vertex related stuff
fid = open(fname * ".node","r")
mesh_header = split(strip(readline(fid))) # read file header
nvert = stringtoint(mesh_header[1]) # number of vertices
nd = stringtoint(mesh_header[2]) # spatial dimension
npattr = stringtoint(mesh_header[3]) # number of point attributes
nidv = stringtoint(mesh_header[4]) # number of boundary markers
# allocate
xyz = zeros(Float64,nvert,nd)
vert = zeros(Int64, nvert, 2 )
pattr = zeros(Int64, nvert, nidv + npattr)
# read vertex positions, enumeration, and attributes
for i=1:nvert
data = split(strip(readline(fid)))
for j=1:nd
xyz[i,j] = stringtofloat(data[1+j])
end
vert[i,1] = i
vert[i,2] = stringtoint(data[1])
for j=1:nidv+npattr
pattr[i,j] = stringtoint(data[1+nd+j])
end
end
close(fid)
# read element related stuff
fid = open(fname * ".ele","r")
mesh_header = split(strip(readline(fid)))
nele = stringtoint(mesh_header[1]) # number of elements
ntop = stringtoint(mesh_header[2]) # number of entities per element
nide = stringtoint(mesh_header[3]) # number of element attributes
# allocate
ele = zeros(Int64,nele,ntop+1)
eattr = zeros(Int64,nele,nide)
# read element enumeration, connectivity and attributes
for k=1:nele
data = split(strip(readline(fid)))
for i=1:(ntop+1)
ele[k,i] = stringtoint(data[i])
end
for i=1:nide
eattr[k,i] = stringtoint(data[ntop+1+i])
end
end
close(fid)
return xyz,vert,pattr,ele,eattr,nd
end
# see http://userpages.umbc.edu/~squire/UCD_Form.htm
# readable with ParaView
#
# Read a grid in AVS/UCD `.inp` format; returns the same tuple layout as
# `readgrid`. Only "tri" cells are stored; other cell types leave zero rows.
function readgrid_avs(fname::String)
    # read vertex related stuff
    fid = open(fname * ".inp","r")
    mesh_header = split(strip(readline(fid))) # read file header
    nvert = stringtoint(mesh_header[1])   # number of vertices
    nele = stringtoint(mesh_header[2])    # number of elements
    npattr = stringtoint(mesh_header[3])  # number of point attributes
    nide = stringtoint(mesh_header[4])    # number of element attributes
    nmodel = stringtoint(mesh_header[5])  # model data (assume this is zero)
    nd = 3 # spatial dimension
    nide = 1 # overwrite above (header value is ignored)
    ntop = 3 # NOTE(review): set but unused below — `ele` is allocated with 4 columns directly
    # allocate
    xyz = zeros(Float64,nvert,nd)
    vert = zeros(Int64, nvert, 2 )
    # read vertex positions, enumeration, and attributes
    for i=1:nvert
        data = split(strip(readline(fid)))
        for j=1:nd
            xyz[i,j] = stringtofloat(data[1+j])
        end
        vert[i,1] = i
        vert[i,2] = stringtoint(data[1])
    end
    ele = zeros(Int64,nele,4)
    eattr = zeros(Int64,nele,nide) # ==1?
    for k=1:nele
        data = split(strip(readline(fid)))
        # only triangle cells are parsed; other cell types are skipped
        if data[3]=="tri"
            ele[k,1] = stringtoint(data[1])
            for i=1:3
                ele[k,1+i] = stringtoint(data[3+i])
            end
            for i=1:nide
                eattr[k,i] = stringtoint(data[1+i])
            end
        end
    end
    # read from INP file?
    # NOTE(review): point attributes are never read here; pattr is all zeros.
    pattr = zeros(Int64,nvert,1)
    close(fid)
    return xyz,vert,pattr,ele,eattr,nd
end
| [
27,
34345,
29,
10677,
14,
25928,
21412,
14,
25928,
46862,
13,
20362,
27,
456,
62,
30783,
29,
15,
198,
39344,
1100,
25928,
198,
198,
361,
44156,
2849,
1279,
410,
1,
15,
13,
19,
13,
15,
12,
7959,
10,
1415,
1129,
1,
198,
220,
4731,
... | 2.353837 | 1,772 |
"""
将三角切分的结果再进行切分
"""
module Refine
using ..Basics
export split_triangle, refine_triangles, find_adjs_by_adjoint
"""
将三角形重新切分成四个,可以用在直角三角形上
依旧保证出来的四个是直角三角形,且第一个角是直角
"""
@generated function split_triangle(rtri::Basics.AbstractTriangle{T}) where T
type2func = Dict(
:COM => Triangle,
:RT => RtTriangle,
:EQ => EqTriangle
)
func = type2func[T]
if T == :RT
return quote
#新的四个直角三角形的四个直角
rtvexs = Vector{Point2D}(undef, 4)
rtvexs[1] = rtri.vertex[1]
rtvexs[2] = middle_point(rtri.vertex[1], rtri.vertex[2])
rtvexs[3] = middle_point(rtri.vertex[2], rtri.vertex[3])
rtvexs[4] = middle_point(rtri.vertex[3], rtri.vertex[1])
#
newtris = Vector{Basics.AbstractTriangle{T}}(undef, 4)
newtris[1] = RtTriangle(rtvexs[2], rtvexs[3], rtvexs[1])
newtris[2] = RtTriangle(rtvexs[2], rtri.vertex[2], rtvexs[3])
newtris[3] = RtTriangle(rtvexs[4], rtvexs[3], rtri.vertex[3])
newtris[4] = RtTriangle(rtvexs[4], rtvexs[3], rtvexs[1])
return newtris
end
end
return quote
#新的四个直角三角形的四个直角
rtvexs = Vector{Point2D}(undef, 4)
rtvexs[1] = rtri.vertex[1]
rtvexs[2] = middle_point(rtri.vertex[1], rtri.vertex[2])
rtvexs[3] = middle_point(rtri.vertex[2], rtri.vertex[3])
rtvexs[4] = middle_point(rtri.vertex[3], rtri.vertex[1])
#
newtris = Vector{Basics.AbstractTriangle{T}}(undef, 4)
newtris[1] = ($func)(rtvexs[1], rtvexs[2], rtvexs[4])
newtris[2] = ($func)(rtvexs[2], rtri.vertex[2], rtvexs[3])
newtris[3] = ($func)(rtvexs[3], rtvexs[4], rtvexs[2])
newtris[4] = ($func)(rtvexs[4], rtvexs[3], rtri.vertex[3])
return newtris
end # end quote
end
"""
将一组的三角形细化
"""
function refine_triangles(ltris::Vector{T}) where T <: Basics.AbstractTriangle
newtris = Matrix{T}(undef, length(ltris), 4)
for (idx, tri) in enumerate(ltris)
newtris[idx, :] = split_triangle(tri)
end
newtris = reshape(newtris, length(ltris)*4)
return newtris
end
"""
判断一条边是不是某个三角形的
"""
macro isedge(edge, tri)
return esc(quote
topt1 = [vex - ($edge).pt1 for vex in ($tri).vertex]
topt1 = [sqrt(pt.x^2 + pt.y^2) for pt in topt1]
haspt1 = isapprox(minimum(topt1), 0., atol=1e-6)
topt2 = [vex - ($edge).pt2 for vex in ($tri).vertex]
topt2 = [sqrt(pt.x^2 + pt.y^2) for pt in topt2]
haspt2 = isapprox(minimum(topt2), 0., atol=1e-6)
haspt1 && haspt2
end)
end
"""
通过判断边是否重合来判断两个三角形是否挨在一起
"""
function find_adjs_by_adjoint(ltris::Vector{T}) where T <: Basics.AbstractTriangle
ladjs = Vector{Tuple{
Union{Missing, T}, Union{Missing, T}, Union{Missing, T}
}}(undef, length(ltris))
for idx in 1:1:length(ltris)
ladjs[idx] = (missing, missing, missing)
end
@Threads.threads for idx in 1:1:length(ltris)
tri = ltris[idx]
fadjs = Vector{Union{Missing, T}}(undef, 3)
fadjs[:] = [missing, missing, missing]
for (adjidx, adjtri) in enumerate(ltris)
if idx == adjidx
continue
end
if @isedge tri.edges[1] adjtri
fadjs[1] = adjtri
continue
end
if @isedge tri.edges[2] adjtri
fadjs[2] = adjtri
continue
end
if @isedge tri.edges[3] adjtri
fadjs[3] = adjtri
continue
end
end
ladjs[idx] = Tuple(fadjs)
end
return ladjs
end
end # end module
| [
37811,
198,
49546,
49011,
164,
100,
240,
26344,
229,
26344,
228,
21410,
163,
119,
241,
162,
252,
250,
37863,
235,
32573,
249,
26193,
234,
26344,
229,
26344,
228,
198,
37811,
198,
21412,
6524,
500,
198,
220,
220,
220,
220,
198,
3500,
1... | 1.659613 | 2,221 |
<gh_stars>10-100
#!/usr/bin/env julia
# Copyright 2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import SemanticFlowGraphs: CLI
# Expand `path` into a list of file paths:
# - an existing file  -> itself,
# - a directory       -> the paths of its entries,
# - anything else     -> empty.
# Bug fix: `readdir(path)` returns bare entry NAMES, not paths, so the
# previous version produced entries that were only valid when `path`
# happened to be the working directory; join them with the directory.
function expand_paths(path::String)::Vector{String}
    if isfile(path)
        return [ path ]
    elseif isdir(path)
        return [ joinpath(path, name) for name in readdir(path) ]
    else
        return String[]
    end
end
# Parse the command line, lazily load language bridges, and dispatch.
cmds, cmd_args = CLI.parse(ARGS)

# XXX: Reduce load time by only importing extra packages as needed.
# Julia really needs a better solution to this problem...
if first(cmds) == "record"
    paths = expand_paths(cmd_args["path"])
    # only pull in the language bridges actually needed for the recorded files
    if any(endswith(path, ".py") for path in paths)
        import PyCall
    end
    if any(endswith(path, ".R") for path in paths)
        import RCall
    end
end

CLI.invoke(cmds, cmd_args)
| [
27,
456,
62,
30783,
29,
940,
12,
3064,
198,
2,
48443,
14629,
14,
8800,
14,
24330,
474,
43640,
198,
198,
2,
15069,
2864,
19764,
11421,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156... | 3.187166 | 374 |
<filename>src/entities/summitbackgroundmanager.jl
# Ahorn map-editor plugin for the Summit Background Manager entity.
module SummitBackgroundManager

using ..Ahorn, Maple

# Entry shown in Ahorn's entity placement palette.
const placements = Ahorn.PlacementDict(
    "Summit Background Manager" => Ahorn.EntityPlacement(
        Maple.SummitBackgroundManager
    )
)

# Selection hitbox: a 24x24 rectangle centred on the entity position.
function Ahorn.selection(entity::Maple.SummitBackgroundManager)
    x, y = Ahorn.position(entity)
    return Ahorn.Rectangle[Ahorn.Rectangle(x - 12, y - 12, 24, 24)]
end

# Draw the entity icon centred on the entity's origin.
Ahorn.render(ctx::Ahorn.Cairo.CairoContext, entity::Maple.SummitBackgroundManager, room::Maple.Room) = Ahorn.drawImage(ctx, Ahorn.Assets.summitBackgroundManager, -12, -12)

end
27,
34345,
29,
10677,
14,
298,
871,
14,
16345,
2781,
25249,
37153,
13,
20362,
198,
21412,
20014,
21756,
13511,
198,
198,
3500,
11485,
10910,
1211,
11,
21249,
198,
198,
9979,
21957,
3196,
796,
7900,
1211,
13,
3646,
5592,
35,
713,
7,
19... | 2.856459 | 209 |
##########################################################################
# Copyright 2017 <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
# Dynamic Double Standard Model, with some modifications
# From paper: "A dynamic model and parallel tabu search heuristic for real-time ambulance relocation"
# Modifications (compared with original ddsm):
# - Added slack variables to coverage constraints, to avoid infeasibility. Cost of slack per unit is given by slackWeight.
# - Redeployment cost only depends on travel time, not on: ambulance history, avoiding round trips.
# initialise data relevant to the ddsm move-up method
# NOTE(review): the @asserts below act as input validation; they may be
# disabled at higher optimisation levels — consider explicit throws.
function initDdsm!(sim::Simulation;
    coverFractionTargetT1::Float = 0.5, travelTimeCost::Float = 50.0, slackWeight::Float = 1e9,
    coverTimeDemandPriorities::Vector{Priority} = [highPriority, lowPriority],
    options::Dict{Symbol,Any} = Dict{Symbol,Any}())
    # slackWeight is the weight to apply to slack variables, needs to be sufficiently large so that the slack variables are zero when possible
    # coverTimeDemandPriorities[i] is demand priority for coverTimes[i], where coverTimes[1] and [2] are the targets for ddsm

    @assert(0 <= coverFractionTargetT1 <= 1)
    @assert(length(coverTimeDemandPriorities) == 2)

    # initialise demand and demand coverage data if not already initialised
    sim.demand.initialised || initDemand!(sim)
    sim.demandCoverage.initialised || initDemandCoverage!(sim)

    demand = sim.demand # shorthand
    @assert(all([demandMode.rasterIndex for demandMode in demand.modes[demand.modeLookup[i,:]]] |> unique |> length == 1 for i = 1:demand.numSets)) # distribution of demand must be same for all demand priorities, in any given time period

    # two target cover times for ddsm
    coverTimes = [sim.demandCoverage.coverTimes[p] for p in coverTimeDemandPriorities]
    @assert(length(coverTimes) == 2)
    @assert(coverTimes[1] < coverTimes[2]) # change to warning?

    # set options (defaults first, then user overrides via ddsmOptions!)
    ddsmd = sim.moveUpData.ddsmData # shorthand
    ddsmd.options[:solver] = "cbc" # can be slower than glpk, but more reliable for some reason
    ddsmd.options[:solver_args] = []
    ddsmd.options[:solver_kwargs] = []
    ddsmd.options[:v] = v"1"
    ddsmd.options[:z_var] = true
    ddsmd.options[:bin_tol] = 1e-5
    merge!(ddsmd.options, Dict([:x_bin => true, :y11_bin => true, :y12_bin => true, :y2_bin => true]))
    ddsmOptions!(sim, options)

    # store the model parameters on the move-up data
    ddsmd.coverFractionTargetT1 = coverFractionTargetT1
    ddsmd.travelTimeCost = travelTimeCost
    ddsmd.slackWeight = slackWeight
    ddsmd.coverTimeDemandPriorities = coverTimeDemandPriorities # coverTimeDemandPriorities[i] is demand priority for coverTimes[i]
    ddsmd.coverTimes = coverTimes
end
# change the options for ddsm
function ddsmOptions!(sim, options::Dict{Symbol,T}) where T <: Any
options = merge!(sim.moveUpData.ddsmData.options, options) # update options
@assert(in(options[:solver], ["cbc", "glpk", "gurobi"]))
@assert(typeof(options[:v]) == VersionNumber)
@assert(typeof(options[:z_var]) == Bool)
@assert(0 < options[:bin_tol] < 0.1)
if options[:solver] == "gurobi" try Gurobi; catch; options[:solver] = "cbc"; @warn("Failed to use Gurobi, using Cbc instead.") end end
# options[:v] == v"0" # do nothing, values should already be set in options if using this
options[:v] == v"1" && merge!(options, Dict([:x_bin => true, :y11_bin => true, :y12_bin => true, :y2_bin => true]))
options[:v] == v"2" && merge!(options, Dict([:x_bin => true, :y11_bin => true, :y12_bin => false, :y2_bin => false]))
options[:v] == v"3" && merge!(options, Dict([:x_bin => false, :y11_bin => true, :y12_bin => false, :y2_bin => false]))
options[:v] == v"4" && merge!(options, Dict([:x_bin => false, :y11_bin => true, :y12_bin => true, :y2_bin => true]))
if options[:y11_bin] == false && in(options[:solver], ["cbc", "glpk"]) @warn("Removing binary constraint for `y11` may not work with CBC or GLPK.") end
if options[:x_bin] == false && options[:solver] == "gurobi" @warn("Removing binary constraint for `x` may not work with Gurobi.") end
@assert(all(key -> haskey(options, key), [:x_bin, :y11_bin, :y12_bin, :y2_bin]))
if isdefined(sim, :backup) sim.backup.moveUpData.ddsmData.options = options end # to keep options if sim is reset
return options
end
function ddsmMoveUp(sim::Simulation)
@assert(sim.moveUpData.useMoveUp)
# shorthand:
@unpack ambulances, stations, numStations, demand = sim
currentTime = sim.time
ddsmd = sim.moveUpData.ddsmData
@unpack coverTimes, coverTimeDemandPriorities, coverFractionTargetT1, slackWeight, options = ddsmd
# get movable ambulances (movableAmbs)
ambMovable = [isAmbMovable(amb) for amb in ambulances]
movableAmbs = ambulances[ambMovable]
numMovableAmbs = length(movableAmbs)
if numMovableAmbs == 0
return moveUpNull()
end
# calculate travel time for each movable ambulance
ambToStationTimes = zeros(Float, numMovableAmbs, numStations) # ambToStationTimes[i,j] = time for movable ambulance i to travel to station j
for i = 1:numMovableAmbs
ambToStationTimes[i,:] = ambMoveUpTravelTimes!(sim, movableAmbs[i])
end
ambToStationCosts = ambToStationTimes * ddsmd.travelTimeCost
# get demand point coverage data for the two target cover times
# for coverTimes[ti], point j has demand pointDemands[ti][j] and is covered by stations pointStations[ti][j]
pointStations = [] # pointStations[ti][j] is coverTimes[ti]
pointDemands = []
numPoints = Int[] # can have different number of points (really point sets) for each cover time
demandPriorityArrivalRates = [getDemandMode!(demand, demandPriority, currentTime).arrivalRate for demandPriority in priorities]
for demandPriority in coverTimeDemandPriorities
# get demand point coverage data
pointsCoverageMode = getPointsCoverageMode!(sim, demandPriority, currentTime)
demandMode = getDemandMode!(demand, demandPriority, currentTime)
pointSetsDemands = getPointSetsDemands!(sim, demandPriority, currentTime; pointsCoverageMode = pointsCoverageMode) * demandMode.rasterMultiplier
pointSetsDemands *= sum(demandPriorityArrivalRates) / demandPriorityArrivalRates[Int(demandPriority)] # scale demand to be for all priorities, not just current demandPriority
push!(pointStations, pointsCoverageMode.stationSets)
push!(pointDemands, pointSetsDemands)
push!(numPoints, length(pointSetsDemands))
end
@assert(isapprox(sum(pointDemands[1]), sum(pointDemands[2])))
# could have it so that pointStations, pointDemands, and numPoints are only updated if the demandSet has changed since last query,
# but this may only shave off a couple of seconds per 100 sim days, which is not the current bottleneck
######################
# IP
# shorthand
a = numMovableAmbs
np = numPoints # np[ti] is number of points for coverTimes[i]
s = numStations
t = coverTimes
# using JuMP
model = Model()
solver = options[:solver] # shorthand
args = options[:solver_args] # shorthand
kwargs = options[:solver_kwargs] # shorthand
if solver == "cbc" set_optimizer(model, with_optimizer(Cbc.Optimizer, logLevel=0, args...; kwargs...))
elseif solver == "glpk" set_optimizer(model, with_optimizer(GLPK.Optimizer, args...; kwargs...))
elseif solver == "gurobi" @stdout_silent(set_optimizer(model, with_optimizer(Gurobi.Optimizer, OutputFlag=0, args...; kwargs...)))
end
m = model # shorthand
options[:x_bin] ? @variable(m, x[i=1:a,j=1:s], Bin) : @variable(m, 0 <= x[i=1:a,j=1:s] <= 1)
options[:y11_bin] ? @variable(m, y11[p=1:np[1]], Bin) : @variable(m, 0 <= y11[p=1:np[1]] <= 1)
options[:y12_bin] ? @variable(m, y12[p=1:np[1]], Bin) : @variable(m, 0 <= y12[p=1:np[1]] <= 1)
options[:y2_bin] ? @variable(m, y2[p=1:np[2]], Bin) : @variable(m, 0 <= y2[p=1:np[2]] <= 1)
# x[i,j] = 1 if ambulance: movableAmbs[i] should be moved to station: stations[j], 0 otherwise
# y11[p,k] = 1 if demand point p is covered at least once within t[1], 0 otherwise
# y12[p,k] = 1 if demand point p is covered at least twice within t[1], 0 otherwise
# y2[p] = 1 if demand point p is covered at least once within t[2], 0 otherwise
@variables(model, begin
(s1 >= 0) # slack
(s2 >= 0) # slack
end)
@constraints(model, begin
(ambAtOneStation[i=1:a], sum(x[i,:]) == 1) # each ambulance must be assigned to one station
(pointCoverOrderY1[p=1:np[1]], y11[p] >= y12[p]) # single coverage before double coverage; not needed for y2
(demandCoveredOnceT1, sum(y11[p] * pointDemands[1][p] for p=1:np[1]) + s1 >= coverFractionTargetT1 * sum(pointDemands[1])) # fraction of demand covered once within t[1]
(demandCoveredOnceT2, sum(y2[p] * pointDemands[2][p] for p=1:np[2]) + s2 >= sum(pointDemands[2])) # all demand covered once within t[2]
end)
if options[:z_var]
@variable(model, z[j=1:s], Int) # z[j] = number of ambulances to assign to station j
@constraints(model, begin
(stationAmbCount[j=1:s], z[j] == sum(x[:,j]))
(pointCoverCountY1[p=1:np[1]], y11[p] + y12[p] <= sum(z[pointStations[1][p]]))
(pointCoverCountY2[p=1:np[2]], y2[p] <= sum(z[pointStations[2][p]]))
end)
else
@constraints(model, begin
(pointCoverCountY1[p=1:np[1]], y11[p] + y12[p] <= sum(x[:,pointStations[1][p]]))
(pointCoverCountY2[p=1:np[2]], y2[p] <= sum(x[:,pointStations[2][p]]))
end)
end
@expressions(model, begin
demandCoveredTwiceT1, sum(y12[p] * pointDemands[1][p] for p=1:np[1])
totalAmbTravelCost, sum(x[i,j] * ambToStationCosts[i,j] for i=1:a, j=1:s)
slackCost, (s1 + s2) * slackWeight
end)
# solve
@objective(model, Max, demandCoveredTwiceT1 - totalAmbTravelCost - slackCost)
@stdout_silent optimize!(model)
@assert(termination_status(model) == MOI.OPTIMAL)
# get solution
vals = Dict()
vals[:x] = JuMP.value.(x)
vals[:y11] = JuMP.value.(y11)
vals[:y12] = JuMP.value.(y12)
vals[:y2] = JuMP.value.(y2)
sol = convert(Array{Bool,2}, round.(vals[:x]))
if checkMode
@assert(all(sum(sol, dims=2) .== 1)) # check constraint: ambAtOneStation
# check that values are binary
for sym in [:x, :y11, :y12, :y2]
err = maximum(abs.(vals[sym] - round.(vals[sym])))
@assert(err <= options[:bin_tol], (sym, err))
end
end
ambStations = [stations[findfirst(sol[i,:])] for i=1:a]
return movableAmbs, ambStations
end
| [
29113,
29113,
7804,
2235,
198,
2,
15069,
2177,
1279,
20608,
28401,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
26... | 2.752702 | 3,886 |
module Criterion
include("stats.jl")
include("analysis.jl")
include("types.jl")
include("measurement.jl")
include("environment.jl")
include("run.jl")
include("report.jl")
end
| [
21412,
10056,
28019,
198,
198,
17256,
7203,
34242,
13,
20362,
4943,
198,
17256,
7203,
20930,
13,
20362,
4943,
198,
17256,
7203,
19199,
13,
20362,
4943,
198,
17256,
7203,
1326,
5015,
434,
13,
20362,
4943,
198,
17256,
7203,
38986,
13,
20362... | 3.034483 | 58 |
#=
*** rewrite for import purposes ***
This is a resnet implementation by <NAME>uret (https://github.com/denizyuret/Knet.jl/blob/master/examples/resnet/resnet.jl) with minor changes:
Copyright (c) 2015: <NAME>.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
julia resnet.jl image-file-or-url
This example implements the ResNet-50, ResNet-101 and ResNet-152 models from
'Deep Residual Learning for Image Regocnition', <NAME>, <NAME>,
<NAME>, <NAME>, arXiv technical report 1512.03385, 2015.
* Paper url: https://arxiv.org/abs/1512.03385
* Project page: https://github.com/KaimingHe/deep-residual-networks
* MatConvNet weights used here: http://www.vlfeat.org/matconvnet/pretrained
=#
# mode, 0=>train, 1=>test
function resnet50(w,x,ms; mode=1)
# layer 1
conv1 = conv4(w[1],x; padding=3, stride=2) .+ w[2]
bn1 = batchnorm(w[3:4],conv1,ms; mode=mode)
pool1 = pool(bn1; window=3, stride=2)
# layer 2,3,4,5
r2 = reslayerx5(w[5:34], pool1, ms; strides=[1,1,1,1], mode=mode)
r3 = reslayerx5(w[35:73], r2, ms; mode=mode)
r4 = reslayerx5(w[74:130], r3, ms; mode=mode) # 5
r5 = reslayerx5(w[131:160], r4, ms; mode=mode)
# fully connected layer
pool5 = pool(r5; stride=1, window=7, mode=2)
fc1000 = w[161] * mat(pool5) .+ w[162]
end
# mode, 0=>train, 1=>test
function resnet101(w,x,ms; mode=1)
# layer 1
conv1 = reslayerx1(w[1:3],x,ms; padding=3, stride=2, mode=mode)
pool1 = pool(conv1; window=3, stride=2)
# layer 2,3,4,5
r2 = reslayerx5(w[4:33], pool1, ms; strides=[1,1,1,1], mode=mode)
r3 = reslayerx5(w[34:72], r2, ms; mode=mode)
r4 = reslayerx5(w[73:282], r3, ms; mode=mode)
r5 = reslayerx5(w[283:312], r4, ms; mode=mode)
# fully connected layer
pool5 = pool(r5; stride=1, window=7, mode=2)
fc1000 = w[313] * mat(pool5) .+ w[314]
end
# mode, 0=>train, 1=>test
function resnet152(w,x,ms; mode=1)
# layer 1
conv1 = reslayerx1(w[1:3],x,ms; padding=3, stride=2, mode=mode)
pool1 = pool(conv1; window=3, stride=2)
# layer 2,3,4,5
r2 = reslayerx5(w[4:33], pool1, ms; strides=[1,1,1,1], mode=mode)
r3 = reslayerx5(w[34:108], r2, ms; mode=mode)
r4 = reslayerx5(w[109:435], r3, ms; mode=mode)
r5 = reslayerx5(w[436:465], r4, ms; mode=mode)
# fully connected layer
pool5 = pool(r5; stride=1, window=7, mode=2)
fc1000 = w[466] * mat(pool5) .+ w[467]
end
# Batch Normalization Layer
# works both for convolutional and fully connected layers
# mode, 0=>train, 1=>test
function batchnorm(w, x, ms; mode=1, epsilon=1e-5)
mu, sigma = nothing, nothing
if mode == 0
d = ndims(x) == 4 ? (1,2,4) : (2,)
s = prod(size(x,d...))
mu = sum(x,d) / s
x0 = x .- mu
x1 = x0 .* x0
sigma = sqrt(epsilon + (sum(x1, d)) / s)
elseif mode == 1
mu = popfirst!(ms)
sigma = popfirst!(ms)
end
# we need value in backpropagation
push!(ms, value(mu), value(sigma))
xhat = (x.-mu) ./ sigma
return w[1] .* xhat .+ w[2]
end
function reslayerx0(w,x,ms; padding=0, stride=1, mode=1)
b = conv4(w[1],x; padding=padding, stride=stride)
bx = batchnorm(w[2:3],b,ms; mode=mode)
end
function reslayerx1(w,x,ms; padding=0, stride=1, mode=1)
relu.(reslayerx0(w,x,ms; padding=padding, stride=stride, mode=mode))
end
function reslayerx2(w,x,ms; pads=[0,1,0], strides=[1,1,1], mode=1)
ba = reslayerx1(w[1:3],x,ms; padding=pads[1], stride=strides[1], mode=mode)
bb = reslayerx1(w[4:6],ba,ms; padding=pads[2], stride=strides[2], mode=mode)
bc = reslayerx0(w[7:9],bb,ms; padding=pads[3], stride=strides[3], mode=mode)
end
function reslayerx3(w,x,ms; pads=[0,0,1,0], strides=[2,2,1,1], mode=1) # 12
a = reslayerx0(w[1:3],x,ms; stride=strides[1], padding=pads[1], mode=mode)
b = reslayerx2(w[4:12],x,ms; strides=strides[2:4], pads=pads[2:4], mode=mode)
relu.(a .+ b)
end
function reslayerx4(w,x,ms; pads=[0,1,0], strides=[1,1,1], mode=1)
relu.(x .+ reslayerx2(w,x,ms; pads=pads, strides=strides, mode=mode))
end
function reslayerx5(w,x,ms; strides=[2,2,1,1], mode=1)
x = reslayerx3(w[1:12],x,ms; strides=strides, mode=mode)
for k = 13:9:length(w)
x = reslayerx4(w[k:k+8],x,ms; mode=mode)
end
return x
end
function get_params(params, atype)
len = length(params["value"])
ws, ms = [], []
for k = 1:len
name = params["name"][k]
value = convert(Array{Float32}, params["value"][k])
if endswith(name, "moments")
push!(ms, reshape(value[:,1], (1,1,size(value,1),1)))
push!(ms, reshape(value[:,2], (1,1,size(value,1),1)))
elseif startswith(name, "bn")
push!(ws, reshape(value, (1,1,length(value),1)))
elseif startswith(name, "fc") && endswith(name, "filter")
push!(ws, transpose(reshape(value,(size(value,3),size(value,4)))))
elseif startswith(name, "conv") && endswith(name, "bias")
push!(ws, reshape(value, (1,1,length(value),1)))
else
push!(ws, value)
end
end
map(wi->convert(atype, wi), ws),
map(mi->convert(atype, mi), ms)
end
# This allows both non-interactive (shell command) and interactive calls like:
# $ julia vgg.jl cat.jpg
# julia> ResNet.main("cat.jpg")
#PROGRAM_FILE=="resnet.jl" && main(ARGS)
| [
2,
28,
198,
8162,
28183,
329,
1330,
4959,
17202,
198,
198,
1212,
318,
257,
581,
3262,
7822,
416,
1279,
20608,
29,
495,
83,
357,
5450,
1378,
12567,
13,
785,
14,
6559,
528,
88,
495,
83,
14,
42,
3262,
13,
20362,
14,
2436,
672,
14,
... | 2.179389 | 2,620 |
<filename>utils/install.jl<gh_stars>1-10
#!/usr/bin/env julia
# Usage
#
# You can execute this file directly as long as the `julia` executable is
# in your PATH and if you are on a Linux/macOS system. If `julia` is not in
# your PATH or if you are on a Windows system, call Julia and explicitly
# provide this file as a command line parameter, e.g., `path/to/julia
# Trixi.jl/utils/install.jl`.
import Pkg
# Get Trixi root directory
trixi_root_dir = dirname(@__DIR__)
# Install Trixi dependencies
println("*"^80)
println("Installing dependencies for Trixi...")
Pkg.activate(trixi_root_dir)
Pkg.instantiate()
# Install Trixi2Img dependencies
println("*"^80)
println("Installing dependencies for Trixi2Img...")
Pkg.activate(joinpath(trixi_root_dir, "postprocessing", "pkg", "Trixi2Img"))
Pkg.instantiate()
# Install Trixi2Vtk dependencies
println("*"^80)
println("Installing dependencies for Trixi2Vtk...")
Pkg.activate(joinpath(trixi_root_dir, "postprocessing", "pkg", "Trixi2Vtk"))
Pkg.instantiate()
println("*"^80)
println("Done.")
| [
27,
34345,
29,
26791,
14,
17350,
13,
20362,
27,
456,
62,
30783,
29,
16,
12,
940,
198,
2,
48443,
14629,
14,
8800,
14,
24330,
474,
43640,
198,
198,
2,
29566,
198,
2,
198,
2,
921,
460,
12260,
428,
2393,
3264,
355,
890,
355,
262,
46... | 2.857143 | 364 |
<gh_stars>10-100
# Energy
function NbodyEnergy(u,Gm)
"""
Nbody problem Hamiltonian (Cartesian Coordinates)
"""
dim=2
nbody=length(Gm)
@inbounds begin
x = view(u,1:7) # x
y = view(u,8:14) # y
v = view(u,15:21) # x′
w = view(u,22:28) # y′
H=zero(eltype(u))
P=zero(eltype(u))
for i in 1:nbody
H+=Gm[i]*(v[i]*v[i]+w[i]*w[i])
for j in i+1:nbody
r = ((x[i]-x[j])^2+(y[i]-y[j])^2)^(1/2)
P+=(Gm[i]/r)*Gm[j]
end
end
return(H/2-P)
end
end
# OdeProblem
function f(du,u,p,t)
@inbounds begin
x = view(u,1:7) # x
y = view(u,8:14) # y
v = view(u,15:21) # x′
w = view(u,22:28) # y′
du[1:7] .= v
du[8:14].= w
for i in 15:28
du[i] = zero(u[1])
end
for i=1:7,j=1:7
if i != j
r = ((x[i]-x[j])^2 + (y[i] - y[j])^2)^(3/2)
du[14+i] += j*(x[j] - x[i])/r
du[21+i] += j*(y[j] - y[i])/r
end
end
end
end
# DynamicalOdeProblem
function dotv(dv,q,v,par,t)
@inbounds begin
x = view(q,1:7) # x
y = view(q,8:14) # y
vx = view(v,1:7) # x′
vy = view(v,8:14) # y′
for i in 1:14
dv[i] = zero(x[1])
end
for i=1:7,j=1:7
if i != j
r = ((x[i]-x[j])^2 + (y[i] - y[j])^2)^(3/2)
dv[i] += j*(x[j] - x[i])/r
dv[7+i] += j*(y[j] - y[i])/r
end
end
end
end
function dotq(dq,q,v,par,t)
@inbounds begin
x = view(q,1:7) # x
y = view(q,8:14) # y
vx = view(v,1:7) # x′
vy = view(v,8:14) # y′
dq[1:7] .= vx
dq[8:14].= vy
end
end
#
# Second Order Problem
#
function f2nd!(ddu,du,u,p,t)
@inbounds begin
x = view(u,1:7) # x
y = view(u,8:14) # y
vx = view(du,1:7) # x′
vy = view(du,8:14) # y′
for i in 1:14
ddu[i] = zero(x[1])
end
for i=1:7,j=1:7
if i != j
r = ((x[i]-x[j])^2 + (y[i] - y[j])^2)^(3/2)
ddu[i] += j*(x[j] - x[i])/r
ddu[7+i] += j*(y[j] - y[i])/r
end
end
end
end
| [
27,
456,
62,
30783,
29,
940,
12,
3064,
198,
198,
2,
6682,
198,
198,
8818,
399,
2618,
28925,
7,
84,
11,
38,
76,
8,
198,
37811,
198,
220,
220,
220,
220,
399,
2618,
1917,
11582,
666,
357,
43476,
35610,
22819,
17540,
8,
198,
37811,
... | 1.580328 | 1,220 |
f (1)
| [
69,
357,
16,
8,
198
] | 1.2 | 5 |
<reponame>alenskorobogatova/ImPlot.jl
# Histogram (Bar) plots
# Vertical bars
function PlotBars(label_id, x::AbstractArray{T1}, y::AbstractArray{T2},
args...) where {T1<:Real,T2<:Real}
return PlotBars(label_id, promote(x, y)..., args...)
end
function PlotBars(values::AbstractArray{T}; count::Integer=length(values),
label_id::String="", width=0.67, shift=0.0, offset::Integer=0,
stride::Integer=1) where {T<:ImPlotData}
return PlotBars(label_id, values, count, width, shift, offset, stride * sizeof(T))
end
function PlotBars(x::AbstractArray{T}, y::AbstractArray{T};
count::Integer=min(length(x), length(y)), label_id::String="", width=0.67,
offset::Integer=0, stride::Integer=1) where {T<:ImPlotData}
return PlotBars(label_id, x, y, count, width, offset, stride * sizeof(T))
end
function PlotBars(x::AbstractArray{T1}, y::AbstractArray{T2};
kwargs...) where {T1<:Real,T2<:Real}
return PlotBars(promote(x, y)..., kwargs...)
end
# Horizontal bars
function PlotBarsH(label_id, x::AbstractArray{T1}, y::AbstractArray{T2},
args...) where {T1<:Real,T2<:Real}
return PlotBarsH(label_id, promote(x, y)..., args...)
end
function PlotBarsH(values::AbstractArray{T}; count::Integer=length(values),
label_id::String="", width=0.67, shift=0.0, offset::Integer=0,
stride::Integer=1) where {T<:ImPlotData}
return PlotBarsH(label_id, values, count, width, shift, offset, stride * sizeof(T))
end
function PlotBarsH(x::AbstractArray{T}, y::AbstractArray{T};
count::Integer=min(length(x), length(y)), label_id::String="",
width=0.67, offset::Integer=0, stride::Integer=1) where {T<:ImPlotData}
return PlotBarsH(label_id, x, y, count, width, offset, stride * sizeof(T))
end
function PlotBarsH(x::AbstractArray{T1}, y::AbstractArray{T2};
kwargs...) where {T1<:Real,T2<:Real}
return PlotBarsH(promote(x, y)..., kwargs...)
end
| [
27,
7856,
261,
480,
29,
282,
641,
74,
273,
672,
519,
265,
10071,
14,
3546,
43328,
13,
20362,
198,
2,
5590,
21857,
357,
10374,
8,
21528,
198,
2,
38937,
9210,
198,
8818,
28114,
33,
945,
7,
18242,
62,
312,
11,
2124,
3712,
23839,
1918... | 2.263965 | 913 |
using RecipesBase
export SeqPlot
export TestPlot
export rectangle_corners
function rectangle_corners(x::Real,y::Real,w,h; anchor=:bottomright)
if anchor == :botttomright
[x,x+w,x+w,x], [y,y,y+h,y+h]
elseif anchor == :center
[x-w/2, x+w/2, x+w/2, x-w/2], [y-h/2, y-h/2, y+h/2, y+h/2]
else
error("Anchor not recognised.")
end
end
function rectangle_corners(
x_vec::Vector{T},y_vec::Vector{T},
w,h; anchor=:bottomleft
) where {T<:Real}
x_out = Vector{Real}[]
y_out = Vector{Real}[]
if anchor == :bottomleft
for (x,y) in zip(x_vec, y_vec)
push!(x_out, [x,x+w,x+w,x])
push!(y_out, [y,y,y+h,y+h])
end
elseif anchor == :center
for (x,y) in zip(x_vec, y_vec)
push!(x_out, [x-w/2, x+w/2, x+w/2, x-w/2])
push!(y_out, [y-h/2, y-h/2, y+h/2, y+h/2])
end
else
error("Anchor not recognised.")
end
x_out, y_out
end
@userplot SeqPlot
@recipe function f(
h::SeqPlot;
entrymargin=0.1,
entryfontsize=20
)
seq = h.args[1]
w,h = (1,1)
y = fill(0.0, length(seq))
x = [0.0 + i*(w+entrymargin) for i in 0:(length(seq)-1)]
annotations := [(x,y,string(i)) for (x,y,i) in zip(x,y,seq)]
annotationfontsize := entryfontsize
x_cords, y_cords = rectangle_corners(x,y, w, h; anchor=:center)
showaxis --> false
axis --> nothing
aspect_ratio --> 1
legend --> false
@series begin
seriestype := :shape
x_cords,y_cords
end
end
@userplot TestPlot
@recipe function f(
h::TestPlot
)
aspect_ratio --> 1
showaxis --> false
@series begin
seriestype := :shape
1:4,1:4
end
@series begin
seriestype := :line
1:4, 1:4
annotations := [[];[(i,i,string(i)) for i in 1:4]]
annotationhalign --> :right
end
end | [
3500,
44229,
14881,
198,
39344,
1001,
80,
43328,
198,
39344,
6208,
43328,
198,
39344,
35991,
62,
20772,
364,
198,
198,
8818,
35991,
62,
20772,
364,
7,
87,
3712,
15633,
11,
88,
3712,
15633,
11,
86,
11,
71,
26,
18021,
28,
25,
22487,
3... | 1.892399 | 1,013 |
<filename>Chapter4/ex8.jl
## Exercise 4-8
## Enter the code in this chapter in a notebook.
## 1. Draw a stack diagram that shows the state of the program while executing circle(🐢, radius).
## You can do the arithmetic by hand or add print statements to the code.
println("Ans 1: ")
println(" turtle --> Turtle")
println(" turtle --> Turtle")
println(" radius --> 100")
println(" circumference --> 628.318...")
println(" n --> 212")
println(" len --> 2")
println(" turtle --> Turtle")
println(" nsides --> 212")
println(" len --> 2")
println(" i --> 1:212")
println(" return")
## 2. The version of arc in Refactoring is not very accurate because the linear approximation of the circle
## is always outside the true circle. As a result, the turtle ends up a few pixels away from the correct destination.
## My solution shows a way to reduce the effect of this error. Read the code and see if it makes sense to you.
## If you draw a diagram, you might see how it works.
using ThinkJulia
function polyline(t, n, len, angle)
for i in 1:n
forward(t, len)
turn(t, -angle)
end
end
"""
arc(t, r, angle)
Draws an arc with the given radius and angle:
t: turtle
r: radius
angle: angle subtended by the arc, in degrees
"""
function arc(t, r, angle)
arc_len = 2 * π * r * abs(angle) / 360
n = trunc(arc_len / 4) + 3
step_len = arc_len / n
step_angle = angle / n
# making a slight left turn before starting reduces
# the error caused by the linear approximation of the arc
turn(t, -step_angle/2)
polyline(t, n, step_len, step_angle)
turn(t, step_angle/2)
end
println("Ans 2: ")
@svg begin
turtle = Turtle()
# forward(turtle, 30) # for checking the position of the turtle
arc(turtle, 100, 125)
end
##
println("End.")
| [
27,
34345,
29,
14126,
19,
14,
1069,
23,
13,
20362,
198,
2235,
32900,
604,
12,
23,
198,
2235,
6062,
262,
2438,
287,
428,
6843,
287,
257,
20922,
13,
198,
198,
2235,
352,
13,
15315,
257,
8931,
16362,
326,
2523,
262,
1181,
286,
262,
1... | 2.845092 | 652 |
include("ctqw/ctqw.jl")
include("szegedy/szegedy.jl")
| [
17256,
7203,
310,
80,
86,
14,
310,
80,
86,
13,
20362,
4943,
198,
17256,
7203,
82,
89,
1533,
4716,
14,
82,
89,
1533,
4716,
13,
20362,
4943,
198
] | 1.928571 | 28 |
<filename>scripts/01_check_biomass_richness_shapes.jl
##########
##########
# A check of the distribution of shapes of biomass and richness
# in relevant paper
##########
##########
# AUTHOR: <NAME>
# DATE OF CREATION: 2021-05-17
##########
##########
# set up =======================================================================
using DrWatson
@quickactivate "trait-based-rewiring"
# set packages
using DataFrames, Plots, StatsPlots
include(srcdir("01_eqn_component_functions.jl"))
# set all positional values
organism_id_pos = 1
organism_type_pos = 2
body_size_pos = 3
lower_prey_limit_pos = 4
upper_prey_limit_pos = 5
habitat_midpoint_pos = 6
habitat_lower_limit_pos = 7
habitat_upper_limit_pos = 8
nutritional_value_pos = 9
biomass_pos = 10
network_id_pos = 11
alive_pos = 12
# perform loop to grab all values ==============================================
# draw 100 networks of the biomass/richness shapes and look at how they group
richness_biomass_comp = Array{Any}(undef, (450000, 5))
j = 1
for i in 1:1000
for richness in ["top-heavy", "bottom-heavy", "uniform"]
for biomass in ["pyramid", "uniform", "inverted"]
temp = Array{Any}(undef, (50, 5))
species_list = network_draw(50, richness)[1]
species_list = initialize_biomass(species_list, biomass)
for k in 1:size(species_list, 1)
temp[k, 1] = richness
temp[k, 2] = biomass
temp[k, 3] = species_list[k, body_size_pos]
temp[k, 4] = species_list[k, habitat_midpoint_pos]
temp[k, 5] = species_list[k, organism_type_pos]
end
richness_biomass_comp[(j:j+49), :] = temp
j = j + 50
end
end
end
# turn array into dataframe
richness_biomass_comp = DataFrame(
richness_biomass_comp,
[:richness_shape, :biomass_shape, :body_size, :biomass, :org_type]
)
# make plots of the 9 combos ===================================================
# top-heavy & pyramid
topheavy_pryamid_data = DataFrame(filter(
(row -> row.richness_shape == "top-heavy" &&
row.biomass_shape == "pyramid"),
richness_biomass_comp
))
topheavy_pryamid_plot = @df topheavy_pryamid_data plot(
:body_size,
:biomass,
group = :org_type,
seriestype = :scatter,
xlabel = "body size",
ylabel = "biomass",
legend = false
#title = "Richness = top-heavy, biomass = pyramid"
)
# top-heavy & uniform
topheavy_uniform_data = filter(
(row -> row.richness_shape == "top-heavy" &&
row.biomass_shape == "uniform"),
richness_biomass_comp
)
topheavy_uniform_plot = @df topheavy_uniform_data plot(
:body_size,
:biomass,
group = :org_type,
seriestype = :scatter,
xlabel = "body size",
ylabel = "biomass",
legend = false
#title = "Richness = top-heavy, biomass = uniform"
)
# top-heavy and inverted
topheavy_inverted_data = filter(
(row -> row.richness_shape == "top-heavy" &&
row.biomass_shape == "inverted"),
richness_biomass_comp
)
topheavy_inverted_plot = @df topheavy_inverted_data plot(
:body_size,
:biomass,
group = :org_type,
seriestype = :scatter,
xlabel = "body size",
ylabel = "biomass",
legend = false
#title = "Richness = top-heavy, biomass = inverted"
)
# uniform and pyramid
uniform_pyramid_data = filter(
(row -> row.richness_shape == "uniform" &&
row.biomass_shape == "pyramid"),
richness_biomass_comp
)
uniform_pyramid_plot = @df uniform_pyramid_data plot(
:body_size,
:biomass,
group = :org_type,
seriestype = :scatter,
xlabel = "body size",
ylabel = "biomass",
legend = false
#title = "Richness = uniform, biomass = pyramid"
)
# uniform and uniform
uniform_uniform_data = filter(
(row -> row.richness_shape == "uniform" &&
row.biomass_shape == "uniform"),
richness_biomass_comp
)
uniform_uniform_plot = @df uniform_uniform_data plot(
:body_size,
:biomass,
group = :org_type,
seriestype = :scatter,
xlabel = "body size",
ylabel = "biomass",
legend = false
#title = "Richness = uniform, biomass = uniform"
)
# uniform and inverted
uniform_inverted_data = filter(
(row -> row.richness_shape == "uniform" &&
row.biomass_shape == "inverted"),
richness_biomass_comp
)
uniform_inverted_plot = @df uniform_inverted_data plot(
:body_size,
:biomass,
group = :org_type,
seriestype = :scatter,
xlabel = "body size",
ylabel = "biomass",
legend = false
#title = "Richness = uniform, biomass = inverted"
)
# bottom-heavy and pyramid
bottomheavy_pryamid_data = filter(
(row -> row.richness_shape == "bottom-heavy" &&
row.biomass_shape == "pyramid"),
richness_biomass_comp
)
bottomheavy_pryamid_plot = @df bottomheavy_pryamid_data plot(
:body_size,
:biomass,
group = :org_type,
seriestype = :scatter,
xlabel = "body size",
ylabel = "biomass",
legend = false
#title = "Richness = bottom-heavy, biomass = pyramid"
)
# bottom-heavy and uniform
bottomheavy_uniform_data = filter(
(row -> row.richness_shape == "bottom-heavy" &&
row.biomass_shape == "uniform"),
richness_biomass_comp
)
bottomheavy_uniform_plot = @df bottomheavy_uniform_data plot(
:body_size,
:biomass,
group = :org_type,
seriestype = :scatter,
xlabel = "body size",
ylabel = "biomass",
legend = false
#title = "Richness = bottom-heavy, biomass = uniform"
)
# bottom-heavy and inverted
bottomheavy_inverted_data = filter(
(row -> row.richness_shape == "bottom-heavy" &&
row.biomass_shape == "inverted"),
richness_biomass_comp
)
bottomheavy_inverted_plot = @df bottomheavy_inverted_data plot(
:body_size,
:biomass,
group = :org_type,
seriestype = :scatter,
xlabel = "body size",
ylabel = "biomass",
#title = "Richness = bottom-heavy, biomass = inverted",
legend = false
)
# put all plots together
all_shapes_plots = Plots.plot(
topheavy_pryamid_plot,
topheavy_uniform_plot,
topheavy_inverted_plot,
uniform_pyramid_plot,
uniform_uniform_plot,
uniform_inverted_plot,
bottomheavy_pryamid_plot,
bottomheavy_uniform_plot,
bottomheavy_inverted_plot,
layout = (3,3),
#legend = false,
#layout = Plots.grid(3,3, heights = [0.3, 0.3, 0.3]),
#xlabel = "body size",
#ylabel = "biomass"
title = ["1", "2", "3", "4", "5", "6", "7", "8", "9"]
)
Plots.savefig(all_shapes_plots, "./figs/all_shapes_plots.png")
| [
27,
34345,
29,
46521,
14,
486,
62,
9122,
62,
8482,
296,
562,
62,
7527,
1108,
62,
1477,
7916,
13,
20362,
198,
7804,
2235,
220,
198,
7804,
2235,
198,
2,
317,
2198,
286,
262,
6082,
286,
15268,
286,
42584,
290,
46792,
198,
2,
287,
598... | 2.413299 | 2,722 |
########################################
## File Name: control_types.jl
## Author: <NAME> (<EMAIL>)
## Date Created: 2020/05/12
## Description: Control Type Definitions for SACBP
########################################
import Base.vec
abstract type Control end
struct PosControl{T<:Real} <: Control
t::Float64
vel::Vector{T}
dim::Int64
function PosControl{T}(t,vel,dim) where {T<:Real}
if dim != length(vel)
error(ArgumentError("Invalid control vector length."));
else
return new(t,vel,dim);
end
end
end
PosControl(t::Real,vel::Vector{T},dim::Int64=length(vel)) where {T<:Real} = PosControl{T}(t,vel,dim);
PosControl(t::Real,vel::T) where {T<:Real} = PosControl{T}(t,[vel],1);
vec(u::PosControl) = u.vel;
struct MControl2D{T<:Real} <: Control
t::Float64
fx::T # Force x
fy::T # Force y
tr::T # Torque
end
function MControl2D(t::Real,params::Vector{T}) where {T<:Real}
if length(params) != 3
error(ArgumentError("Invalid parameter vector length."))
else
return MControl2D{T}(t,params[1],params[2],params[3]);
end
end
vec(u::MControl2D) = [u.fx,u.fy,u.tr];
| [
29113,
7804,
198,
2235,
9220,
6530,
25,
1630,
62,
19199,
13,
20362,
198,
2235,
6434,
25,
1279,
20608,
29,
38155,
27630,
4146,
43734,
198,
2235,
7536,
15622,
25,
12131,
14,
2713,
14,
1065,
198,
2235,
12489,
25,
6779,
5994,
45205,
329,
... | 2.444906 | 481 |
<reponame>sdangelis/GenomePermutations.jl
using GenomePermutations
using GenomicFeatures
using Documenter
# Make the local package source visible when building the docs standalone.
push!(LOAD_PATH,"../src/")
# Run doctests with the package loaded (recursively through submodules).
DocMeta.setdocmeta!(GenomePermutations, :DocTestSetup, :(using GenomePermutations); recursive=true)
makedocs(;
    modules=[GenomePermutations],
    authors="<NAME> <<EMAIL>> <<EMAIL>>",
    repo="https://github.com/sdangelis/GenomePermutations.jl/blob/{commit}{path}#{line}",
    sitename="GenomePermutations.jl",
    format=Documenter.HTML(;
        # Pretty URLs only on CI builds; locally keep file:// friendly paths.
        prettyurls=get(ENV, "CI", "false") == "true",
        canonical="https://sdangelis.github.io/GenomePermutations.jl",
        assets=String[],
    ),
    pages=[
        "Home" => "index.md",
    ],
)
# Publish the built docs; `main` is the development branch.
deploydocs(;
    repo="github.com/sdangelis/GenomePermutations.jl",
    devbranch="main",
)
| [
27,
7856,
261,
480,
29,
21282,
8368,
271,
14,
13746,
462,
5990,
21973,
602,
13,
20362,
198,
3500,
5215,
462,
5990,
21973,
602,
198,
3500,
5215,
10179,
23595,
198,
3500,
16854,
263,
198,
198,
14689,
0,
7,
35613,
62,
34219,
553,
40720,
... | 2.407407 | 324 |
# ---
# title: 688. Knight Probability in Chessboard
# id: problem688
# author: Indigo
# date: 2021-06-29
# difficulty: Medium
# categories: Dynamic Programming
# link: <https://leetcode.com/problems/knight-probability-in-chessboard/description/>
# hidden: true
# ---
#
# On an `N`x`N` chessboard, a knight starts at the `r`-th row and `c`-th column
# and attempts to make exactly `K` moves. The rows and columns are 0 indexed, so
# the top-left square is `(0, 0)`, and the bottom-right square is `(N-1, N-1)`.
#
# A chess knight has 8 possible moves it can make, as illustrated below. Each
# move is two squares in a cardinal direction, then one square in an orthogonal
# direction.
#
#
#
# 
#
#
#
# Each time the knight is to move, it chooses one of eight possible moves
# uniformly at random (even if the piece would go off the chessboard) and moves
# there.
#
# The knight continues moving until it has made exactly `K` moves or has moved
# off the chessboard. Return the probability that the knight remains on the
# board after it has stopped moving.
#
#
#
# **Example:**
#
#
#
# Input: 3, 2, 0, 0
# Output: 0.0625
# Explanation: There are two moves (to (1,2), (2,1)) that will keep the knight on the board.
# From each of those positions, there are also two moves that will keep the knight on the board.
# The total probability the knight stays on the board is 0.0625.
#
#
#
#
# **Note:**
#
# * `N` will be between 1 and 25.
# * `K` will be between 0 and 100.
# * The knight always initially starts on the board.
#
#
## @lc code=start
using LeetCode
"""
    knight_probability(n, k, row, col)

Probability that a knight starting at 0-indexed `(row, col)` on an `n`×`n`
board is still on the board after `k` uniformly random moves
(LeetCode 688).
"""
function knight_probability(n::Int, k::Int, row::Int, col::Int)
    # Propagate probabilities (Float64) instead of integer path counts: the
    # count-based version overflows Int64 once 8^k > typemax(Int), i.e. for
    # k ≥ 22, while the problem allows k up to 100.
    # dp[:, :, layer] is the probability of occupying each square; the two
    # layers alternate as current/next buffers.
    dp = zeros(Float64, n, n, 2)
    dp[row + 1, col + 1, 1] = 1.0
    for i in 1:k
        cur = @view(dp[:, :, mod1(i, 2)])
        nxt = @view(dp[:, :, mod1(i + 1, 2)])
        idcs = CartesianIndices(nxt)
        # Each of the 8 knight moves is taken with probability 1/8; moves
        # leaving the board simply drop their probability mass.
        for I in idcs, hop in ((1, 2), (2, 1), (-1, 2), (2, -1), (1, -2), (-2, 1), (-1, -2), (-2, -1))
            J = I + CartesianIndex(hop)
            J ∈ idcs && (nxt[J] += cur[I] / 8)
        end
        fill!(cur, 0.0)
    end
    # Exactly one layer is non-zero at this point, so summing both is safe.
    return sum(dp)
end
## @lc code=end
| [
2,
11420,
198,
2,
3670,
25,
718,
3459,
13,
6700,
30873,
1799,
287,
25774,
3526,
198,
2,
4686,
25,
1917,
34427,
198,
2,
1772,
25,
40673,
198,
2,
3128,
25,
33448,
12,
3312,
12,
1959,
198,
2,
8722,
25,
13398,
198,
2,
9376,
25,
2697... | 2.464835 | 910 |
using Test
using NCDatasets
using DataStructures
"""
    example_file(i, array)

Write `array` as the first time slice (time value `i`) of a small example
NetCDF file and return its path.  The file defines `var` over
(lon, lat, time) with CF-style attributes, lon/lat/time coordinate
variables, a global `history` attribute, and a group `mygroup` holding a
shifted copy (`array .+ 1`) of the data.
"""
function example_file(i,array)
    fname = tempname()
    @debug "fname $fname"
    Dataset(fname,"c") do ds
        # Dimensions (time is unlimited)
        ds.dim["lon"] = size(array,1)
        ds.dim["lat"] = size(array,2)
        ds.dim["time"] = Inf
        # Declare variables
        ncvar = defVar(ds,"var", Float64, ("lon", "lat", "time"))
        ncvar.attrib["field"] = "u-wind, scalar, series"
        ncvar.attrib["units"] = "meter second-1"
        ncvar.attrib["long_name"] = "surface u-wind component"
        ncvar.attrib["time"] = "time"
        ncvar.attrib["coordinates"] = "lon lat"
        nclat = defVar(ds,"lat", Float64, ("lat",))
        nclat.attrib["units"] = "degrees_north"
        nclon = defVar(ds,"lon", Float64, ("lon",))
        nclon.attrib["units"] = "degrees_east"
        nclon.attrib["modulo"] = 360.0
        nctime = defVar(ds,"time", Float64, ("time",))
        nctime.attrib["long_name"] = "surface wind time"
        nctime.attrib["field"] = "time, scalar, series"
        nctime.attrib["units"] = "days since 2000-01-01 00:00:00 GMT"
        # Global attributes
        ds.attrib["history"] = "foo"
        # Same variable again inside a group, with shifted values.
        g = defGroup(ds,"mygroup")
        ncvarg = defVar(g,"var", Float64, ("lon", "lat", "time"))
        ncvarg.attrib["field"] = "u-wind, scalar, series"
        ncvarg.attrib["units"] = "meter second-1"
        ncvarg.attrib["long_name"] = "surface u-wind component"
        ncvarg.attrib["time"] = "time"
        ncvarg.attrib["coordinates"] = "lon lat"
        ncvar[:,:,1] = array
        ncvarg[:,:,1] = array.+1
        #nclon[:] = 1:size(array,1)
        #nclat[:] = 1:size(array,2)
        nctime[:] = i
    end
    return fname
end
# Build a sample file and exercise DeferDataset (lazy re-opening) against it.
A = randn(2,3,1)
fname = example_file(1,A)
ds = Dataset(fname)
info = NCDatasets.metadata(ds)
#@show info
dds = DeferDataset(fname);
varname = "var"
datavar = variable(dds,varname);
data = datavar[:,:,:]
@test A == datavar[:,:,:]
@test dds.attrib["history"] == "foo"
@test datavar.attrib["units"] == "meter second-1"
@test dimnames(datavar) == ("lon", "lat", "time")
lon = variable(dds,"lon");
@test lon.attrib["units"] == "degrees_east"
@test size(lon) == (size(data,1),)
# Indexing the dataset directly must agree with `variable`.
datavar = dds[varname]
@test A == datavar[:,:,:]
@test dimnames(datavar) == ("lon", "lat", "time")
@test dds.dim["lon"] == size(A,1)
@test dds.dim["lat"] == size(A,2)
@test dds.dim["time"] == size(A,3)
# NOTE(review): the line below duplicates the assertion above.
@test dds.dim["time"] == size(A,3)
close(dds)
# show
# NOTE(review): both `show` calls below print `dds`, so the comparison is
# trivially true; the second was presumably meant to print `ds` — confirm.
dds_buf = IOBuffer()
show(dds_buf,dds)
ds_buf = IOBuffer()
show(ds_buf,dds)
@test String(take!(dds_buf)) == String(take!(ds_buf))
#=
# write
dds = Dataset(fnames,"a");
dds["datavar"][2,2,:] = 1:length(fnames)
for n = 1:length(fnames)
    Dataset(fnames[n]) do ds
        @test ds["datavar"][2,2,1] == n
    end
end
dds.attrib["history"] = "foo2"
sync(dds)
Dataset(fnames[1]) do ds
    @test ds.attrib["history"] == "foo2"
end
@test_throws NCDatasets.NetCDFError Dataset(fnames,"not-a-mode")
@test keys(dds) == ["datavar", "lat", "lon", "time"]
@test keys(dds.dim) == ["lon", "lat", "time"]
@test NCDatasets.groupname(dds) == "/"
@test size(dds["var"]) == (2, 3, 3)
@test size(dds["var"].var) == (2, 3, 3)
@test name(dds["var"].var) == "var"
@test NCDatasets.groupname(dds.group["mygroup"]) == "mygroup"
# create new dimension in all files
dds.dim["newdim"] = 123;
sync(dds);
Dataset(fnames[1]) do ds
    @test ds.dim["newdim"] == 123
end
close(dds)
=#
nothing
| [
3500,
6208,
198,
3500,
399,
8610,
265,
292,
1039,
198,
3500,
6060,
44909,
942,
198,
198,
8818,
1672,
62,
7753,
7,
72,
11,
18747,
8,
198,
220,
220,
220,
277,
3672,
796,
20218,
3672,
3419,
198,
220,
220,
220,
2488,
24442,
366,
69,
3... | 2.107273 | 1,650 |
<filename>JuliaFactorial/BasicSwingFactorial.jl
# Copyright <NAME>. License is MIT.
module BasicSwingFactorial
export SwingFactorial

"""
    SwingFactorial(n::Int)::BigInt

Return the factorial of ``n``. Implementation of the swing algorithm using no
primes. An advanced version based on prime-factorization which is much faster
is available as the prime-swing factorial. However the claim is that this is
the fastest algorithm not using prime-factorization. It has the same recursive
structure as its big brother.

Throws an `ArgumentError` for negative `n`.
"""
function SwingFactorial(n::Int)::BigInt
    # Precomputed odd factorials for n < 41 (base case of the recursion).
    smallOddFactorial = BigInt[ 0x0000000000000000000000000000001,
        0x0000000000000000000000000000001, 0x0000000000000000000000000000001,
        0x0000000000000000000000000000003, 0x0000000000000000000000000000003,
        0x000000000000000000000000000000f, 0x000000000000000000000000000002d,
        0x000000000000000000000000000013b, 0x000000000000000000000000000013b,
        0x0000000000000000000000000000b13, 0x000000000000000000000000000375f,
        0x0000000000000000000000000026115, 0x000000000000000000000000007233f,
        0x00000000000000000000000005cca33, 0x0000000000000000000000002898765,
        0x00000000000000000000000260eeeeb, 0x00000000000000000000000260eeeeb,
        0x0000000000000000000000286fddd9b, 0x00000000000000000000016beecca73,
        0x000000000000000000001b02b930689, 0x00000000000000000000870d9df20ad,
        0x0000000000000000000b141df4dae31, 0x00000000000000000079dd498567c1b,
        0x00000000000000000af2e19afc5266d, 0x000000000000000020d8a4d0f4f7347,
        0x000000000000000335281867ec241ef, 0x0000000000000029b3093d46fdd5923,
        0x0000000000000465e1f9767cc5866b1, 0x0000000000001ec92dd23d6966aced7,
        0x0000000000037cca30d0f4f0a196e5b, 0x0000000000344fd8dc3e5a1977d7755,
        0x000000000655ab42ab8ce915831734b, 0x000000000655ab42ab8ce915831734b,
        0x00000000d10b13981d2a0bc5e5fdcab, 0x0000000de1bc4d19efcac82445da75b,
        0x000001e5dcbe8a8bc8b95cf58cde171, 0x00001114c2b2deea0e8444a1f3cecf9,
        0x0002780023da37d4191deb683ce3ffd, 0x002ee802a93224bddd3878bc84ebfc7,
        0x07255867c6a398ecb39a64b83ff3751, 0x23baba06e131fc9f8203f7993fc1495]
    # Product m * (m-2) * (m-4) * ... over `len` odd factors, split
    # recursively for balanced BigInt multiplications.
    function oddProduct(m, len)
        if len < 24
            p = BigInt(m)
            for k in 2:2:2(len-1)
                p *= (m - k)
            end
            return p
        end
        hlen = len >> 1
        oddProduct(m - 2 * hlen, len - hlen) * oddProduct(m, hlen)
    end
    # Returns (oddFactorial(n), oddFactorial(n ÷ 2)) — the recursion reuses
    # the half-size value as the "square root" factor.
    function oddFactorial(n)
        if n < 41
            oddFact = smallOddFactorial[1+n]
            sqrOddFact = smallOddFactorial[1+div(n, 2)]
        else
            sqrOddFact, oldOddFact = oddFactorial(div(n, 2))
            len = div(n - 1, 4)
            (n % 4) != 2 && (len += 1)
            high = n - ((n + 1) & 1)
            oddSwing = div(oddProduct(high, len), oldOddFact)
            oddFact = sqrOddFact^2 * oddSwing
        end
        (oddFact, sqrOddFact)
    end
    # Fixed: the original built `ArgumentError(...)` without throwing it,
    # so negative input fell through to a BoundsError deeper in.
    n < 0 && throw(ArgumentError("n must be ≥ 0"))
    if n == 0 return 1 end
    # n! = oddFactorial(n) * 2^(n - popcount(n))  (Legendre's formula).
    sh = n - count_ones(n)
    oddFactorial(n)[1] << sh
end

#START-TEST-################################################
using Test

function main()
    @testset "SwingFactorial" begin
        for n in 0:999
            S = SwingFactorial(n)
            B = Base.factorial(BigInt(n))
            @test S == B
        end
    end
    GC.gc()
    n = 1000000
    @time SwingFactorial(n)
end
main()

end # module
| [
27,
34345,
29,
16980,
544,
29054,
5132,
14,
26416,
50,
5469,
29054,
5132,
13,
20362,
198,
2,
15069,
1279,
20608,
28401,
13789,
318,
17168,
13,
198,
198,
21412,
14392,
50,
5469,
29054,
5132,
198,
39344,
43650,
29054,
5132,
198,
198,
3781... | 2.335894 | 1,432 |
#This script calculates the squeezing for different quadratures.
using LinearAlgebra
using Plots; pyplot()
using NPZ
using LaTeXStrings
include("MagnusExpansion.jl")
include("boson.jl")
include("HarmonicCorrectionBoson.jl")
"""
Calculate the uncertainties on the x(θ) and y(θ) quadratures for
a range imax values of θ in the interval [θi, θi+θrange].
"""
function quadrature_scan(moments, imax; θi=-pi/4, θrange=pi/2)
θ_vec, Δx2_vec, Δy2_vec = zeros(imax), zeros(imax), zeros(imax)
for i = 1: imax
θ = θi + (i - 1)*θrange/(imax - 1)
θ_vec[i] = θ
Δx2_vec[i] = (moments[3] + cos(2*θ)*moments[1] + sin(2*θ)*moments[2])/2
Δy2_vec[i] = (moments[3] - cos(2*θ)*moments[1] - sin(2*θ)*moments[2])/2
end
return θ_vec, Δx2_vec, Δy2_vec
end
"""
Apply the function quadrature_scan a couple of times, always around the value of θ
where maximum squeezing has been found.
"""
function fine_quadrature_scan(moments, imax)
θ_vec, Δx2_vec, Δy2_vec = quadrature_scan(moments, imax)
for i = 1: 4
k = argmin(Δy2_vec)
θi = θ_vec[k - 1]
θrange = θ_vec[k + 1] - θ_vec[k - 1]
output = quadrature_scan(moments, imax; θi=θi, θrange=θrange)
θ_vec .= output[1]
Δx2_vec .= output[2]
Δy2_vec .= output[3]
end
return θ_vec, Δx2_vec, Δy2_vec
end
#array with different gate times tf
tf0, tf1 = 2.0, 15.0
nt = 200
tf_vec = zeros(nt)
nt = size(tf_vec)[1]
dB_vec = zeros(nt, 3)
moments = zeros(nt, 3)
coeffs_array = zeros(nt, 3)
imax = 10
θopt = zeros(nt,2)
# Sweep gate times logarithmically between tf0 and tf1; for each, compute
# squeezing (in dB) and optimal quadrature angle under three models.
for j = 1: nt
    println(j)
    tf = tf0*(tf1/tf0)^((j - 1)/(nt - 1))
    tf_vec[j] = tf
    κ = 0.0
    # solution in the absence of counter-rotating terms
    cr_terms = false
    moments[j, :] = solve_2moments(tf, κ, cr_terms, zeros(3))
    Δx2 = (moments[j, 3] + moments[j, 1])/2
    Δy2 = (moments[j, 3] - moments[j, 1])/2
    dB_vec[j, 1] = -10.0*log10(2*Δy2)
    # solution in the presence of counter-rotating terms
    cr_terms = true
    moments[j, :] = solve_2moments(tf, κ, cr_terms, zeros(3))
    Δx2 = (moments[j, 3] + moments[j, 1])/2
    Δy2 = (moments[j, 3] - moments[j, 1])/2
    dB_vec[j, 2] = -10.0*log10(2*Δy2)
    θ_vec, Δx2_vec, Δy2_vec = fine_quadrature_scan(moments[j, :], imax)
    k = argmin(Δy2_vec)
    θopt[j, 1] = θ_vec[k]
    # solution in the presence of counter-rotating terms and correction.
    cr_terms = true
    M = correction_matrix(tf)
    order = 6
    coeffs = get_coeffs(M, order, tf)
    coeffs_array[j, :] = coeffs
    moments[j, :] = solve_2moments(tf, κ, cr_terms, coeffs)
    Δx2 = (moments[j, 3] + moments[j, 1])/2
    Δy2 = (moments[j, 3] - moments[j, 1])/2
    dB_vec[j, 3] = -10.0*log10(2*Δy2)
    θ_vec, Δx2_vec, Δy2_vec = fine_quadrature_scan(moments[j, :], imax)
    k = argmin(Δy2_vec)
    θopt[j, 2] = θ_vec[k]
end
# save figures
plot(tf_vec, dB_vec,
    linewidth = 2,
    labels = ["No correction" "6th order\nMagnus" "RWA"])
plot!(tickfont = font(12, "Serif"),
    xlabel = "Pulse width "*L"\omega_\mathrm{a} t_\mathrm{f}",
    ylabel = "Squeezing (dB)",
    guidefont = font(14, "Serif"),
    legendfont = font(12, "Serif"),
    background_color_legend = false,
    margin = 5Plots.mm)
savefig("squeezing.pdf")
# Optimal squeezing angles (third column zero as the RWA reference).
y = hcat(θopt, zeros(size(θopt)[1]))
plot(tf_vec, y,
    linewidth = 2,
    labels = ["No correction" "6th order\nMagnus" "RWA"])
plot!(tickfont = font(12, "Serif"),
    xlabel = "Pulse width "*L"\omega_\mathrm{a} t_\mathrm{f}",
    ylabel = L"\varphi",
    guidefont = font(14, "Serif"),
    legendfont = font(12, "Serif"),
    background_color_legend = false,
    margin = 5Plots.mm)
savefig("squeezing_angle.pdf")
# Correction coefficient magnitudes against the 1/tf scale.
y = hcat(1.0./tf_vec, abs.(coeffs_array))
plot(tf_vec, y,
    linewidth = 2,
    labels = [L"1/t_\mathrm{f}" L"|c_{x,1}|" L"|c_{y,1}|" L"|\Delta|"])
plot!(yaxis = :log,
    tickfont = font(12, "Serif"),
    xlabel = "Pulse width "*L"\omega_\mathrm{a} t_\mathrm{f}",
    ylabel = "coeff."*L"\times \omega_\mathrm{a}^{-1}",
    guidefont = font(14, "Serif"),
    legendfont = font(12, "Serif"),
    background_color_legend = false,
    margin = 5Plots.mm)
savefig("coeffs.pdf")
# save data files
output = zeros(nt,4)
output[:, 1] = tf_vec
output[:, 2: end] = dB_vec
npzwrite("dB.npy", output)
output = zeros(nt,3)
output[:, 1] = tf_vec
output[:, 2: end] = θopt
npzwrite("angle.npy", output)
output = zeros(nt,4)
output[:, 1] = tf_vec
output[:, 2: end] = coeffs_array
npzwrite("coeffs.npy", output)
| [
2,
1212,
4226,
43707,
262,
40237,
329,
1180,
15094,
81,
6691,
13,
198,
3500,
44800,
2348,
29230,
198,
3500,
1345,
1747,
26,
12972,
29487,
3419,
198,
3500,
28498,
57,
198,
3500,
4689,
49568,
13290,
654,
198,
198,
17256,
7203,
48017,
385,... | 2.059512 | 2,050 |
<filename>test/runtests.jl
using LinearAlgebra, SparseArrays, Test
using MultivariatePolynomials
using SumOfSquares
# Taken from JuMP/test/solvers.jl
# Attempt to `import` the package named `name`; report success as a Bool
# instead of letting a missing package abort the test run.
function try_import(name::Symbol)
    success = true
    try
        @eval import $name
    catch
        success = false
    end
    return success
end
# Pick a polynomial backend: prefer DynamicPolynomials, fall back to
# TypedPolynomials, and abort if neither is installed.
if try_import(:DynamicPolynomials)
    import DynamicPolynomials.@polyvar
    import DynamicPolynomials.@ncpolyvar
else
    if try_import(:TypedPolynomials)
        import TypedPolynomials.@polyvar
    else
        error("No polynomial implementation installed : Please install TypedPolynomials or DynamicPolynomials")
    end
end
# Solver-free test suites.
include("certificate.jl")
include("gram_matrix.jl")
include("variable.jl")
include("constraint.jl")
include("Mock/mock_tests.jl")
# Tests needing a solver
# FIXME these tests should be converted to Literate and moved to `examples` or
# converted to be used with `MockOptimizer` and moved to `test/Tests`
include("solvers.jl")
include("sospoly.jl")
include("sosquartic.jl")
include("equalitypolyconstr.jl")
| [
27,
34345,
29,
9288,
14,
81,
2797,
3558,
13,
20362,
198,
3500,
44800,
2348,
29230,
11,
1338,
17208,
3163,
20477,
11,
6208,
198,
198,
3500,
7854,
42524,
34220,
26601,
8231,
198,
3500,
5060,
5189,
22266,
3565,
198,
198,
2,
30222,
422,
1... | 2.781081 | 370 |
<gh_stars>0
# ------------------------------------------------------------------
# Licensed under the MIT License. See LICENSE in the project root.
# ------------------------------------------------------------------
"""
DensityRatioWeighting(tdata, [vars]; [options])
Density ratio weights based on empirical distribution of
variables in target data `tdata`. Default to all variables.
## Optional parameters
* `estimator` - Density ratio estimator (default to `LSIF()`)
* `optlib` - Optimization library (default to `default_optlib(estimator)`)
### Notes
Estimators from `DensityRatioEstimation.jl` are supported.
"""
struct DensityRatioWeighting <: WeightingMethod
tdata
vars
dre
optlib
end
function DensityRatioWeighting(tdata, vars=nothing; estimator=LSIF(),
optlib=default_optlib(estimator))
validvars = collect(name.(variables(tdata)))
wvars = isnothing(vars) ? validvars : vars
@assert wvars ⊆ validvars "invalid variables ($wvars) for spatial data"
DensityRatioWeighting(tdata, wvars, estimator, optlib)
end
"""
    weight(sdata, method::DensityRatioWeighting)

Estimate density-ratio weights for each sample of `sdata` with respect to
the target data stored in `method`, returning `SpatialWeights` over the
domain of `sdata`.
"""
function weight(sdata, method::DensityRatioWeighting)
  # retrieve method parameters
  tdata = method.tdata
  vars = method.vars
  dre = method.dre
  optlib = method.optlib
  @assert vars ⊆ name.(variables(sdata)) "invalid variables ($vars) for spatial data"
  # numerator (target) and denominator (source) samples
  Ωnu = view(tdata, vars)
  Ωde = view(sdata, vars)
  xnu = collect(Tables.rows(values(Ωnu)))
  xde = collect(Tables.rows(values(Ωde)))
  # perform density ratio estimation
  ratios = densratio(xnu, xde, dre, optlib=optlib)
  SpatialWeights(domain(sdata), ratios)
end
| [
27,
456,
62,
30783,
29,
15,
198,
2,
16529,
438,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
38559,
24290,
287,
262,
1628,
6808,
13,
198,
2,
16529,
438,
198,
198,
37811,
198,
220,
220,
220,
360,
6377,
29665,
952,
25844,
278,
7... | 2.951786 | 560 |
# Evaluate `expr` directly in module `mod`.  Debug traces are gated on
# `interpstate.options.debug`; exceptions are traced and rethrown with
# their original backtrace.
function eval_ast(interpstate::InterpState, mod, expr)
    interpstate.options.debug && @show :eval_ast mod expr
    try
        # ans = Core.eval(mod, expr)
        ans = @eval mod $expr
        interpstate.options.debug && @show :eval_ast ans
        return ans
    catch exception
        interpstate.options.debug && @show :eval_ast mod expr exception
        rethrow()
    end
end
# Lower `expr` and interpret the resulting thunk's CodeInfo; expressions
# that do not lower to a `:thunk` fall back to direct evaluation.
function eval_lower_ast(interpstate::InterpState, mod, expr)
    interpstate.options.debug && @show :eval_lower_ast mod expr
    try
        lwr = Meta.lower(mod, expr)::Expr
        if lwr.head == :thunk
            codestate = code_state_from_thunk(interpstate, mod, lwr.args[1]::Core.CodeInfo)
            ans = interpret_lower(codestate)
        else
            # NOTE(review): this @show is not gated on options.debug —
            # presumably intentional for the unexpected fallback; confirm.
            @show lwr mod expr
            # ans = Core.eval(mod, expr)
            ans = @eval mod $expr
        end
        interpstate.options.debug && @show :eval_lower_ast ans
        return ans
    catch exception
        interpstate.options.debug && @show :eval_lower_ast mod expr exception
        rethrow()
    end
end
# --- Expr-head handlers -------------------------------------------------
# Each `interpret_ast_<head>` rebuilds an `Expr` with that head and forwards
# it to `eval_lower_ast` (lowered interpretation) or `eval_ast` (direct
# evaluation, used for macros and module-level constructs).  The `@show`
# lines are debug traces gated on `interpstate.options.debug`.
function interpret_ast_comparison(interpstate::InterpState, mod, args)
    interpstate.options.debug && @show mod :comparison args
    expr = Expr(:comparison, args...)
    eval_lower_ast(interpstate, mod, expr)
end
function interpret_ast_or(interpstate::InterpState, mod, args)
    interpstate.options.debug && @show mod :(||) args
    expr = Expr(:(||), args...)
    eval_lower_ast(interpstate, mod, expr)
end
function interpret_ast_and(interpstate::InterpState, mod, args)
    interpstate.options.debug && @show mod :(&&) args
    expr = Expr(:(&&), args...)
    eval_lower_ast(interpstate, mod, expr)
end
function interpret_ast_primitive(interpstate::InterpState, mod, args)
    interpstate.options.debug && @show mod :primitive args
    expr = Expr(:primitive, args...)
    eval_lower_ast(interpstate, mod, expr)
end
function interpret_ast_call(interpstate::InterpState, mod, args)
    interpstate.options.debug && @show mod :call args
    expr = Expr(:call, args...)
    eval_lower_ast(interpstate, mod, expr)
end
function interpret_ast_macrocall(interpstate::InterpState, mod, args)
    interpstate.options.debug && @show mod :macrocall args
    expr = Expr(:macrocall, args...)
    # Macros may be better handled by the compiler?
    eval_ast(interpstate, mod, expr)
end
function interpret_ast_broadcast(interpstate::InterpState, mod, args)
    interpstate.options.debug && @show mod :(.=) args
    expr = Expr(:(.=), args...)
    eval_lower_ast(interpstate, mod, expr)
end
function interpret_ast_assign(interpstate::InterpState, mod, args)
    interpstate.options.debug && @show mod :(=) args
    expr = Expr(:(=), args...)
    eval_lower_ast(interpstate, mod, expr)
end
function interpret_ast_const(interpstate::InterpState, mod, args)
    interpstate.options.debug && @show mod :const args
    expr = Expr(:const, args...)
    eval_lower_ast(interpstate, mod, expr)
end
function interpret_ast_local(interpstate::InterpState, mod, args)
    interpstate.options.debug && @show mod :local args
    expr = Expr(:local, args...)
    eval_lower_ast(interpstate, mod, expr)
end
function interpret_ast_global(interpstate::InterpState, mod, args)
    interpstate.options.debug && @show mod :global args
    expr = Expr(:global, args...)
    eval_ast(interpstate, mod, expr)
end
function interpret_ast_let(interpstate::InterpState, mod, args)
    interpstate.options.debug && @show mod :let args
    expr = Expr(:let, args...)
    eval_lower_ast(interpstate, mod, expr)
end
# Interpret each statement of a block in order, returning the last value.
# The most recent LineNumberNode is remembered for error reporting.
function interpret_ast_block(interpstate::InterpState, mod, args)
    interpstate.options.debug && @show mod :block args
    local linenumbernode
    try
        local ans
        for arg in args
            ans = interpret_ast_node(interpstate, mod, arg)
            if ans isa LineNumberNode
                linenumbernode = ans
            end
        end
        ans
    catch
        # NOTE(review): if the error occurs before any LineNumberNode is
        # seen, `linenumbernode` is undefined and this @show itself
        # errors — confirm whether that is intended.
        @show linenumbernode
        rethrow()
    end
end
function interpret_ast_try(interpstate::InterpState, mod, args)
    interpstate.options.debug && @show mod :try args
    expr = Expr(:try, args...)
    eval_lower_ast(interpstate, mod, expr)
end
function interpret_ast_do(interpstate::InterpState, mod, args)
    interpstate.options.debug && @show mod :do args
    expr = Expr(:do, args...)
    eval_lower_ast(interpstate, mod, expr)
end
function interpret_ast_if(interpstate::InterpState, mod, args)
    interpstate.options.debug && @show mod :if args
    expr = Expr(:if, args...)
    eval_lower_ast(interpstate, mod, expr)
end
function interpret_ast_for(interpstate::InterpState, mod, args)
    interpstate.options.debug && @show mod :for args
    expr = Expr(:for, args...)
    eval_lower_ast(interpstate, mod, expr)
end
function interpret_ast_while(interpstate::InterpState, mod, args)
    interpstate.options.debug && @show mod :while args
    expr = Expr(:while, args...)
    eval_lower_ast(interpstate, mod, expr)
end
function interpret_ast_function(interpstate::InterpState, mod, args)
    interpstate.options.debug && @show mod :function args
    expr = Expr(:function, args...)
    eval_lower_ast(interpstate, mod, expr)
end
# Handler for `where` expressions (type variable scoping).
function interpret_ast_where(interpstate::InterpState, mod, args)
    # Fixed copy-paste bug: the debug trace previously printed `:function`
    # instead of `:where`.
    interpstate.options.debug && @show mod :where args
    expr = Expr(:where, args...)
    eval_lower_ast(interpstate, mod, expr)
end
# --- Expr-head handlers (continued) -------------------------------------
# Same pattern as above; macro/module-level heads go through `eval_ast`.
function interpret_ast_macro(interpstate::InterpState, mod, args)
    interpstate.options.debug && @show mod :macro args
    expr = Expr(:macro, args...)
    # Macros may be better handled by the compiler?
    eval_ast(interpstate, mod, expr)
end
function interpret_ast_struct(interpstate::InterpState, mod, args)
    interpstate.options.debug && @show mod :struct args
    expr = Expr(:struct, args...)
    eval_lower_ast(interpstate, mod, expr)
end
function interpret_ast_curly(interpstate::InterpState, mod, args)
    interpstate.options.debug && @show mod :curly args
    expr = Expr(:curly, args...)
    eval_lower_ast(interpstate, mod, expr)
end
function interpret_ast_abstract(interpstate::InterpState, mod, args)
    interpstate.options.debug && @show mod :abstract args
    expr = Expr(:abstract, args...)
    eval_lower_ast(interpstate, mod, expr)
end
function interpret_ast_using(interpstate::InterpState, mod, args)
    interpstate.options.debug && @show mod :using args
    expr = Expr(:using, args...)
    eval_ast(interpstate, mod, expr)
end
function interpret_ast_import(interpstate::InterpState, mod, args)
    interpstate.options.debug && @show mod :import args
    expr = Expr(:import, args...)
    eval_ast(interpstate, mod, expr)
end
function interpret_ast_export(interpstate::InterpState, mod, args)
    interpstate.options.debug && @show mod :export args
    expr = Expr(:export, args...)
    # Can we really lower exports?
    eval_lower_ast(interpstate, mod, expr)
end
# Parser-reported error node: just print it.
function interpret_ast_error(interpstate::InterpState, mod, args)
    @show mod :error args
end
# Incomplete input (e.g. unterminated expression): just print it.
function interpret_ast_incomplete(interpstate, mod, args)
    @show mod :incomplete args
end
# Interpret the statements of a top-level block in order (same logic as
# interpret_ast_block, including LineNumberNode tracking).
function interpret_ast_toplevel(interpstate::InterpState, mod, args)
    interpstate.options.debug && @show mod :toplevel args
    local linenumbernode
    try
        local ans
        for arg in args
            ans = interpret_ast_node(interpstate, mod, arg)
            if ans isa LineNumberNode
                linenumbernode = ans
            end
        end
        ans
    catch
        @show linenumbernode
        rethrow()
    end
end
function interpret_ast_module(interpstate::InterpState, mod, args)
    interpstate.options.debug && @show mod :module args
    expr = Expr(:module, args...)
    eval_ast(interpstate, mod, expr)
end
# Dispatch on `expr.head` to the matching `interpret_ast_*` handler; any
# head without a handler is a hard failure.
function interpret_ast_expr(interpstate::InterpState, mod, expr::Expr)
    if expr.head == :comparison
        interpret_ast_comparison(interpstate, mod, expr.args)
    elseif expr.head == :(||)
        interpret_ast_or(interpstate, mod, expr.args)
    elseif expr.head == :(&&)
        interpret_ast_and(interpstate, mod, expr.args)
    elseif expr.head == :primitive
        interpret_ast_primitive(interpstate, mod, expr.args)
    elseif expr.head == :call
        interpret_ast_call(interpstate, mod, expr.args)
    elseif expr.head == :macrocall
        interpret_ast_macrocall(interpstate, mod, expr.args)
    elseif expr.head == :(.=)
        interpret_ast_broadcast(interpstate, mod, expr.args)
    elseif expr.head == :(=)
        interpret_ast_assign(interpstate, mod, expr.args)
    elseif expr.head == :const
        interpret_ast_const(interpstate, mod, expr.args)
    elseif expr.head == :local
        interpret_ast_local(interpstate, mod, expr.args)
    elseif expr.head == :global
        interpret_ast_global(interpstate, mod, expr.args)
    elseif expr.head == :let
        interpret_ast_let(interpstate, mod, expr.args)
    elseif expr.head == :block
        interpret_ast_block(interpstate, mod, expr.args)
    elseif expr.head == :try
        interpret_ast_try(interpstate, mod, expr.args)
    elseif expr.head == :do
        interpret_ast_do(interpstate, mod, expr.args)
    elseif expr.head == :if
        interpret_ast_if(interpstate, mod, expr.args)
    elseif expr.head == :for
        interpret_ast_for(interpstate, mod, expr.args)
    elseif expr.head == :while
        interpret_ast_while(interpstate, mod, expr.args)
    elseif expr.head == :function
        interpret_ast_function(interpstate, mod, expr.args)
    elseif expr.head == :where
        interpret_ast_where(interpstate, mod, expr.args)
    elseif expr.head == :macro
        interpret_ast_macro(interpstate, mod, expr.args)
    elseif expr.head == :struct
        interpret_ast_struct(interpstate, mod, expr.args)
    elseif expr.head == :curly
        interpret_ast_curly(interpstate, mod, expr.args)
    elseif expr.head == :abstract
        interpret_ast_abstract(interpstate, mod, expr.args)
    elseif expr.head == :using
        interpret_ast_using(interpstate, mod, expr.args)
    elseif expr.head == :import
        interpret_ast_import(interpstate, mod, expr.args)
    elseif expr.head == :export
        interpret_ast_export(interpstate, mod, expr.args)
    elseif expr.head == :error
        interpret_ast_error(interpstate, mod, expr.args)
    elseif expr.head == :incomplete
        interpret_ast_incomplete(interpstate, mod, expr.args)
    elseif expr.head == :toplevel
        interpret_ast_toplevel(interpstate, mod, expr.args)
    elseif expr.head == :module
        interpret_ast_module(interpstate, mod, expr.args)
    else
        # Unhandled Expr head — fail loudly during development.
        @show expr
        @assert false
    end
end
# Interpret a single AST node: Expr nodes are dispatched by head, Symbols
# are resolved by evaluation, LineNumberNodes pass through unchanged.
function interpret_ast_node(interpstate::InterpState, mod, node)
    @nospecialize node
    if node isa Expr
        expr = node
        interpret_ast_expr(interpstate, mod, expr)
    elseif node isa Symbol
        symbol = node
        eval_ast(interpstate, mod, symbol)
    elseif node isa LineNumberNode
        linenumbernode = node
        linenumbernode
    else
        # Unhandled node kind — fail loudly during development.
        @show typeof(node) node
        @assert false
    end
end
# Entry point: interpret `expr` in module `mod` with the given options,
# printing per-module/method call statistics afterwards when requested.
function interpret_ast(mod, expr, options)
    interpstate = interp_state(mod, options)
    try
        interpret_ast_node(interpstate, mod, expr)
    finally
        #= for (wt, (meth, sparam_vals, src)) in interpstate.meths
            @show wt meth sparam_vals
        end =#
        if interpstate.options.stats
            # Report call counts and elapsed time, sorted by elapsed time.
            for (mod_or_meth, stats) in sort(collect(interpstate.stats), by=kv -> kv[2].elapsed)
                @show mod_or_meth Int(stats.calls) Int(stats.elapsed)
            end
        end
    end
end
| [
8818,
5418,
62,
459,
7,
3849,
79,
5219,
3712,
9492,
79,
9012,
11,
953,
11,
44052,
8,
198,
220,
220,
220,
987,
79,
5219,
13,
25811,
13,
24442,
11405,
2488,
12860,
1058,
18206,
62,
459,
953,
44052,
198,
220,
220,
220,
1949,
198,
220... | 2.49515 | 4,639 |
module Apply
# Recursive tuple-mapping utilities: `apply` maps `f` elementwise over up
# to three tuples (zipped), returning a tuple; `offset_apply` folds `f`
# over a tuple while threading an integer offset through the calls.
export apply, offset_apply
using Base: tail
# Map `f` over tuple `a` (optionally zipped with `b` and `c`), passing
# `args...` through to every call.  Recursion peels the first element and
# recurses on `tail`; the empty-tuple methods below terminate, stopping at
# the shortest tuple when lengths differ.
apply(f::F, a::Tuple{A,Vararg}, args...) where {F,A} =
    (f(a[1], args...), apply(f, tail(a), args...)...)
apply(f::F, a::Tuple{A,Vararg}, b::Tuple{B,Vararg}, args...) where {F,A,B} =
    (f(a[1], b[1], args...), apply(f, tail(a), tail(b), args...)...)
apply(f::F, a::Tuple{A,Vararg}, b::Tuple{B,Vararg}, c::Tuple{C,Vararg}, args...) where {F,A,B,C} =
    (f(a[1], b[1], c[1], args...), apply(f, tail(a), tail(b), tail(c), args...)...)
apply(f, ::Tuple{}, ::Tuple{}, ::Tuple{}, args...) = ()
apply(f, a, ::Tuple{}, ::Tuple{}, args...) = ()
apply(f, ::Tuple{}, b, ::Tuple{}, args...) = ()
apply(f, a, b, ::Tuple{}, args...) = ()
apply(f, ::Tuple{}, ::Tuple{}, args...) = ()
apply(f, a, ::Tuple{}, args...) = ()
apply(f, ::Tuple{}, args...) = ()
# Fold `f` over tuple `o` (optionally paired with array `a` on either
# side), threading `offset`: each call of `f` returns the next offset.
# Always returns `nothing`.
offset_apply(f, o::Tuple{O,Vararg}, a::AbstractArray, offset::Int, args...) where O = begin
    offset = f(o[1], a, offset, args...)
    offset_apply(f, tail(o), a, offset::Int, args...)
    nothing
end
offset_apply(f, a::AbstractArray, o::Tuple{O,Vararg}, offset::Int, args...) where O = begin
    offset = f(a, o[1], offset, args...)
    offset_apply(f, a, tail(o), offset::Int, args...)
    nothing
end
offset_apply(f, o::Tuple{O,Vararg}, offset::Int, args...) where O = begin
    offset = f(o[1], offset, args...)
    offset_apply(f, tail(o), offset::Int, args...)
    nothing
end
offset_apply(f, o::Tuple{}, offset::Int, args...) = nothing
offset_apply(f, a::AbstractArray, o::Tuple{}, offset::Int, args...) = nothing
offset_apply(f, o::Tuple{}, a::AbstractArray, offset::Int, args...) = nothing
end # module
| [
21412,
27967,
198,
198,
39344,
4174,
11,
11677,
62,
39014,
198,
3500,
7308,
25,
7894,
198,
198,
39014,
7,
69,
3712,
37,
11,
257,
3712,
51,
29291,
90,
32,
11,
19852,
853,
5512,
26498,
23029,
810,
1391,
37,
11,
32,
92,
796,
220,
198... | 2.412463 | 674 |
using Documenter, ICD10Utilities
# Build the documentation site for ICD10Utilities.
makedocs(
	sitename = "ICD-10 Utilities",
	modules=[ICD10Utilities],
	pages=[
	"ICD-10" => "index.md",
  "icdo3.md",
  "icd10am.md",
  "icd10cm.md"
  ])
# Deploy the built docs; `main` is the development branch.
deploydocs(
	repo = "github.com/timbp/ICD10Utilities.jl.git",
	devbranch = "main",
	)
| [
3500,
16854,
263,
11,
314,
8610,
940,
18274,
2410,
198,
198,
76,
4335,
420,
82,
7,
198,
197,
1650,
12453,
796,
366,
2149,
35,
12,
940,
41086,
1600,
198,
197,
13103,
41888,
2149,
35,
940,
18274,
2410,
4357,
198,
220,
220,
5468,
41888... | 2.020408 | 147 |
# JavaCall wrapper: construct a ShortPeriodicsInterpolatedCoefficient from
# an integer argument (forwarded to the Java constructor taking a jint).
function ShortPeriodicsInterpolatedCoefficient(arg0::jint)
    return ShortPeriodicsInterpolatedCoefficient((jint,), arg0)
end
# Forward to the Java method `addGridPoint(AbsoluteDate, double[])`.
function add_grid_point(obj::ShortPeriodicsInterpolatedCoefficient, arg0::AbsoluteDate, arg1::Vector{jdouble})
    return jcall(obj, "addGridPoint", void, (AbsoluteDate, Vector{jdouble}), arg0, arg1)
end
# Forward to the Java method `clearHistory()`.
function clear_history(obj::ShortPeriodicsInterpolatedCoefficient)
    return jcall(obj, "clearHistory", void, ())
end
# Forward to the Java method `value(AbsoluteDate)`; returns a double[].
function value(obj::ShortPeriodicsInterpolatedCoefficient, arg0::AbsoluteDate)
    return jcall(obj, "value", Vector{jdouble}, (AbsoluteDate,), arg0)
end
| [
8818,
10073,
5990,
2101,
873,
9492,
16104,
515,
34,
2577,
5632,
7,
853,
15,
3712,
73,
600,
8,
198,
220,
220,
220,
1441,
10073,
5990,
2101,
873,
9492,
16104,
515,
34,
2577,
5632,
19510,
73,
600,
11,
828,
1822,
15,
8,
198,
437,
198,... | 2.895238 | 210 |
# Map a direction name ("UP"/"DOWN"/"LEFT"/"RIGHT") to the corresponding
# (row, column) step as a CartesianIndex.
function cartesianmoveindex(direction)
    steps = Dict("UP" => (-1, 0), "DOWN" => (1, 0), "LEFT" => (0, -1), "RIGHT" => (0, 1))
    return CartesianIndex(steps[direction])
end
"""
    swaptiles!(board, x, y)

Swap the entries at indices `x` and `y` of `board` in place and return
`board` (idiomatic tuple swap instead of a temporary variable).
"""
function swaptiles!(board, x, y)
    board[x], board[y] = board[y], board[x]
    return board
end
"""
    moves(board)

List the legal slide actions for the puzzle `board`.  The blank is the
entry equal to zero; each action is a tuple `(index, direction)` where
`index` is an in-bounds neighbour of the blank and `direction` is the
*opposite* of the step from the blank to that neighbour — i.e. the step
that carries that tile into the blank (see `copyboard`).
"""
function moves(board)
    first = findfirst(iszero, board)
    movedict = Dict("UP" => "DOWN", "DOWN" => "UP", "LEFT" => "RIGHT", "RIGHT" => "LEFT")
    directions = []
    for i in ("UP", "DOWN", "LEFT", "RIGHT")
        index = first + cartesianmoveindex(i)
        # Keep only neighbours that lie on the board.
        if checkbounds(Bool, board, index)
            action = (index, movedict[i])
            push!(directions, action)
        end
    end
    return directions
end
"""
    copyboard(action, board)

Return a copy of `board` with `action` applied: the tile at `action[1]`
is moved one step in direction `action[2]`, which lands it on the blank
square (see `moves`).
"""
function copyboard(action, board)
    newboard = copy(board)
    index1 = action[1]
    # index2 is index1 shifted by the action's direction — the blank square.
    index2 = index1 + cartesianmoveindex(action[2])
    swaptiles!(newboard, index1, index2)
    return newboard
end
## determine all options for move states
# Generate all successor boards reachable from `board` in one move.
function expansion(board)
    states = []
    possacts = moves(board)
    for act in possacts
        push!(states, copyboard(act, board))
    end
    return states
end
| [
8818,
6383,
35610,
21084,
9630,
7,
37295,
8,
198,
220,
220,
220,
4571,
11600,
796,
360,
713,
7203,
8577,
1,
5218,
13841,
16,
11,
657,
828,
366,
41925,
1,
5218,
357,
16,
11,
657,
828,
366,
2538,
9792,
1,
5218,
357,
15,
11,
532,
1... | 2.452991 | 468 |
<filename>src/weierstrasspoints.jl
######################################################################
# weierstrasspoints.jl: Addition laws for projective points on Weierstrass curves
######################################################################
export infinity, projective_add, projective_scalar_mul
######################################################################
# Basic methods
######################################################################
"""
Get the point at infinity on an elliptic curve in Weierstrass form.
"""
function infinity(E::AbstractWeierstrass)
R = base_ring(E)
return EllipticPoint(Nemo.zero(R), Nemo.one(R), Nemo.zero(R), E)
end
function zero(E::AbstractWeierstrass)
return infinity(E)
end
######################################################################
# Addition law
######################################################################
"""
Get the opposite of a point on an elliptic curve in Weierstrass form.
"""
function -(P::EllipticPoint)
E = P.curve
x, y, z = coordinates(P)
a1, _, a3, _, _ = a_invariants(E)
return EllipticPoint(x, - y - a1 * x - a3 * z, z, E)
end
"""
Get the sum of two normalized projective points on the same Weierstrass curve, assuming they are not equal and not inverse of each other.
"""
function _addgeneric(P::EllipticPoint{T}, Q::EllipticPoint{T}) where T<:Nemo.FieldElem
E = P.curve
a1, a2, a3, _, _ = a_invariants(E)
xq, yq, zq = coordinates(Q)
@assert zq == 1
xp, yp, zp = coordinates(P)
@assert zp == 1
#sanity check
@assert xp != xq
denom = xq - xp
inverse = 1//denom
lambda = (yq - yp) * inverse
nu = (yp * xq - xp * yq) * inverse
Xplus = lambda^2 + a1 * lambda - a2 - xp - xq
Yplus = -(lambda + a1) * Xplus - nu - a3
Zplus = Nemo.one(base_ring(P))
return EllipticPoint(Xplus, Yplus, Zplus, E)
end
"""
Get the sum of two normalized projective points on the same Weierstrass curve, assuming they have equal x-coordinate.
"""
function _addequalx(P::EllipticPoint{T}, Q::EllipticPoint{T}) where T<:Nemo.FieldElem
E = P.curve
a1, a2, a3, a4, a6 = a_invariants(E)
xp, yp, zp = coordinates(P)
@assert zp == 1
xq, yq, zq = coordinates(Q)
@assert zq == 1
#sanity check
@assert xp == xq
denom = yp + yq + a1 * xq + a3
if iszero(denom)
return infinity(E)
else
inverse = 1//denom
lambda = (3 * xp^2 + 2 * a2 * xp + a4 - a1 * yp) * inverse
nu = (- xp^3 + a4 * xp + 2 * a6 - a3 * yp) * inverse
Xplus = lambda^2 + a1 * lambda - a2 - xp - xq
Yplus = -(lambda + a1) * Xplus - nu - a3
Zplus = one(base_ring(P))
return EllipticPoint(Xplus, Yplus, Zplus, E)
end
end
"""
Get the double of a normalized point.
"""
function _double(P::EllipticPoint)
if isinfinity(P)
return infinity(P.curve)
else
return _addequalx(P, P)
end
end
"""
Get the sum of two projective points on the same Weierstrass curve.
"""
function +(P::EllipticPoint{T}, Q::EllipticPoint{T}) where T<:Nemo.FieldElem
P = normalized(P)
Q = normalized(Q)
xp, _, _ = coordinates(P)
xq, _, _ = coordinates(Q)
if isinfinity(P)
return Q
elseif isinfinity(Q)
return P
elseif xp == xq
return _addequalx(P,Q)
else
return _addgeneric(P,Q)
end
end
function -(P::EllipticPoint{T}, Q::EllipticPoint{T}) where T<:FieldElem
return P + (-Q)
end
"""
Get a scalar multiple of a point on a Weierstrass curve.
"""
function *(k::Integer, P::EllipticPoint)
E = P.curve
P = normalized(P)
if k == 0
return infinity(E)
elseif k<0
return (-k) * (-P)
else
if isinfinity(P)
return infinity(E)
else
return _ladder(k, P)
end
end
end
# function *(k::Integer, P::EllipticPoint)
# return k * P
# end
function _ladder(m::Integer, P::EllipticPoint)
p0 = P
for b in Iterators.drop(Iterators.reverse(digits(Int8, m, base=2)), 1)
if (b == 0)
p0 = _double(p0)
else
p0 = P + _double(p0)
end
end
return p0
end
######################################################################
# Projective addition law for Short Weierstrass curves
######################################################################
# P = (x1, y1, z1)
# Q = (x2, y2, z2)
# P + Q = (x3, y3, z3)
# if P != Q then
# x3 = (x2 z1 - x1 z2) [ (y2 z1 - y1 z2)^2 z1 z2 - (x2 z1 - x1 z2)^2 (x2 z1 + x1 z2) ]
# y3 = (y2 z1 - y1 z2) [ (x2 z1 - x1 z2)^2 (x2 z1 + 2 x1 z2) - (y2 z1 - y1 z2)^2 z1 z2 ] - (x2 z1 - x1 z2)^3 y1 z2
# z3 = (x2 z1 - x1 z2)^3 z1 z2
# if P = Q then
# x3 = 2 y1 z1 [ (a z1^2 + 3 x1^2)^2 - 8 x1 y1^2 z1 ]
# y3 = (a z1^2 + 3 x1^2 ) [ 12 x1 y1^2 z1 - a^2 (z1^2 + 3 x1^2)^2 ] - 8 y1^4 z1^2
# z3 = (2 y1 z1)^3
#This function is only to be used with distinct points on the same short Weierstrass curve
#xdet is x2 z1 - x1 z2, and ydet is y2 z1 - y1 z2
function _projective_add_neq(P::EllipticPoint{T}, Q::EllipticPoint{T}, xdet::T, ydet::T) where T
x1, y1, z1 = coordinates(P)
x2, y2, z2 = coordinates(Q)
xdet2 = xdet^2
xdet3 = xdet2 * xdet
ydet2 = ydet^2
z1z2 = z1 * z2
#we could save a few multiplications in what follows
x3 = xdet * ( ydet2 * z1z2 - xdet2 * (x2 * z1 + x1 * z2) )
y3 = ydet * ( xdet2 * (x2 * z1 + 2 * x1 * z2) - ydet2 * z1z2 ) - xdet3 * y1 * z2
z3 = xdet3 * z1z2
res = Point(x3, y3, z3, base_curve(P))
#@assert isvalid(res) #sanity check
return res
end
#This function is only to be used with a point on a short Weierstrass curve
function _projective_dbl(P::EllipticPoint{T}) where T
x, y, z = coordinates(P)
_, _, _, a, b = a_invariants(base_curve(P))
factor = a * z^2 + 3 * x^2
factor2 = factor^2
yz = y * z
xy2z = x * y * yz
#we could save again a few multiplications in what follows
xprime = 2 * yz * ( factor2 - 8 * xy2z )
yprime = factor * ( 12 * xy2z - factor2 ) - 8 * (yz)^2 * y^2
zprime = 8 * (yz)^3
res = Point(xprime, yprime, zprime, base_curve(P))
#@assert isvalid(res) #sanity check
return res
end
#This function is only to be used with two points on the same short Weierstrass curve
#Compute xdet and ydet, if they are both zero go to _projective_dbl
function projective_add(P::EllipticPoint{T}, Q::EllipticPoint{T}) where T
x1, y1, z1 = coordinates(P)
x2, y2, z2 = coordinates(Q)
xdet = x2 * z1 - x1 * z2
ydet = y2 * z1 - y1 * z2
if ((xdet == 0) & (ydet == 0))
return _projective_dbl(P)
else
return _projective_add_neq(P, Q, xdet, ydet)
end
end
#Here we assume v is a positive integer and P lives on a short Weierstrass curve
function _projective_scalar_mul(P::EllipticPoint, v::Integer)
P0 = P
for b in Iterators.drop(Iterators.reverse(digits(Int8, v, base=2)), 1)
P0 = _projective_dbl(P0)
if (b == 1)
P0 = projective_add(P0, P)
end
end
return P0
end
function projective_scalar_mul(P::EllipticPoint, v::Integer)
if v == 0
return infinity(base_curve(E))
elseif v < 0
return - _projective_scalar_mul(P, -v)
else
return _projective_scalar_mul(P, v)
end
end
| [
27,
34345,
29,
10677,
14,
732,
959,
2536,
562,
13033,
13,
20362,
198,
198,
29113,
29113,
4242,
2235,
198,
2,
356,
959,
2536,
562,
13033,
13,
20362,
25,
3060,
653,
3657,
329,
1628,
425,
2173,
319,
775,
959,
2536,
562,
23759,
198,
291... | 2.340261 | 2,983 |
<gh_stars>0
### A Pluto.jl notebook ###
# v0.16.1
using Markdown
using InteractiveUtils
# ╔═╡ 6f9a94bd-dd28-4110-a7ca-0ed84e9c7c3f
begin
import Pkg
# activate the shared project environment
Pkg.activate(Base.current_project())
using Interpolations
using Plots
using Dierckx
using GasChromatographySimulator
using GasChromatographyTools
using Plots
using PlutoUI
TableOfContents()
end
# ╔═╡ 0b51a792-d9b8-4c29-928f-57aaf41f1c20
plotly()
# ╔═╡ 93ba6afc-4e9a-11ec-08d9-81c0a9bc502e
md"""
# Test of the RT lock functions
The test is necessary, because of failing (infinit loop in case of opt_ipt="linear", NaN abort in case of opt_itp="spline").
"""
# ╔═╡ 51ec0223-199a-4a25-8768-cd0b7e9f864d
begin
function stretched_program(n::Float64, par::GasChromatographySimulator.Parameters)
# stretch the temperature program in 'par' by a factor 'n'
if isa(par, Array)==true
error("Select an element of the array of GC-system parameters.")
else
new_tsteps = n.*par.prog.time_steps
new_T_itp = GasChromatographySimulator.temperature_interpolation(new_tsteps, par.prog.temp_steps, par.prog.gf, par.col.L)
new_pin_itp = GasChromatographySimulator.pressure_interpolation(new_tsteps, par.prog.pin_steps)
new_pout_itp = GasChromatographySimulator.pressure_interpolation(new_tsteps, par.prog.pout_steps)
new_prog = GasChromatographySimulator.Program(new_tsteps, par.prog.temp_steps, par.prog.pin_steps, par.prog.pout_steps, par.prog.gf, par.prog.a_gf, new_T_itp, new_pin_itp, new_pout_itp)
new_par = GasChromatographySimulator.Parameters(par.col, new_prog, par.sub, par.opt)
return new_par
end
end
function initial_n(n::Float64, tR_lock::Float64, ii::Int, par::GasChromatographySimulator.Parameters)
par_n = stretched_program(n, par)
sol_n = GasChromatographySimulator.solve_system_multithreads(par_n)
tR_n = sol_n[ii].u[end][1]
if n>1
while tR_n-tR_lock<0 && n<130.0
n = n*2.0
par_n = stretched_program(n, par)
sol_n = GasChromatographySimulator.solve_system_multithreads(par_n)
tR_n = sol_n[ii].u[end][1]
end
elseif n<1
while tR_n-tR_lock>0 && n>0.01
n = n*0.5
par_n = stretched_program(n, par)
sol_n = GasChromatographySimulator.solve_system_multithreads(par_n)
tR_n = sol_n[ii].u[end][1]
end
end
if n>130.0
error("The choosen retention time for locking is to big.")
elseif n<0.01
error("The choosen retention time for locking is to small.")
else
return n
end
end
function RT_locking(par::GasChromatographySimulator.Parameters, tR_lock::Float64, tR_tol::Float64, solute_RT::String; opt_itp="linear")
# estimate the factor 'n' for the temperature program to achieve the retention time 'tR_lock' for 'solute_RT' with the GC-system defined by 'par'
if isa(par, Array)==true
error("Select an element of the array of GC-systems.")
else
# find 'solute_RT' in the substances of 'par'
name = Array{String}(undef, length(par.sub))
for i=1:length(par.sub)
name[i] = par.sub[i].name
end
ii = findfirst(name.==solute_RT)
# calculate the retention time for the original (un-stretched) program
sol₀ = GasChromatographySimulator.solving_odesystem_r(par.col, par.prog, par.sub[ii], par.opt)
tR₀ = sol₀.u[end][1]
# start value for the factor 'n'
if tR₀-tR_lock<0
n₁ = initial_n(2.0, tR_lock, ii, par)
else
n₁ = initial_n(0.5, tR_lock, ii, par)
end
# using a recursive function to estimate 'n'
n = recur_RT_locking(n₁, [1.0], [tR₀], par, tR_lock, tR_tol, ii; opt_itp=opt_itp)
end
return n
end
function recur_RT_locking(n::Float64, n_vec::Array{Float64,1}, tR_vec::Array{Float64,1}, par::GasChromatographySimulator.Parameters, tR_lock::Float64, tR_tol::Float64, ii::Int64; opt_itp="linear")
# recursive function to find the factor 'n' for the temperature program to achieve the retention time 'tR_lock' for solute index 'ii' with the GC-system defined by 'par'
# calculate the retention time with the input guess 'n'
par₁ = stretched_program(n, par)
if par.opt.odesys==true
sol₁ = GasChromatographySimulator.solving_odesystem_r(par₁.col, par₁.prog, par₁.sub[ii], par₁.opt)
tR₁ = sol₁.u[end][1]
else
sol₁ = GasChromatographySimulator.solving_migration(par₁.col, par₁.prog, par₁.sub[ii], par₁.opt)
tR₁ = sol₁.u[end]
end
if abs(tR₁-tR_lock)<tR_tol
# if retention time is less than 'tR_tol' from 'tR_lock' we found the factor 'n'
return n
else
# estimate a new factor 'new_n' by linear interpolation of the factors 'n_vec' + 'n' over the corresponding retention times
new_n_vec = sort([n_vec; n])
new_tR_vec = sort([tR_vec; tR₁])
if opt_itp=="spline"
# Dierckx.jl
if length(new_tR_vec)<4
k = length(new_tR_vec)-1
else
k = 3
end
itp = Spline1D(new_tR_vec, new_n_vec, k=k)
else # opt_itp=="linear"
# Interpolations.jl
itp = LinearInterpolation(sort([tR_vec; tR₁]), sort([n_vec; n]))
end
new_n = itp(tR_lock)
#println("new_n=$(new_n), tR₁=$(tR₁)")
# use the new factor 'new_n' and call the recursive function again
return recur_RT_locking(new_n, new_n_vec, new_tR_vec, par, tR_lock, tR_tol, ii; opt_itp=opt_itp)
end
end
md"""
## Copies of the RT lock functions
In the package GasChromatographyTools.jl the functions where adapted to solve this problem.
"""
end
# ╔═╡ 5e642f09-9a8f-4cca-b61f-b27c8433a2e5
begin
opt = GasChromatographySimulator.Options(OwrenZen5(), 1e-6, 1e-3, "inlet", true)
L = 4.0
d = 0.1e-3
df = 0.1e-6
sp = "SLB5ms" # ["Rxi17SilMS" -> ok, "SLB5ms" -> ERR, "SPB50" -> ok, "Wax" -> ok, "DB5ms" -> ok, "Rxi5MS" -> ok, "genericLB", "genericJL"]
gas = "He"
col = GasChromatographySimulator.constructor_System(L, d, df, sp, gas)
db_path = "/Users/janleppert/Documents/GitHub/Publication_GCsim/data/Databases/"
db_file = "Database_append.csv"
first_alkane = "C8"
last_alkane = "C15"
sub = GasChromatographySimulator.load_solute_database(db_path, db_file, sp, gas, [first_alkane, last_alkane], zeros(2), zeros(2))
Tst = 273.15
Tref = 150.0 + Tst
pn = 101300.0
pin = 300000.0 + pn
pout = 0.0
dimless_rate = 0.4
Theat = 1000.0
Tshift = 40.0
tMref = GasChromatographySimulator.holdup_time(Tref, pin, pout, L, d, gas)
rate = dimless_rate*30/tMref
theat = Theat/rate
Tstart = sub[1].Tchar-Tst-Tshift
ΔT = 30.0
α = -3.0
prog0 = GasChromatographySimulator.constructor_Program([0.0, theat],[Tstart, Tstart+Theat], [pin, pin],[pout, pout],[ΔT, ΔT], [0.0, 0.0], [L, L], [α, α], opt.Tcontrol, L)
par0 = GasChromatographySimulator.Parameters(col, prog0, sub, opt)
tR_lock = 12*tMref
tR_tol = 1e-3
md"""
## Settings
The following settings produce the problem:
"""
end
# ╔═╡ 3de48bb6-8eb7-4a33-9a98-d5fe3a19f6c6
md"""
## The ERROR
"""
# ╔═╡ 19d1abb6-32b6-414f-bc95-55635cbaa73a
n = RT_locking(par0, tR_lock, 1e-3, last_alkane; opt_itp="spline")
# ╔═╡ 2cad8185-4fc5-4009-98df-07a2be2133c6
md"""
## Reproducing the error by stepwise calculations
"""
# ╔═╡ 30207849-cafe-4ec6-af8d-ee7b2e2e6de0
begin
# initial simulation
sol₀ = GasChromatographySimulator.solving_odesystem_r(par0.col, par0.prog, par0.sub[2], par0.opt)
tR₀ = sol₀.u[end][1]
end
# ╔═╡ 84e7e869-0fbf-4590-a8de-28855856661f
tR₀-tR_lock
# ╔═╡ c5ace7ce-cfa3-4c15-bcc1-8b66ad1c16e9
begin
# first stretch of the program
if tR₀-tR_lock<0
n₁ = initial_n(2.0, tR_lock, 2, par0)
else
n₁ = initial_n(0.5, tR_lock, 2, par0)
end
# recur function
par₁ = stretched_program(n₁, par0)
sol₁ = GasChromatographySimulator.solving_odesystem_r(par₁.col, par₁.prog, par₁.sub[2], par₁.opt)
tR₁ = sol₁.u[end][1]
n_vec₁ = sort([1.0; n₁])
tR_vec₁ = sort([tR₀; tR₁])
# first interpolation
itp₁ = Spline1D(tR_vec₁, n_vec₁, k=1)
end
# ╔═╡ c4d53222-03b1-4ce4-a49a-690945347432
tR₁-tR_lock
# ╔═╡ 6acfbf9b-f8ac-4483-8c93-17920d0d9f0e
begin
# second stretch
n₂ = itp₁(tR_lock)
par₂ = stretched_program(n₂, par0)
sol₂ = GasChromatographySimulator.solving_odesystem_r(par₂.col, par₂.prog, par₂.sub[2], par₂.opt)
tR₂ = sol₂.u[end][1]
n_vec₂ = sort([n_vec₁; n₂])
tR_vec₂ = sort([tR_vec₁; tR₂])
itp₂ = Spline1D(tR_vec₂, n_vec₂, k=2)
p1 = plot([tR_lock, tR_lock], [0.95, 0.98], label="tR_lock")
scatter!(p1, [tR₂, tR₂], [n₂, n₂], label="2")
plot!(p1, 51.0:0.001:52.0, itp₂.(51.0:0.001:52.0), xlims=(51.0,52.0), ylims=(0.95, 0.98), label="itp₂")
p1
end
# ╔═╡ 3b31f580-efe2-4a7a-89dc-228b38f2a71e
tR₂-tR_lock
# ╔═╡ b6c5126a-3d41-4809-ab9b-d554262e0668
begin
n₃ = itp₂(tR_lock)
par₃ = stretched_program(n₃, par0)
sol₃ = GasChromatographySimulator.solving_odesystem_r(par₃.col, par₃.prog, par₃.sub[2], par₃.opt)
tR₃ = sol₃.u[end][1]
n_vec₃ = sort([n_vec₂; n₃])
tR_vec₃ = sort([tR_vec₂; tR₃])
k = length(tR_vec₃)-1
itp₃ = Spline1D(tR_vec₃, n_vec₃, k=k)
scatter!(p1, [tR₃, tR₃], [n₃, n₃], label="3")
plot!(p1, 51.0:0.001:52.0, itp₃.(51.0:0.001:52.0), label="itp₃")
end
# ╔═╡ 6c5443bc-3dab-4317-99c9-1bb32b184fbc
tR₃-tR_lock
# ╔═╡ 3b1bb187-66f9-40bd-a0a5-a3c4e1a23819
begin
n₄ = itp₃(tR_lock)
par₄ = stretched_program(n₄, par0)
sol₄ = GasChromatographySimulator.solving_odesystem_r(par₄.col, par₄.prog, par₄.sub[2], par₄.opt)
tR₄ = sol₄.u[end][1]
n_vec₄ = sort([n_vec₃; n₄])
tR_vec₄ = sort([tR_vec₃; tR₄])
itp₄ = Spline1D(tR_vec₄, n_vec₄, k=3)
scatter!(p1, [tR₄, tR₄], [n₄, n₄], label="4")
plot!(p1, 51.0:0.001:52.0, itp₄.(51.0:0.001:52.0), label="itp₄")
end
# ╔═╡ b81b2905-6756-445b-b1be-c54298df8b3f
tR₄-tR_lock
# ╔═╡ eb2d9f6a-8384-427a-a036-b4f0019d8251
md"""
The proposed settings seam to lead to jumping estimations around the searched retention time.
"""
# ╔═╡ 734bae50-12fa-4717-8446-addb963b8673
begin
n₅ = itp₄(tR_lock)
par₅ = GasChromatographyTools.stretched_program(n₅, par0)
sol₅ = GasChromatographySimulator.solving_odesystem_r(par₅.col, par₅.prog, par₅.sub[2], par₅.opt)
tR₅ = sol₅.u[end][1]
n_vec₅ = sort([n_vec₄; n₅])
tR_vec₅ = sort([tR_vec₄; tR₅])
itp₅ = Spline1D(tR_vec₅, n_vec₅, k=3)
scatter!(p1, [tR₅, tR₅], [n₅, n₅], label="5")
plot!(p1, 51.0:0.001:52.0, itp₅.(51.0:0.001:52.0), xlims=(51.45,51.55), ylims=(0.969, 0.972), label="itp₅")
end
# ╔═╡ 8de716b9-ca56-42f5-aebf-39ad467f4613
tR₅-tR_lock
# ╔═╡ dadbefbf-a107-4c89-a4ef-0eb756517c1e
begin
n₆ = itp₅(tR_lock)
par₆ = GasChromatographyTools.stretched_program(n₆, par0)
sol₆ = GasChromatographySimulator.solving_odesystem_r(par₆.col, par₆.prog, par₆.sub[2], par₆.opt)
tR₆ = sol₆.u[end][1]
n_vec₆ = sort([n_vec₅; n₆])
tR_vec₆ = sort([tR_vec₅; tR₆])
tR_vec₆.-tR_lock
itp₆ = Spline1D(tR_vec₆, n_vec₆, k=3)
scatter!(p1, [tR₆, tR₆], [n₆, n₆], label="6")
plot!(p1, 51.0:0.001:52.0, itp₆.(51.0:0.001:52.0), xlims=(51.45,51.55), ylims=(0.969, 0.972), label="itp₆")
end
# ╔═╡ 313d1431-ec39-4d1e-91fd-e025efc1f5c3
tR₆-tR_lock
# ╔═╡ fab59b1e-ba32-49d3-b0f1-564db037400c
begin
n₇ = itp₆(tR_lock)
par₇ = GasChromatographyTools.stretched_program(n₇, par0)
sol₇ = GasChromatographySimulator.solving_odesystem_r(par₇.col, par₇.prog, par₇.sub[2], par₇.opt)
tR₇ = sol₇.u[end][1]
n_vec₇ = sort([n_vec₆; n₇])
tR_vec₇ = sort([tR_vec₆; tR₇])
tR_vec₇.-tR_lock
itp₇ = Spline1D(tR_vec₇, n_vec₇, k=3)
scatter!(p1, [tR₇, tR₇], [n₇, n₇], label="7")
plot!(p1, 51.0:0.001:52.0, itp₇.(51.0:0.001:52.0), xlims=(51.47,51.49), ylims=(0.9699, 0.9701), label="itp₇")
end
# ╔═╡ 25725568-ae9a-4ab0-8f98-87fec12c867a
begin
n₈ = itp₇(tR_lock)
par₈ = GasChromatographyTools.stretched_program(n₈, par0)
sol₈ = GasChromatographySimulator.solving_odesystem_r(par₈.col, par₈.prog, par₈.sub[2], par₈.opt)
tR₈ = sol₈.u[end][1]
n_vec₈ = sort([n_vec₇; n₈])
tR_vec₈ = sort([tR_vec₇; tR₈])
tR_vec₈.-tR_lock
itp₈ = Spline1D(tR_vec₈, n_vec₈, k=3)
scatter!(p1, [tR₈, tR₈], [n₈, n₈], label="8")
plot!(p1, 51.0:0.001:52.0, itp₈.(51.0:0.001:52.0), xlims=(51.47,51.49), ylims=(0.9699, 0.9701), label="itp₈")
end
# ╔═╡ 404c1d12-5217-485e-b3a6-6f024d29b544
md"""
The estimates continue to jump around tR_lock. The last estimate is further away than the one before.
Also, there seems to be some form of discontiuity of the simulation result around tR_lock, which produces the problem in the first place.
"""
# ╔═╡ 57634f00-45d5-4cf9-94f5-76319d4e5436
begin
n₉ = itp₈(tR_lock)
par₉ = GasChromatographyTools.stretched_program(n₉, par0)
sol₉ = GasChromatographySimulator.solving_odesystem_r(par₉.col, par₉.prog, par₉.sub[2], par₉.opt)
tR₉ = sol₉.u[end][1]
n_vec₉ = sort([n_vec₈; n₉])
tR_vec₉ = sort([tR_vec₈; tR₉])
tR_vec₉.-tR_lock
itp₉ = Spline1D(tR_vec₉, n_vec₉, k=3)
scatter!(p1, [tR₉, tR₉], [n₉, n₉], label="9")
plot!(p1, 51.0:0.001:52.0, itp₉.(51.0:0.001:52.0), xlims=(51.47,51.49), ylims=(0.9699, 0.9701), label="itp₉")
end
# ╔═╡ ae803a26-c436-497a-b245-a0afec92e46f
md"""
Make simulations for a range of stretch factors ``n``.
"""
# ╔═╡ 1983258e-e84b-4f39-9ce8-0e20e78a0893
begin
nn = [0.969970, 0.969971, 0.969972, 0.969973, 0.969974, 0.969975, 0.969976, 0.969977]
ttR = Array{Float64}(undef, length(nn))
for i=1:length(nn)
pars = stretched_program(nn[i], par0)
sols = GasChromatographySimulator.solving_odesystem_r(pars.col, pars.prog, pars.sub[2], pars.opt)
ttR[i] = sols.u[end][1]
end
p2 = plot([tR_lock, tR_lock], [0.95, 0.98], label="tR_lock")
plot!(p2, ttR, nn, line=(2,:solid), markers=:square, xlims=(51.479, 51.484), ylims=(0.969969,0.96998))
end
# ╔═╡ 89cbd5c0-0df6-43d3-bd5c-d3d55d669f33
begin
nnn = 0.969974.+collect(0.0000001:0.0000001:0.0000009)
tttR = Array{Float64}(undef, length(nnn))
for i=1:length(nnn)
pars = stretched_program(nnn[i], par0)
sols = GasChromatographySimulator.solving_odesystem_r(pars.col, pars.prog, pars.sub[2], pars.opt)
tttR[i] = sols.u[end][1]
end
plot!(p2, tttR, nnn, ylims=(0.9699739,0.9699751), markers=:circle)
end
# ╔═╡ f541b210-3ef8-4003-b358-065e5c5949ad
par8 = stretched_program(nnn[8], par0)
# ╔═╡ 524b256b-0c1e-48bc-ac0b-2ee7fc1eab2b
par9 = stretched_program(nnn[9], par0)
# ╔═╡ 6d15c2a4-6d1e-4b89-b9e8-ecff004c4730
sol8 = GasChromatographySimulator.solving_odesystem_r(par8.col, par8.prog, par8.sub[2], par8.opt)
# ╔═╡ 665f302d-204b-47ac-90df-c5979350707c
sol9 = GasChromatographySimulator.solving_odesystem_r(par9.col, par9.prog, par9.sub[2], par9.opt)
# ╔═╡ 56095d71-6169-44b3-89d1-7ea7f1b6ddfb
sol8.destats
# ╔═╡ a51cd1dc-50bb-4444-a0f7-c0e4229b1257
sol9.destats
# ╔═╡ 225434d5-c753-4476-8f68-f5761a454852
sol8.retcode
# ╔═╡ 9a6f436e-e50e-4697-a1d1-3d2d43fc62fc
sol9.retcode
# ╔═╡ 8a6eccf0-8740-4d69-b96b-1248557d4c4d
begin
nnnn = sort!(rand(0.9699745:0.000000001:0.9699755, 100))
ttttR = Array{Float64}(undef, length(nnnn))
for i=1:length(nnnn)
pars = stretched_program(nnnn[i], par0)
sols = GasChromatographySimulator.solving_odesystem_r(pars.col, pars.prog, pars.sub[2], pars.opt)
ttttR[i] = sols.u[end][1]
end
plot!(p2, ttttR, nnnn, ylims=(0.9699739,0.9699756), markers=:circle)
md"""
In the range around tR_lock the solution seems to be unstable. A small change in the temperature program leads to a bigger change in the retention time. There seem to be two branches of n(tR), which does not meet.
$(embed_display(p2))
"""
end
# ╔═╡ 08a0972a-55ff-42c9-838b-1a649afe9a46
md"""
## Decreased relative tolerance
The problem is not originated in the RT_lock algorithm but in the simulation itself. For certain settings a small change (like small strectch in the program) can result in bigger changes in retention time.
"""
# ╔═╡ 2d7da55e-2711-44a4-b8b9-bda6321a4c48
begin
# decrease the relative tolerance
opt_1 = GasChromatographySimulator.Options(OwrenZen5(), 1e-6, 1e-4, "inlet", true)
par_1 = GasChromatographySimulator.Parameters(col, prog0, sub, opt_1)
# repeat the simulation from above
nnnn_1 = sort!(rand(0.9699745:0.000000001:0.9699755, 100))
ttttR_1 = Array{Float64}(undef, length(nnnn_1))
for i=1:length(nnnn_1)
pars = stretched_program(nnnn_1[i], par_1)
sols = GasChromatographySimulator.solving_odesystem_r(pars.col, pars.prog, pars.sub[2], pars.opt)
ttttR_1[i] = sols.u[end][1]
end
plot!(p2, ttttR_1, nnnn_1, ylims=(0.9699739,0.9699756), markers=:v, label="reltol=1e-4")
end
# ╔═╡ 89fa3139-1d10-4dd3-90b1-d9c0f65745c6
begin
n_1 = RT_locking(par_1, tR_lock, 1e-3, last_alkane; opt_itp="spline")
par_1_s = stretched_program(n_1, par_1)
sol_1_s = GasChromatographySimulator.solving_odesystem_r(par_1_s.col, par_1_s.prog, par_1_s.sub[2], par_1_s.opt)
tR_1_s = sol_1_s.u[end][1]
scatter!(p2, [tR_1_s, tR_1_s], [n_1, n_1], ylims=(0.9699739, n_1*1.000001))
end
# ╔═╡ e9accaac-587f-498d-ab0c-a8064f573678
begin
# simulation arround n_1
n_range = (n_1-0.0001):0.000001:(n_1+0.0001)
tR_range = Array{Float64}(undef, length(n_range))
for i=1:length(n_range)
pars = stretched_program(n_range[i], par_1)
sols = GasChromatographySimulator.solving_odesystem_r(pars.col, pars.prog, pars.sub[2], pars.opt)
tR_range[i] = sols.u[end][1]
end
plot!(p2, tR_range, n_range, ylims=(n_range[1], n_range[end]), xlims=(tR_range[1], tR_range[end]))
end
# ╔═╡ 4cd795b1-efeb-430b-a025-77d0ee9d2b1b
md"""
## Modified Functions with relative difference
"""
# ╔═╡ 0bf4903c-074d-41b8-a4e8-5834ce8659d5
function recur_RT_locking_rel(n::Float64, n_vec::Array{Float64,1}, tR_vec::Array{Float64,1}, par::GasChromatographySimulator.Parameters, tR_lock::Float64, tR_tol::Float64, ii::Int64; opt_itp="linear")
# recursive function to find the factor 'n' for the temperature program to achieve the retention time 'tR_lock' for solute index 'ii' with the GC-system defined by 'par'
# calculate the retention time with the input guess 'n'
par₁ = stretched_program(n, par)
if par.opt.odesys==true
sol₁ = GasChromatographySimulator.solving_odesystem_r(par₁.col, par₁.prog, par₁.sub[ii], par₁.opt)
tR₁ = sol₁.u[end][1]
else
sol₁ = GasChromatographySimulator.solving_migration(par₁.col, par₁.prog, par₁.sub[ii], par₁.opt)
tR₁ = sol₁.u[end]
end
if abs(tR₁-tR_lock)/tR_lock<tR_tol
# if retention time is less than 'tR_tol' from 'tR_lock' we found the factor 'n'
return n
else
# estimate a new factor 'new_n' by linear interpolation of the factors 'n_vec' + 'n' over the corresponding retention times
new_n_vec = sort([n_vec; n])
new_tR_vec = sort([tR_vec; tR₁])
if opt_itp=="spline"
# Dierckx.jl
if length(new_tR_vec)<4
k = length(new_tR_vec)-1
else
k = 3
end
itp = Spline1D(new_tR_vec, new_n_vec, k=k)
else # opt_itp=="linear"
# Interpolations.jl
itp = LinearInterpolation(sort([tR_vec; tR₁]), sort([n_vec; n]))
end
new_n = itp(tR_lock)
#println("new_n=$(new_n), tR₁=$(tR₁)")
# use the new factor 'new_n' and call the recursive function again
return recur_RT_locking_rel(new_n, new_n_vec, new_tR_vec, par, tR_lock, tR_tol, ii; opt_itp=opt_itp)
end
end
# ╔═╡ be7d3bf5-d682-40e7-8161-4243354373f8
function RT_locking_rel(par::GasChromatographySimulator.Parameters, tR_lock::Float64, tR_tol::Float64, solute_RT::String; opt_itp="linear")
# estimate the factor 'n' for the temperature program to achieve the retention time 'tR_lock' for 'solute_RT' with the GC-system defined by 'par'
if isa(par, Array)==true
error("Select an element of the array of GC-systems.")
elseif tR_tol<par.opt.reltol
error("The relative tolerance for retention time locking tR_tol is to small. Use tR_tol > par.opt.reltol ($(par.opt.reltol)).")
else
# find 'solute_RT' in the substances of 'par'
name = Array{String}(undef, length(par.sub))
for i=1:length(par.sub)
name[i] = par.sub[i].name
end
ii = findfirst(name.==solute_RT)
# calculate the retention time for the original (un-stretched) program
sol₀ = GasChromatographySimulator.solving_odesystem_r(par.col, par.prog, par.sub[ii], par.opt)
tR₀ = sol₀.u[end][1]
# start value for the factor 'n'
if tR₀-tR_lock<0
n₁ = initial_n(2.0, tR_lock, ii, par)
else
n₁ = initial_n(0.5, tR_lock, ii, par)
end
# using a recursive function to estimate 'n'
n = recur_RT_locking_rel(n₁, [1.0], [tR₀], par, tR_lock, tR_tol, ii; opt_itp=opt_itp)
end
return n
end
# ╔═╡ 8a7a24e3-3896-4012-8e50-473a70ec3a44
n_rel = RT_locking_rel(par0, tR_lock, 1e-3, last_alkane; opt_itp="spline")
# ╔═╡ 4cbd8054-d1c7-4132-ae61-ef09ce6ba9e7
par0_rel = stretched_program(n_rel, par0)
# ╔═╡ b842960f-ef61-4a70-8a17-5405ec3e5ed3
sol_rel = GasChromatographySimulator.solving_odesystem_r(par0_rel.col, par0_rel.prog, par0_rel.sub[2], par0_rel.opt)
# ╔═╡ 24269e34-fc1d-4ee2-87b6-afb6a3081596
tR_rel = sol_rel.u[end][1]
# ╔═╡ 0f7e36cb-c4a7-43b5-928f-d364b82d218b
tR_rel-tR_lock
# ╔═╡ 3bdf621e-969b-42b8-85f8-5ecd261646d3
(tR_rel-tR_lock)/tR_lock
# ╔═╡ 42a1fccd-e782-42fb-91de-18c8b33d507d
md"""
# End
"""
# ╔═╡ Cell order:
# ╠═6f9a94bd-dd28-4110-a7ca-0ed84e9c7c3f
# ╠═0b51a792-d9b8-4c29-928f-57aaf41f1c20
# ╠═93ba6afc-4e9a-11ec-08d9-81c0a9bc502e
# ╠═51ec0223-199a-4a25-8768-cd0b7e9f864d
# ╠═5e642f09-9a8f-4cca-b61f-b27c8433a2e5
# ╠═3de48bb6-8eb7-4a33-9a98-d5fe3a19f6c6
# ╠═19d1abb6-32b6-414f-bc95-55635cbaa73a
# ╠═2cad8185-4fc5-4009-98df-07a2be2133c6
# ╠═30207849-cafe-4ec6-af8d-ee7b2e2e6de0
# ╠═84e7e869-0fbf-4590-a8de-28855856661f
# ╠═c5ace7ce-cfa3-4c15-bcc1-8b66ad1c16e9
# ╠═c4d53222-03b1-4ce4-a49a-690945347432
# ╠═6acfbf9b-f8ac-4483-8c93-17920d0d9f0e
# ╠═3b31f580-efe2-4a7a-89dc-228b38f2a71e
# ╠═b6c5126a-3d41-4809-ab9b-d554262e0668
# ╠═6c5443bc-3dab-4317-99c9-1bb32b184fbc
# ╠═3b1bb187-66f9-40bd-a0a5-a3c4e1a23819
# ╠═b81b2905-6756-445b-b1be-c54298df8b3f
# ╟─eb2d9f6a-8384-427a-a036-b4f0019d8251
# ╠═734bae50-12fa-4717-8446-addb963b8673
# ╠═8de716b9-ca56-42f5-aebf-39ad467f4613
# ╠═dadbefbf-a107-4c89-a4ef-0eb756517c1e
# ╠═313d1431-ec39-4d1e-91fd-e025efc1f5c3
# ╠═fab59b1e-ba32-49d3-b0f1-564db037400c
# ╠═25725568-ae9a-4ab0-8f98-87fec12c867a
# ╠═404c1d12-5217-485e-b3a6-6f024d29b544
# ╠═57634f00-45d5-4cf9-94f5-76319d4e5436
# ╠═ae803a26-c436-497a-b245-a0afec92e46f
# ╠═1983258e-e84b-4f39-9ce8-0e20e78a0893
# ╠═89cbd5c0-0df6-43d3-bd5c-d3d55d669f33
# ╠═f541b210-3ef8-4003-b358-065e5c5949ad
# ╠═524b256b-0c1e-48bc-ac0b-2ee7fc1eab2b
# ╠═6d15c2a4-6d1e-4b89-b9e8-ecff004c4730
# ╠═665f302d-204b-47ac-90df-c5979350707c
# ╠═56095d71-6169-44b3-89d1-7ea7f1b6ddfb
# ╠═a51cd1dc-50bb-4444-a0f7-c0e4229b1257
# ╠═225434d5-c753-4476-8f68-f5761a454852
# ╠═9a6f436e-e50e-4697-a1d1-3d2d43fc62fc
# ╠═8a6eccf0-8740-4d69-b96b-1248557d4c4d
# ╟─08a0972a-55ff-42c9-838b-1a649afe9a46
# ╠═2d7da55e-2711-44a4-b8b9-bda6321a4c48
# ╠═89fa3139-1d10-4dd3-90b1-d9c0f65745c6
# ╠═e9accaac-587f-498d-ab0c-a8064f573678
# ╠═4cd795b1-efeb-430b-a025-77d0ee9d2b1b
# ╠═be7d3bf5-d682-40e7-8161-4243354373f8
# ╠═0bf4903c-074d-41b8-a4e8-5834ce8659d5
# ╠═8a7a24e3-3896-4012-8e50-473a70ec3a44
# ╠═4cbd8054-d1c7-4132-ae61-ef09ce6ba9e7
# ╠═b842960f-ef61-4a70-8a17-5405ec3e5ed3
# ╠═24269e34-fc1d-4ee2-87b6-afb6a3081596
# ╠═0f7e36cb-c4a7-43b5-928f-d364b82d218b
# ╠═3bdf621e-969b-42b8-85f8-5ecd261646d3
# ╠═42a1fccd-e782-42fb-91de-18c8b33d507d
| [
27,
456,
62,
30783,
29,
15,
198,
21017,
317,
32217,
13,
20362,
20922,
44386,
198,
2,
410,
15,
13,
1433,
13,
16,
198,
198,
3500,
2940,
2902,
198,
3500,
21365,
18274,
4487,
198,
198,
2,
2343,
243,
242,
28670,
22880,
94,
718,
69,
24,... | 1.828211 | 12,527 |
# MGH problem 35 - Chebyquad function
#
# Source:
# <NAME>, <NAME> and <NAME>
# Testing Unconstrained Optimization Software
# ACM Transactions on Mathematical Software, 7(1):17-41, 1981
#
# <NAME>, Montreal, 05/2018.
export mgh35
"Chebyquad function"
function mgh35(m :: Int = 10, n :: Int = 10)
if m < n
warn(": number of function must be ≥ number of variables. Adjusting to m = n")
m = n
end
nls = Model()
x0 = collect(1:n)/(n+1)
@variable(nls, x[i=1:n], start = x0[i])
function Tsim(x, n)
if n == 0
return 1
elseif n == 1
return x
else
return 2*x*Tsim(x,n-1) - Tsim(x,n-2)
end
end
Ts = Vector{Function}(undef, n)
Tnames = Vector{Symbol}(undef, n)
for i = 1:n
Ts[i] = x -> Tsim(2*x-1, i)
Tnames[i] = gensym()
JuMP.register(nls, Tnames[i], 1, Ts[i], autodiff=true)
end
I = [i%2 == 0 ? -1/(i^2-1) : 0 for i = 1:n]
@NLexpression(nls, F[i=1:n], sum($(Tnames[i])(x[j]) for j = 1:n)/n - I[i])
return MathProgNLSModel(nls, F, name="mgh35")
end
| [
2,
337,
17511,
1917,
3439,
532,
2580,
1525,
47003,
2163,
198,
2,
198,
2,
220,
220,
8090,
25,
198,
2,
220,
220,
1279,
20608,
22330,
1279,
20608,
29,
290,
1279,
20608,
29,
198,
2,
220,
220,
23983,
791,
1102,
2536,
1328,
30011,
1634,
... | 2.13347 | 487 |
using MAT, JuMP, Ipopt
include("../benchmark.jl")
function read_lp2(dir, name)
lp_data = matopen("$(dir)/$(name).mat")
problem = read(lp_data,"Problem")
A = problem["A"]
b = problem["b"][:]
c = problem["aux"]["c"][:]
lb = problem["aux"]["lo"][:]
ub = problem["aux"]["hi"][:]
n = length(c)
m = Model()
@variable(m, lb[i] <= x[i=1:n] <= ub[i])
@NLobjective(m, Min, sum(c[i] * x[i] for i = 1:n))
@constraint(m, A * x .== b)
return m
end
function get_file_names(dir::String,ext::String)
name_list = readdir(dir)
cleaned_name_list = []
for name_full in name_list
try
if name_full[(end-3):end] == ext
name = name_full[1:(end-4)]
end
cleaned_name_list = [cleaned_name_list; name]
catch(e)
@show e
println("ERROR in running " * name_full)
end
end
return cleaned_name_list
end
function run_infeas_netlib(test_name, my_par, solve_func!)
summary = Dict{String, problem_summary2}()
dir = "../data/netlib-infeas"
problem_list = get_file_names(dir,".mat")
if_mkdir("../results/$test_name")
if_mkdir("../results/$test_name/log")
if_mkdir("../results/$test_name/jld")
for problem_name in problem_list
if problem_name != "lpi_cplex2"
m = read_lp2(dir, problem_name)
nlp = MathProgNLPModel(m);
#if nlp.meta.ncon < 2000
summary[problem_name] = solve_func!(nlp, problem_name, test_name, my_par)
#end
end
write_summary("../results/$(test_name)/summary.txt", summary)
save("../results/$(test_name)/summary.jld","summary",summary, "pars", my_par)
end
end
my_par = Class_parameters()
run_infeas_netlib("one_phase/Jan4/infeas-netlib", my_par, one_phase_run_and_store)
run_infeas_netlib("ipopt/Jan4/infeas-netlib", my_par, ipopt_solve_run_and_store)
| [
3500,
36775,
11,
12585,
7378,
11,
314,
79,
8738,
198,
17256,
7203,
40720,
26968,
4102,
13,
20362,
4943,
198,
198,
8818,
1100,
62,
34431,
17,
7,
15908,
11,
1438,
8,
198,
220,
220,
220,
300,
79,
62,
7890,
796,
2603,
9654,
7203,
3,
7... | 2.160508 | 866 |
<filename>src/genotypes.jl<gh_stars>0
import GZip
"""
function gt600()
---
1. Phase the genotypes with beagle
2. Convert results to array
3.
"""
function gt600()
rst = joinpath(dat_dir, "run")
fra = joinpath(dat_dir, "real/nofima/for_CMSEdit")
Nₑ = 100
@info join(["",
"Remove SNP of duplicates and unknown postions",
" - See data/real/nofima/plink.sh"], "\n")
@info "Phasing with beagle.jar"
# make alleles to C/T, and replace positions, so that beagle can phase them
GZip.open("$rst/b.vcf.gz", "w") do io
i = 0
for line in eachline(GZip.open("$rst/a.vcf.gz", "r"))
if line[1] == '#'
write(io, line, "\n")
else
i += 1
t = split(line)
t[4] = "T"
t[5] = "C"
write(io, join(t, "\t"), "\n")
end
end
end
# phasing
run(`java -jar $beagle gt=$rst/b.vcf.gz ne=$Nₑ out=$rst/c`)
end
"""
function vcf2dic(vcf)
---
Read chromosome and haplotypes from a `vcf` file of **gzip** format.
Returns a Dictionary: pos=>[chr, bp], hap=>hap of chr8[][].
Each column of hap is a haplotype across genome.
"""
function vcf2dic(vcf)
GZip.open(vcf, "r") do io
# skip vcf header and determine N_ID
nid = 0
for line in eachline(io)
if line[2] != '#'
nid = length(split(line)) - 9
break
end
end
pos = Int[]
hap = Int8[]
for line in eachline(io)
f = split(line)
append!(pos, parse.(Int, f[1:2]))
append!(hap, parse.(Int8, collect(join(f[10:end], ' ')[1:2:end])))
end
pos = reshape(pos, (2, :))'
r = haldane(pos)
hap = reshape(hap, (nid*2, :))'
Dict(:pos => pos,
:r => r,
:hap => Bool.(hap))
end
end
| [
27,
34345,
29,
10677,
14,
5235,
13567,
13,
20362,
27,
456,
62,
30783,
29,
15,
198,
11748,
402,
41729,
198,
37811,
198,
220,
220,
220,
2163,
308,
83,
8054,
3419,
198,
6329,
198,
16,
13,
18983,
262,
2429,
13567,
351,
307,
19345,
198,
... | 1.826291 | 1,065 |
<reponame>jekbradbury/ChainRules.jl
#####
##### `Thunk`
#####
"""
    @thunk body

Wrap `body` in a zero-argument closure inside a `Thunk`, delaying its
evaluation until the thunk is called. `esc` keeps `body` evaluated in the
caller's scope rather than this module's.
"""
macro thunk(body)
    return :(Thunk(() -> $(esc(body))))
end
# A lazily-evaluated computation: wraps a zero-argument callable and runs it
# only when the `Thunk` itself is called. Parametrized on the callable's type
# so the call site specializes.
struct Thunk{F}
    f::F
end

# Calling the thunk simply forwards to the wrapped callable.
@inline function (thunk::Thunk{F})() where {F}
    return thunk.f()
end
#####
##### `forward_chain`
#####
# Forward-mode chain rule: given alternating perturbations `ẋᵢ` and partials
# `∂ᵢ` (as Thunks), lazily accumulate Σᵢ ẋᵢ .* ∂ᵢ() via `broadcasted` and fuse
# everything into a single `materialize` at the end.
forward_chain(args...) = materialize(_forward_chain(args...))

# `nothing` means "no perturbation"; `false` is the strong broadcast zero
# (annihilates under `*`, vanishes under `+`).
@inline _forward_chain(ẋ::Nothing, ∂::Thunk) = false
@inline _forward_chain(ẋ, ∂::Thunk) = broadcasted(*, ẋ, ∂())
# Peel off one (ẋ, ∂) pair at a time and lazily add the products.
_forward_chain(ẋ, ∂::Thunk, args...) = broadcasted(+, _forward_chain(ẋ, ∂), _forward_chain(args...))
#####
##### `reverse_chain!`
#####
# Reverse-mode accumulation: fold the partial `∂` into the accumulated adjoint
# `x̄`. A `nothing` adjoint means "not tracked", so nothing is accumulated.
@inline reverse_chain!(x̄::Nothing, ∂::Thunk) = false

@inline function reverse_chain!(x̄, ∂::Thunk)
    thunk = ∂()
    x̄_value = adjoint_value(x̄)
    # Either add to the running adjoint or overwrite it, depending on the
    # wrapper's accumulation policy (defaults defined below).
    casted = should_increment(x̄) ? broadcasted(+, x̄_value, thunk) : thunk
    if should_materialize_into(x̄)
        # In-place update (e.g. for Arrays): fuse the broadcast directly into x̄.
        return materialize!(x̄_value, casted)
    else
        return materialize(casted)
    end
end
# Default adjoint policies used by `reverse_chain!`; wrapper types may
# override any of them via more specific methods.

# Unwrap the accumulated adjoint; plain values are their own storage.
function adjoint_value(x̄)
    return x̄
end

# By default new contributions are added onto the running adjoint...
should_increment(::Any) = true

# ...and results are materialized out-of-place.
should_materialize_into(::Any) = false
#####
##### miscellanous defaults
#####
# TODO: More defaults, obviously!

# Classify a value into its differentiation-domain marker type (Ignore /
# scalar / tensor markers are declared elsewhere in this package).
markup(::Any) = Ignore()
markup(::Real) = RealScalar()
markup(::Complex) = ComplexScalar()
# Homogeneous tuples and arrays become tensors carrying layout metadata.
markup(x::Tuple{Vararg{<:Real}}) = RealTensor(layout(x))
markup(x::Tuple{Vararg{<:Complex}}) = ComplexTensor(layout(x))
markup(x::AbstractArray{<:Real}) = RealTensor(layout(x))
markup(x::AbstractArray{<:Complex}) = ComplexTensor(layout(x))
# Abstract or mixed eltypes cannot be classified.
markup(x::AbstractArray) = error("Cannot infer domain of array from eltype", x)

# Layout(length, shape, device, materialize-into?): tuples are immutable so
# they cannot be materialized into; Arrays can.
layout(x::Tuple) = Layout(length(x), (length(x),), CPUDevice(), false)
layout(x::Array) = Layout(length(x), size(x), CPUDevice(), true)

# Arrays support in-place adjoint accumulation (see `reverse_chain!`).
should_materialize_into(::Array) = true
| [
27,
7856,
261,
480,
29,
73,
988,
1671,
324,
10711,
14,
35491,
37766,
13,
20362,
198,
4242,
2,
198,
4242,
2,
4600,
817,
2954,
63,
198,
4242,
2,
198,
198,
20285,
305,
294,
2954,
7,
2618,
8,
198,
220,
220,
220,
1441,
36147,
817,
29... | 2.392351 | 706 |
<filename>src/SignalsAPI.jl
module Signals
using JSON, HTTP, Dates, TimeZones
include("Utils.jl")
include("BaseAPI.jl")
export SignalsAPI,
get_leaderboard,
upload_predictions,
submission_status,
public_user_profile,
daily_user_performances,
daily_submissions_performances,
ticker_universe,
download_validation_data,
stake_get
# Base URL of the S3 bucket hosting Numerai Signals public data files.
const SIGNALS_DOM = "https://numerai-signals-public-data.s3-us-west-2.amazonaws.com"
# Tournament number for Numerai Signals.
const TOURNAMENT = 11
"""
    SignalsAPI <: BaseAPI

Client for the Numerai Signals API. Holds the API credentials (either may be
`nothing` for public-only queries), the tournament number, and the public-data
URLs used by `ticker_universe` and `download_validation_data`.
"""
struct SignalsAPI <: BaseAPI
    public_id::Union{String,Nothing}    # API public id (nothing => unauthenticated)
    secret_key::Union{String,Nothing}   # API secret key (nothing => unauthenticated)
    tournament::Int                     # tournament number (Signals is 11)
    signals_dom::String                 # base URL of the public S3 data bucket
    ticker_universe_url::String         # CSV listing currently accepted tickers
    historical_data_url::String         # CSV with historical targets
end
"""
    SignalsAPI(public_id, secret_key; tournament=TOURNAMENT)

Convenience constructor filling in the standard Numerai Signals public-data
URLs from `SIGNALS_DOM`. Pass `nothing` for both credentials to use only the
public endpoints.
"""
function SignalsAPI(public_id::Union{String,Nothing},
                    secret_key::Union{String,Nothing};
                    tournament=TOURNAMENT)
    return SignalsAPI(public_id, secret_key, tournament, SIGNALS_DOM,
                      "$(SIGNALS_DOM)/latest_universe.csv",
                      "$(SIGNALS_DOM)/signals_train_val_bbg.csv")
end
"""
get_account(api::SignalsAPI)::Dict{String,Any}
Get all information about your account
"""
get_account(api::SignalsAPI)::Dict{String,Any} = _get_account(api)
"""
get_models(api::SignalsAPI)::Dict{String,String}
Get mapping of account model names to model ids for convenience
"""
get_models(api::SignalsAPI)::Dict{String,String} = _get_models(api)
"""
get_current_round(api::SignalsAPI)::Union{Real,Nothing}
Get number of the current active round
"""
get_current_round(api::SignalsAPI)::Union{Real,Nothing} = _get_current_round(api)
"""
get_account_transactions(api::SignalsAPI)::Dict{String,Vector}
Get all your account deposits and withdrawals
"""
get_account_transactions(api::SignalsAPI)::Dict{String,Vector} = _get_account_transactions(api)
"""
get_leaderboard(api::SignalsAPI; limit::Int=50, offset::Int=0)::Vector{Dict}
Get the current Numerai Signals leaderboard
# Arguments
- `limit::Int=50`: number of items to return (optional, defaults to 50)
- `offset::Int=0`: number of items to skip (optional, defaults to 0)
# Example
```julia-repl
julia> get_leaderboard(signal_api)
```
"""
function get_leaderboard(api::SignalsAPI; limit::Int=50, offset::Int=0)::Vector{Dict}
query = """
query(\$limit: Int! \$offset: Int!) {
signalsLeaderboard(limit: \$limit offset: \$offset) {
prevRank
rank
sharpe
today
username
mmc
mmcRank
nmrStaked
}
}
"""
variables = Dict( "limit" => limit, "offset" => offset )
data = raw_query(api, query, variables=variables)["data"]["signalsLeaderboard"]
return data
end
"""
upload_predictions(api::SignalsAPI, file_path::String;
model_id::Union{String,Nothing}=nothing)::String
Upload predictions from file
"""
function upload_predictions(api::SignalsAPI, file_path::String;
model_id::Union{String,Nothing}=nothing)::String
@info "uploading predictions..."
auth_query = """
query(\$filename: String! \$modelId: String) {
submissionUploadSignalsAuth(filename: \$filename modelId: \$modelId) {
filename
url
}
}
"""
variables = Dict( "filename" => basename(file_path), "modelId" => model_id )
submission_resp = raw_query(api, auth_query, variables=variables, authorization=true)
submission_auth = submission_resp["data"]["submissionUploadSignalsAuth"]
headers = haskey(ENV, "NUMERAI_COMPUTE_ID") ? ["x_compute_id"=>ENV["NUMERAI_COMPUTE_ID"],] : []
open(file_path, "r") do io
HTTP.request("PUT", submission_auth["url"], headers, read(io))
end
create_query = """
mutation(\$filename: String! \$modelId: String) {
createSignalsSubmission(filename: \$filename modelId: \$modelId) {
id
firstEffectiveDate
}
}
"""
variables = Dict( "filename" => submission_auth["filename"], "modelId" => model_id )
create = raw_query(api, create_query, variables=variables, authorization=true)
return create["data"]["createSignalsSubmission"]["id"]
end
"""
submission_status(api::SignalsAPI; model_id::Union{String,Nothing}=nothing)::Dict
Submission status of the last submission associated with the account
"""
function submission_status(api::SignalsAPI; model_id::Union{String,Nothing}=nothing)::Vector{Dict}
query = """
query(\$modelId: String) {
model(modelId: \$modelId) {
latestSignalsSubmission {
id
filename
firstEffectiveDate
userId
submissionIp
submittedCount
filteredCount
invalidTickers
hasHistoric
historicMean
historicStd
historicSharpe
historicMaxDrawdown
}
}
}
"""
variables = Dict( "modelId" => model_id )
data = raw_query(api, query, variables=variables, authorization=true)
return data["data"]["model"]["latestSignalsSubmission"]
end
"""
public_user_profile(api::SignalsAPI, username::String)::Dict
Fetch the public Numerai Signals profile of a user
"""
function public_user_profile(api::SignalsAPI, username::String)::Dict
query = """
query(\$username: String!) {
signalsUserProfile(username: \$username) {
rank
id
startDate
username
bio
sharpe
totalStake
}
}
"""
variables = Dict( "username" => username )
profile = raw_query(api, query, variables=variables)["data"]["signalsUserProfile"]
profile["startDate"] = parse_datetime_string(profile["startDate"])
profile["totalStake"] = parse_float_string(profile["totalStake"])
return profile
end
"""
daily_user_performances(api::SignalsAPI, username::String)::Vector{Dict}
Fetch daily Numerai Signals performance of a user
"""
function daily_user_performances(api::SignalsAPI, username::String)::Vector{Dict}
query = """
query(\$username: String!) {
signalsUserProfile(username: \$username) {
dailyUserPerformances {
rank
date
sharpe
mmcRep
reputation
}
}
}
"""
variables = Dict( "username" => username )
data = raw_query(api, query, variables=variables)["data"]["signalsUserProfile"]
performances = data["dailyUserPerformances"]
for perf in performances
perf["date"] = parse_datetime_string(perf["date"])
end
return performances
end
"""
daily_submissions_performances(api::SignalsAPI, username::String)::Vector{Dict}
Fetch daily Numerai Signals performance of a user's submissions
"""
function daily_submissions_performances(api::SignalsAPI, username::String)::Vector{Dict}
query = """
query(\$username: String!) {
signalsUserProfile(username: \$username) {
dailySubmissionPerformances {
date
returns
submissionTime
correlation
mmc
roundNumber
corrRep
mmcRep
}
}
}
"""
variables = Dict( "username" => username )
data = raw_query(api, query, variables=variables)["data"]["signalsUserProfile"]
performances = data["dailySubmissionPerformances"]
for perf in performances
perf["date"] = parse_datetime_string(perf["date"])
perf["submissionTime"] = parse_datetime_string(perf["submissionTime"])
end
performances = filter(perf -> !isnothing(perf["date"]), performances)
sort!(performances, by=x -> (x["roundNumber"], x["date"]), rev=true)
return performances
end
"""
ticker_universe(api::SignalsAPI)::Vector{String}
Fetch universe of accepted tickers
"""
function ticker_universe(api::SignalsAPI)::Vector{String}
result = HTTP.request("GET", api.ticker_universe_url)
tickers = String(result.body)
tickers = split(tickers, "\n")
tickers = [strip(ticker) for ticker in tickers
if (strip(ticker) ≠ "bloomberg_ticker") & (strip(ticker) ≠ "")]
return tickers
end
"""
download_validation_data(api::SignalsAPI;
dest_path::String=".",
dest_filename::Union{String,Nothing}=nothing,
show_progress_bar::Bool=true)::String
Download CSV file with historical targets and ticker universe
"""
function download_validation_data(api::SignalsAPI;
dest_path::String=".",
dest_filename::Union{String,Nothing}=nothing,
show_progress_bar::Bool=true)::String
if isnothing(dest_filename)
dest_filename = "numerai_signals_historical.csv"
end
dataset_path = joinpath(dest_path, dest_filename)
ensure_directory_exists(dest_path)
download_file(api.historical_data_url, dataset_path, show_progress_bar=show_progress_bar)
return dataset_path
end
"""
stake_get(api::SignalsAPI, username::String)::Real
Get current stake for a given users
"""
function stake_get(api::SignalsAPI, username::String)::Real
data = public_user_profile(api, username)
return data["totalStake"]
end
end # module | [
27,
34345,
29,
10677,
14,
11712,
874,
17614,
13,
20362,
198,
21412,
5865,
874,
198,
220,
220,
198,
220,
1262,
19449,
11,
14626,
11,
44712,
11,
3862,
57,
1952,
198,
220,
220,
198,
220,
2291,
7203,
18274,
4487,
13,
20362,
4943,
198,
2... | 2.334294 | 4,164 |
using GLVisualize, GeometryTypes, GLAbstraction
using Colors, Reactive, FileIO
# When run stand-alone (not from the test harness) create a window and a time
# signal that bounces 0 → 1 → 0 over 360 steps to drive the animation.
if !isdefined(:runtests)
    window = glscreen()
    timesignal = bounce(linspace(0f0, 1f0, 360))
end

description = """
Showing off the flexibility of the particle system by animating
all kind of atributes for an arbitrary mesh as the particle.
"""

# Particle mesh (a cat) and the sphere whose vertices serve as particle positions.
cat = GLNormalMesh(loadasset("cat.obj"))
sphere = GLNormalMesh(Sphere{Float32}(Vec3f0(0), 1f0), 12)
"""
    scale_gen(v0, nv)

Update the per-particle scale vector in place: x and y stay at 0.5 while the
z extent oscillates with the animation time `nv`. Returns the mutated vector.
"""
function scale_gen(v0, nv)
    n = length(v0)
    @inbounds for idx in 1:n
        v0[idx] = Vec3f0(1, 1, sin((nv * n) / idx)) / 2
    end
    return v0
end
"""
    color_gen(v0, t)

Update the per-particle colors in place: red ramps with the particle index,
green pulses with time `t`, blue varies slowly along the index; alpha is fully
opaque. Returns the mutated vector.
"""
function color_gen(v0, t)
    n = length(v0)
    green = (cos(t) + 1) / 2   # same for every particle in this frame
    @inbounds for k in 1:n
        v0[k] = RGBA{N0f8}(k / n, green, (sin(k / n / 3) + 1) / 2., 1.)
    end
    return v0
end
# Animation time, offset slightly from the raw signal.
t = const_lift(x->x+0.1, timesignal)
# One cat per sphere vertex, oriented along the sphere's normals.
ps = sphere.vertices
scale_start = Vec3f0[Vec3f0(1,1,rand()) for i=1:length(ps)]
# foldp re-applies the in-place generators on every tick of `t`, so scale and
# color update each frame.
scale = foldp(scale_gen, scale_start, t)
colorstart = color_gen(zeros(RGBA{N0f8}, length(ps)), value(t))
color = foldp(color_gen, colorstart, t)
rotation = sphere.normals
cats = visualize((cat, ps), scale = scale, color = color, rotation = rotation)
_view(cats, window)

# Only start the blocking render loop when run stand-alone.
if !isdefined(:runtests)
    renderloop(window)
end
| [
3500,
10188,
36259,
1096,
11,
2269,
15748,
31431,
11,
10188,
4826,
301,
7861,
198,
3500,
29792,
11,
797,
5275,
11,
9220,
9399,
198,
361,
5145,
271,
23211,
7,
25,
81,
2797,
3558,
8,
198,
220,
220,
220,
4324,
796,
1278,
9612,
3419,
19... | 2.184783 | 552 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.