content stringlengths 6 1.03M | input_ids listlengths 4 535k | ratio_char_token float64 0.68 8.61 | token_count int64 4 535k |
|---|---|---|---|
<reponame>anthofflab/paper-2021-scch4
#-------------------------------------------------------------------------------
# This function creates an instance of SNEASY+FUND.
#-------------------------------------------------------------------------------
# Load required packages.
using Mimi
using MimiFUND
using MimiSNEASY
using DataFrames
using CSVFiles
function create_sneasy_fundch4(;rcp_scenario::String="RCP85", start_year::Int=1765, end_year::Int=2300, etminan_ch4_forcing::Bool=true)
# ---------------------------------------------
# Load and clean up necessary data.
# ---------------------------------------------
# Load RCP emissions and concentration scenario values (RCP options = "RCP26" and "RCP85").
rcp_emissions = DataFrame(load(joinpath(@__DIR__, "..", "..", "data", "model_data", rcp_scenario*"_emissions.csv"), skiplines_begin=36))
rcp_concentrations = DataFrame(load(joinpath(@__DIR__, "..", "..", "data", "model_data", rcp_scenario*"_concentrations.csv"), skiplines_begin=37))
rcp_forcing = DataFrame(load(joinpath(@__DIR__, "..", "..", "data", "model_data", rcp_scenario*"_midyear_radforcings.csv"), skiplines_begin=58))
# Load FUND data for RCP non-methane tropospheric ozone radiative forcing.
fund_non_CH₄_O₃forcing = DataFrame(load(joinpath(@__DIR__, "..", "..", "data", "model_data", rcp_scenario*"_nonch4_tropo3_forcing_fund.csv")))
# Find start and end year indices to crop RCP scenario data to correct model time horizon.
rcp_indices = findall((in)(collect(start_year:end_year)), rcp_emissions.YEARS)
# Set pre-industrial atmospheric CO₂, CH₄, and N₂O concentrations to RCP values in 1765.
CO₂_0 = rcp_concentrations[rcp_concentrations.YEARS .== 1765, :CO2][1]
CH₄_0 = rcp_concentrations[rcp_concentrations.YEARS .== 1765, :CH4][1]
N₂O_0 = rcp_concentrations[rcp_concentrations.YEARS .== 1765, :N2O][1]
# Calculate carbon dioxide emissions as well as aerosol and exogenous RCP radiative forcing scenarios.
rcp_co2_emissions = rcp_emissions.FossilCO2 .+ rcp_emissions.OtherCO2
rcp_aerosol_forcing = rcp_forcing.TOTAER_DIR_RF .+ rcp_forcing.CLOUD_TOT_RF
rcp_exogenous_forcing = rcp_forcing.TOTAL_INCLVOLCANIC_RF .- rcp_forcing.CO2_RF .- rcp_forcing.CH4_RF .- rcp_forcing.TROPOZ_RF .+ fund_non_CH₄_O₃forcing.nonch4_forcing .- rcp_forcing.CH4OXSTRATH2O_RF .- rcp_aerosol_forcing
# ------------------------------------------------------------
# Initialize Mimi-SNEASY and add new CH₄ components to model.
# ------------------------------------------------------------
# Get an instance of Mimi-SNEASY.
m = MimiSNEASY.get_model(start_year=start_year, end_year=end_year)
# Remove old radiative forcing components.
delete!(m, :rfco2)
delete!(m, :radiativeforcing)
# Add in new components.
add_comp!(m, rf_total, before = :doeclim)
add_comp!(m, rf_co2_etminan, before = :rf_total)
add_comp!(m, rf_ch4_total_fund, before = :rf_co2_etminan)
add_comp!(m, MimiFUND.climatech4cycle , before= :rf_ch4_total_fund)
# Add in user-specified CH₄ radiative forcing component.
# Note: If not using Etminan et al. equations, use original forcing equations from parent CH₄ model.
if etminan_ch4_forcing == true
add_comp!(m, rf_ch4_etminan, before = :rf_ch4_total_fund)
else
add_comp!(m, rf_ch4_direct_fund, before = :rf_ch4_total_fund)
end
# ---------------------------------------------
# Set component parameters.
# ---------------------------------------------
# ---- Common parameters ---
Mimi.set_external_param!(m, :CH₄_0, CH₄_0)
Mimi.set_external_param!(m, :N₂O_0, N₂O_0)
Mimi.set_external_param!(m, :N₂O, rcp_concentrations.N2O[rcp_indices], param_dims=[:time])
# ---- Carbon Cycle ---- #
update_param!(m, :CO2_emissions, rcp_co2_emissions[rcp_indices])
update_param!(m, :atmco20, CO₂_0)
# ---- Methane Cycle ---- #
set_param!(m, :climatech4cycle, :lifech4, 12.0)
set_param!(m, :climatech4cycle, :ch4pre, CH₄_0)
set_param!(m, :climatech4cycle, :globch4, rcp_emissions.CH4[rcp_indices])
set_param!(m, :climatech4cycle, :acch4_0, CH₄_0)
# ---- Direct Methane Radiative Forcing ---- #
if etminan_ch4_forcing == true
connect_param!(m, :rf_ch4_etminan, :CH₄_0, :CH₄_0)
connect_param!(m, :rf_ch4_etminan, :N₂O_0, :N₂O_0)
set_param!(m, :rf_ch4_etminan, :scale_CH₄, 1.0)
set_param!(m, :rf_ch4_etminan, :a₃, -1.3e-6)
set_param!(m, :rf_ch4_etminan, :b₃, -8.2e-6)
connect_param!(m, :rf_ch4_etminan, :N₂O, :N₂O)
else
connect_param!(m, :rf_ch4_direct_fund, :N₂O_0, :N₂O_0)
connect_param!(m, :rf_ch4_direct_fund, :CH₄_0, :CH₄_0)
set_param!(m, :rf_ch4_direct_fund, :scale_CH₄, 1.0)
end
# ---- Total Methane Radiative Forcing (including indirect effects) ---- #
connect_param!(m, :rf_ch4_total_fund, :CH₄_0, :CH₄_0)
set_param!(m, :rf_ch4_total_fund, :ϕ, 0.4)
# ---- Carbon Dioxide Radiative Forcing ---- #
set_param!(m, :rf_co2_etminan, :a₁, -2.4e-7)
set_param!(m, :rf_co2_etminan, :b₁, 7.2e-4)
set_param!(m, :rf_co2_etminan, :c₁, -2.1e-4)
set_param!(m, :rf_co2_etminan, :CO₂_0, CO₂_0)
connect_param!(m, :rf_co2_etminan, :N₂O_0, :N₂O_0)
connect_param!(m, :rf_co2_etminan, :N₂O, :N₂O)
set_param!(m, :rf_co2_etminan, :rf_scale_CO₂, co2_rf_scale(3.7, CO₂_0, N₂O_0))
# ---- Total Radiative Forcing ---- #
set_param!(m, :rf_total, :α, 1.0)
# TODO It would be nice if `rf_aerosol` didn't exist as an external parameter
# at this point
connect_param!(m, :rf_total, :rf_aerosol, :rf_aerosol)
update_param!(m, :rf_aerosol, rcp_aerosol_forcing[rcp_indices])
set_param!(m, :rf_total, :rf_exogenous, rcp_exogenous_forcing[rcp_indices])
set_param!(m, :rf_total, :rf_O₃, zeros(length(start_year:end_year)))
set_param!(m, :rf_total, :rf_CH₄_H₂O, zeros(length(start_year:end_year)))
# ----------------------------------------------------------
# Create connections between Mimi SNEASY+Hector components.
# ----------------------------------------------------------
connect_param!(m, :doeclim, :forcing, :rf_total, :total_forcing)
connect_param!(m, :ccm, :temp, :doeclim, :temp)
connect_param!(m, :rf_co2_etminan, :CO₂, :ccm, :atmco2)
connect_param!(m, :rf_ch4_total_fund, :CH₄, :climatech4cycle, :acch4)
# Create different connections if using updated Etminan et al. CH₄ forcing equations.
if etminan_ch4_forcing == true
connect_param!(m, :rf_ch4_etminan, :CH₄, :climatech4cycle, :acch4)
connect_param!(m, :rf_ch4_total_fund, :rf_ch4_direct, :rf_ch4_etminan, :rf_CH₄)
else
connect_param!(m, :rf_ch4_direct_fund, :CH₄, :climatech4cycle, :acch4)
connect_param!(m, :rf_ch4_total_fund, :rf_ch4_direct, :rf_ch4_direct_fund, :rf_ch4_direct)
end
connect_param!(m, :rf_total, :rf_CH₄, :rf_ch4_total_fund, :rf_ch4_total)
connect_param!(m, :rf_total, :rf_CO₂, :rf_co2_etminan, :rf_CO₂)
# Return constructed model.
return m
end
| [
27,
7856,
261,
480,
29,
29313,
2364,
23912,
14,
20189,
12,
1238,
2481,
12,
1416,
354,
19,
198,
2,
10097,
24305,
198,
2,
770,
2163,
8075,
281,
4554,
286,
11346,
36,
26483,
10,
42296,
35,
13,
198,
2,
10097,
24305,
198,
198,
2,
8778,... | 2.201095 | 3,287 |
# This file was generated, do not modify it.
import MLJ: schema, std, mean, median, coerce, coerce!, scitype
using DataFrames
using UrlDownload
using PyPlot
ioff() # hide
raw_data = urldownload("https://github.com/tlienart/DataScienceTutorialsData.jl/blob/master/data/wri_global_power_plant_db_be_022020.csv?raw=true")
data = DataFrame(raw_data);
schema(data)
is_active(col) = !occursin(r"source|generation", string(col))
active_cols = [col for col in names(data) if is_active(col)]
select!(data, active_cols);
select!(data, Not([:wepp_id, :url, :owner]))
schema(data)
describe(data)
# The describe() function shows that there are several features with missing values.
capacity = select(data, [:country, :primary_fuel, :capacity_mw]);
first(capacity, 5)
cap_gr = groupby(capacity, [:country, :primary_fuel]);
cap_mean = combine(cap_gr, :capacity_mw => mean)
cap_sum = combine(cap_gr, :capacity_mw => sum)
first(cap_sum, 3)
ctry_selec = r"BEL|FRA|DEU"
tech_selec = r"Solar"
cap_sum_plot = cap_sum[occursin.(ctry_selec, cap_sum.country) .& occursin.(tech_selec, cap_sum.primary_fuel), :]
sort!(cap_sum_plot, :capacity_mw_sum, rev=true)
figure(figsize=(8,6))
plt.bar(cap_sum_plot.country, cap_sum_plot.capacity_mw_sum, width=0.35)
plt.xticks(rotation=90)
savefig(joinpath(@OUTPUT, "D0-processing-g1.svg")) # hide
cap_sum_ctry_gd = groupby(capacity, [:country]);
cap_sum_ctry = combine(cap_sum_ctry_gd, :capacity_mw => sum);
cap_sum = DataFrame(cap_sum);
cap_sum_ctry = DataFrame(cap_sum_ctry);
cap_share = leftjoin(cap_sum, cap_sum_ctry, on = :country, makeunique = true)
cap_share.capacity_mw_share = cap_share.capacity_mw_sum ./ cap_share.capacity_mw_sum_1;
nMissings = length(findall(x -> ismissing(x), data.commissioning_year))
nMissings_share = nMissings/size(data)[1]
typeof(data.commissioning_year)
data_nmiss = dropmissing(data, :commissioning_year);
map!(x -> round(x, digits=0), data_nmiss.commissioning_year, data_nmiss.commissioning_year);
# We can now calculate plant age for each plant (worth remembering that the dataset only contains active plants)
current_year = fill!(Array{Float64}(undef, size(data_nmiss)[1]), 2020);
data_nmiss[:, :plant_age] = current_year - data_nmiss[:, :commissioning_year];
mean_age = mean(skipmissing(data_nmiss.plant_age))
median_age = median(skipmissing(data_nmiss.plant_age))
figure(figsize=(8,6))
plt.hist(data_nmiss.plant_age, color="blue", edgecolor="white", bins=100,
density=true, alpha=0.5)
plt.axvline(mean_age, label = "Mean", color = "red")
plt.axvline(median_age, label = "Median")
plt.legend()
plt.xlim(0,)
savefig(joinpath(@OUTPUT, "D0-processing-g2.svg")) # hide
age = select(data_nmiss, [:country, :primary_fuel, :plant_age])
age_mean = combine(groupby(age, [:country, :primary_fuel]), :plant_age => mean)
coal_means = age_mean[occursin.(ctry_selec, age_mean.country) .& occursin.(r"Coal", age_mean.primary_fuel), :]
gas_means = age_mean[occursin.(ctry_selec, age_mean.country) .& occursin.(r"Gas", age_mean.primary_fuel), :]
width = 0.35 # the width of the bars
fig, (ax1, ax2) = plt.subplots(1,2)
fig.suptitle("Mean plant age by country and technology")
ax1.bar(coal_means.country, coal_means.plant_age_mean, width, label="Coal")
ax2.bar(gas_means.country, gas_means.plant_age_mean, width, label="Gas")
ax1.set_ylabel("Age")
ax1.set_title("Coal")
ax2.set_title("Gas")
savefig(joinpath(@OUTPUT, "D0-processing-g3.svg")) # hide
| [
2,
770,
2393,
373,
7560,
11,
466,
407,
13096,
340,
13,
198,
198,
11748,
10373,
41,
25,
32815,
11,
14367,
11,
1612,
11,
14288,
11,
31255,
344,
11,
31255,
344,
28265,
629,
414,
431,
198,
3500,
6060,
35439,
198,
3500,
8799,
75,
10002,
... | 2.502187 | 1,372 |
abstract type CompilerHint end
abstract type ProgramStructureHint <: CompilerHint end
abstract type AddressingHint <: CompilerHint end
include("static/kernel_hint.jl")
include("static/switch_hint.jl")
include("static/dynamic_address_hint.jl")
| [
397,
8709,
2099,
3082,
5329,
39,
600,
886,
198,
397,
8709,
2099,
6118,
1273,
5620,
39,
600,
1279,
25,
3082,
5329,
39,
600,
886,
198,
397,
8709,
2099,
3060,
11697,
39,
600,
1279,
25,
3082,
5329,
39,
600,
886,
198,
198,
17256,
7203,
... | 3.05 | 80 |
module ConvDiffMIPDECO
using jInv.Mesh
using jInv.ForwardShare
using jInv.Utils
using jInv.LinearSolvers
using jInv.InverseSolve
using KrylovMethods
using LinearAlgebra
using SparseArrays
using Printf
using DSP
function getBICGSTB(;PC=:jac,maxIter=1000,out=0,tol=1e-10)
bicg = (A,b; M=identity,tol=1e-10,maxIter=500,out=1)->
bicgstb(A,b,M1=identity,tol=tol,maxIter=maxIter,out=out,tolRho=1e-60)
return getIterativeSolver(bicg,PC=PC,maxIter=maxIter,out=out,tol=tol)
end
# files are organized in the src (containing functions) and test (containing some unit tests)
import jInv.ForwardShare.ForwardProbType
export ConvDiffParam, getConvDiffParam
"""
type ConvDiffParam <: ForwardProbType
description of stationary convection diffusion forward problem
(- sig*Laplacian + v dot GRAD) u = f
with boundary conditions du/dn = gn on Omega1, u=gd on Omega2
Construct an instance using getConvDiffParam(M,v,kwargs...)
Fields:
M - describes computational mesh
v - velocities at cell-centers
sig - scalar, >0
bc - vector describing boundary conditions
Fields - stores fields, i.e., u
Ainv - factorization of PDE
"""
mutable struct ConvDiffParam <: ForwardProbType
M :: RegularMesh
A :: SparseMatrixCSC{Float64}
P :: AbstractArray{Float64}
sig::Float64
bc::Array{Float64}
Fields::Array{Float64,1}
Ainv::AbstractSolver
end
"""
function getConvDiffParam(M,v)
constructs and returns ConvDiffParam
The PDE is discretized and factorized and stored in field Ainv.
Inputs:
M - mesh
Keyword arguments:
v - velocity, (vector or function)
gd::Function - Dirichlet boundary conditions
gn::Function - Neuman boundary condition
P - measurement operator
sig - viscosity
bc - description of boundary conditions
Fields - storing PDE solutions
Ainv - description of linear solver
"""
function getConvDiffParam(M::RegularMesh,v;
gd::Function=X->zeros(size(X,1)), gn::Function=X->zeros(size(X,1)), P=Diagonal(ones(M.nc)),sig::Number=0.01,bc=(:dir,:neu,:neu,:neu),Fields=zeros(0),Ainv=getJuliaSolver())
# get boundary conditions
iddir, idneu, iddirn, idneun, idint = getBoundaryIndices(M,bc)
A, Adir, Aneu = getConvDiffMatrix(M,sig,v,iddir,idneu,iddirn,idneun,idint)
gdir, gneu = getBoundaryFuncVal(gd,gn,M,bc)
bc = -2*Adir*gdir + Aneu*gneu
return ConvDiffParam(M,A,P,sig,bc,Fields,Ainv)
end
include("getBoundaryCondition.jl")
include("getGhostIndices.jl")
include("getConvDiffMatrix.jl")
include("getBoundaryIndices.jl")
include("getBoundaryFuncVal.jl")
include("getDiffOps.jl")
include("getData.jl")
include("utils.jl")
include("ConvDiffFEMParam.jl")
include("FEM.jl")
include("getDataFEM.jl")
include("getConvDiffFEMConstraintsAMPL.jl")
include("mipdecoHeuristic.jl")
include("rounding.jl")
include("dilation.jl")
include("regularizers.jl")
end
| [
21412,
34872,
28813,
44,
4061,
41374,
46,
198,
198,
3500,
474,
19904,
13,
37031,
198,
3500,
474,
19904,
13,
39746,
11649,
198,
3500,
474,
19904,
13,
18274,
4487,
198,
3500,
474,
19904,
13,
14993,
451,
36949,
690,
198,
3500,
474,
19904,
... | 2.539146 | 1,124 |
module TestAcquisition
using Test
using LinearAlgebra
using GaussianDistributions
using CovarianceFunctions
const Kernel = CovarianceFunctions
using SARA: ucb, inner_sampling, random_sampling, uncertainty_sampling,
integrated_uncertainty_sampling
@testset "acquisition" begin
l = 1/2
k = Kernel.Lengthscale(Kernel.EQ(), l)
G = Gaussian(sin, k)
n = 256
@testset "random sampling" begin
x = randn(n)
@test random_sampling(G, x) isa Real
@test random_sampling(G, x) in x
end
@testset "upper confidence bound" begin
# test on unconditioned GP
x = range(0, π, length = n)
α = 3.
xi = ucb(G, x, α)
@test isapprox(xi, π/2, atol = 5e-2)
# create 2d synthetic data and condition
l = 1/2
k = Kernel.Lengthscale(Kernel.EQ(), l)
G = Gaussian(k)
f(x) = sum(sin, x)
x = [randn(2) for _ in 1:n]
σ = .01
y = @. f(x) + σ*randn()
C = G | (x, y, σ^2)
# test ucb sampling on conditioned process
ns = 1024
xs = [randn(2) for _ in 1:ns]
x0 = ucb(C, xs, 0.) # without uncertainty term, chooses point close to optimum
@test isapprox(f(x0), 2, atol = 3e-1)
xi = ucb(C, xs, α) # with uncertainty term,
@test f(x0) > f(xi) # trades off value with uncertainty
@test f(xi) > 1 # still chooses xi with moderately large value
end
@testset "integrated uncertainty sampling" begin
l = .1
k = Kernel.Lengthscale(Kernel.EQ(), l)
G = Gaussian(k)
# test on unconditioned GP
n = 32
x = range(0, π, length = n)
σ = 1e-2
y = @. sin(x) + σ * randn()
C = G | (x, y, σ^2)
m = 256
xs = range(0, π, length = m)
xi = integrated_uncertainty_sampling(G, xs, σ)
xk = uncertainty_sampling(G, xs)
xi = integrated_uncertainty_sampling(C, xs, σ)
xk = uncertainty_sampling(C, xs)
end
@testset "SARA inner loop sampling" begin
l = .1
k = Kernel.Lengthscale(Kernel.EQ(), l)
G = Gaussian(sin, k)
# test on unconditioned GP
n = 32
x = range(0, π, length = n)
σ = 1e-2
y = @. sin(x) + σ * randn()
C = G | (x, y, σ^2)
m = 256
xs = range(0, π, length = m)
xi = inner_sampling(G, xs, σ)
xi = inner_sampling(C, xs, σ)
# @test isapprox(xi, π/2, atol = 5e-2)
end
end
end
| [
21412,
6208,
12832,
421,
10027,
198,
3500,
6208,
198,
3500,
44800,
2348,
29230,
198,
3500,
12822,
31562,
20344,
2455,
507,
198,
3500,
39751,
2743,
590,
24629,
2733,
198,
9979,
32169,
796,
39751,
2743,
590,
24629,
2733,
198,
3500,
311,
244... | 1.957198 | 1,285 |
<reponame>angus-lewis/SFFM
using Plots, SFFM
# include("../../src/SFFM.jl")
cme_9 = SFFM.MakeME(SFFM.CMEParams[9])
f = SFFM.pdf(cme_9)
F(x) = 1 - SFFM.cdf(cme_9)(x)
x = 0:0.05:1.5
plot(x,f.(x), label = "α exp(Sz) s")
plot!(x,f.(x.+0.3)./F.(0.3), label = "α exp(S(0.3+z)) s/α exp(S 0.3) e")
plot!(x,f.(x.+0.6)./F.(0.6), label = "α exp(S(0.6+z)) s/α exp(S 0.6) e")
plot!(
xlabel = "z",
ylabel = "Density",
legend = :outertop,
)
savefig("examples/meNumerics/ME_residual_life_density.pdf") | [
27,
7856,
261,
480,
29,
648,
385,
12,
293,
86,
271,
14,
50,
5777,
44,
198,
3500,
1345,
1747,
11,
311,
5777,
44,
198,
198,
2,
2291,
7203,
40720,
40720,
10677,
14,
50,
5777,
44,
13,
20362,
4943,
198,
198,
66,
1326,
62,
24,
796,
... | 1.725086 | 291 |
<reponame>JuliaPackageMirrors/Polyhedra.jl
function simplextest{Lib<:PolyhedraLibrary}(lib::Lib)
A = [1 1; -1 0; 0 -1]
b = [1, 0, 0]
linset = IntSet([1])
V = [0 1; 1 0]
ine = SimpleHRepresentation(A, b, linset)
poly1 = polyhedron(ine, lib)
@test !isempty(poly1)
inequality_fulltest(poly1, A, b, linset)
generator_fulltest(poly1, V)
ext = SimpleVRepresentation(V)
poly2 = polyhedron(ext, lib)
@test !isempty(poly2)
inequality_fulltest(poly2, A, b, linset)
generator_fulltest(poly2, V)
# x_1 cannot be 2
@test isempty(polyhedron(SimpleHRepresentation([A; 1 0], [b; 2], union(linset, IntSet([4]))), lib))
# We now add the vertex (0, 0)
V0 = [0 0]
ext0 = SimpleVRepresentation(V0)
push!(poly1, ext0)
inequality_fulltest(poly1, A, b, IntSet([]))
generator_fulltest(poly1, [V0; V])
push!(poly2, ext0)
inequality_fulltest(poly2, A, b, IntSet([]))
generator_fulltest(poly2, [V0; V])
# nonnegative orthant cut by x_1 + x_2 = 1
Vray = [1 0; 0 1]
extray = SimpleVRepresentation(Matrix{Int}(0,2), Vray)
poly3 = polyhedron(extray, lib)
Acut = [1 1]
bcut = [1]
linsetcut = IntSet([1])
inecut = SimpleHRepresentation(Acut, bcut, linsetcut)
push!(poly3, inecut)
inequality_fulltest(poly3, A, b, linset)
generator_fulltest(poly3, V)
poly4 = project(poly1, [1; 0])
inequality_fulltest(poly4, [-1; 1], [0, 1], IntSet())
generator_fulltest(poly4, [0; 1], [])
#\
# \
# |\
# |_\
# \
Alin = [1 1]
blin = [1]
linsetlin = IntSet(1)
Vlin = [1 0]
Rlin = [1 -1]
inelin = SimpleHRepresentation([1 1; -1 -1], [1, -1], IntSet())
plin = polyhedron(inelin, lib)
inequality_fulltest(plin, Alin, blin, linsetlin)
generator_fulltest(plin, Vlin, Rlin, IntSet(), IntSet(1))
ineout = getinequalities(plin)
@test ineout.linset == IntSet(1)
Vlin = [1 0]
Rlin = [1 -1]
extlin = SimpleVRepresentation(Vlin, [1 -1; -1 1])
plin = polyhedron(extlin, lib)
inequality_fulltest(plin, Alin, blin, linsetlin)
generator_fulltest(plin, Vlin, Rlin, IntSet(), IntSet(1))
extout = SimpleVRepresentation(getgenerators(plin))
@test extout.Rlinset == IntSet(1)
end
| [
27,
7856,
261,
480,
29,
16980,
544,
27813,
27453,
5965,
14,
34220,
704,
430,
13,
20362,
198,
8818,
2829,
742,
395,
90,
25835,
27,
25,
34220,
704,
430,
23377,
92,
7,
8019,
3712,
25835,
8,
198,
220,
317,
796,
685,
16,
352,
26,
532,
... | 2.301826 | 931 |
<reponame>phyjonas/ImpuritySGPE<gh_stars>1-10
__precompile__
@everywhere module OneDim
using Random
using FFTW
include("helper.jl")
include("NewtonImp.jl")
include("SGPE.jl")
include("solver.jl")
include("modelA.jl")
include("modelA_fourier_galerkin.jl")
include("SGPE_fourier_galerkin.jl")
export NewtonImp,
phi_step_sgpe,
tuple_stencil_bc,
V_fct,
V_fct_pinned,
sampler_complex,
sampler,
solver,
phi_step_modelA,
observables,
phi_galerk_modelA,
phi_galerk_sgpe
end
| [
27,
7856,
261,
480,
29,
6883,
46286,
292,
14,
26950,
1684,
38475,
11401,
27,
456,
62,
30783,
29,
16,
12,
940,
198,
834,
3866,
5589,
576,
834,
198,
198,
31,
16833,
3003,
8265,
1881,
29271,
198,
198,
3500,
14534,
198,
3500,
376,
9792,... | 2.154812 | 239 |
<filename>backend/anime_data/snapshots_10805.jl<gh_stars>1-10
{"score": 7.38, "score_count": 45917, "timestamp": 1562557262.0}
{"score": 7.39, "score_count": 44143, "timestamp": 1545775577.0}
{"score": 7.41, "score_count": 36841, "timestamp": 1492413902.0}
{"score": 7.42, "score_count": 34604, "timestamp": 1478750823.0}
{"score": 7.43, "score_count": 32502, "timestamp": 1466354767.0}
{"score": 7.44, "score_count": 31902, "timestamp": 1463150884.0}
{"score": 7.44, "score_count": 31473, "timestamp": 1460904298.0}
{"score": 7.44, "score_count": 31163, "timestamp": 1459491256.0}
{"score": 7.44, "score_count": 30969, "timestamp": 1458653345.0}
{"score": 7.44, "score_count": 30929, "timestamp": 1458371272.0}
{"score": 7.4, "score_count": 41503, "timestamp": 1522339569.0}
{"score": 7.43, "score_count": 32608, "timestamp": 1466869616.0}
{"score": 7.44, "score_count": 30090, "timestamp": 1453682311.0}
{"score": 7.44, "score_count": 31539, "timestamp": 1461254769.0}
{"score": 7.44, "score_count": 31792, "timestamp": 1462455699.0}
{"score": 7.44, "score_count": 31041, "timestamp": 1459020818.0}
{"score": 7.45, "score_count": 29149, "timestamp": 1449168737.0}
{"score": 7.42, "score_count": 34688, "timestamp": 1479314131.0}
| [
27,
34345,
29,
1891,
437,
14,
272,
524,
62,
7890,
14,
45380,
20910,
62,
24045,
20,
13,
20362,
27,
456,
62,
30783,
29,
16,
12,
940,
198,
4895,
26675,
1298,
767,
13,
2548,
11,
366,
26675,
62,
9127,
1298,
604,
3270,
1558,
11,
366,
... | 2.362764 | 521 |
#!/usr/bin/env julia
using Luxor, Random
Random.seed!(42)
using Test
function test_circular_arrows_1(pos)
gsave()
froma = rescale(rand(1:100), 1, 100, 0, 2pi)
toa = rescale(rand(1:100), (1, 100), (0, 2pi))
sethue("black")
arrow(pos, 100, froma, toa, linewidth=rand(1:6), arrowheadlength=rand(10:30))
text(string("from: ", round(rad2deg(froma), digits=1)), pos)
text(string("to: ", round(rad2deg(toa), digits=1)), pos.x, pos.y+10)
sethue("magenta")
arrow(pos, 100, toa, froma; linewidth=rand(1:6), arrowheadlength=rand(10:30))
text(string("from: ", round(rad2deg(toa), digits=1)), pos.x, pos.y+20)
text(string("to: ", round(rad2deg(froma), digits=1)), pos.x, pos.y+30)
grestore()
end
function test_circular_arrows_2(pos, w)
sethue("black")
for a in 50:10:w
randomhue()
starta = rand(0:pi/12:2pi)
finisha = rand(0:pi/12:2pi)
arrow(pos, a, starta, finisha, linewidth=rand(1:6), arrowheadlength=rand(1:20))
end
end
function arrow_arc_test(fname)
pagewidth, pageheight = 2000, 2000
Drawing(pagewidth, pageheight, fname)
origin() # move 0/0 to center
background(1, 1, 0.9, 1)
setopacity(0.5)
setline(2)
pagetiles = Tiler(pagewidth, pageheight, 4, 4, margin=50)
for (pos, n) in pagetiles
if isodd(n)
test_circular_arrows_1(pos)
else
test_circular_arrows_2(pos, pagetiles.tilewidth/2)
end
end
# test Bezier arrow
setopacity(1.0)
sethue("red")
arrow(pagetiles[1][1], pagetiles[3][1], pagetiles[6][1], pagetiles[7][1],
linewidth=5,
arrowheadlength=50,
arrowheadfill = false,
arrowheadangle=π/4,
startarrow=true)
@test finish() == true
println("...finished arrow-test: saved in $(fname)")
end
fname = "arrow-arctest.png"
arrow_arc_test(fname)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
474,
43640,
198,
198,
3500,
17145,
273,
11,
14534,
198,
198,
29531,
13,
28826,
0,
7,
3682,
8,
198,
198,
3500,
6208,
198,
198,
8818,
1332,
62,
21170,
934,
62,
6018,
82,
62,
16,
7,
1930,
8,
19... | 2.211634 | 808 |
function EventBasedManeuverTriggers(arg0::AbstractDetector, arg1::AbstractDetector)
return EventBasedManeuverTriggers((AbstractDetector, AbstractDetector), arg0, arg1)
end
function event_occurred(obj::EventBasedManeuverTriggers, arg0::SpacecraftState, arg1::EventDetector, arg2::jboolean)
return jcall(obj, "eventOccurred", Action, (SpacecraftState, EventDetector, jboolean), arg0, arg1, arg2)
end
function get_events_detectors(obj::EventBasedManeuverTriggers)
return jcall(obj, "getEventsDetectors", Stream, ())
end
function get_field_events_detectors(obj::EventBasedManeuverTriggers, arg0::Field)
return jcall(obj, "getFieldEventsDetectors", Stream, (Field,), arg0)
end
function get_start_firing_detector(obj::EventBasedManeuverTriggers)
return jcall(obj, "getStartFiringDetector", AbstractDetector, ())
end
function get_stop_firing_detector(obj::EventBasedManeuverTriggers)
return jcall(obj, "getStopFiringDetector", AbstractDetector, ())
end
function get_triggered_end(obj::EventBasedManeuverTriggers)
return jcall(obj, "getTriggeredEnd", AbsoluteDate, ())
end
function get_triggered_start(obj::EventBasedManeuverTriggers)
return jcall(obj, "getTriggeredStart", AbsoluteDate, ())
end
function init(obj::EventBasedManeuverTriggers, arg0::SpacecraftState, arg1::AbsoluteDate)
return jcall(obj, "init", void, (SpacecraftState, AbsoluteDate), arg0, arg1)
end
function is_firing(obj::EventBasedManeuverTriggers, arg0::AbsoluteDate)
return jcall(obj, "isFiring", jboolean, (AbsoluteDate,), arg0)
end
function is_firing(obj::EventBasedManeuverTriggers, arg0::AbsoluteDate, arg1::Vector{jdouble})
return jcall(obj, "isFiring", jboolean, (AbsoluteDate, Vector{jdouble}), arg0, arg1)
end
function is_firing(obj::EventBasedManeuverTriggers, arg0::FieldAbsoluteDate, arg1::Vector{RealFieldElement})
return jcall(obj, "isFiring", jboolean, (FieldAbsoluteDate, Vector{RealFieldElement}), arg0, arg1)
end
function reset_state(obj::EventHandler, arg0::EventDetector, arg1::SpacecraftState)
return jcall(obj, "resetState", SpacecraftState, (EventDetector, SpacecraftState), arg0, arg1)
end
function set_firing(obj::EventBasedManeuverTriggers, arg0::jboolean, arg1::AbsoluteDate)
return jcall(obj, "setFiring", void, (jboolean, AbsoluteDate), arg0, arg1)
end
| [
8818,
8558,
15001,
44,
1531,
84,
332,
2898,
328,
5355,
7,
853,
15,
3712,
23839,
11242,
9250,
11,
1822,
16,
3712,
23839,
11242,
9250,
8,
198,
220,
220,
220,
1441,
8558,
15001,
44,
1531,
84,
332,
2898,
328,
5355,
19510,
23839,
11242,
... | 2.789157 | 830 |
module StressTest
"""
dream(seconds)
Like Base.sleep() except maxes out the thread for a specified number of seconds. The minimum dream time is 1
millisecond or input of `0.001`.
"""
function dream(sec::Real)
sec ≥ 0 || throw(ArgumentError("cannot dream for $sec seconds"))
t = Timer(sec)
while isopen(t)
yield()
end
nothing
end
export dream
end # module
| [
21412,
36957,
14402,
198,
198,
37811,
198,
220,
220,
220,
4320,
7,
43012,
8,
198,
198,
7594,
7308,
13,
42832,
3419,
2845,
3509,
274,
503,
262,
4704,
329,
257,
7368,
1271,
286,
4201,
13,
383,
5288,
4320,
640,
318,
352,
198,
17805,
27... | 2.854015 | 137 |
"""
qubits(N::Int; mixed::Bool=false)
qubits(sites::Vector{<:Index}; mixed::Bool=false)
Initialize qubits to:
- An MPS wavefunction `|ψ⟩` if `mixed = false`
- An MPO density matrix `ρ` if `mixed = true`
"""
qubits(N::Int; mixed::Bool=false) = qubits(siteinds("Qubit", N); mixed=mixed)
function qubits(sites::Vector{<:Index}; mixed::Bool=false)
@warn "Method `qubits` is deprecated, use `productstate` or `productoperator` instead."
ψ = productMPS(sites, "0")
mixed && return MPO(ψ)
return ψ
end
"""
qubits(M::Union{MPS,MPO,LPDO}; mixed::Bool=false)
Initialize qubits on the Hilbert space of a reference state,
given as `MPS`, `MPO` or `LPDO`.
"""
qubits(M::Union{MPS,MPO,LPDO}; mixed::Bool=false) = qubits(hilbertspace(M); mixed=mixed)
"""
qubits(N::Int, states::Vector{String}; mixed::Bool=false)
qubits(sites::Vector{<:Index}, states::Vector{String};mixed::Bool = false)
Initialize the qubits to a given single-qubit product state.
"""
function qubits(N::Int, states::Vector{String}; mixed::Bool=false)
return qubits(siteinds("Qubit", N), states; mixed=mixed)
end
function qubits(sites::Vector{<:Index}, states::Vector{String}; mixed::Bool=false)
@warn "Method `qubits` is deprecated, use `productstate` or `productoperator` instead."
N = length(sites)
@assert N == length(states)
ψ = productMPS(sites, "0")
if N == 1
s1 = sites[1]
state1 = state(states[1])
if eltype(state1) <: Complex
ψ[1] = complex(ψ[1])
end
for j in 1:dim(s1)
ψ[1][s1 => j] = state1[j]
end
mixed && return MPO(ψ)
return ψ
end
# Set first site
s1 = sites[1]
l1 = linkind(ψ, 1)
state1 = state(states[1])
if eltype(state1) <: Complex
ψ[1] = complex(ψ[1])
end
for j in 1:dim(s1)
ψ[1][s1 => j, l1 => 1] = state1[j]
end
# Set sites 2:N-1
for n in 2:(N - 1)
sn = sites[n]
ln_1 = linkind(ψ, n - 1)
ln = linkind(ψ, n)
state_n = state(states[n])
if eltype(state_n) <: Complex
ψ[n] = complex(ψ[n])
end
for j in 1:dim(sn)
ψ[n][sn => j, ln_1 => 1, ln => 1] = state_n[j]
end
end
# Set last site N
sN = sites[N]
lN_1 = linkind(ψ, N - 1)
state_N = state(states[N])
if eltype(state_N) <: Complex
ψ[N] = complex(ψ[N])
end
for j in 1:dim(sN)
ψ[N][sN => j, lN_1 => 1] = state_N[j]
end
mixed && return MPO(ψ)
return ψ
end
| [
198,
37811,
198,
220,
220,
220,
627,
9895,
7,
45,
3712,
5317,
26,
7668,
3712,
33,
970,
28,
9562,
8,
198,
220,
220,
220,
220,
198,
220,
220,
220,
627,
9895,
7,
49315,
3712,
38469,
90,
27,
25,
15732,
19629,
7668,
3712,
33,
970,
28... | 2.213556 | 1,077 |
<reponame>americast/GPUArrays.jl<filename>src/fft.jl
import CLFFT
# figure out a gc safe way to store plans.
# weak refs won't work, since the caching should keep them alive.
# But at the end, we need to free all of these, otherwise CLFFT will crash
# at closing time.
# An atexit hook here, which will empty the dictionary seems to introduce racing
# conditions.
#const plan_dict = Dict()
import Base: *, plan_ifft!, plan_fft!, plan_fft, plan_ifft, size, plan_bfft, plan_bfft!
struct CLFFTPlan{Direction, Inplace, T, N} <: Base.FFTW.FFTWPlan{T, Direction, Inplace}
plan::CLFFT.Plan{T}
function CLFFTPlan{Direction, Inplace}(A::CLArray{T, N}) where {T, N, Direction, Inplace}
ctx = context(A)
p = CLFFT.Plan(T, ctx.context, size(A))
CLFFT.set_layout!(p, :interleaved, :interleaved)
if Inplace
CLFFT.set_result!(p, :inplace)
else
CLFFT.set_result!(p, :outofplace)
end
CLFFT.set_scaling_factor!(p, Direction, 1f0)
CLFFT.bake!(p, ctx.queue)
new{Direction, Inplace, T, N}(p)
end
end
size(x::CLFFTPlan) = (CLFFT.lengths(x.plan)...,)
# ignore flags, but have them to make it base compatible.
# TODO can we actually implement the flags?
function plan_fft(A::CLArray; flags = nothing, timelimit = Inf)
CLFFTPlan{:forward, false}(A)
end
function plan_fft!(A::CLArray; flags = nothing, timelimit = Inf)
CLFFTPlan{:forward, true}(A)
end
function plan_bfft(A::CLArray, region; flags = nothing, timelimit = Inf)
CLFFTPlan{:backward, false}(A)
end
function plan_bfft!(A::CLArray, region; flags = nothing, timelimit = Inf)
CLFFTPlan{:backward, true}(A)
end
const _queue_ref = Vector{cl.CmdQueue}(1)
function *(plan::CLFFTPlan{Direction, true, T, N}, A::CLArray{T, N}) where {T, N, Direction}
_queue_ref[] = context(A).queue
CLFFT.enqueue_transform(plan.plan, Direction, _queue_ref, buffer(A), nothing)
A
end
function *(plan::CLFFTPlan{Direction, false, T, N}, A::CLArray{T, N}) where {T, N, Direction}
_queue_ref[] = context(A).queue
y = typeof(A)(size(plan))
CLFFT.enqueue_transform(plan.plan, Direction, _queue_ref, buffer(A), buffer(y))
y
end
| [
27,
7856,
261,
480,
29,
2382,
291,
459,
14,
33346,
3163,
20477,
13,
20362,
27,
34345,
29,
10677,
14,
487,
83,
13,
20362,
198,
11748,
7852,
5777,
51,
198,
198,
2,
3785,
503,
257,
308,
66,
3338,
835,
284,
3650,
3352,
13,
198,
2,
4... | 2.372017 | 922 |
<gh_stars>0
include("myfile.jl")
| [
27,
456,
62,
30783,
29,
15,
198,
17256,
7203,
1820,
7753,
13,
20362,
4943,
198
] | 2.2 | 15 |
# Stubs - Can be used as references
struct Account <: QBObject end
struct ItemBasedExpenseLineDetail; end
struct Employee <: QBObject end
struct Vendor <: QBObject end
struct Customer <: QBObject end
struct Item <: QBObject end
struct Company <: QBObject
Id::Maybe{Int}
end
from_json(::Type{ItemBasedExpenseLineDetail}, data) = ItemBasedExpenseLineDetail()
# Class
struct Class <: QBObject
Id::Maybe{Int64}
SyncToken::Maybe{Int64}
Parent::Optional{QboRef{Class}}
Name::Maybe{String}
Active::Maybe{Bool}
end
@eval @make_default_methods $Class
# PaymentMethod
mutable struct PaymentMethod
Id::Maybe{Int}
SyncToken::Maybe{Int}
Name::Maybe{String}
Type::Maybe{String}
end
@eval @make_default_methods $PaymentMethod
### Purchases
struct AccountBasedExpenseLineDetail
Account::QboRef{Account}
Class::Optional{QboRef{Class}}
BillableStatus::String
end
@eval @make_default_methods $AccountBasedExpenseLineDetail
const DetailType = Union{ItemBasedExpenseLineDetail,
AccountBasedExpenseLineDetail}
struct Line
Id::Maybe{Int}
Amount::Maybe{Decimal}
Detail::DetailType
end
Line(Amount::Decimal, Detail::DetailType) = Line(missing, Amount, Detail)
@eval from_json(::Type{Line}, data) = @from_json $Line $(Dict(
:Detail => quote
if data["DetailType"] == "AccountBasedExpenseLineDetail"
from_json(AccountBasedExpenseLineDetail,
data["AccountBasedExpenseLineDetail"])
elseif data["DetailType"] == "ItemBasedExpenseLineDetail"
from_json(ItemBasedExpenseLineDetail,
data["ItemBasedExpenseLineDetail"])
end
end
))
@eval to_json(data::Line) = @to_json $Line $(Dict(
:Detail => quote
if isa(data.Detail, AccountBasedExpenseLineDetail)
ret["DetailType"] = "AccountBasedExpenseLineDetail"
ret["AccountBasedExpenseLineDetail"] = to_json(data.Detail)
elseif isa(data.Detail, ItemBasedExpenseLineDetail)
ret["DetailType"] = "ItemBasedExpenseLineDetail"
ret["ItemBasedExpenseLineDetail"] = to_json(data.Detail)
end
end
))
mutable struct Purchase <: QBObject
Id::Maybe{Int}
SyncToken::Maybe{Int}
Account::Maybe{QboRef{Account}}
PaymentMethod::Optional{QboRef{PaymentMethod}}
PaymentType::Maybe{String}
Entity::Maybe{QboRef{<:QBObject}}
TxnDate::Maybe{Date}
Lines::Vector{Line}
end
@eval @make_default_methods $Purchase $(Dict(
:Lines => "Line"
))
# Type Mapping
function key_to_type(key::String)
Dict{String, Type}(
"Purchase" => Purchase,
"Class" => Class,
"Employee" => Employee,
"PaymentMethod" => PaymentMethod
)[key]
end | [
2,
520,
23161,
532,
1680,
307,
973,
355,
10288,
198,
7249,
10781,
1279,
25,
16135,
10267,
886,
198,
7249,
9097,
15001,
16870,
1072,
13949,
11242,
603,
26,
886,
198,
7249,
36824,
1279,
25,
16135,
10267,
886,
198,
7249,
39896,
1279,
25,
... | 2.480144 | 1,108 |
function area_balance(
psi_container::PSIContainer,
expression::Symbol,
area_mapping::Dict{String, Array{PSY.Bus, 1}},
branches,
)
time_steps = model_time_steps(psi_container)
remove_undef!(psi_container.expressions[expression])
nodal_net_balance = psi_container.expressions[expression]
constraint_bal = JuMPConstraintArray(undef, keys(area_mapping), time_steps)
participation_assignment_up = JuMPConstraintArray(undef, keys(area_mapping), time_steps)
participation_assignment_dn = JuMPConstraintArray(undef, keys(area_mapping), time_steps)
assign_constraint!(psi_container, "area_dispatch_balance", constraint_bal)
area_balance = get_variable(psi_container, ActivePowerVariable, PSY.Area)
for (k, buses_in_area) in area_mapping
for t in time_steps
area_net = model_has_parameters(psi_container) ? zero(PGAE) : JuMP.AffExpr(0.0)
for b in buses_in_area
JuMP.add_to_expression!(area_net, nodal_net_balance[PSY.get_number(b), t])
end
constraint_bal[k, t] =
JuMP.@constraint(psi_container.JuMPmodel, area_balance[k, t] == area_net)
end
end
expr_up = get_expression(psi_container, :emergency_up)
expr_dn = get_expression(psi_container, :emergency_dn)
assign_constraint!(
psi_container,
"participation_assignment_up",
participation_assignment_up,
)
assign_constraint!(
psi_container,
"participation_assignment_dn",
participation_assignment_dn,
)
for area in keys(area_mapping), t in time_steps
participation_assignment_up[area, t] =
JuMP.@constraint(psi_container.JuMPmodel, expr_up[area, t] == 0)
participation_assignment_dn[area, t] =
JuMP.@constraint(psi_container.JuMPmodel, expr_dn[area, t] == 0)
end
return
end
| [
8818,
1989,
62,
20427,
7,
198,
220,
220,
220,
46231,
62,
34924,
3712,
3705,
2149,
756,
10613,
11,
198,
220,
220,
220,
5408,
3712,
13940,
23650,
11,
198,
220,
220,
220,
1989,
62,
76,
5912,
3712,
35,
713,
90,
10100,
11,
15690,
90,
3... | 2.294903 | 824 |
export Transition;
struct Transition
move::Move
label::String
end
| [
39344,
40658,
26,
198,
198,
7249,
40658,
198,
220,
1445,
3712,
21774,
198,
220,
6167,
3712,
10100,
198,
437,
628
] | 3.6 | 20 |
using ROCKS
using Documenter
makedocs(;
modules = [ROCKS],
authors = "<NAME>",
repo = "https://github.com/DaymondLing/ROCKS.jl/blob/{commit}{path}#L{line}",
sitename = "ROCKS.jl",
format = Documenter.HTML(;
prettyurls = get(ENV, "CI", "false") == "true",
canonical = "https://DaymondLing.github.io/ROCKS.jl/stable",
assets = String[],
),
pages = [
"Home" => "index.md",
"User's Guide" => [
"KS Test" => "man/kstest.md",
"ROC" => "man/roc.md",
"Plots and Tables" => "man/bcdiag.md",
],
"Function Reference" => "Reference.md",
],
)
deploydocs(; repo = "github.com/DaymondLing/ROCKS.jl.git", devbranch = "main")
| [
3500,
41320,
50,
198,
3500,
16854,
263,
198,
198,
76,
4335,
420,
82,
7,
26,
198,
220,
220,
220,
13103,
796,
685,
49,
11290,
50,
4357,
198,
220,
220,
220,
7035,
796,
33490,
20608,
29,
1600,
198,
220,
220,
220,
29924,
796,
366,
5450... | 2.030387 | 362 |
<filename>src/rules/1 Algebraic functions/1.2 Trinomial products/1.2.1 Quadratic/.jl
include("1.2.1.1 (a+b x+c x^2)^p.jl")
include("1.2.1.2 (d+e x)^m (a+b x+c x^2)^p.jl")
include("1.2.1.3 (d+e x)^m (f+g x) (a+b x+c x^2)^p.jl")
include("1.2.1.4 (d+e x)^m (f+g x)^n (a+b x+c x^2)^p.jl")
include("1.2.1.5 (a+b x+c x^2)^p (d+e x+f x^2)^q.jl")
include("1.2.1.6 (g+h x)^m (a+b x+c x^2)^p (d+e x+f x^2)^q.jl")
include("1.2.1.7 (a+b x+c x^2)^p (d+e x+f x^2)^q (A+B x+C x^2).jl")
include("1.2.1.8 P(x) (a+b x+c x^2)^p.jl")
include("1.2.1.9 P(x) (d+e x)^m (a+b x+c x^2)^p.jl")
| [
27,
34345,
29,
10677,
14,
38785,
14,
16,
978,
29230,
291,
5499,
14,
16,
13,
17,
33822,
49070,
3186,
14,
16,
13,
17,
13,
16,
20648,
81,
1512,
11757,
20362,
198,
17256,
7203,
16,
13,
17,
13,
16,
13,
16,
357,
64,
10,
65,
2124,
10... | 1.507979 | 376 |
@testset "'Design' ............................. " begin
srand(1234)
function simonsDesign(r1, n1, r, n)
nvec = [[n1 for x1 in 0:r1]; [n for x1 in (r1 + 1):n1]]
cvec = [[Inf for x1 in 0:r1]; [r for x1 in (r1 + 1):n1]]
return Design(nvec, cvec)
end
# Simon's designs for beta = .2, alpha = .05, p1 = p0 +0.2
# cf. Simon, R "Optimal Two-Stage Designs for Phase II Clinical Trials",
# Controlled Clinical Trials 10:1-10 (1989). (p. 4)
p0 = collect(linspace(.1, .7, 7))
sd = [
simonsDesign( 1, 10, 5, 29), # p0 = 0.1
simonsDesign( 3, 13, 12, 43), # p0 = 0.2
simonsDesign( 5, 15, 18, 46), # p0 = 0.3
simonsDesign( 7, 16, 23, 46), # p0 = 0.4
simonsDesign( 8, 15, 26, 43), # p0 = 0.5
simonsDesign( 7, 11, 30, 43), # p0 = 0.6
simonsDesign( 4, 6, 22, 27) # p0 = 0.7
]
@test typeof(sd[1]) == Design{typeof(1), typeof(Inf), NoParameters}
@test DataFrames.DataFrame(sd[1]) == DataFrames.DataFrame(
x1 = 0:interimsamplesize(sd[1]),
n = samplesize(sd[1]),
c = criticalvalue(sd[1])
)
@test interimsamplesize(sd[1]) == 10
@test parameters(sd[1]) == NoParameters()
@test samplesize(sd[1]) == [[10 for x1 in 0:1]; [29 for x1 in (1 + 1):10]]
@test samplesize(sd[1], 1) == 10
@test samplesize(sd[1], 2) == 29
@test criticalvalue(sd[1]) == [[Inf for x1 in 0:1]; [5 for x1 in (1 + 1):10]]
@test criticalvalue(sd[1], 1) == Inf
@test criticalvalue(sd[1], 2) == 5
supp_ = support(sd[1])
for p in linspace(0, 1, 11)
@test pdf(sd[1], 1, 2, p) == 0.0
pdf_ = pdf.(sd[1], support(sd[1])[:,1], support(sd[1])[:,2], p)
@test all(pdf_ .>= 0.0)
@test isapprox(sum(pdf_), 1.0, atol = 0.00001)
end
@test power(sd[1], p0[1]) <= .05
@test power(sd[1], p0[1] + .2) >= .8
prior = p -> 1
@test expectedpower(sd[1], prior, mcrv = p0[1] + .1) < power(sd[1], .8)
@test expectedpower(sd[1], prior, mcrv = p0[1] + .1) > power(sd[1], .2)
@test expectedpower(sd[1], 1, prior, mcrv = p0[1] + .1) < expectedpower(sd[1], 2, prior, mcrv = p0[1] + .1)
@test isapprox(
stoppingforfutility(sd[1], .2), pdf(sd[1], 0, 0, .2) + pdf(sd[1], 1, 0, .2), atol = .00001
)
@test !test(sd[1], 0, 0)
@test test(sd[1], 5, 11)
sim_ = simulate(sd[1], p0[1], 1000)
@test mean(sim_[:rejectedH0]) < .05
@test all(sim_[:n] .<= 29)
@test all(sim_[:n] .>= 0)
@test isapprox(quadgk(jeffreysprior(sd[1]), 0, 1)[1], 1.0, atol = 0.0001)
save("test.jls", sd[1])
@test true
rm("test.jls")
writecsv("test.csv", sd[1]; label = "test")
@test true
rm("test.csv")
end
| [
31,
9288,
2617,
24018,
23067,
6,
220,
27754,
12359,
366,
2221,
628,
220,
19677,
392,
7,
1065,
2682,
8,
628,
220,
2163,
985,
684,
23067,
7,
81,
16,
11,
299,
16,
11,
374,
11,
299,
8,
198,
220,
220,
220,
299,
35138,
796,
16410,
77,... | 2.126541 | 1,217 |
module MPSKit
using TensorKit,KrylovKit,Parameters, Base.Threads,OptimKit
using LinearAlgebra:diag,Diagonal;
import LinearAlgebra
#bells and whistles for mpses
export InfiniteMPS,FiniteMPS,MPSComoving,PeriodicArray,MPSMultiline
export transfer_left,transfer_right
export leftorth,rightorth,leftorth!,rightorth!,poison!,uniform_leftorth,uniform_rightorth
export r_LL,l_LL,r_RR,l_RR,r_RL,r_LR,l_RL,l_LR #should be properties
export hamcat
#useful utility functions?
export spinmatrices,add_util_leg,full,nonsym_spintensors,nonsym_bosonictensors
export max_Ds,virtualspace
#hamiltonian things
export Hamiltonian,Operator,Cache
export MPOHamiltonian,contains,PeriodicMPO,ComAct,commutator,anticommutator
export ac_prime,c_prime,params,ac2_prime,expectation_value,effective_excitation_hamiltonian
export leftenv,rightenv
#algos
export find_groundstate, Vumps, Dmrg, Dmrg2, GradDesc, Idmrg1, Idmrg2, GradientGrassmann
export leading_boundary, PowerMethod
export quasiparticle_excitation
export timestep,Tdvp,Tdvp2
export splitham,mpo2mps,mps2mpo,infinite_temperature
export changebonds,VumpsSvdCut,OptimalExpand,SvdCut,UnionTrunc
export entropy
export dynamicaldmrg
export fidelity_susceptibility
#models
export nonsym_xxz_ham,nonsym_ising_ham,su2_xxx_ham,nonsym_ising_mpo,u1_xxz_ham,su2u1_grossneveu
#default settings
module Defaults
const eltype = ComplexF64
const maxiter = 100
const tolgauge = 1e-14
const tol = 1e-12
const verbose = true
_finalize(iter,state,opp,pars) = (state,pars,true);
end
include("utility/periodicarray.jl")
include("utility/utility.jl") #random utility functions
#maybe we should introduce an abstract state type
include("states/abstractmps.jl")
include("states/transfer.jl") # mps transfer matrices
include("states/infinitemps.jl")
include("states/multiline.jl")
include("states/finitemps.jl")
include("states/comoving.jl")
include("states/orthoview.jl")
include("states/quasiparticle_state.jl")
abstract type Operator end
abstract type Hamiltonian <: Operator end
include("operators/mpohamiltonian/mpohamiltonian.jl") #the mpohamiltonian objects
include("operators/umpo.jl")
include("operators/commutator.jl")
abstract type Cache end #cache "manages" environments
include("environments/FinEnv.jl")
include("environments/abstractinfenv.jl")
include("environments/permpoinfenv.jl")
include("environments/mpohaminfenv.jl")
include("environments/simpleenv.jl")
include("environments/overlapenv.jl")
include("environments/qpenv.jl")
abstract type Algorithm end
include("algorithms/derivatives.jl")
include("algorithms/expval.jl")
include("algorithms/toolbox.jl") #maybe move to utility, or move some utility functions to toolbox?
include("algorithms/ortho.jl")
include("algorithms/changebonds/optimalexpand.jl")
include("algorithms/changebonds/vumpssvd.jl")
include("algorithms/changebonds/svdcut.jl")
include("algorithms/changebonds/union.jl")
include("algorithms/timestep/tdvp.jl")
include("algorithms/groundstate/vumps.jl")
include("algorithms/groundstate/idmrg.jl")
include("algorithms/groundstate/dmrg.jl")
include("algorithms/groundstate/gradient_grassmann.jl")
include("algorithms/propagator/corvector.jl")
include("algorithms/excitation/quasiparticleexcitation.jl")
include("algorithms/statmech/vumps.jl")
include("algorithms/statmech/power.jl")
include("algorithms/fidelity_susceptibility.jl")
include("models/xxz.jl")
include("models/ising.jl")
include("models/grossneveu.jl")
end
| [
21412,
337,
3705,
20827,
198,
220,
220,
220,
1262,
309,
22854,
20827,
11,
42,
563,
27086,
20827,
11,
48944,
11,
7308,
13,
16818,
82,
11,
27871,
320,
20827,
628,
220,
220,
220,
1262,
44800,
2348,
29230,
25,
10989,
363,
11,
18683,
27923... | 2.601509 | 1,458 |
"""
get_potential(kout, kin, P, s::ShapeParams) -> sigma_mu
Given a shape `s` with `2N` discretization nodes, outer and inner wavenumbers
`kout`,`kin`, and the cylindrical harmonics parameter `P`, returns the potential
densities `sigma_mu`. Each column contains the response to a different harmonic,
where the first `2N` entries contain the single-layer potential density
(``\\sigma``), and the lower entries contain the double-layer density (``\\mu``).
"""
function get_potential(kout, kin, P, t, ft, dft)
N = length(t) #N here is 2N elesewhere.
A = SDNTpotentialsdiff(kout, kin, t, ft, dft)
LU = lu(A)
sigma_mu = Array{Complex{Float64}}(undef, 2*N, 2*P+1)
#assuming the wave is sampled on the shape
nz = sqrt.(sum(abs2, ft, dims=2))
θ = atan.(ft[:,2], ft[:,1])
ndz = sqrt.(sum(abs2, dft, dims=2))
nzndz = nz.*ndz
wro = dft[:,2].*ft[:,1] - dft[:,1].*ft[:,2]
zz = dft[:,1].*ft[:,1] + dft[:,2].*ft[:,2]
bessp = besselj.(-P-1, kout*nz)
bess = similar(bessp)
du = Array{Complex{Float64}}(undef, length(bessp))
rhs = Array{Complex{Float64}}(undef, 2*length(bessp))
for p = -P:P
bess[:] = besselj.(p, kout*nz)
du[:] = kout*bessp.*wro - (p*bess./nz).*(wro + 1im*zz)
rhs[:] = -[bess.*exp.(1.0im*p*θ);
(du./nzndz).*exp.(1.0im*p*θ)]
sigma_mu[:,p + P + 1] = LU\rhs
copyto!(bessp, bess)
end
return sigma_mu
end
"""
get_potential(kout, kin, P, t, ft, dft) -> sigma_mu
Same, but with the `ShapeParams` supplied directly.
"""
get_potential(kout, kin, P, s::ShapeParams) =
get_potential(kout, kin, P, s.t, s.ft, s.dft)
function SDNTpotentialsdiff(k1, k2, t, ft, dft)
#now just returns system matrix, utilizes similarities between upper and lower triangles
iseven(length(t)) ?
(N = div(length(t),2)) : (error("length(t) must be even"))
(Rvec, kmlogvec) = KM_weights(N)
A = Array{Complex{Float64}}(undef, 4*N, 4*N)
ndft = sqrt.(vec(sum(abs2,dft,dims=2)))
rij = Array{Float64}(undef, 2)
for i=1:2*N, j=1:i
if i == j
T1 = -(k1^2 - k2^2)
T2 = k1^2*(π*1im - 2MathConstants.γ + 1 - 2*log(k1*ndft[i]/2)) -
k2^2*(π*1im - 2MathConstants.γ + 1 - 2*log(k2*ndft[i]/2))
A[i,j] = (-ndft[i]/2/N)*log(k1/k2) #dS (dM1=0)
A[i,j+2*N] = 1 #dD (dL=0)
A[i+2*N,j] = -1 #dN=0
A[i+2*N,j+2*N] = (ndft[i]/8π)*(Rvec[1]*T1 + (π/N)*T2) #dT
continue
end
rij[1] = ft[i,1]-ft[j,1]
rij[2] = ft[i,2]-ft[j,2] #ridiculous but much faster than ft[i,:]-ft[j,:]
r = sqrt(rij[1]^2 + rij[2]^2)
didj = dft[i,1]*dft[j,1] + dft[i,2]*dft[j,2]
J01 = besselj(0, k1*r)
J02 = besselj(0, k2*r)
H01 = besselh(0, 1, k1*r)
H02 = besselh(0, 1, k2*r)
k2J0 = k1^2*J01 - k2^2*J02
k1J1 = k1*besselj(1,k1*r) - k2*besselj(1,k2*r)
k2H0 = k1^2*H01 - k2^2*H02
k1H1 = k1*besselh(1,k1*r) - k2*besselh(1,k2*r)
kmlog = kmlogvec[abs(i-j)+1]
R = Rvec[abs(i-j)+1]
N2 = 1im*pi*k1H1 - k1J1*kmlog
P1 = (-didj/π)*k2J0
P2 = 1im*didj*k2H0 - P1*kmlog
Qtilde = (dft[i,1]*rij[1] + dft[i,2]*rij[2])*(dft[j,1]*rij[1] + dft[j,2]*rij[2])/r^2
Q1 = (-Qtilde*k2J0 + (1/r)*k1J1*(2*Qtilde - didj))/π
Q2 = 1im*Qtilde*k2H0 + (-1im/r)*k1H1*(2*Qtilde - didj) - Q1*kmlog
M1 = (J02 - J01)/π
L1 = k1J1/π
M2 = 1im*(H01 - H02) - M1*kmlog
L2 = 1im*k1H1 - L1*kmlog
wro_ij = (dft[j,2]*rij[1] - dft[j,1]*rij[2])/r
wro_ji = -(dft[i,2]*rij[1] - dft[i,1]*rij[2])/r
cross_ij = -wro_ji*ndft[j]/ndft[i]
cross_ji = -wro_ij*ndft[i]/ndft[j]
#edited to remove division by wro which might be 0
A[i,j] = (0.25*ndft[j])*(R*M1 + (π/N)*M2) #dS
A[j,i] = A[i,j]*(ndft[i]/ndft[j]) #dS
A[i,j+2*N] = (0.25*wro_ij)*(R*L1 + (π/N)*L2) #dD
A[j,i+2*N] = (0.25*wro_ji)*(R*L1 + (π/N)*L2) #dD
A[i+2*N,j] = (-0.25*cross_ij/π)*(R*k1J1 + (π/N)*N2) #dN
A[j+2*N,i] = (-0.25*cross_ji/π)*(R*k1J1 + (π/N)*N2) #dN
A[i+2*N,j+2*N] = (R*(P1-Q1) + (π/N)*(P2-Q2))/(4*ndft[i]) #dT
A[j+2*N,i+2*N] = A[i+2*N,j+2*N]*(ndft[i]/ndft[j]) #dT
end
any(isnan.(A)) && error("SDNTpotentialsdiff: encountered NaN, check data and division by ndft.")
return A
end
function KM_weights(N)
#computes the weights necessary for Kussmaul-Martensen quadrature (evenly
#spaced).
#Input: N (integer>=1)
#Output: R,K (float vectors of length 2N)
arg1 = Float64[cos(m*j*π/N)/m for m=1:N-1, j=0:2*N-1]
R = vec((-2π/N)*sum(arg1,dims=1)) - (π/N^2)*Float64[cos(j*π) for j=0:2*N-1]
K = Float64[2*log(2*sin(0.5π*j/N)) for j = 0:2*N-1]
return (R,K)
end
"""
get_potentialPW(kout, kin, s::ShapeParams, θ_i) -> sigma_mu
Given a shape `s` with `2N` discretization nodes, outer and inner wavenumbers
`kout`,`kin`, and an incident plane-wave angle, returns the potential
densities vector `sigma_mu`. The first `2N` entries contain the single-layer
potential density (``\\sigma``), and the lower entries contain the double-layer
density (``\\mu``).
"""
function get_potentialPW(kout, kin, s, θ_i)
N = length(s.t) #N here is different...
A = SDNTpotentialsdiff(kout, kin, s.t, s.ft, s.dft)
LU = lu(A)
ndft = sqrt.(sum(abs2, s.dft, dims=2))
ui = exp.(1.0im*kout*(cos(θ_i)*s.ft[:,1] + sin(θ_i)*s.ft[:,2]))
rhs = -[ui;
(1.0im*kout*ui).*((cos(θ_i)*s.dft[:,2] - sin(θ_i)*s.dft[:,1])./ndft)]
sigma_mu = LU\rhs
end
"""
scatteredfield(sigma_mu, k, s::ShapeParams, p) -> u_s
Computes field scattered by the particle `s` with pre-computed potential
densities `sigma_mu` at points `p`. All points must either be inside `k = kin`
or outside `k = kout` the particle.
"""
scatteredfield(sigma_mu, k, s::ShapeParams, p) =
scatteredfield(sigma_mu, k, s.t, s.ft, s.dft, p)
"""
scatteredfield(sigma_mu, k, t, ft, dft, p) -> u_s
Same, but with the `ShapeParams` supplied directly. Useful for computing `u_s`
for rotated shapes.
"""
function scatteredfield(sigma_mu, k, t, ft, dft, p)
#calculates the scattered field of a shape with parametrization ft(t),...,dft(t)
#in space with wavenumber k at points p *off* the boundary. For field on the boundary,
#SDpotentials function must be used.
if size(p,2) == 1 #single point, rotate it
p = transpose(p)
end
N = length(t)
M = size(p,1)
r = zeros(Float64,2)
#loop is faster here:
SDout = Array{Complex{Float64}}(undef, M, 2*N)
for j = 1:N
ndft = hypot(dft[j,1],dft[j,2])
for i = 1:M
r[:] = [p[i,1] - ft[j,1];p[i,2] - ft[j,2]]
nr = hypot(r[1],r[2])
if nr < eps()
#TODO: use SDNTpotentialsdiff here
@warn("Encountered singularity in scatteredfield.")
SDout[i,j] = 0
SDout[i,j+N] = 0
continue
end
SDout[i,j] = (2*pi/N)*0.25im*besselh(0,1, k*nr)*ndft
SDout[i,j+N] = (2*pi/N)*0.25im*k*besselh(1,1, k*nr)*(dft[j,2]*r[1] - dft[j,1]*r[2])/nr
end
end
u_s = SDout*sigma_mu
end
function shapeMultipoleExpansion(k, t, ft, dft, P)
#unlike others (so far), this does *not* assume t_j=pi*j/N
N = div(length(t),2)
nz = vec(sqrt.(sum(abs2, ft, dims=2)))
θ = atan.(ft[:,2], ft[:,1])
ndz = vec(sqrt.(sum(abs2, dft, dims=2)))
AB = Array{Complex{Float64}}(undef, 2*P + 1, 4*N)
bessp = besselj.(-P-1,k*nz)
bess = similar(bessp)
for l = -P:0
bess[:] = besselj.(l,k*nz)
for j = 1:2*N
AB[l+P+1,j] = 0.25im*(π/N)*bess[j]*exp(-1.0im*l*θ[j])*ndz[j]
l != 0 && (AB[-l+P+1,j] = 0.25im*((-1.0)^l*π/N)*bess[j]*exp(1.0im*l*θ[j])*ndz[j])
wro = ft[j,1]*dft[j,2] - ft[j,2]*dft[j,1]
zdz = -1.0im*(ft[j,1]*dft[j,1] + ft[j,2]*dft[j,2])
b1 = (-l*bess[j]/nz[j])*(zdz + wro)
b1_ = (-l*bess[j]/nz[j])*(zdz - wro)
b2 = k*bessp[j]*wro
AB[l+P+1,j+2*N] = 0.25im*(π/N)*(exp(-1.0im*l*θ[j])/nz[j])*(b1 + b2)
l != 0 && (AB[-l+P+1,j+2*N] = 0.25im*((-1.0)^l*π/N)*(exp(1.0im*l*θ[j])/nz[j])*(-b1_ + b2))
end
copyto!(bessp,bess)
end
return AB
end
function solvePotential_forError(kin, kout, shape, ls_pos, ls_amp, θ_i)
#plane wave outside, line sources inside
N = length(shape.t) #N here is different...
A = SDNTpotentialsdiff(kout, kin, shape.t, shape.ft, shape.dft)
LU = lu(A)
ndft = sqrt.(sum(abs2, shape.dft, dims=2))
r = sqrt.((shape.ft[:,1] .- ls_pos[1,1]).^2 +
(shape.ft[:,2] .- ls_pos[1,2]).^2)
uls = (-ls_amp[1]*0.25im)*besselh.(0,kout*r)
duls = (ls_amp[1]*0.25im*kout)*besselh.(1,kout*r).*((shape.ft[:,1].-ls_pos[1,1]).*shape.dft[:,2]-(shape.ft[:,2].-ls_pos[1,2]).*shape.dft[:,1])./r./ndft
for i = 2:length(ls_amp)
r = sqrt.((shape.ft[:,1] - ls_pos[i,1]).^2 + (shape.ft[:,2] - ls_pos[i,2]).^2)
uls -= ls_amp[i]*0.25im*besselh.(0,kout*r)
duls -= -ls_amp[i]*0.25im*kout*besselh.(1,kout*r).*((shape.ft[:,1]-ls_pos[i,1]).*shape.dft[:,2]-(shape.ft[:,2]-ls_pos[i,2]).*shape.dft[:,1])./r./ndft
end
#outer plane wave
ui = exp.(1.0im*kin*(cos(θ_i)*shape.ft[:,1] + sin(θ_i)*shape.ft[:,2]))
dui = (1.0im*kin)*(ui.*(cos(θ_i)*shape.dft[:,2] - sin(θ_i)*shape.dft[:,1])./ndft)
rhs = -[ui+uls;dui+duls]
sigma_mu = LU\rhs
return sigma_mu
end
| [
37811,
201,
198,
220,
220,
220,
651,
62,
13059,
1843,
7,
74,
448,
11,
18967,
11,
350,
11,
264,
3712,
33383,
10044,
4105,
8,
4613,
264,
13495,
62,
30300,
201,
198,
201,
198,
15056,
257,
5485,
4600,
82,
63,
351,
4600,
17,
45,
63,
... | 1.700748 | 5,751 |
using LinearAlgebra
using OpenCL
const sum_kernel = "
__kernel void sum(__global float *a,
__global const float *b)
{
int gid = get_global_id(0);
a[gid] = a[gid] + b[gid];
}
"
a = zeros(Float32, 50_000)
b = ones(Float32, 50_000)
device, ctx, queue = cl.create_compute_context()
a_buff = cl.Buffer(Float32, ctx, (:rw, :copy), hostbuf=a)
b_buff = cl.Buffer(Float32, ctx, (:r, :copy), hostbuf=b)
p = cl.Program(ctx, source=sum_kernel) |> cl.build!
k = cl.Kernel(p, "sum")
queue(k, size(a), nothing, a_buff, b_buff)
r = cl.read(queue, a_buff)
if isapprox(r, fill(Float32(1), 50000))
@info "Success!"
else
@error "Norm should be 0.0f"
end | [
3500,
44800,
2348,
29230,
198,
3500,
4946,
5097,
198,
198,
9979,
2160,
62,
33885,
796,
366,
198,
220,
220,
11593,
33885,
7951,
2160,
7,
834,
20541,
12178,
1635,
64,
11,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220... | 2.129231 | 325 |
<reponame>grahamstark/ScottishTaxBenefitModel
module TheEqualiser
#
# This module automatically adjusts taxes (it and ni, optionally)
# so the net cost of benefit or other changes
# is close to zero.
#
# TODO needs a lot of work:
#
# - more options - so basic rate only etc;
# - use passed-in functions to equalise (like op_tax! below);
# - check results are in sensible bounds (e.g. >= 0 < 100 tax rates).
#
using Roots
using UUIDs
using Observables
using ScottishTaxBenefitModel
using .Definitions
using .Monitor
using .Results
using .Runner
using .RunSettings
using .STBOutput
using .STBParameters
using .Utils
@enum EqTargets eq_it eq_ni eq_it_ni
export EqTargets,eq_it,eq_ni,eq_it_ni
export equalise
#
# Roots only allows 1 parameter, I think, so:
#
mutable struct RunParameters{T<:AbstractFloat}
params :: TaxBenefitSystem{T}
settings :: Settings
base_cost :: T
target :: EqTargets
obs :: Observable
end
# TODO another possible approach is to pass in editing
# functions such as:
function op_tax!( sys :: TaxBenefitSystem{T}, r :: T ) where T <: AbstractFloat
sys.it.non_savings_rates .+= r
end
function run( x :: T, rparams :: RunParameters{T} ) where T <: AbstractFloat
nsr = deepcopy( rparams.params.it )
nsi = deepcopy( rparams.params.ni )
if rparams.target in [eq_it, eq_it_ni]
rparams.params.it.non_savings_rates .+= x
end
# TODO check sensible it rates
if rparams.target in [eq_ni, eq_it_ni]
rparams.params.ni.primary_class_1_rates .+= x
rparams.params.ni.class_4_rates .+= x
end
# TODO check sensible ni rates
results = do_one_run(rparams.settings, [rparams.params], rparams.obs )
# restore
rparams.params.it = nsr
rparams.params.ni = nsi
summary = summarise_frames(results, rparams.settings)
nc = summary.income_summary[1][1,:net_cost]
return round( nc - rparams.base_cost, digits=0 )
end
"""
Adjust the thing in `target` so that the net cost of the changes in `sys`
is close to `base_cost`
"""
function equalise(
target :: EqTargets,
sys :: TaxBenefitSystem{T},
settings :: Settings,
base_cost :: T,
observer :: Observable ) :: T where T<:AbstractFloat
zerorun = ZeroProblem( run, 0.0 ) # fixme guess at 0.0 ?
rparams = RunParameters( sys, settings, base_cost, target, observer )
incch = solve( zerorun, rparams )
#
# TODO test incch is sensible
return incch
end
end # module | [
27,
7856,
261,
480,
29,
70,
13220,
301,
668,
14,
19040,
680,
27017,
42166,
270,
17633,
198,
21412,
383,
36,
13255,
5847,
198,
2,
198,
2,
770,
8265,
6338,
46094,
5704,
357,
270,
290,
37628,
11,
42976,
8,
198,
2,
523,
262,
2010,
157... | 2.713341 | 907 |
<reponame>kailaix/NNFEM.jl<gh_stars>10-100
include("hyperelasticity.jl")
#
ts = ExplicitSolverTime(Δt, NT)
ubd, abd = compute_boundary_info(domain, globaldata, ts)
Fext = compute_external_force(domain, globaldata, ts)
d0 = zeros(2domain.nnodes)
v0 = zeros(2domain.nnodes)
a0 = zeros(2domain.nnodes)
mode = "consistent_tangent"
if length(ARGS)>=1
global mode = ARGS[1]
end
if mode=="linear"
global Dc, nn_law
Dc = Variable(rand(3,3))
Dc = spd(Dc)
function nn_law(strain)
strain_tensor = strain_voigt_to_tensor(strain)
stress = strain*Dc
stress
end
elseif mode=="consistent_tangent"
global Dc, nn_law
Dc = Variable(rand(3,3))
Dc = spd(Dc)
function nn_law(strain)
coef = ae(strain, [20,20,20,6])
coef = [coef constant(ones(size(coef,1), 1))]
H = consistent_tangent_matrix(coef, Dc)
stress = batch_matmul(H, strain)
stress
end
elseif mode=="nn"
global Dc, nn_law
# general neural network
function nn_law(ε)
ae(ε, [20,20,20,3])
end
elseif mode=="free_energy"
global Dc, nn_law
# free energy
function nn_law(ε)
φ = squeeze(ae(ε, [20,20,20,1]))
tf.gradients(φ, ε)[1]
end
else
error("$mode not valid")
end
d, v, a= ExplicitSolver(globaldata, domain, d0, v0, a0, Δt, NT, nn_law, Fext, ubd, abd; strain_type="finite")
# test = open("test.txt", "w")
sess = Session(); init(sess)
# run(sess, d)
idx = @. (0:n)*m + 1
idx = [idx; (@. idx + domain.nnodes)]
loss = sum((d[:, idx] - d_[:, idx])^2)
sess = Session(); init(sess)
@info run(sess, loss)
# error()
!isdir(mode) && mkdir(mode)
for i = 1:100
loss_ = BFGS!(sess, loss, 1000)
d0 = run(sess, d)
# visualize
close("all")
p = visualize_displacement(d0, domain)
saveanim(p, "$mode/$i.gif")
close("all")
plot(d_[:,1], "-", color="C1")
plot(d0[:,1], "--", color="C1")
plot(d_[:,1+domain.nnodes], color="C2")
plot(d0[:,1+domain.nnodes], "--", color="C2")
savefig("$mode/$i.png")
open("$mode/loss.txt", "a") do io
writedlm(io, loss_)
end
ADCME.save(sess, "$mode/$i.mat")
if length(loss_)<1000
break
end
end
| [
27,
7856,
261,
480,
29,
74,
39460,
844,
14,
6144,
37,
3620,
13,
20362,
27,
456,
62,
30783,
29,
940,
12,
3064,
198,
17256,
7203,
71,
2981,
2411,
3477,
414,
13,
20362,
4943,
198,
2,
220,
198,
912,
796,
11884,
50,
14375,
7575,
7,
1... | 2.115619 | 986 |
@testset "Decreasing2LP: $(fct_type), dimension $(dim), $(T)" for fct_type in ["vector of variables", "vector affine function"], dim in [2, 3], T in [Int, Float64]
mock = MOIU.MockOptimizer(MILPModel{T}())
model = COIB.Decreasing2LP{T}(mock)
if T == Int
@test MOI.supports_constraint(model, MOI.VariableIndex, MOI.Integer)
end
@test MOI.supports_constraint(
model,
MOI.ScalarAffineFunction{T},
MOI.EqualTo{T},
)
@test MOIB.supports_bridging_constraint(
model,
MOI.VectorAffineFunction{T},
CP.Decreasing,
)
if T == Int
x, _ = MOI.add_constrained_variables(model, [MOI.Integer() for _ in 1:dim])
elseif T == Float64
x = MOI.add_variables(model, dim)
end
fct = if fct_type == "vector of variables"
MOI.VectorOfVariables(x)
elseif fct_type == "vector affine function"
MOIU.vectorize(x)
else
@assert false
end
c = MOI.add_constraint(model, fct, CP.Decreasing(dim))
for i in 1:dim
@test MOI.is_valid(model, x[i])
end
@test MOI.is_valid(model, c)
bridge = MOIBC.bridges(model)[MOI.ConstraintIndex{MOI.VectorOfVariables, CP.Decreasing}(-1)]
@testset "Bridge properties" begin
@test MOIBC.concrete_bridge_type(typeof(bridge), MOI.VectorOfVariables, CP.Decreasing) == typeof(bridge)
@test MOIB.added_constrained_variable_types(typeof(bridge)) == Tuple{Type}[]
@test MOIB.added_constraint_types(typeof(bridge)) == [(MOI.ScalarAffineFunction{T}, MOI.GreaterThan{T})]
@test MOI.get(bridge, MOI.NumberOfVariables()) == 0
@test MOI.get(bridge, MOI.NumberOfConstraints{MOI.ScalarAffineFunction{T}, MOI.GreaterThan{T}}()) == dim - 1
@test Set(MOI.get(bridge, MOI.ListOfConstraintIndices{MOI.ScalarAffineFunction{T}, MOI.GreaterThan{T}}())) == Set(collect(values(bridge.cons)))
end
@testset "Set of constraints" begin
@test length(bridge.cons) == dim - 1
for i in 1:(dim - 1)
@test MOI.is_valid(model, bridge.cons[i])
f = MOI.get(model, MOI.ConstraintFunction(), bridge.cons[i])
@test length(f.terms) == 2
@test MOI.get(model, MOI.ConstraintSet(), bridge.cons[i]) == MOI.GreaterThan(zero(T))
t1 = f.terms[1]
@test t1.coefficient === one(T)
@test t1.variable == x[i]
t2 = f.terms[2]
@test t2.coefficient === -one(T)
@test t2.variable == x[i + 1]
end
end
end
| [
31,
9288,
2617,
366,
43198,
2313,
17,
19930,
25,
29568,
69,
310,
62,
4906,
828,
15793,
29568,
27740,
828,
29568,
51,
16725,
329,
277,
310,
62,
4906,
287,
14631,
31364,
286,
9633,
1600,
366,
31364,
1527,
500,
2163,
33116,
5391,
287,
68... | 2.111296 | 1,204 |
<gh_stars>10-100
using Statistics
using Distributions
using ProgressMeter
#=
References
----------
[1] <NAME>. (2001). "Global sensitivity indices for nonlinear
mathematical models and their Monte Carlo estimates." Mathematics
and Computers in Simulation, 55(1-3):271-280,
doi:10.1016/S0378-4754(00)00270-6.
[2] <NAME>. (2002). "Making best use of model evaluations to
compute sensitivity indices." Computer Physics Communications,
145(2):280-297, doi:10.1016/S0010-4655(02)00280-1.
[3] <NAME>., <NAME>, <NAME>, <NAME>, <NAME>, and
<NAME> (2010). "Variance based sensitivity analysis of model
output. Design and estimator for the total sensitivity index."
Computer Physics Communications, 181(2):259-270,
doi:10.1016/j.cpc.2009.09.018.
=#
"""
analyze(data::SobolData, model_output::AbstractArray{<:Number, S}; num_resamples::Union{Nothing, Int} = 1_000, conf_level::Union{Nothing, Number} = 0.95, progress_meter::Bool = true, N_override::Union{Nothing, Integer}=nothing) where S
Performs a Sobol Analysis on the `model_output` produced with the problem
defined by the information in `data` and returns the a dictionary of results
with the sensitivity indices and respective confidence intervals for each of the
parameters defined using the `num_resamples` and `conf_level` keyword args. If these
are Nothing than no confidence intervals will be calculated. The `progress_meter`
keyword argument indicates whether a progress meter will be displayed and defaults
to true. The `N_override` keyword argument allows users to override the `N` used in
a specific `analyze` call to analyze just a subset (useful for convergence graphs).
"""
function analyze(data::SobolData, model_output::AbstractArray{<:Number, S}; num_resamples::Union{Nothing, Int} = 1_000, conf_level::Union{Nothing, Number} = 0.95, progress_meter::Bool = true, N_override::Union{Nothing, Integer}=nothing) where S
# handle confidence interval flag
num_nothings = (num_resamples === nothing) + (conf_level === nothing)
if num_nothings == 1
error("Number of resamples is $num_resamples, while confidence level is $conf_level ... either none or both must be nothing")
elseif num_nothings == 2
conf_flag = false
else
conf_flag = true
end
# define constants
calc_second_order = data.calc_second_order
D = length(data.params) # number of uncertain parameters in problem
# deal with overriding N
if N_override === nothing
N = data.N # number of samples
else
N_override > data.N ? error("N_override ($N_override) cannot be greater than original N used in sampling ($(data.N))") : nothing
N = N_override # number of samples
# reduce the output to just what should be considered for this N
if data.calc_second_order
lastrow = N * ((2 * D) + 2)
else
lastrow = N * (D + 2)
end
model_output = model_output[1:lastrow]
end
# values for CI calculations
if conf_flag
r = rand(1:N, N, num_resamples)
Z = quantile(Normal(0.0, 1.0),1 - (1 - conf_level)/2) # calculate z* for CI
end
# normalize model output
model_output = (model_output .- mean(model_output)) ./ std(model_output)
# separate the model_output into results from matrices "A". "B" and "AB"
A, B, AB, BA = split_output(model_output, N, D, calc_second_order)
# preallocate arrays for indices
firstorder = Array{Float64}(undef, D)
totalorder = Array{Float64}(undef, D)
calc_second_order ? secondorder = fill!(Array{Union{Float64, Missing}}(undef, D, D), missing) : nothing
# preallocate arrays for confidence intervals
if conf_flag
firstorder_conf = Array{Float64}(undef, D)
totalorder_conf = Array{Float64}(undef, D)
calc_second_order ? secondorder_conf = fill!(Array{Union{Float64, Missing}}(undef, D, D), missing) : nothing
end
# set up progress meter
counter = 0
progress_meter ? p = Progress(D, counter, "Calculating indices for $D parameters ...") : nothing
for i in 1:D
# increment progress meter
counter += 1
progress_meter ? ProgressMeter.update!(p, counter) : nothing
# first order and total order indices
firstorder[i] = first_order(A, AB[:, i], B)[1] # array to scalar with [1]
totalorder[i] = total_order(A, AB[:, i], B)[1] # array to scalar with [1]
# first order and total order indice confidence intervals
conf_flag ? firstorder_conf[i] = Z * std(first_order(A[r], AB[r, i], B[r])) : nothing
conf_flag ? totalorder_conf[i] = Z * std(total_order(A[r], AB[r, i], B[r])) : nothing
# second order indices
if calc_second_order
for j in (i+1):D
secondorder[i, j] = second_order(A, AB[:, i], AB[:, j], BA[:, i], B)[1] # array to scalar with [1]
conf_flag ? secondorder_conf[i,j] = Z * std(skipmissing(second_order(A[r], AB[r, i], AB[r, j], BA[r, i], B[r]))) : nothing
end
end
end
if calc_second_order
if conf_flag
results = Dict(
:firstorder => firstorder,
:firstorder_conf => firstorder_conf,
:totalorder => totalorder,
:totalorder_conf => totalorder_conf,
:secondorder => secondorder,
:secondorder_conf => secondorder_conf
)
else
results = Dict(
:firstorder => firstorder,
:totalorder => totalorder,
:secondorder => secondorder,
)
end
else
if conf_flag
results = Dict(
:firstorder => firstorder,
:firstorder_conf => firstorder_conf,
:totalorder => totalorder,
:totalorder_conf => totalorder_conf
)
else
results = Dict(
:firstorder => firstorder,
:totalorder => totalorder,
)
end
end
return results
end
"""
first_order(A::AbstractArray{<:Number, N}, AB::AbstractArray{<:Number, N}, B::AbstractArray{<:Number, N})
Calculate the first order sensitivity indices for model outputs given model outputs
separated out into `A`, `AB`, and `A` and normalize by the variance of `[A B]`. [Saltelli et al.,
2010 Table 2 eq (b)]
"""
function first_order(A::AbstractArray{<:Number, N}, AB::AbstractArray{<:Number, N}, B::AbstractArray{<:Number, N}) where N
return (mean(B .* (AB .- A), dims = 1) ./ var(vcat(A, B), dims = 1, corrected = false))
end
"""
second_order(A::AbstractArray{<:Number, N}, ABi::AbstractArray{<:Number, N}, ABj::AbstractArray{<:Number, N}, BAi::AbstractArray{<:Number, N}, B::AbstractArray{<:Number, N}) where N
Calculate the second order sensitivity index between two parameters for model outputs
given model outputs separated out into `A`, `AB`, `BA`, and `B` and normalize by
the variance of `[A B]`. [Saltelli et al. , 2002]
"""
function second_order(A::AbstractArray{<:Number, N}, ABi::AbstractArray{<:Number, N}, ABj::AbstractArray{<:Number, N}, BAi::AbstractArray{<:Number, N}, B::AbstractArray{<:Number, N}) where N
Vj = (mean(BAi .* ABj .- A .* B, dims = 1) ./ var(vcat(A, B), dims = 1, corrected = false))
Si = first_order(A, ABi, B)
Sj = first_order(A, ABj, B)
return Vj .- Si .- Sj
end
"""
total_order(A::AbstractArray{<:Number, N}, AB::AbstractArray{<:Number, N}, B::AbstractArray{<:Number, N})
Calculate the total order sensitivity indices for model outputs given model outputs
separated out into `A`, `AB`, and `A` and normalize by the variance of `[A B]`. [Saltelli et al.,
2010 Table 2 eq (f)].
"""
function total_order(A::AbstractArray{<:Number, N}, AB::AbstractArray{<:Number, N}, B::AbstractArray{<:Number, N}) where N
return (0.5 * mean((A .- AB).^2, dims = 1) ./ var(vcat(A, B), dims = 1, corrected = false))
end
"""
split_output(model_output::AbstractArray{<:Number, S}, N, D, calc_second_order)
Separate the `model_outputs` into matrices "A", "B", "AB", and "BA" for calculation
of sensitvity indices and return those four matrices. If `calc_second_order` is
`False`, `BA` will be `nothing`.
"""
function split_output(model_output::AbstractArray{<:Number, S}, N, D, calc_second_order::Bool) where S
if calc_second_order
stepsize = 2 * D + 2
else
stepsize = D + 2
end
A = model_output[1:stepsize:end]
B = model_output[stepsize:stepsize:end]
#preallocate
AB = Array{Float64}(undef, N, D)
if calc_second_order
BA = Array{Float64}(undef, N, D)
else
BA = nothing
end
for i in 1:D
AB[:, i] = model_output[i+1:stepsize:end, :]
if calc_second_order
BA[:, i] = model_output[i + D + 1:stepsize:end, :]
end
end
return A, B, AB, BA
end
| [
27,
456,
62,
30783,
29,
940,
12,
3064,
198,
3500,
14370,
198,
3500,
46567,
507,
198,
3500,
18387,
44,
2357,
198,
198,
2,
28,
198,
19927,
198,
35937,
198,
220,
220,
220,
685,
16,
60,
1279,
20608,
28401,
357,
14585,
737,
220,
366,
2... | 2.459131 | 3,707 |
<filename>test/utils_test.jl<gh_stars>10-100
# Unit tests for PartialLeastSquaresRegressor's input-validation helpers.
@testset "Auxiliary Functions Test" begin

    # check_constant_cols must throw on any constant column (matrix or vector)
    # and return true when every column has variation.
    @testset "check constant columns" begin
        @test_throws Exception PartialLeastSquaresRegressor.check_constant_cols([1.0 1;1 2;1 3])
        @test_throws Exception PartialLeastSquaresRegressor.check_constant_cols([1.0;1;1][:,:])
        @test_throws Exception PartialLeastSquaresRegressor.check_constant_cols([1.0 2 3])
        @test_throws Exception PartialLeastSquaresRegressor.check_constant_cols([1.0; 1; 1][:,:])

        @test PartialLeastSquaresRegressor.check_constant_cols([1.0 1;2 2;3 3])
        @test PartialLeastSquaresRegressor.check_constant_cols([1.0;2;3][:,:])
    end

    # check_params: (n_factors, n_columns, algorithm-name); negative factor
    # counts and unknown algorithm names must be rejected.
    @testset "checkparams" begin
        #@test_logs PLSRegressor.check_params(2,1,"linear")
        @test_throws Exception PartialLeastSquaresRegressor.check_params(-1,2,"linear")
        @test_throws Exception PartialLeastSquaresRegressor.check_params(1,2,"x")
        @test PartialLeastSquaresRegressor.check_params(1,2,"linear")
    end

    # check_data: data matrix must be non-empty and consistent with the
    # declared number of features.
    @testset "checkdata" begin
        @test_throws Exception PartialLeastSquaresRegressor.check_data(zeros(0,0), 0)
        @test_throws Exception PartialLeastSquaresRegressor.check_data(zeros(1,1), 10)
        @test PartialLeastSquaresRegressor.check_data(zeros(1,1), 1)
    end

end
| [
27,
34345,
29,
9288,
14,
26791,
62,
9288,
13,
20362,
27,
456,
62,
30783,
29,
940,
12,
3064,
198,
31,
9288,
2617,
366,
32,
2821,
28129,
40480,
6208,
1,
2221,
198,
220,
220,
220,
2488,
9288,
2617,
366,
9122,
6937,
15180,
1,
2221,
19... | 2.621795 | 468 |
<filename>src/AdjustQuasiGLM.jl
"""
AdjustQuasiGLM(model, ϕ; level)
Estimates dispersion parameter, adjusts original GLM to reflect the dispersion and returns results in a pretty DataFrame.
Usage:
```julia-repl
AdjustQuasiGLM(model, ϕ; level)
```
Arguments:
- `model` : The `GLM` model.
- `data` : The `DataFrame` containing data that was used as input to the model.
- `level` : The desired degree of confidence.
"""
function AdjustQuasiGLM(model::StatsModels.TableRegressionModel, data::DataFrame; level::Real=0.95)
# Calculate Pearson residuals
resids = PearsonResiduals(model, data)
# Estimate dispersion parameter ϕ and take √ to convert to multiplier
ϕ = √EstimateDispersionParameter(resids, model)
# Correct standard errors and calculate updated test statistics, p-values, and confidence intervals
CorrectedOutputs = coefarray(model, ϕ; level)
levstr = isinteger(level * 100) ? string(Integer(level * 100)) : string(level * 100)
header = (["Parameter", "Estimate", "Std. Error", "t value", "Pr(>|t|)", "Lower $levstr%", "Upper $levstr%"])
#--------------------------------------------
# Organise results in a neat coeftable format
#--------------------------------------------
# Table formatting
ctf = TextFormat(
up_right_corner = ' ',
up_left_corner = ' ',
bottom_left_corner = ' ',
bottom_right_corner = ' ',
up_intersection = '─',
left_intersection = ' ',
right_intersection = ' ',
middle_intersection = '─',
bottom_intersection = '─',
column = ' ',
hlines = [ :begin, :header, :end]
)
# Render table
println("\nCoefficients:")
CorrectedOutputsPretty = PrettyTables.pretty_table(CorrectedOutputs; header = header, tf = ctf)
# Return results in a DataFrame for further use
CorrectedOutputs = DataFrame(CorrectedOutputs, :auto)
CorrectedOutputs = rename!(CorrectedOutputs, [:x1, :x2, :x3, :x4, :x5, :x6, :x7] .=> [Symbol(header[1]), Symbol(header[2]), Symbol(header[3]), Symbol(header[4]), Symbol(header[5]), Symbol(header[6]), Symbol(header[7])])
# Recode column types from `Any` to `String` for parameter names and `Float64` for values columns
for i in 2:size(header, 1)
CorrectedOutputs[!, i] = convert(Array{Float64, 1}, CorrectedOutputs[!, i])
end
CorrectedOutputs[!, 1] = convert(Array{String, 1}, CorrectedOutputs[!, 1])
return CorrectedOutputs
end
| [
27,
34345,
29,
10677,
14,
39668,
4507,
17053,
8763,
44,
13,
20362,
198,
37811,
198,
220,
220,
220,
20292,
4507,
17053,
8763,
44,
7,
19849,
11,
18074,
243,
26,
1241,
8,
198,
22362,
26748,
4596,
6900,
11507,
11,
46094,
2656,
10188,
44,
... | 2.867371 | 852 |
# A snowflake id: a 64-bit integer packing a timestamp, worker id, process id,
# and per-process increment (presumably Discord's snowflake format, given the
# 2015-01-01 epoch below — confirm against the consuming API).
struct Snowflake
    n::UInt64
end

# Parse a snowflake from its decimal string form.
Snowflake(s::AbstractString) = Snowflake(parse(UInt64, s))

# Display as a plain base-10 integer.
Base.show(io::IO, s::Snowflake) = print(io, string(s.n; base=10))
# Allow comparison with plain integers in either argument order.
Base.:(==)(s::Snowflake, n::Integer) = s.n == n
Base.:(==)(n::Integer, s::Snowflake) = n == s.n
# (De)serialize as a string — keeps 64-bit values JSON-safe.
StructTypes.StructType(::Type{Snowflake}) = StructTypes.StringType()
HTTP.escapeuri(s::Snowflake) = escapeuri(s.n)

# Bits 22-63: milliseconds since the epoch; 1420070400000 is 2015-01-01T00:00:00 UTC in Unix ms.
snowflake2datetime(s::Snowflake) = unix2datetime(((s.n >> 22) + 1420070400000) / 1000)
# Bits 17-21: worker id (mask 0x3e0000).
worker_id(s::Snowflake) = (s.n & 0x3e0000) >> 17
# Bits 12-16: process id (mask 0x1f000).
process_id(s::Snowflake) = (s.n & 0x1f000) >> 12
# Bits 0-11: per-process increment counter.
increment(s::Snowflake) = s.n & 0xfff
| [
7249,
7967,
47597,
198,
220,
220,
220,
299,
3712,
52,
5317,
2414,
198,
437,
198,
198,
28974,
47597,
7,
82,
3712,
23839,
10100,
8,
796,
7967,
47597,
7,
29572,
7,
52,
5317,
2414,
11,
264,
4008,
198,
14881,
13,
12860,
7,
952,
3712,
9... | 2.384 | 250 |
# Generate Gauss–Legendre tensor-product quadrature rules for the reference
# square and cube at each degree, by taking products of the 1-D (RefLine) rule.
# Weights and points are spliced into the generated body as compile-time
# constants, returned as StaticArrays SVectors.
for quad_degree = 1:20 # Exceeding degree 20 seems unnecessary at this time
    @eval begin
        # Square: 2-D tensor product of the 1-D rule (degree^2 points)
        @generated function gauss_quadrature(form::Val{:legendre},
                                             shape::RefSquare,
                                             degree::Val{$quad_degree},
                                             type::Type{T}) where {T}
            # Is there a way to use form and degree here instead?
            line_weights, line_points = gauss_quadrature(Val(:legendre),
                                                         RefLine(),
                                                         Val($quad_degree),
                                                         T)
            weights = Expr[]
            points = Expr[]
            for i = 1:$quad_degree
                for j = 1:$quad_degree
                    # product weight and (x_i, x_j) point for the 2-D rule
                    push!(weights, :($(line_weights[i]) * $(line_weights[j])))
                    push!(points, :(tuple($(line_points[i][1]), $(line_points[j][1]))))
                end
            end
            return quote
                return (SVector{$(length(line_weights)^2), $T}(tuple($(weights...))),
                        SVector{$(length(line_weights)^2), NTuple{2,$T}}(tuple($(points...))))
            end
        end
        # Cube: 3-D tensor product of the 1-D rule (degree^3 points)
        @generated function gauss_quadrature(form::Val{:legendre},
                                             shape::RefCube,
                                             degree::Val{$quad_degree},
                                             type::Type{T}) where {T}
            # Is there a way to use form and degree here instead?
            line_weights, line_points = gauss_quadrature(Val(:legendre),
                                                         RefLine(),
                                                         Val($quad_degree),
                                                         T)
            weights = Expr[]
            points = Expr[]
            for i = 1:$quad_degree
                for j = 1:$quad_degree
                    for k = 1:$quad_degree
                        push!(weights, :($(line_weights[i]) *
                                         $(line_weights[j]) *
                                         $(line_weights[k])
                                        ))
                        push!(points, :(tuple($(line_points[i][1]),
                                              $(line_points[j][1]),
                                              $(line_points[k][1]),
                                             )))
                    end
                end
            end
            return quote
                return (SVector{$(length(line_weights)^3), $T}(tuple($(weights...))),
                        SVector{$(length(line_weights)^3), NTuple{3,$T}}(tuple($(points...))))
            end
        end
    end
end
| [
1640,
15094,
62,
16863,
796,
352,
25,
1238,
1303,
1475,
2707,
278,
4922,
1160,
2331,
13114,
379,
428,
640,
198,
220,
220,
220,
2488,
18206,
2221,
198,
220,
220,
220,
220,
220,
220,
220,
1303,
9276,
198,
220,
220,
220,
220,
220,
220,... | 1.522114 | 1,854 |
<reponame>IgorKohan/NormalHermiteSplines.jl<filename>src/Interpolate.jl<gh_stars>1-10
# Build an interpolating NormalSpline skeleton from `nodes` (one column per node):
# rescale nodes to the unit hypercube, pick/scale the kernel parameter ε if not
# given, assemble and factorize the Gram matrix. Values are attached later by
# `_construct`.
function _prepare(nodes::Matrix{T},
                  kernel::RK
                 ) where {T <: AbstractFloat, RK <: ReproducingKernel_0}
    n = size(nodes, 1)     # space dimension
    n_1 = size(nodes, 2)   # number of interpolation nodes

    # per-dimension minimum and the largest coordinate extent (compression factor)
    min_bound = Vector{T}(undef, n)
    compression::T = 0
    @inbounds for i = 1:n
        min_bound[i] = nodes[i,1]
        maxx::T = nodes[i,1]
        for j = 2:n_1
            min_bound[i] = min(min_bound[i], nodes[i,j])
            maxx = max(maxx, nodes[i,j])
        end
        compression = max(compression, maxx - min_bound[i])
    end

    # degenerate data: all nodes (numerically) coincide in every dimension
    if compression <= eps(T(1.0))
        error("Cannot prepare the spline: `nodes` data are not correct.")
    end

    # rescale nodes into [0, 1]^n
    t_nodes = similar(nodes)
    @inbounds for j = 1:n_1
        for i = 1:n
            t_nodes[i,j] = (nodes[i,j] - min_bound[i]) / compression
        end
    end

    # ε == 0 means "auto": estimate from node spacing, then scale up for the
    # smoother kernels (factors chosen per kernel order)
    if T(kernel.ε) == T(0.0)
        ε = _estimate_ε(t_nodes)
        if isa(kernel, RK_H0)
            kernel = RK_H0(ε)
        elseif isa(kernel, RK_H1)
            ε *= T(1.5)
            kernel = RK_H1(ε)
        elseif isa(kernel, RK_H2)
            ε *= T(2.0)
            kernel = RK_H2(ε)
        else
            error("incorrect `kernel` type.")
        end
    end

    # Gram matrix of the reproducing kernel at the scaled nodes; Cholesky
    # factorization fails iff the matrix is (numerically) singular
    gram = _gram(t_nodes, kernel)
    chol = nothing
    try
        chol = cholesky(gram)
    catch
        error("Cannot prepare the spline: Gram matrix is degenerate.")
    end

    cond = _estimate_cond(gram, chol)

    # value/derivative slots are `nothing` until `_construct` fills them
    spline = NormalSpline(kernel,
                          compression,
                          t_nodes,
                          nothing,
                          nothing,
                          nothing,
                          nothing,
                          min_bound,
                          gram,
                          chol,
                          nothing,
                          cond
                         )
    return spline
end

# Attach node `values` to a prepared spline and solve for the coefficient
# vector `mu` via the cached Cholesky factorization. `cleanup` drops the Gram
# matrix and its factorization to save memory (no further constructs possible).
function _construct(spline::NormalSpline{T, RK},
                    values::Vector{T},
                    cleanup::Bool = false
                   ) where {T <: AbstractFloat, RK <: ReproducingKernel_0}
    if(length(values) != size(spline._nodes, 2))
        error("Number of data values does not correspond to the number of nodes.")
    end
    if isnothing(spline._chol)
        error("Gram matrix was not factorized.")
    end

    # solve Gram * mu = values in place
    mu = Vector{T}(undef, size(spline._gram, 1))
    ldiv!(mu, spline._chol, values)

    spline = NormalSpline(spline._kernel,
                          spline._compression,
                          spline._nodes,
                          values,
                          nothing,
                          nothing,
                          nothing,
                          spline._min_bound,
                          cleanup ? nothing : spline._gram,
                          cleanup ? nothing : spline._chol,
                          mu,
                          spline._cond
                         )
    return spline
end
###################
# Hermite variant of `_prepare`: in addition to function-value `nodes`, takes
# derivative nodes `d_nodes` and the corresponding derivative directions `es`
# (one column per derivative node). Directions are normalized; both node sets
# are rescaled by a common compression factor.
function _prepare(nodes::Matrix{T},
                  d_nodes::Matrix{T},
                  es::Matrix{T},
                  kernel::RK
                 ) where {T <: AbstractFloat, RK <: ReproducingKernel_1}
    n = size(nodes, 1)       # space dimension
    n_1 = size(nodes, 2)     # number of value nodes
    n_2 = size(d_nodes, 2)   # number of derivative nodes

    if(size(es, 2) != n_2)
        error("Number of derivative directions does not correspond to the number of derivative nodes.")
    end

    # normalize each direction to unit length; a zero column raises a division
    # error which is converted into a descriptive message
    t_es = similar(es)
    try
        @inbounds for i = 1:n_2
            t_es[:,i] = es[:,i] ./ norm(es[:,i])
        end
    catch
        error("Cannot normalize derivative direction: zero direction vector.")
    end

    # per-dimension minimum / extent over BOTH node sets
    min_bound = Vector{T}(undef, n)
    compression::T = 0
    @inbounds for i = 1:n
        min_bound[i] = nodes[i,1]
        maxx::T = nodes[i,1]
        for j = 2:n_1
            min_bound[i] = min(min_bound[i], nodes[i,j])
            maxx = max(maxx, nodes[i,j])
        end
        for j = 1:n_2
            min_bound[i] = min(min_bound[i], d_nodes[i,j])
            maxx = max(maxx, d_nodes[i,j])
        end
        compression = max(compression, maxx - min_bound[i])
    end

    if compression <= eps(T(1.0))
        error("Cannot prepare the spline: `nodes` data are not correct.")
    end

    # rescale both node sets into [0, 1]^n
    t_nodes = similar(nodes)
    t_d_nodes = similar(d_nodes)
    @inbounds for j = 1:n_1
        for i = 1:n
            t_nodes[i,j] = (nodes[i,j] - min_bound[i]) / compression
        end
    end
    @inbounds for j = 1:n_2
        for i = 1:n
            t_d_nodes[i,j] = (d_nodes[i,j] - min_bound[i]) / compression
        end
    end

    # auto-select ε when not supplied (RK_H0 has no derivatives, hence excluded)
    if T(kernel.ε) == T(0.0)
        ε = _estimate_ε(t_nodes, t_d_nodes)
        if isa(kernel, RK_H1)
            ε *= T(2.0)
            kernel = RK_H1(ε)
        elseif isa(kernel, RK_H2)
            ε *= T(2.5)
            kernel = RK_H2(ε)
        else
            error("incorrect `kernel` type.")
        end
    end

    # extended Gram matrix including derivative functionals
    gram = _gram(t_nodes, t_d_nodes, t_es, kernel)
    chol = nothing
    try
        chol = cholesky(gram)
    catch
        error("Cannot prepare the spline: Gram matrix is degenerate.")
    end

    cond = _estimate_cond(gram, chol)

    spline = NormalSpline(kernel,
                          compression,
                          t_nodes,
                          nothing,
                          t_d_nodes,
                          t_es,
                          nothing,
                          min_bound,
                          gram,
                          chol,
                          nothing,
                          cond
                         )
    return spline
end

# Hermite variant of `_construct`: solves for coefficients given both function
# `values` and directional-derivative `d_values`. Derivative values are scaled
# by the compression factor to match the rescaled coordinate system.
function _construct(spline::NormalSpline{T, RK},
                    values::Vector{T},
                    d_values::Vector{T},
                    cleanup::Bool = false
                   ) where {T <: AbstractFloat, RK <: ReproducingKernel_0}
    if(length(values) != size(spline._nodes, 2))
        error("Number of data values does not correspond to the number of nodes.")
    end
    if(length(d_values) != size(spline._d_nodes, 2))
        error("Number of derivative values does not correspond to the number of derivative nodes.")
    end
    if isnothing(spline._chol)
        error("Gram matrix was not factorized.")
    end

    # solve for the stacked coefficient vector [values; scaled d_values]
    mu = Vector{T}(undef, size(spline._gram, 1))
    ldiv!(mu, spline._chol, [values; spline._compression .* d_values])

    spline = NormalSpline(spline._kernel,
                          spline._compression,
                          spline._nodes,
                          values,
                          spline._d_nodes,
                          spline._es,
                          d_values,
                          spline._min_bound,
                          cleanup ? nothing : spline._gram,
                          cleanup ? nothing : spline._chol,
                          mu,
                          spline._cond
                         )
    return spline
end
# Evaluate the spline at each column of `points`. Optionally splits the work
# across 4 threads when `do_parallel` is set, the workload is large enough
# (m >= 1000), and at least 4 threads are available.
function _evaluate(spline::NormalSpline{T, RK},
                   points::Matrix{T},
                   do_parallel::Bool = false
                  ) where {T <: AbstractFloat, RK <: ReproducingKernel_0}
    if isnothing(spline)
        error("Spline was not prepared.")
    end
    if isnothing(spline._mu)
        error("Spline coefficients were not calculated.")
    end
    if size(points, 1) != size(spline._nodes, 1)
        if size(points, 1) == 1 && size(points, 2) > 1
            error("Incorrect first dimension of the `points` parameter (use 'evaluate_one' function for evaluating the spline at one point).")
        else
            error("Incorrect first dimension of the `points` parameter (the spline was built in the space of different dimension).")
        end
    end

    n = size(spline._nodes, 1)
    n_1 = size(spline._nodes, 2)
    m = size(points, 2)

    # rescale evaluation points into the spline's normalized coordinates
    pts = similar(points)
    @inbounds for j = 1:m
        for i = 1:n
            pts[i,j] = (points[i,j] - spline._min_bound[i]) / spline._compression
        end
    end

    spline_values = Vector{T}(undef, m)
    iend1 = iend2 = iend3 = 0
    istart2 = istart3 = istart4 = 0
    # coefficients split into value part (first n_1) and derivative part (rest)
    mu = spline._mu[1:n_1]
    d_mu = spline._mu[(n_1 + 1):end]

    if do_parallel && m >= 1000 && Threads.nthreads() >= 4
        # partition 1:m into four contiguous chunks; the last chunk absorbs
        # the remainder (and may be empty if m divides evenly)
        step = m ÷ 4
        iend1 = 1 + step
        istart2 = iend1 + 1
        iend2 = istart2 + step
        istart3 = iend2 + 1
        iend3 = (istart3 + step) < m ? (istart3 + step) : m
        istart4 = iend3 + 1
        @inbounds Threads.@threads for t = 1:4
            if t == 1
                _do_work(1, iend1, pts, spline._nodes, mu, spline._kernel, spline_values)
                if !isnothing(spline._d_nodes)
                    _do_work_d(1, iend1, pts, spline._d_nodes, spline._es, d_mu, spline._kernel, spline_values)
                end
            elseif t == 2
                _do_work(istart2, iend2, pts, spline._nodes, mu, spline._kernel, spline_values)
                if !isnothing(spline._d_nodes)
                    _do_work_d(istart2, iend2, pts, spline._d_nodes, spline._es, d_mu, spline._kernel, spline_values)
                end
            elseif t == 3
                _do_work(istart3, iend3, pts, spline._nodes, mu, spline._kernel, spline_values)
                if !isnothing(spline._d_nodes)
                    _do_work_d(istart3, iend3, pts, spline._d_nodes, spline._es, d_mu, spline._kernel, spline_values)
                end
            elseif t == 4
                if istart4 <= m
                    _do_work(istart4, m, pts, spline._nodes, mu, spline._kernel, spline_values)
                    if !isnothing(spline._d_nodes)
                        if istart4 <= m
                            _do_work_d(istart4, m, pts, spline._d_nodes, spline._es, d_mu, spline._kernel, spline_values)
                        end
                    end
                end
            end
        end
    else
        # serial path: value contribution, then derivative contribution
        _do_work(1, m, pts, spline._nodes, mu, spline._kernel, spline_values)
        if !isnothing(spline._d_nodes)
            _do_work_d(1, m, pts, spline._d_nodes, spline._es, d_mu, spline._kernel, spline_values)
        end
    end

    return spline_values
end

# Value-part kernel sum: for each point p in istart:iend, writes
# spline_values[p] = Σ_i mu[i] * rk(point_p, node_i).
function _do_work(istart::Int,
                  iend::Int,
                  points::Matrix{T},
                  nodes::Matrix{T},
                  mu::Vector{T},
                  kernel::RK,
                  spline_values::Vector{T}
                 ) where {T <: AbstractFloat, RK <: ReproducingKernel_0}
    n_1 = size(nodes, 2)
    h_values = Vector{T}(undef, n_1)
    @inbounds for p = istart:iend
        for i = 1:n_1
            h_values[i] = _rk(kernel, points[:,p], nodes[:,i])
        end
        spline_values[p] = sum(mu .* h_values)
    end
end

# Derivative-part kernel sum: ADDS to spline_values[p] the contribution
# Σ_i d_mu[i] * ∂rk/∂e(point_p, d_node_i, direction_i). Must run after
# `_do_work` has initialized spline_values for the same range.
function _do_work_d(istart::Int,
                    iend::Int,
                    pts::Matrix{T},
                    d_nodes::Matrix{T},
                    es::Matrix{T},
                    d_mu::Vector{T},
                    kernel::RK,
                    spline_values::Vector{T}
                   ) where {T <: AbstractFloat, RK <: ReproducingKernel_1}
    n_2 = size(d_nodes, 2)
    d_h_values = Vector{T}(undef, n_2)
    @inbounds for p = istart:iend
        for i = 1:n_2
            d_h_values[i] = _∂rk_∂e(kernel, pts[:,p], d_nodes[:,i], es[:,i])
        end
        spline_values[p] += sum(d_mu .* d_h_values)
    end
end
# Evaluate the spline's gradient at a single `point`. Works in the normalized
# coordinate system and divides by the compression factor at the end (chain
# rule for the rescaling).
function _evaluate_gradient(spline::NormalSpline{T, RK},
                            point::Vector{T}
                           ) where {T <: AbstractFloat, RK <: ReproducingKernel_0}
    if isnothing(spline._mu)
        error("Spline coefficients were not calculated.")
    end

    n = size(spline._nodes, 1)
    n_1 = size(spline._nodes, 2)

    # rescale the evaluation point (copy — `point` is left untouched)
    pt = Vector{T}(point)
    @inbounds for i = 1:n
        pt[i] = (point[i] - spline._min_bound[i]) / spline._compression
    end

    d_h_values = Vector{T}(undef, n_1)
    grad = Vector{T}(undef, n)
    mu = spline._mu[1:n_1]

    # value-node contribution: grad_k = Σ_i mu[i] * ∂rk/∂η_k(pt, node_i)
    @inbounds for k = 1:n
        for i = 1:n_1
            d_h_values[i] = _∂rk_∂η_k(spline._kernel, pt, spline._nodes[:,i], k)
        end
        grad[k] = sum(mu .* d_h_values)
    end

    # derivative-node (Hermite) contribution, if present: sum of second
    # kernel derivatives projected onto each stored direction vector
    if !isnothing(spline._d_nodes)
        n_2 = size(spline._d_nodes, 2)
        d_h_values = Vector{T}(undef, n_2)
        d_mu = spline._mu[n_1+1:end]
        @inbounds for k = 1:n
            for i = 1:n_2
                d_h_values[i] = T(0.0)
                for l = 1:n
                    d_h_values[i] += (_∂²rk_∂η_r_∂ξ_k(spline._kernel, pt, spline._d_nodes[:,i], k, l) * spline._es[l,i])
                end
            end
            grad[k] += sum(d_mu .* d_h_values)
        end
    end

    # undo the coordinate rescaling (d/dx = (1/compression) d/dt)
    @inbounds for k = 1:n
        grad[k] /= spline._compression
    end

    return grad
end
| [
27,
7856,
261,
480,
29,
40,
7053,
42,
22436,
14,
26447,
48523,
578,
26568,
1127,
13,
20362,
27,
34345,
29,
10677,
14,
9492,
16104,
378,
13,
20362,
27,
456,
62,
30783,
29,
16,
12,
940,
198,
8818,
4808,
46012,
533,
7,
77,
4147,
3712... | 1.692278 | 7,770 |
<filename>src/util.jl<gh_stars>10-100
Mcdf(f,fmin,fmax) = (1.0./f - 1.0/fmax) ./ (1.0/fmin - 1.0/fmax)
# Infer the selection (fitness) coefficient of a subclone at frequency `f`,
# given growth rate `λ`, emergence time `t1` and end time `tend`:
#     s = (λ t1 + log(f / (1 - f))) / (λ (tend - t1))
function selection(λ, f, tend, t1)
    numerator = λ .* t1 + log.(f ./ (1 .- f))
    denominator = λ .* (tend - t1)
    return numerator ./ denominator
end
# Selection coefficients for two subclones. If the frequencies sum to < 1 the
# clones are treated as independent (both measured against the background
# 1 - f1 - f2); otherwise clone 2 is nested inside clone 1.
function selection2clone(λ, f1, f2, tend, t1, t2)
    sel1 = zeros(Float64, length(f1))
    sel2 = zeros(Float64, length(f1))

    for i in eachindex(f1)
        if (f2[i] + f1[i]) < 1.0
            # independent clones: background is everything outside both clones
            sel1[i] = (λ .* t1[i] + log.(f1[i] ./ (1 - f1[i] - f2[i]))) ./ (λ .* (tend[i] - t1[i]))
            sel2[i] = (λ .* t2[i] + log.(f2[i] ./ (1 - f1[i] - f2[i]))) ./ (λ .* (tend[i] - t2[i]))
        else
            # nested clones: clone 2 sits inside clone 1
            sel1[i] = (λ .* t1[i] + log.((f1[i] - f2[i]) ./ (1 - f1[i]))) ./ (λ .* (tend[i] - t1[i]))
            sel2[i] = (λ .* t2[i] + log.(f2[i] ./ (1 - f1[i]))) ./ (λ .* (tend[i] - t2[i]))
        end
    end

    return sel1, sel2
end
# Assemble the posterior DataFrame for the one-subclone model from ABC results.
# Derived quantities: emergence time t1 (from subclone mutation counts and
# mutation rate), end time tend (from final population size Nmax), and the
# selection coefficient s.
function collectoutput1clone(abcres; Nmax = 10^10)

    # per-particle simulation summaries stored in `other`
    # (indices 2/3/4 are subclone mutations / divisions / frequency — per this
    # package's particle layout)
    scmuts = map(x -> x.other[2], abcres.particles)
    scdivs = map(x -> x.other[3], abcres.particles)
    scfreq = map(x -> x.other[4], abcres.particles)
    mu = abcres.parameters[:, 1]
    weights = abcres.weights

    # eulergamma/log(2) is stochastic correction see Durrett Branching Process Models of Cancer, needed for selection calculation
    t1 = ((shuffle(scmuts) ./ mu) / (2 * log.(2))) .- MathConstants.eulergamma/log.(2) #shuffle so that t1 posterior is not correlated
    # tumour doublings until the non-subclone population reaches Nmax*(1-f)
    tend = (log.(Nmax .* (1 .- scfreq)) / log.(2))
    s = selection(log.(2), scfreq, tend, t1)

    DF = DataFrame(mu = mu,
    clonalmutations = abcres.parameters[:, 2],
    s = s,
    t = t1,
    cellularity = abcres.parameters[:, 5],
    frequency = scfreq,
    scmuts = map(x -> Float64(x), scmuts),
    weight = weights)

    return DF
end
# Exchange the entries of `x1` and `x2` at positions `indeces`.
# NOTE: mutates both input arrays in place (and also returns them).
function swapvalues(x1, x2, indeces)
    # tuple assignment evaluates both right-hand slices before writing back
    x1[indeces], x2[indeces] = x2[indeces], x1[indeces]
    return x1, x2
end
# Background (non-subclonal) population fraction for each particle.
# If the two subclone frequencies sum to more than 1 the clones are assumed
# nested, so the background is 1 - f1; otherwise 1 - f1 - f2. Floored at
# 0.001 to avoid log(0) downstream.
function clonesize(scfreq1, scfreq2)
    freqfactor = zeros(Float64, length(scfreq1))
    for i in eachindex(scfreq1)
        background = (scfreq1[i] + scfreq2[i]) > 1.0 ?
            1 - scfreq1[i] :
            1 - scfreq1[i] - scfreq2[i]
        freqfactor[i] = max(0.001, background)
    end
    return freqfactor
end
# Assemble the posterior DataFrame for the two-subclone model. Subclones are
# relabelled so that "subclone 1" is always the higher-frequency one, then
# emergence times and selection coefficients are derived as in the one-clone
# case.
function collectoutput2clone(abcres; Nmax = 10^10)

    # per-particle simulation summaries (indices per this package's layout:
    # mutations, divisions and frequency for each subclone)
    scmuts1 = map(x -> x.other[2], abcres.particles)
    scmuts2 = map(x -> x.other[3], abcres.particles)
    scdivs1 = map(x -> x.other[4], abcres.particles)
    scdivs2 = map(x -> x.other[5], abcres.particles)
    scfreq1 = map(x -> x.other[6], abcres.particles)
    scfreq2 = map(x -> x.other[7], abcres.particles)

    mu = abcres.parameters[:, 1]
    s1 = abcres.parameters[:, 3]
    t1 = abcres.parameters[:, 4]
    s2 = abcres.parameters[:, 5]
    t2 = abcres.parameters[:, 6]
    weights = abcres.weights

    scmuts1 = map(x -> Float64(x), scmuts1)
    scmuts2 = map(x -> Float64(x), scmuts2)

    # particles where subclone 2 outgrew subclone 1 get their labels swapped
    indeces = .!(scfreq1 .> scfreq2)

    #identify subclone 1 as the highest frequency subclone
    scfreq1, scfreq2 = swapvalues(scfreq1, scfreq2, indeces)
    scmuts1, scmuts2 = swapvalues(scmuts1, scmuts2, indeces)
    scdivs1, scdivs2 = swapvalues(scdivs1, scdivs2, indeces)
    s1, s2 = swapvalues(s1, s2, indeces)
    t1, t2 = swapvalues(t1, t2, indeces)

    # background fraction and emergence times (eulergamma/log(2) correction as
    # in collectoutput1clone; shuffle decorrelates the time posterior)
    freqfactor = clonesize(scfreq1, scfreq2)
    t1a = ((shuffle(scmuts1) ./ mu) / (2 * log.(2))) .- MathConstants.eulergamma/log.(2)
    t1b = ((shuffle(scmuts2) ./ mu) / (2 * log.(2))) .- MathConstants.eulergamma/log.(2)
    tend = (log.(Nmax .* (freqfactor)) / log.(2))
    s1, s2 = selection2clone(log.(2), scfreq1, scfreq2, tend, t1a, t1b)

    DF = DataFrame(mu = abcres.parameters[:, 1],
    clonalmutations = abcres.parameters[:, 2],
    s1 = s1,
    t1 = t1a,
    s2 = s2,
    t2 = t1b,
    cellularity = abcres.parameters[:, 7],
    frequency1 = scfreq1,
    frequency2 = scfreq2,
    scmuts1 = scmuts1,
    scmuts2 = scmuts2,
    weight = weights)

    return DF
end
# Assemble the posterior DataFrame for the zero-subclone (neutral) model.
# Relies on Julia's implicit return of the last expression (the DataFrame).
function collectoutput0clone(abcres)

    mupost = abcres.parameters[:, 1]       # mutation rate posterior
    cmpost = abcres.parameters[:, 2]       # clonal mutations posterior
    cellularity = abcres.parameters[:, 3]
    weights = abcres.weights

    DFpost = DataFrame(mu = mupost,
    clonalmutations = cmpost,
    cellularity = cellularity,
    weight = weights)
end
# Cumulative count of mutations with VAF >= f, for f stepping down from `fmax`
# to `fmin`, plus the neutral-theory prediction and a zero-intercept linear fit
# of counts against 1/f - 1/fmax.
#
# Fixes: `Array{Int64}(0)` / `Array{Float64}(0)` are Julia 0.6-only constructors
# (replaced with typed literals); the local named `cumsum` shadowed
# `Base.cumsum` (renamed); array-scalar `-` replaced with broadcasting.
function cumulativedistribution(VAF; fmin = 0.1, fmax = 0.3)

    steps = fmax:-0.001:fmin
    counts = Int64[]
    v = Float64[]
    for i in steps
        push!(counts, sum(VAF .>= i))
        push!(v, i)
    end
    # shift so the cumulative count starts at zero at fmax
    counts = counts .- counts[1]

    # column is still called `cumsum` to preserve the output schema
    DF = DataFrame(cumsum = map(Float64, counts), v = v)
    DF[:invf] = 1 ./ DF[:v] .- 1 / fmax
    DF[:theory] = Mcdf(DF[:v], fmin, fmax)
    DF[:normalized] = DF[:cumsum] ./ maximum(DF[:cumsum])

    # zero-intercept regression of cumulative counts on 1/f - 1/fmax
    lmfit = fit(LinearModel, @formula(cumsum ~ invf + 0), DF)
    DF[:prediction] = predict(lmfit)

    return CancerSeqSim.AnalysedData(DF, VAF)
end
# Posterior-weighted average VAF histogram for the particles of one `model`,
# together with the observed histogram of `VAF` and per-bin quantile bands.
function averagehistogram(particles, model, VAF)

    # 100 bins of width 0.01 on (0, 1] (right-closed)
    x = 0.005:0.01:1.005
    y = fit(Histogram, VAF, x, closed=:right)
    DFhist = DataFrame(VAF = x[1:end-1], freq = y.weights)

    # keep only particles belonging to the requested model
    particles = particles[map(x -> x.model, particles).==model]
    wts = map(x -> x.weight, particles)
    N = length(particles)

    # per-particle simulated histogram counts, one column per particle
    # (100 rows — must match the bin count above)
    M = zeros(Int64, 100, N)
    i = 1
    for j in 1:N
        M[:, i] = convert(Array, particles[i].other[1][:freq])
        i = i + 1
    end

    # weighted summary statistics per histogram bin
    mvalues = Float64[]
    meanvalues = Float64[]
    lquant = Float64[]
    lquart = Float64[]
    uquant = Float64[]
    uquart = Float64[]
    for i in 1:size(M, 1)
        push!(mvalues, median(vec(collect(M[i, :]')), weights(wts)))
        push!(meanvalues, mean(vec(collect(M[i, :]')), weights(wts)))
        push!(lquant, quantile(vec(collect(M[i, :]')), weights(wts), 0.025))
        push!(uquant, quantile(vec(collect(M[i, :]')), weights(wts), 0.975))
        push!(lquart, quantile(vec(collect(M[i, :]')), weights(wts), 0.25))
        push!(uquart, quantile(vec(collect(M[i, :]')), weights(wts), 0.75))
    end

    DFr = DataFrame(median = mvalues,
    mean = meanvalues,
    lowerq95 = lquant,
    upperq95 = uquant,
    lowerquartile = lquart,
    upperquartile = uquart,
    VAF = DFhist[:VAF],
    truecounts = DFhist[:freq])

    return DFr
end
# Persist a Results object: create the per-sample output directory tree and
# write the posterior CSVs for the final ABC population.
function saveresults(res::Results; resultsdirectory = "output")
    makedirectories(joinpath(resultsdirectory, res.SampleName, "finalpopulation"))
    getresults(res.ABCresults, resultsdirectory, res.SampleName, res.VAF, save = true)
    return
end
# Summarise an ABC-SMC result into per-model posteriors (parameters + averaged
# histogram + model probability). Optionally writes each piece to CSV under the
# sample's directory; with `savepopulations` the files go into a per-population
# subdirectory instead of `finalpopulation`.
# Returns (vector of Posterior, model-probability DataFrame).
function getresults(abcres, resultsdirectory, sname, VAF; save = false, Nmaxinf = 10^10, savepopulations = false, popnum = 1)

    if savepopulations
        resultsdirectory = joinpath(resultsdirectory, sname, "populations", "population_$popnum")
    else
        resultsdirectory = joinpath(resultsdirectory, sname, "finalpopulation")
    end

    posteriors = Posterior[]

    #save model posterior
    DFmp = DataFrame(Model = map(x -> string(x),0:length(abcres.modelprob) - 1), Probability = abcres.modelprob)

    # model 1: zero subclones; an empty Posterior placeholder keeps indices
    # aligned when the model has zero probability
    if abcres.modelprob[1] > 0.0
        DFpost0 = collectoutput0clone(getmodel(abcres, 1))
        DFr = averagehistogram(abcres.particles, 1, VAF)
        if save == true
            CSV.write(joinpath(resultsdirectory, "posterior", "$(sname)-parameters-clone0.csv"), DFpost0)
            CSV.write(joinpath(resultsdirectory, "posterior", "$(sname)-histogram-clone0.csv"), DFr)
        end
        push!(posteriors, Posterior(DFr, DFpost0, abcres.modelprob[1]))
    else
        push!(posteriors, Posterior(DataFrame(), DataFrame(), abcres.modelprob[1]))
    end

    # model 2: one subclone
    if abcres.modelprob[2] > 0.0
        DFpost1 = collectoutput1clone(getmodel(abcres, 2), Nmax = Nmaxinf)
        DFr = averagehistogram(abcres.particles, 2, VAF)
        if save == true
            CSV.write(joinpath(resultsdirectory, "posterior", "$(sname)-histogram-clone1.csv"), DFr)
            CSV.write(joinpath(resultsdirectory, "posterior", "$(sname)-parameters-clone1.csv"), DFpost1)
        end
        push!(posteriors, Posterior(DFr, DFpost1, abcres.modelprob[2]))
    else
        push!(posteriors, Posterior(DataFrame(), DataFrame(), abcres.modelprob[2]))
    end

    # model 3: two subclones (only present when the run allowed it)
    if (length(abcres.modelprob) > 2) && (abcres.modelprob[3] > 0.0)
        DFpost2 = collectoutput2clone(getmodel(abcres, 3), Nmax = Nmaxinf)
        DFr = averagehistogram(abcres.particles, 3, VAF)
        if save == true
            CSV.write(joinpath(resultsdirectory, "posterior", "$(sname)-parameters-clone2.csv"), DFpost2)
            CSV.write(joinpath(resultsdirectory, "posterior", "$(sname)-histogram-clone2.csv"), DFr)
        end
        push!(posteriors, Posterior(DFr, DFpost2, abcres.modelprob[3]))
    else
        if (length(abcres.modelprob) > 2)
            push!(posteriors, Posterior(DataFrame(), DataFrame(), abcres.modelprob[3]))
        end
    end

    DF = DataFrame(Model = map(x -> string(x),0:length(abcres.modelprob) - 1), Probability = abcres.modelprob)
    if save == true
        CSV.write(joinpath(resultsdirectory, "posterior", "$(sname)-modelprobabilities.csv"), DF)
    end

    return posteriors, DFmp
end
# Create the output directory tree: `resultsdirectory` plus the four standard
# subdirectories. `mkpath` already creates missing parents and is a no-op for
# existing directories, so the previous `isdir` guards were redundant.
function makedirectories(resultsdirectory)
    for sub in ("plots", "processed", "posterior", "data")
        mkpath(joinpath(resultsdirectory, sub))  # also creates resultsdirectory itself
    end
    return nothing
end
# Create `resultsdirectory` and its `plots` subdirectory. A single `mkpath`
# on the subdirectory creates the parent too; `mkpath` is idempotent, so the
# previous `isdir` guards were redundant.
function makeplotsdirectories(resultsdirectory)
    mkpath(joinpath(resultsdirectory, "plots"))
    return nothing
end
# Ensure `resultsdirectory` exists. `joinpath` of a single argument is the
# identity and `mkpath` is idempotent, so this reduces to one call.
function makedirectory(resultsdirectory)
    mkpath(resultsdirectory)
    return nothing
end
# Pretty-print a Results object by forwarding to the ABCSMC results printer.
# Fix: the original called `show(res.ABCresults)`, discarding `io` so output
# always went to stdout; `io` is now forwarded.
function show(io::IO, res::Results)
    show(io, res.ABCresults)
end
# Human-readable summary of an ABC-SMC model-selection run: simulation counts,
# acceptance ratio, tolerance schedule, model probabilities, and weighted
# posterior summaries (median + 95% interval) per parameter of each model.
# Fix: every print call previously ignored `io` and wrote to stdout; all
# output now goes to `io` as the `show` contract requires.
function show(io::IO, ABCresults::ApproxBayes.ABCSMCmodelresults)

    # re-derive posterior summaries; the VAF argument is a placeholder since
    # only the parameter posteriors are used here (save = false)
    posteriors, DFmp = getresults(ABCresults, "nothing", "nothing", [1.0, 2.0]; save = false, Nmaxinf = ABCresults.setup.other)

    @printf(io, "Total number of simulations: %.2e\n", sum(ABCresults.numsims))
    println(io, "Cumulative number of simulations = $(cumsum(ABCresults.numsims))")
    @printf(io, "Acceptance ratio: %.2e\n\n", ABCresults.accratio)
    println(io, "Tolerance schedule = $(round.(ABCresults.ϵ, digits = 2))\n")

    print(io, "Model probabilities:\n")
    for j in 1:length(ABCresults.modelprob)
        @printf(io, "\tModel %d (%d subclones): %.3f\n", j, j-1, ABCresults.modelprob[j])
    end

    print(io, "\nParameters:\n\n")
    # NOTE(review): iterating 1:length(ABCresults.parameters) — presumably this
    # matches length(ABCresults.modelprob); confirm against ApproxBayes' layout.
    for j in 1:length(ABCresults.parameters)
        if ABCresults.modelprob[j] > 0.0
            print(io, "Model $j ($(j-1) subclones)\n")
            # per-parameter weighted summaries; last column is `weight`, hence -1
            upperci = zeros(Float64, size(posteriors[j].Parameters, 2) - 1)
            lowerci = zeros(Float64, size(posteriors[j].Parameters, 2) - 1)
            parametermeans = zeros(Float64, size(posteriors[j].Parameters, 2) - 1)
            parametermedians = zeros(Float64, size(posteriors[j].Parameters, 2) - 1)
            for i in 1:(size(posteriors[j].Parameters, 2) - 1)
                parametermeans[i] = mean(posteriors[j].Parameters[:, i],
                weights(posteriors[j].Parameters[:weight]))
                parametermedians[i] = median(posteriors[j].Parameters[:, i],
                weights(posteriors[j].Parameters[:weight]))
                (lowerci[i], upperci[i]) = quantile(posteriors[j].Parameters[:, i],
                weights(posteriors[j].Parameters[:weight]), [0.025,0.975])
            end
            print(io, "\tMedian (95% intervals):\n")
            for i in 1:length(parametermeans)
                # column labels per model (must match the collectoutput* schemas)
                if j == 1
                    parameternames = ["μ/β", "Clonal Mutations", "Cellularity"]
                elseif j == 2
                    parameternames = ["μ/β", "Clonal Mutations", "Fitness", "Time (tumour doublings)", "Cellularity", "Subclone Frequency", "Subclone Mutations"]
                elseif j == 3
                    parameternames = ["μ/β", "Clonal Mutations", "Fitness - Subclone 1", "Time (tumour doublings) - Subclone 1", "Fitness - Subclone 2", "Time (tumour doublings) - Subclone 2", "Cellularity", "Subclone 1 Frequency", "Subclone 2 Frequency", "Subclone 1 Mutations", "Subclone 2 Mutations"]
                end
                @printf(io, "\tParameter %d - %s: %.2f (%.2f,%.2f)\n", i, parameternames[i], parametermedians[i], lowerci[i], upperci[i])
            end
        end
    end
end
| [
27,
34345,
29,
10677,
14,
22602,
13,
20362,
27,
456,
62,
30783,
29,
940,
12,
3064,
198,
9742,
7568,
7,
69,
11,
69,
1084,
11,
69,
9806,
8,
796,
357,
16,
13,
15,
19571,
69,
532,
352,
13,
15,
14,
69,
9806,
8,
24457,
357,
16,
13... | 2.286129 | 5,508 |
<gh_stars>0
# <NAME>, 2022
# Codes for chapter 11

# Code for section 11.1
# deserialization of source data frame
using DataFrames
using Serialization
# Load the data frame serialized earlier to "walk.bin".
walk = deserialize("walk.bin")

# Code for a note on conversion
# Assigning an Int into a Vector{Float64} converts the value in place.
x = [1.5]
x[1] = 1
x

# Code from section 11.1.1
# Convert the data frame to matrices with different element types.
Matrix(walk)
Matrix{Any}(walk)
Matrix{String}(walk)
using Plots
plot(walk)
plot(Matrix(walk); labels=["x" "y"] , legend=:topleft)
# Sum the `x` column of `table`. The accumulator is deliberately started
# from the integer literal 0 — this function is the book's example of a
# type-unstable loop to inspect with @code_warntype.
function mysum(table)
    acc = 0
    for element in table.x
        acc += element
    end
    return acc
end
# Benchmark mysum on a DataFrame versus a column table (NamedTuple of vectors).
df = DataFrame(x=1:1_000_000);
@btime mysum($df)
tab = Tables.columntable(df);
@btime mysum($tab)
# Inspect type inference for both call variants.
@code_warntype mysum(df)
@code_warntype mysum(tab)
typeof(tab)
# Kernel behind the function-barrier pattern: once `x` arrives here its
# concrete type is fixed, so the loop compiles to specialised code.
function barrier_mysum2(x)
    total = 0
    for item in x
        total += item
    end
    return total
end
mysum2(table) = barrier_mysum2(table.x)
@btime mysum2($df)
# `unique` works on data frames and on column tables alike.
df = DataFrame(a=[1, 1, 2], b=[1, 1, 2])
unique(df)
tab = Tables.columntable(df)
unique(tab)

# Code from section 11.1.3
# Row-oriented views of the data frame.
Tables.rowtable(walk)
nti = Tables.namedtupleiterator(walk)
for v in nti
    println(v)
end
er = eachrow(walk)
er[1]
er[end]
ec = eachcol(walk)
ec[1]
ec[end]
# Broadcasting identity narrows the collected element types.
identity.(eachcol(walk))
df = DataFrame(x=1:2, b=["a", "b"])
identity.(eachcol(df))
# Code from section 11.2
using CSV
# Inline CSV fixture of rainfall observations for two cities.
raw_data = """
city,date,rainfall
Olecko,2020-11-16,2.9
Olecko,2020-11-17,4.1
Olecko,2020-11-19,4.3
Olecko,2020-11-20,2.0
Olecko,2020-11-21,0.6
Olecko,2020-11-22,1.0
Ełk,2020-11-16,3.9
Ełk,2020-11-19,1.2
Ełk,2020-11-20,2.0
Ełk,2020-11-22,2.0
""";
rainfall_df = CSV.read(IOBuffer(raw_data), DataFrame)
# Group by one column, then by everything except "rainfall".
gdf_city = groupby(rainfall_df, "city")
gdf_city_date = groupby(rainfall_df, Not("rainfall"))
keys(gdf_city_date)
# A GroupKey can be converted to Tuple, NamedTuple or Dict …
gk1 = keys(gdf_city_date)[1]
g1_t = Tuple(gk1)
g1_nt = NamedTuple(gk1)
g1_dict = Dict(gk1)
# … and every one of those forms can index the grouped data frame.
gdf_city_date[1]
gdf_city_date[gk1]
gdf_city_date[g1_t]
gdf_city_date[g1_nt]
gdf_city_date[g1_dict]
gdf_city[("Olecko",)]
gdf_city[(city="Olecko",)]
using BenchmarkTools
# Compare lookup cost of the different key forms on a large group frame.
bench_df = DataFrame(id=1:10^8);
bench_gdf = groupby(bench_df, :id);
@btime groupby($bench_df, :id);
bench_i = 1_000_000;
bench_gk = keys(bench_gdf)[bench_i];
bench_t = Tuple(bench_gk);
bench_nt = NamedTuple(bench_gk);
bench_dict = Dict(bench_gk);
@btime $bench_gdf[$bench_i];
@btime $bench_gdf[$bench_gk];
@btime $bench_gdf[$bench_t];
@btime $bench_gdf[$bench_nt];
@btime $bench_gdf[$bench_dict];
# Vector indexing selects / reorders groups.
gdf_city[[2, 1]]
gdf_city[[1]]
# Iterate groups directly or as (key, subframe) pairs.
[nrow(df) for df in gdf_city]
for p in pairs(gdf_city)
    println(p)
end
Dict(key.city => nrow(df) for (key, df) in pairs(gdf_city))
combine(gdf_city, nrow)
| [
27,
456,
62,
30783,
29,
15,
198,
2,
1279,
20608,
22330,
33160,
198,
198,
2,
44380,
329,
6843,
1367,
198,
198,
2,
6127,
329,
2665,
1367,
13,
16,
198,
198,
2,
748,
48499,
1634,
286,
2723,
1366,
5739,
198,
198,
3500,
6060,
35439,
198... | 2.031832 | 1,288 |
#useful functions that do no directly call C code

# Collect the coordinates of every mesh vertex into a 3 × numV matrix by
# walking the global vertex iterator (`apf`); the iterator is reset
# before returning so subsequent traversals start from the beginning.
function getAllVertCoords()
    numV = num_entities[1]
    vertCoords = zeros(3, numV)
    scratch = zeros(3, 1)          # reusable per-vertex coordinate buffer
    for col = 1:numV
        apf.getVertCoords(scratch, 3, 1)
        vertCoords[:, col] = scratch
        incrementVertIt()
    end
    resetVertIt()
    return vertCoords
end
# Assemble the per-element nodal coordinates needed for summation by
# parts into a 3 × numPerEl × numEl array, advancing and then resetting
# the global element iterator.
function makeSBPMatrix()
    numEl = num_entities[4]               # number of elements
    numPerEl = downward_counts[4, 1]      # nodes per element
    scratch = zeros(3, numPerEl)          # reusable per-element buffer
    sbpMatrix = zeros(3, numPerEl, numEl)
    for el = 1:numEl
        apf.getElCoords(scratch, 3, numPerEl)
        sbpMatrix[:, :, el] = scratch
        incrementElIt()
    end
    resetElIt()
    return sbpMatrix
end
# Advance the mesh-entity iterator for the given topological dimension:
# 0 → vertices, 1 → edges. Both 2 and 3 advance the element iterator —
# presumably because elements are faces in a 2-D mesh (TODO confirm).
# Dimensions outside 0–3 are silently ignored.
function incrementIt(dim::Int)
    # increment an iterator of a given dimension
    if (dim == 0)
        incrementVertIt()
    elseif (dim==1)
        incrementEdgeIt()
    elseif (dim == 2)
        incrementElIt()
    elseif (dim == 3)
        incrementElIt()
    end
end # end function
| [
2,
1904,
913,
5499,
326,
466,
645,
3264,
869,
327,
2438,
198,
198,
8818,
651,
3237,
42369,
7222,
3669,
3419,
198,
22510,
53,
796,
997,
62,
298,
871,
58,
16,
60,
198,
198,
1851,
7222,
3669,
796,
1976,
27498,
7,
18,
11,
997,
53,
8... | 2.672727 | 385 |
# Global hotkey table: each `Hotkey` pairs a key chord with the callback
# it triggers. NOTE(review): more specific chords appear before their
# prefixes (e.g. "ctrl + shift + s" before "ctrl + s") — presumably the
# dispatcher matches in order; confirm before reordering.
const hotkeys = Hotkey[
    Hotkey(
        "ctrl + alt + shift + s",    # open the settings window
        SettingsWindow.showSettings
    ),
    Hotkey(
        "ctrl + shift + s",          # "save as" dialog
        showFileSaveDialog
    ),
    Hotkey(
        "ctrl + s",                  # save current file
        menuFileSave
    ),
    Hotkey(
        "ctrl + o",                  # open file dialog
        showFileOpenDialog
    ),
    Hotkey(
        "ctrl + n",                  # new map
        createNewMap
    ),
    Hotkey(
        "ctrl + m",                  # edit map metadata
        MetadataWindow.configureMetadata
    ),
    Hotkey(
        "ctrl + shift + t",          # configure existing room
        RoomWindow.configureRoom
    ),
    Hotkey(
        "ctrl + t",                  # create new room
        RoomWindow.createRoom
    ),
    Hotkey(
        "ctrl + shift + z",          # redo
        History.redo!
    ),
    Hotkey(
        "ctrl + z",                  # undo
        History.undo!
    ),
    Hotkey(
        "ctrl + f",                  # focus the filter/search entry
        focusFilterEntry!
    ),
    Hotkey(
        "ctrl + plus",               # zoom in
        zoomIn!
    ),
    Hotkey(
        "ctrl + minus",              # zoom out
        zoomOut!
    )
]
] | [
9979,
3024,
13083,
796,
6964,
2539,
58,
198,
220,
220,
220,
6964,
2539,
7,
198,
220,
220,
220,
220,
220,
220,
220,
366,
44755,
1343,
5988,
1343,
6482,
1343,
264,
1600,
198,
220,
220,
220,
220,
220,
220,
220,
16163,
27703,
13,
12860,... | 1.815476 | 504 |
<reponame>agdestein/DiscreteFiltering.jl<filename>src/filter/filter.jl
"""
Abstract continuous filter.
"""
abstract type Filter end
"""
IdentityFilter()
Identity filter, which does not filter.
"""
struct IdentityFilter <: Filter end
"""
TopHatFilter(width)
Top hat filter, parameterized by a variable filter width.
"""
struct TopHatFilter <: Filter
width::Function
end
"""
ConvolutionalFilter(kernel)
Convolutional filter, parameterized by a filter kernel.
"""
struct ConvolutionalFilter <: Filter
width::Function
kernel::Function
end
"""
GaussianFilter(h, σ) -> ConvolutionalFilter
GaussianFilter(σ) -> ConvolutionalFilter
Create Gaussian ConvolutionalFilter with domain width `2h` and variance `σ^2`.
"""
GaussianFilter(h, σ) = ConvolutionalFilter(h, x -> 1 / √(2π * σ^2) * exp(-x^2 / 2σ^2))
GaussianFilter(σ) = GaussianFilter(x -> 10σ, σ)
| [
27,
7856,
261,
480,
29,
363,
16520,
68,
259,
14,
15642,
8374,
11928,
20212,
13,
20362,
27,
34345,
29,
10677,
14,
24455,
14,
24455,
13,
20362,
198,
37811,
198,
23839,
12948,
8106,
13,
198,
37811,
198,
397,
8709,
2099,
25853,
886,
628,
... | 2.972881 | 295 |
# Converts a String to Languages.Language (using STR_TO_LANG)
function convert(::Type{L}, lang::S) where {L<:Languages.Language, S<:AbstractString}
    # Normalise the name, look it up, and default to English when unknown.
    langtype = get(STR_TO_LANG, strip(lowercase(lang)), Languages.English)
    return langtype()
end
# Converts Languages.Language to String (using LANG_TO_STR);
# falls back to `string(L)` when the type has no registered name.
convert(::Type{S}, lang::Type{L}) where {L<:Languages.Language, S<:AbstractString} =
    get(LANG_TO_STR, lang, string(L))
# An instance converts via its type.
convert(::Type{S}, lang::L) where {L<:Languages.Language, S<:AbstractString} =
    convert(S, L)
# Overload `occursin` (pre-1.0 `ismatch`) so a regex can be matched
# against every element of an array of strings; true if any matches.
# Bug fix: the generator iterated `sv`, an undefined name that raised
# UndefVarError on every call — it must iterate the `strings` argument.
occursin(r::Regex, strings::T) where T<:AbstractArray{<:AbstractString} =
    any(occursin(r, si) for si in strings)
# Overload `lowercase` to map over every string in a vector.
lowercase(v::T) where T<:AbstractArray{S} where S<:AbstractString =
    map(Base.lowercase, v)
"""
detect_language(text [; default=DEFAULT_LANGUAGE])
Detects the language of a piece of `text`. Returns a language of
type `Languages.Language`. If the text is empty of the confidence
is low, return the `default` language.
"""
function detect_language(text::AbstractString; default=DEFAULT_LANGUAGE)
isempty(text) && return default
detector = LanguageDetector()
l, _, c = detector(text) # returns (language, script, confidence)
if c < 0.15
return default()
else
return l
end
end
"""
summarize(sentences [;ns=1, flags=DEFAULT_SUMMARIZATION_STRIP_FLAGS])
Build a summary of the text's `sentences`. The resulting summary will be
a `ns` sentence document; each sentence is pre-procesed using the
`flags` option.
"""
function summarize(sentences::Vector{S};
ns::Int=1,
flags::UInt32=DEFAULT_SUMMARIZATION_STRIP_FLAGS
) where S<:AbstractString
# Get document term matrix
s = prepare.(sentences, flags)
filter!(d->occursin(r"[a-zA-Z0-9]", d), s)
t = dtm(DocumentTermMatrix{Float32}(s))
tf_idf!(t)
# Page rank
α = 0.85 # damping factor
n = 100 # number of iterations
ϵ = 1.0e-6 # convergence threhshold
G = Graph(t' * t)
try
p = pagerank(G, α, n, ϵ)
# Sort sentences and return
text_summary = sentences[sort(sortperm(p, rev=true)[1:min(ns, length(p))])]
return text_summary
catch
@warn "Summarization failed during TextRank. No summarization done."
return sentences
end
end
"""
Post-processes a string to fit a certain length, adding … if necessary
at the end of its choped represenation.
"""
function chop_to_length(input, len)
input = replace(input, "\n" => "")
idxs = collect(eachindex(input))
_idx = findlast(Base.:<=(len), idxs)
if _idx == nothing
_len=0
else
_len = idxs[findlast(Base.:<=(len), idxs)]
end
length(input) > len ? input[1:_len] * "…" : input
end
| [
2,
1482,
24040,
257,
10903,
284,
42860,
13,
32065,
357,
3500,
19269,
62,
10468,
62,
43,
15567,
8,
198,
1102,
1851,
7,
3712,
6030,
90,
43,
5512,
42392,
3712,
50,
8,
810,
1391,
43,
27,
25,
43,
33213,
13,
32065,
11,
311,
27,
25,
23... | 2.527972 | 1,144 |
<reponame>mforets/NeuralVerification.jl
"""
Planet(optimizer, eager::Bool)
Planet integrates a SAT solver (`PicoSAT.jl`) to find an activation pattern that maps a feasible input to an infeasible output.
# Problem requirement
1. Network: any depth, ReLU activation
2. Input: hyperrectangle or bounded hpolytope
3. Output: PolytopeComplement
# Return
`BasicResult`
# Method
Binary search of activations (0/1) and pruning by optimization. Our implementation is non eager.
- `optimizer` default `GLPKSolverMIP()`;
- `eager` default `false`;
# Property
Sound and complete.
# Reference
[<NAME>, "Formal Verification of Piece-Wise Linear Feed-Forward Neural Networks,"
in *International Symposium on Automated Technology for Verification and Analysis*, 2017.](https://arxiv.org/abs/1705.01320)
[https://github.com/progirep/planet](https://github.com/progirep/planet)
"""
@with_kw struct Planet <: Solver
optimizer = GLPK.Optimizer
eager::Bool = false
end
"""
    solve(solver::Planet, problem::Problem)

Run the Planet algorithm: tighten bounds via LP, build the initial SAT
clause set, then alternate PicoSAT assignments with LP-based elastic
filtering until the property holds (`:holds`) or a counterexample is
found (`:violated`).
"""
function solve(solver::Planet, problem::Problem)
    @assert ~solver.eager "Eager implementation not supported yet"
    # Bug fix: ensure the unbounded-input error is actually raised —
    # previously the error value was constructed but never thrown.
    isbounded(problem.input) || throw(UnboundedInputError("Planet does not accept unbounded input sets."))
    # Refine bounds. The bounds are values after activation
    status, bounds = tighten_bounds(problem, solver.optimizer)
    # Infeasible relaxation already proves the property.
    status == OPTIMAL || return CounterExampleResult(:holds)
    ψ = init_ψ(problem.network, bounds)
    δ = PicoSAT.solve(ψ)
    opt = solver.optimizer
    # Main loop to compute the SAT problem
    while δ != :unsatisfiable
        status, conflict = elastic_filtering(problem, δ, bounds, opt)
        # A feasible LP yields a concrete violating input.
        status == INFEASIBLE || return CounterExampleResult(:violated, conflict)
        # Learn the conflict clause and ask the SAT solver again.
        push!(ψ, conflict)
        δ = PicoSAT.solve(ψ)
    end
    return CounterExampleResult(:holds)
end
# Build the initial SAT clause set ψ from the activation bounds of every
# layer. The final entry of `bounds` describes the network output and
# contributes no activation variables, hence the `- 1`.
function init_ψ(nnet::Network, bounds::Vector{Hyperrectangle})
    ψ = Vector{Vector{Int64}}()
    node_index = 0
    for layer_id in 1:(length(bounds) - 1)
        node_index = set_activation_pattern!(ψ, nnet.layers[layer_id], bounds[layer_id], node_index)
    end
    return ψ
end
# Emit SAT clauses for a ReLU layer from its pre-activation bounds:
# definitely active nodes get a positive unit clause, definitely inactive
# nodes a negative unit clause, and undetermined nodes the tautology
# [index, -index] so the variable is registered but unconstrained.
# Returns the running node index.
function set_activation_pattern!(ψ::Vector{Vector{Int64}}, L::Layer{ReLU}, bound::Hyperrectangle, index::Int64)
    # Bounds before the activation function is applied.
    before_act_bound = approximate_affine_map(L, bound)
    lower = low(before_act_bound)
    upper = high(before_act_bound)
    for j in 1:length(lower)
        index += 1
        lower[j] > 0 && push!(ψ, [index])
        upper[j] < 0 && push!(ψ, [-index])
        lower[j] <= 0 <= upper[j] && push!(ψ, [index, -index])
    end
    return index
end
# Identity layers are always "active": emit one positive unit clause per
# node and return the advanced node index.
function set_activation_pattern!(ψ::Vector{Vector{Int64}}, L::Layer{Id}, bound::Hyperrectangle, index::Int64)
    for _ in 1:n_nodes(L)
        index += 1
        push!(ψ, [index])
    end
    return index
end
# Slack-minimising LP for the activation assignment δ. Repeatedly pins
# the largest slack of an undetermined node to zero, collecting the
# corresponding literals as a conflict clause. Returns
# (INFEASIBLE, conflict) once the LP cannot be solved to optimality, or
# (:Feasible, input-assignment) when no positive undetermined slack remains.
function elastic_filtering(problem::Problem, δ::Vector{Vector{Bool}}, bounds::Vector{Hyperrectangle}, optimizer)
    model = Model(optimizer)
    model[:bounds] = bounds
    model[:δ] = δ
    # z: layer values (including input); slack: per-node elastic variables.
    z = init_vars(model, problem.network, :z, with_input=true)
    slack = init_vars(model, problem.network, :slack)
    add_set_constraint!(model, problem.input, first(z))
    add_complementary_set_constraint!(model, problem.output, last(z))
    encode_network!(model, problem.network, TriangularRelaxedLP())
    encode_network!(model, problem.network, SlackLP())
    min_sum!(model, slack)
    conflict = Vector{Int64}()
    act = get_activation(problem.network, bounds)
    while true
        optimize!(model)
        termination_status(model) == OPTIMAL || return (INFEASIBLE, conflict)
        (m, index) = max_slack(slack, act)
        # All undetermined slacks at zero → the assignment is realisable.
        m > 0.0 || return (:Feasible, value(first(z)))
        # activated z get a factor of (-1)
        coeff = δ[index[1]][index[2]] ? -1 : 1
        node = coeff * get_node_id(problem.network, index)
        push!(conflict, node)
        # Force this slack to zero and re-solve.
        @constraint(model, slack[index[1]][index[2]] == 0.0)
    end
end
# Convenience overload: convert a flat SAT literal list into per-layer
# boolean assignments, then run the elastic-filtering LP.
function elastic_filtering(problem::Problem, list::Vector{Int64},
                           bounds::Vector{Hyperrectangle}, optimizer)
    δ = get_assignment(problem.network, list)
    return elastic_filtering(problem, δ, bounds, optimizer)
end
# Return the largest slack value among nodes whose activation is still
# undetermined (act == 0), together with its (layer, node) index.
# Returns (0.0, (0, 0)) when no undetermined slack is positive.
function max_slack(x::Vector{<:Vector}, act)
    best = 0.0
    best_index = (0, 0)
    for i in 1:length(x), j in 1:length(x[i])
        act[i][j] == 0 || continue   # only undetermined nodes qualify
        val = value(x[i][j])
        if val > best
            best = val
            best_index = (i, j)
        end
    end
    return (best, best_index)
end
# Only use tighten_bounds for feasibility check
# Tighten each layer's bounds by minimising and maximising every variable
# of the triangular LP relaxation. Returns (INFEASIBLE, original bounds)
# as soon as a minimisation subproblem is not solved to optimality.
# NOTE(review): the maximisation solve's status is not checked before
# reading its value — confirm that is intended.
function tighten_bounds(problem::Problem, optimizer)
    model = Model(optimizer)
    model[:bounds] = bounds = get_bounds(problem)
    z = init_vars(model, problem.network, :z, with_input=true)
    add_set_constraint!(model, problem.input, first(z))
    add_complementary_set_constraint!(model, problem.output, last(z))
    encode_network!(model, problem.network, TriangularRelaxedLP())
    new_bounds = Vector{Hyperrectangle}(undef, length(z))
    for i in 1:length(z)
        lower = low(bounds[i])
        upper = high(bounds[i])
        for j in 1:length(z[i])
            zᵢⱼ = z[i][j]
            # Minimise for the new lower bound …
            @objective(model, Min, zᵢⱼ)
            optimize!(model)
            termination_status(model) == OPTIMAL || return (INFEASIBLE, bounds)
            lower[j] = value(zᵢⱼ)
            # … and maximise for the new upper bound.
            @objective(model, Max, zᵢⱼ)
            optimize!(model)
            upper[j] = value(zᵢⱼ)
        end
        new_bounds[i] = Hyperrectangle(low = lower, high = upper)
    end
    return (OPTIMAL, new_bounds)
end
# Split the flat SAT literal list into one boolean activation vector per
# layer, consuming `n_nodes(layer)` literals for each layer in order.
function get_assignment(nnet::Network, list::Vector{Int64})
    assignments = Vector{Vector{Bool}}(undef, length(nnet.layers))
    offset = 1
    for (i, layer) in enumerate(nnet.layers)
        stop = offset + n_nodes(layer)
        assignments[i] = get_assignment(layer, list[offset:stop-1])
        offset = stop
    end
    return assignments
end
# ReLU layers: a node counts as "active" when its literal is positive.
get_assignment(L::Layer{ReLU}, list::Vector{Int64}) = list .> 0
# Identity layers: every node is always active.
get_assignment(L::Layer{Id}, list::Vector{Int64}) = trues(length(list))
# Map a (layer, node) pair to the node's flat 1-based id across the
# whole network: all nodes of earlier layers, plus its position in-layer.
function get_node_id(nnet::Network, x::Tuple{Int64, Int64})
    layer, node = x
    preceding = sum(n_nodes.(nnet.layers[1:layer-1]))
    return preceding + node
end
# NOTE: not used -
# function get_node_id(nnet::Network, n::Int64)
# i = 0
# j = n
# while j > length(nnet.layers[i+1].bias)
# i += 1
# j = j - length(nnet.layers[i].bias)
# end
# return (i, j)
# end | [
27,
7856,
261,
480,
29,
76,
754,
912,
14,
8199,
1523,
13414,
2649,
13,
20362,
198,
37811,
198,
220,
220,
220,
11397,
7,
40085,
7509,
11,
11069,
3712,
33,
970,
8,
198,
198,
41801,
48105,
257,
29020,
1540,
332,
357,
63,
47,
3713,
50... | 2.290896 | 2,867 |
<filename>Julia-Packages/Plot3D/src/Face.jl<gh_stars>10-100
## Code dealing with Face
# A polygonal face: per-vertex coordinates (X, Y, Z) and the indices
# (I, J, K) of those coordinates in their source arrays.
mutable struct Face
    nvertex::Int64          # number of vertices currently stored
    X::Array{Float64,1}     # x-coordinates, one per vertex
    Y::Array{Float64,1}     # y-coordinates
    Z::Array{Float64,1}     # z-coordinates
    I::Array{Int64,1}       # index of each x-coordinate
    J::Array{Int64,1}       # index of each y-coordinate
    K::Array{Int64,1}       # index of each z-coordinate
end
"""Default Constructor for Face
"""
function Face()
return Face(0,Float64[], Float64[],Float64[],Int64[],Int64[],Int64[])
end
""" Add a vertex to a face
Args
f: Face
x: x-vertex
y: y-vertex
z: z-vertex
i: index of x
j: index of y
k: index of z
"""
function add_face_vertex(f::Face, x::Float64, y::Float64, z::Float64,i::Int64,j::Int64,k::Int64)
push!(f.X,x)
push!(f.Y,y)
push!(f.Z,z)
push!(f.I,i)
push!(f.J,j)
push!(f.K,k)
f.nvertex+=1
end
""" Check to see if two faces are the same by matching vertices
Args
f: another face
Returns
match_indices e.g. [[1,2],[1,4]] Face 1 matches Face 2, Face 1 matches Face 4
"""
function match_face_indicies(f1::Face,f2::Face)
tol = 1E-6
matchedIndices = Any[]
for i in 1:f1.nvertex
for j in 1:f1.nvertex
dx = abs(f1.X[i] - f2.X[j])
dy = abs(f1.Y[i] - f2.Y[j])
dz = abs(f1.Z[i] - f2.Z[j])
if ((dx<tol) && (dy<tol) && (dz<tol))
push!(matchedIndices,[i j])
end
end
end
return matchedIndices
end
# Faces compare equal when the number of matched vertex pairs equals the
# vertex count. NOTE(review): `==` counts against `rhs.nvertex` while
# `!=` counts against `lhs.nvertex`, and `!=` is defined separately
# instead of relying on the default `!(lhs == rhs)` — the two can
# disagree for faces of different sizes or with duplicate matches;
# confirm intent before relying on either.
Base.:(==)(lhs::Face, rhs::Face) = (length(match_face_indicies(lhs,rhs)) == rhs.nvertex)
Base.:(!=)(lhs::Face, rhs::Face) = (length(match_face_indicies(lhs,rhs)) != lhs.nvertex)
## End Face
27,
34345,
29,
16980,
544,
12,
11869,
1095,
14,
43328,
18,
35,
14,
10677,
14,
32388,
13,
20362,
27,
456,
62,
30783,
29,
940,
12,
3064,
198,
2235,
6127,
7219,
351,
15399,
220,
198,
76,
18187,
2878,
15399,
198,
220,
220,
220,
299,
3... | 1.890698 | 860 |
"""
reflect(v, n)
Reflect direction `v` at plane with normal `n`.
"""
function reflect(v, n)
@assert(abs(norm(n) - 1) < 1e-11, "surface normal must be normalized")
return v - 2*dot(v, n)*n
end
"""
refract(v, n, ni_over_nt)
Compute direction of refracted ray according to Snell's law,
or return `nothing` if no solution exists.
"""
function refract(v, n, ni_over_nt)
@assert(abs(norm(v) - 1) < 1e-11, "input ray direction must be normalized")
@assert(abs(norm(n) - 1) < 1e-11, "surface normal must be normalized")
dt = dot(v, n)
discriminant = 1 - ni_over_nt^2 * (1 - dt^2)
if discriminant > 0
return ni_over_nt*(v - n*dt) - sqrt(discriminant)*n
else
return nothing
end
end
"""
schlick(cosine, ref_idx)
Schlick's approximation of specular reflection coefficient.
"""
function schlick(cosine, ref_idx)
r0 = ((1 - ref_idx) / (1 + ref_idx))^2
return r0 + (1 - r0) * (1 - cosine)^5
end
# Root type for all surface materials; `scatter` methods dispatch on it.
abstract type Material end

"""
Lambertian surface (ideal diffusive reflection),
specified by albedo (reflectance) per color channel.
"""
struct Lambertian <: Material
    albedo::Vector   # per-channel reflectance
end
"""
scatter(lambertian, ray, rec)
Compute scattered ray and color attenuation factors for a ray hitting a lambertian surface.
"""
function scatter(lambertian::Lambertian, ray::Ray, rec::HitRecord)
scattered = Ray(rec.point, rec.normal + random_in_unit_sphere())
return (scattered, lambertian.albedo)
end
"""
Metal surface, specified by albedo (reflectance) per color channel
and fuzziness factor (scales random additive permutation of reflected ray).
"""
struct Metal <: Material
# reflectance per color channel
albedo::Vector
# fuzziness factor
fuzz::Real
Metal(albedo, fuzz) = new(albedo, min(fuzz, 1))
end
"""
scatter(metal, ray, rec)
Compute scattered ray and color attenuation factors for a ray hitting a metal surface.
"""
function scatter(metal::Metal, ray::Ray, rec::HitRecord)
nraydir = unitvector(ray.direction)
reflected = reflect(nraydir, rec.normal)
scattered = Ray(rec.point, reflected + metal.fuzz*random_in_unit_sphere())
if dot(scattered.direction, rec.normal) > 0
return (scattered, metal.albedo)
else
return (nothing, metal.albedo)
end
end
"""
Dielectric surface, specified by ratio of the indices of refraction.
"""
struct Dielectric <: Material
ref_idx::Real
end
"""
scatter(dielectric, ray, rec)
Compute scattered ray and color attenuation factors for a ray hitting a dielectric surface material.
"""
function scatter(dielectric::Dielectric, ray::Ray, rec::HitRecord)
# normalized ray direction
nraydir = unitvector(ray.direction)
reflected = reflect(nraydir, rec.normal)
cosine = dot(nraydir, rec.normal)
if cosine > 0
refracted = refract(nraydir, -rec.normal, dielectric.ref_idx)
else
refracted = refract(nraydir, rec.normal, 1 / dielectric.ref_idx)
cosine = -cosine
end
if refracted != nothing
reflect_prob = schlick(cosine, dielectric.ref_idx)
else
reflect_prob = 1
end
# randomly choose between reflection or refraction
if rand() < reflect_prob
return (Ray(rec.point, reflected), ones(3))
else
return (Ray(rec.point, refracted), ones(3))
end
end
| [
198,
37811,
198,
220,
220,
220,
4079,
7,
85,
11,
299,
8,
198,
198,
8134,
801,
4571,
4600,
85,
63,
379,
6614,
351,
3487,
4600,
77,
44646,
198,
37811,
198,
8818,
4079,
7,
85,
11,
299,
8,
198,
220,
220,
220,
2488,
30493,
7,
8937,
... | 2.662679 | 1,254 |
# Julia wrapper around the Python `sparse_ir` package via PyCall.
module SparseIR

import PyCall: pyimport, PyNULL, PyVector, PyObject

# Placeholders filled in at module load time by __init__ (PyCall objects
# cannot be created during precompilation).
const sparse_ir = PyNULL()
const pyspr = PyNULL()
const pyaugment = PyNULL()
const pysampling = PyNULL()

# Import the Python modules and verify the installed sparse_ir version.
# NOTE(review): `pyimport_conda` is used below but only `pyimport` is in
# the explicit import list above — confirm it resolves at runtime.
function __init__()
    copy!(sparse_ir, pyimport_conda("sparse_ir", "sparse-ir", "spm-lab"))
    copy!(pyspr, pyimport("sparse_ir.spr"))
    copy!(pyaugment, pyimport("sparse_ir.augment"))
    copy!(pysampling, pyimport("sparse_ir.sampling"))
    return check_version_sparse_ir()
end

include("backend.jl")
include("types.jl")
include("kernel.jl")
include("poly.jl")
include("basis.jl")
include("augment.jl")
include("composite.jl")
include("sampling.jl")
include("spr.jl")
include("basis_set.jl")
include("exports.jl")

end
| [
21412,
1338,
17208,
4663,
198,
198,
11748,
9485,
14134,
25,
12972,
11748,
11,
9485,
33991,
11,
9485,
38469,
11,
9485,
10267,
198,
198,
9979,
29877,
62,
343,
796,
9485,
33991,
3419,
198,
9979,
279,
893,
1050,
796,
9485,
33991,
3419,
198,... | 2.552727 | 275 |
<gh_stars>0
#= Instructions:
- Pkg.add("PkgBenchmark.jl")
- using PkgBenchmark
- results = benchmarkpkg("IntervalArithmetic")
- showall(results)
- results = judge("IntervalArithmetic", "v0.9.1") # compare current version to that tag
- showall(results)
=#

using IntervalArithmetic

@benchgroup "Constructors" begin
    @bench "Interval" Interval(1, 2)
end

@benchgroup "@interval" begin
    @bench "@interval" @interval(0.1)
    @bench "pi" @interval(pi)
    @bench "expression" @interval(sin(0.1) + cos(0.2))
end

@benchgroup "Arithmetic" begin
    a = Interval(1, 2)
    b = Interval(3, 4)
    for op in (+, -, *, /)
        @bench string(op) $(op)($a, $b)
    end
end

@benchgroup "Elementary functions" begin
    # NOTE(review): relies on `a` from the "Arithmetic" group above —
    # top-level `begin` blocks share global scope; confirm intended.
    for op in (exp, log, sin, tan)
        @bench string(op) $(op)($a)
    end
end

@benchgroup "Sum" begin
    sum1(N) = sum(Interval(i, i+1) for i in 1:N)
    sum2(N) = (one = Interval(1.0); sum(one / (i^2) for i in 1:N) )
    @bench "Sum1" sum1(1000)
    @bench "Sum2" sum2(1000)
end
| [
27,
456,
62,
30783,
29,
15,
198,
2,
28,
27759,
25,
198,
198,
12,
350,
10025,
13,
2860,
7203,
47,
10025,
44199,
4102,
13,
20362,
4943,
198,
12,
1262,
350,
10025,
44199,
4102,
198,
198,
12,
2482,
796,
18335,
35339,
7203,
9492,
2100,
... | 2.300683 | 439 |
<reponame>gottacatchenall/DynamicGrids.jl
# Apply every rule of `simdata` in sequence (via `maprule!` per rule),
# then apply the optional mask to the resulting grids before returning
# the updated SimData object.
function sequencerules!(simdata::AbstractSimData)
    updated = sequencerules!(simdata, rules(simdata))
    _maybemask!(grids(updated))
    return updated
end
# Recursively apply `rules`: run the head rule with `maprule!`, then
# recurse on the tail so each rule sees the previous rule's output.
# (CellRule never moves values into masked areas, so masking of dest
# writes is handled inside `maprule!` where a mask is provided.)
function sequencerules!(simdata::AbstractSimData, rules::Tuple)
    head_rule, remaining = rules[1], tail(rules)
    updated = maprule!(simdata, head_rule)
    return sequencerules!(updated, remaining)
end
sequencerules!(simdata::AbstractSimData, rules::Tuple{}) = simdata
| [
27,
7856,
261,
480,
29,
70,
1252,
330,
265,
6607,
439,
14,
44090,
8642,
2340,
13,
20362,
198,
2,
45835,
3173,
625,
262,
685,
63,
8890,
6601,
63,
16151,
31,
5420,
8,
2134,
11,
198,
2,
4585,
685,
63,
8899,
25135,
0,
63,
16151,
31,... | 2.845865 | 266 |
<gh_stars>0
module InterfaceSymbolicUtilsModule
using SymbolicUtils
import ..CoreModule: CONST_TYPE, Node, Options
import ..UtilsModule: isgood, isbad, @return_on_false
# Values accepted wherever either a plain number or a symbolic numeric
# expression may appear.
const SYMBOLIC_UTILS_TYPES = Union{<:Number,SymbolicUtils.Symbolic{<:Number}}
# Operators that SymbolicUtils represents directly; anything else may be
# wrapped in an indexed function symbol (see parse_tree_to_eqs).
const SUPPORTED_OPS = (cos, sin, exp, cot, tan, csc, sec, +, -, *, /)
# A symbolic expression is "good" when it is a leaf, or when its
# operation and every argument are recursively good (no NaN/Inf parts).
function isgood(x::SymbolicUtils.Symbolic)
    SymbolicUtils.istree(x) || return true
    parts = [SymbolicUtils.operation(x); SymbolicUtils.arguments(x)]
    return all(isgood.(parts))
end
subs_bad(x) = isgood(x) ? x : Inf
# Recursively convert an equation `tree` into a SymbolicUtils expression.
# Leaves become constants or feature symbols `x<feature>`; interior nodes
# apply the binary/unary operator selected by `tree.op` from `options`.
# With `index_functions=true`, unsupported operators are wrapped in named
# function symbols so they can be recovered later.
function parse_tree_to_eqs(tree::Node, options::Options, index_functions::Bool=false)
    if tree.degree == 0
        # Return constant if needed
        tree.constant && return subs_bad(tree.val)
        return SymbolicUtils.Sym{LiteralReal}(Symbol("x$(tree.feature)"))
    end
    # Collect the next children
    children = tree.degree >= 2 ? (tree.l, tree.r) : (tree.l,)
    # Get the operation
    op = tree.degree > 1 ? options.binops[tree.op] : options.unaops[tree.op]
    # Create an N tuple of Numbers for each argument
    dtypes = map(x -> Number, 1:(tree.degree))
    # Wrap operators SymbolicUtils cannot represent directly.
    if !(op ∈ SUPPORTED_OPS) && index_functions
        op = SymbolicUtils.Sym{(SymbolicUtils.FnType){Tuple{dtypes...},Number}}(Symbol(op))
    end
    return subs_bad(
        op(map(x -> parse_tree_to_eqs(x, options, index_functions), children)...)
    )
end
# For operators which are indexed, we need to convert them back
# using the string: look the name up in the unary/binary operator lists
# (the arity is recovered from the FnType's argument tuple).
function convert_to_function(
    x::SymbolicUtils.Sym{SymbolicUtils.FnType{T,Number}}, options::Options
) where {T<:Tuple}
    degree = length(T.types)
    if degree == 1
        ind = findoperation(x.name, options.unaops)
        return options.unaops[ind]
    elseif degree == 2
        ind = findoperation(x.name, options.binops)
        return options.binops[ind]
    else
        # Only unary and binary operators are representable.
        throw(AssertionError("Function $(String(x.name)) has degree > 2 !"))
    end
end
# For normal operators, simply return the function itself:
# (only indexed Sym-wrapped operators need the lookup above).
convert_to_function(x, options::Options) = x
# Split equation
# Binarize an n-ary +, *, sum or prod application: convert the first
# argument on its own, and fold the remaining arguments back through the
# operator before converting them as the second child.
function split_eq(
    op, args, options::Options; varMap::Union{Array{String,1},Nothing}=nothing
)
    !(op ∈ (sum, prod, +, *)) && throw(error("Unsupported operation $op in expression!"))
    # sum/prod map onto the + and * binary operators.
    if Symbol(op) == Symbol(sum)
        ind = findoperation(+, options.binops)
    elseif Symbol(op) == Symbol(prod)
        ind = findoperation(*, options.binops)
    else
        ind = findoperation(op, options.binops)
    end
    return Node(
        ind,
        convert(Node, args[1], options; varMap=varMap),
        convert(Node, op(args[2:end]...), options; varMap=varMap),
    )
end
# Return the 1-based position of operator `op` in the collection `ops`,
# matching by symbolic name; raises an ErrorException when absent.
function findoperation(op, ops)
    target = Symbol(op)
    for (idx, candidate) in enumerate(ops)
        Symbol(candidate) == target && return idx
    end
    error("Operation $(op) in expression not found in operations $(ops)!")
end
# `convert` entry point from a Node tree to a SymbolicUtils expression;
# thin wrapper over `node_to_symbolic`.
function Base.convert(
    ::typeof(SymbolicUtils.Symbolic),
    tree::Node,
    options::Options;
    varMap::Union{Array{String,1},Nothing}=nothing,
    index_functions::Bool=false,
)
    return node_to_symbolic(tree, options; varMap=varMap, index_functions=index_functions)
end
# A plain number converts to a constant Node (cast to CONST_TYPE);
# `varMap` is accepted for signature symmetry but unused here.
function Base.convert(
    ::typeof(Node),
    x::Number,
    options::Options;
    varMap::Union{Array{String,1},Nothing}=nothing,
)
    return Node(CONST_TYPE(x))
end
# Convert a SymbolicUtils expression back into a Node tree: leaves become
# named features, integer powers are expanded into repeated products
# first, and n-ary applications are binarized via `split_eq`.
function Base.convert(
    ::typeof(Node),
    expr::SymbolicUtils.Symbolic,
    options::Options;
    varMap::Union{Array{String,1},Nothing}=nothing,
)
    if !SymbolicUtils.istree(expr)
        # Leaf symbol: resolve its name, optionally through varMap.
        varMap === nothing && return Node(String(expr.name))
        return Node(String(expr.name), varMap)
    end
    # First, we remove integer powers:
    y, good_return = multiply_powers(expr)
    if good_return
        expr = y
    end
    op = convert_to_function(SymbolicUtils.operation(expr), options)
    args = SymbolicUtils.arguments(expr)
    # More than two arguments: binarize.
    length(args) > 2 && return split_eq(op, args, options; varMap=varMap)
    ind = if length(args) == 2
        findoperation(op, options.binops)
    else
        findoperation(op, options.unaops)
    end
    return Node(ind, map(x -> convert(Node, x, options; varMap=varMap), args)...)
end
"""
node_to_symbolic(tree::Node, options::Options;
varMap::Union{Array{String, 1}, Nothing}=nothing,
index_functions::Bool=false)
The interface to SymbolicUtils.jl. Passing a tree to this function
will generate a symbolic equation in SymbolicUtils.jl format.
## Arguments
- `tree::Node`: The equation to convert.
- `options::Options`: Options, which contains the operators used in the equation.
- `varMap::Union{Array{String, 1}, Nothing}=nothing`: What variable names to use for
each feature. Default is [x1, x2, x3, ...].
- `index_functions::Bool=false`: Whether to generate special names for the
operators, which then allows one to convert back to a `Node` format
using `symbolic_to_node`.
(CURRENTLY UNAVAILABLE - See https://github.com/MilesCranmer/SymbolicRegression.jl/pull/84).
"""
function node_to_symbolic(
tree::Node,
options::Options;
varMap::Union{Array{String,1},Nothing}=nothing,
index_functions::Bool=false,
)
expr = subs_bad(parse_tree_to_eqs(tree, options, index_functions))
# Check for NaN and Inf
@assert isgood(expr) "The recovered equation contains NaN or Inf."
# Return if no varMap is given
varMap === nothing && return expr
# Create a substitution tuple
subs = Dict(
[
SymbolicUtils.Sym{LiteralReal}(Symbol("x$(i)")) =>
SymbolicUtils.Sym{LiteralReal}(Symbol(varMap[i])) for i in 1:length(varMap)
]...,
)
return substitute(expr, subs)
end
# Inverse of `node_to_symbolic`: convert a SymbolicUtils expression back
# into a Node tree using the operators registered in `options`.
function symbolic_to_node(
    eqn::T, options::Options; varMap::Union{Array{String,1},Nothing}=nothing
)::Node where {T<:SymbolicUtils.Symbolic}
    return convert(Node, eqn, options; varMap=varMap)
end

# function Base.convert(::typeof(Node), x::Number, options::Options; varMap::Union{Array{String, 1}, Nothing}=nothing)
# function Base.convert(::typeof(Node), expr::SymbolicUtils.Symbolic, options::Options; varMap::Union{Array{String, 1}, Nothing}=nothing)
# Plain numbers carry no symbolic powers; return unchanged with the
# success flag set.
multiply_powers(eqn::Number)::Tuple{SYMBOLIC_UTILS_TYPES,Bool} = (eqn, true)
# Expand integer powers in a symbolic expression. Leaves are returned
# unchanged; trees dispatch to the operation-specific method below.
function multiply_powers(eqn::SymbolicUtils.Symbolic)::Tuple{SYMBOLIC_UTILS_TYPES,Bool}
    if !SymbolicUtils.istree(eqn)
        return eqn, true
    end
    op = SymbolicUtils.operation(eqn)
    return multiply_powers(eqn, op)
end
# Recursively rewrite integer powers as repeated multiplication (or
# division for negative exponents). Returns (expression, success); the
# success flag is false whenever a sub-rewrite failed or produced a
# NaN/Inf value, in which case the original `eqn` is returned.
# NOTE(review): the n == 0 branch returns the literal 1.0 even for a
# symbolic base — confirm that collapse is intended.
function multiply_powers(
    eqn::SymbolicUtils.Symbolic, op::F
)::Tuple{SYMBOLIC_UTILS_TYPES,Bool} where {F}
    args = SymbolicUtils.arguments(eqn)
    nargs = length(args)
    if nargs == 1
        # Unary operator: rewrite the single argument, then re-apply.
        l, complete = multiply_powers(args[1])
        @return_on_false complete eqn
        @return_on_false isgood(l) eqn
        return op(l), true
    elseif op == ^
        l, complete = multiply_powers(args[1])
        @return_on_false complete eqn
        @return_on_false isgood(l) eqn
        n = args[2]
        if typeof(n) <: Int
            # Integer exponents expand to products / quotients.
            if n == 1
                return l, true
            elseif n == -1
                return 1.0 / l, true
            elseif n > 1
                return reduce(*, [l for i in 1:n]), true
            elseif n < -1
                return reduce(/, vcat([1], [l for i in 1:abs(n)])), true
            else
                return 1.0, true
            end
        else
            # Non-integer exponent: rewrite it too and keep the power form.
            r, complete2 = multiply_powers(args[2])
            @return_on_false complete2 eqn
            return l^r, true
        end
    elseif nargs == 2
        # Generic binary operator: rewrite both sides, then re-apply.
        l, complete = multiply_powers(args[1])
        @return_on_false complete eqn
        @return_on_false isgood(l) eqn
        r, complete2 = multiply_powers(args[2])
        @return_on_false complete2 eqn
        @return_on_false isgood(r) eqn
        return op(l, r), true
    else
        # return mapreduce(multiply_powers, op, args)
        # ## reduce(op, map(multiply_powers, args))
        # N-ary application: rewrite every argument, then left-fold `op`.
        out = map(multiply_powers, args) #vector of tuples
        for i in 1:size(out, 1)
            @return_on_false out[i][2] eqn
            @return_on_false isgood(out[i][1]) eqn
        end
        cumulator = out[1][1]
        for i in 2:size(out, 1)
            cumulator = op(cumulator, out[i][1])
            @return_on_false isgood(cumulator) eqn
        end
        return cumulator, true
    end
end
end
| [
27,
456,
62,
30783,
29,
15,
198,
21412,
26491,
13940,
2022,
4160,
18274,
4487,
26796,
198,
198,
3500,
41327,
4160,
18274,
4487,
198,
11748,
11485,
14055,
26796,
25,
7102,
2257,
62,
25216,
11,
19081,
11,
18634,
198,
11748,
11485,
18274,
... | 2.343258 | 3,560 |
# Switch Franklin to file-structure variant 1, then exercise the LaTeX-style
# \input / \output commands against fixture files under the assets folder.
fs1()
@testset "LX input" begin
    set_curpath("index.md")
    # Create a code file plus its pre-generated "output" artefacts.
    mkpath(joinpath(F.PATHS[:assets], "index", "code", "output"))
    write(joinpath(F.PATHS[:assets], "index", "code", "s1.jl"), "println(1+1)")
    write(joinpath(F.PATHS[:assets], "index", "code", "output", "s1a.png"), "blah")
    write(joinpath(F.PATHS[:assets], "index", "code", "output", "s1.out"), "blih")
    st = raw"""
        Some string
        \input{julia}{s1.jl}
        Then maybe
        \output{s1.jl}
        Finally img:
        \input{plot:a}{s1.jl}
        done.
        """;
    F.def_GLOBAL_VARS!()
    F.def_GLOBAL_LXDEFS!()
    m = F.convert_md(st)
    h = F.convert_html(m)
    # \input{julia} embeds the source, \output embeds the .out file, and
    # \input{plot:a} embeds the matching image.
    @test occursin("<p>Some string <pre><code class=\"language-julia\">$(read(joinpath(F.PATHS[:assets], "index", "code", "s1.jl"), String))</code></pre>", h)
    @test occursin("Then maybe <pre><code class=\"plaintext\">$(read(joinpath(F.PATHS[:assets], "index", "code", "output", "s1.out"), String))</code></pre>", h)
    @test occursin("Finally img: <img src=\"/assets/index/code/output/s1a.png\" alt=\"\"> done.", h)
end
@testset "Input MD" begin
mkpath(joinpath(F.PATHS[:assets], "ccc"))
fp = joinpath(F.PATHS[:assets], "ccc", "asset1.md")
write(fp, "blah **blih**")
st = raw"""
Some string
\textinput{ccc/asset1}
"""
@test isapproxstr(st |> conv, "<p>Some string blah <strong>blih</strong></p>")
end
fs2()
@testset "LX input" begin
set_curpath("index.md")
mkpath(joinpath(F.PATHS[:site], "assets", "index", "code", "output"))
write(joinpath(F.PATHS[:site], "assets", "index", "code", "s1.jl"), "println(1+1)")
write(joinpath(F.PATHS[:site], "assets", "index", "code", "output", "s1a.png"), "blah")
write(joinpath(F.PATHS[:site], "assets", "index", "code", "output", "s1.out"), "blih")
st = raw"""
Some string
\input{julia}{s1.jl}
Then maybe
\output{s1.jl}
Finally img:
\input{plot:a}{s1.jl}
done.
""";
F.def_GLOBAL_VARS!()
F.def_GLOBAL_LXDEFS!()
m = F.convert_md(st)
h = F.convert_html(m)
@test occursin("<p>Some string <pre><code class=\"language-julia\">$(read(joinpath(F.PATHS[:site], "assets", "index", "code", "s1.jl"), String))</code></pre>", h)
@test occursin("Then maybe <pre><code class=\"plaintext\">$(read(joinpath(F.PATHS[:site], "assets", "index", "code", "output", "s1.out"), String))</code></pre>", h)
@test occursin("Finally img: <img src=\"/assets/index/code/output/s1a.png\" alt=\"\"> done.", h)
end
@testset "Input MD" begin
mkpath(joinpath(F.PATHS[:site], "assets", "ccc"))
fp = joinpath(F.PATHS[:site], "assets", "ccc", "asset1.md")
write(fp, "blah **blih**")
st = raw"""
Some string
\textinput{ccc/asset1}
"""
@test isapproxstr(st |> conv, "<p>Some string blah <strong>blih</strong></p>")
end
@testset "Input err" begin
gotd()
s = raw"""
AA
\input{julia}{foo/baz}
\input{plot}{foo/baz}
\textinput{foo/bar}
""" |> fd2html_td
@test isapproxstr(s, """
<p>AA
<p><span style="color:red;">// Couldn't find a file when trying to resolve an input request with relative path: `foo/baz`. //</span></p>
<p><span style="color:red;">// Couldn't find an output directory associated with 'foo/baz' when trying to input a plot. //</span></p>
<p><span style="color:red;">// Couldn't find a file when trying to resolve an input request with relative path: `foo/bar`. //</span></p></p>
""")
fs2()
gotd()
# table input
mkpath(joinpath(td, "_assets", "index", "output"))
write(joinpath(td, "_assets", "index", "output", "foo.csv"), "bar")
s = raw"""
@def fd_rpath = "index.md"
\tableinput{}{./foo.csv}
""" |> fd2html_td
@test isapproxstr(s, """
<p><span style="color:red;">// Table matching '/assets/index/foo.csv' not found. //</span></p></p>
""")
end
| [
9501,
16,
3419,
198,
198,
31,
9288,
2617,
366,
43,
55,
5128,
1,
2221,
198,
220,
220,
220,
900,
62,
22019,
6978,
7203,
9630,
13,
9132,
4943,
198,
220,
220,
220,
33480,
6978,
7,
22179,
6978,
7,
37,
13,
47,
1404,
7998,
58,
25,
1966... | 2.15277 | 1,859 |
using Flux, CUDA, Test
using Flux: pullback
# Checks that Flux's BatchNorm gives the same forward values and pullback
# gradients on GPU (CUDNN path) as on CPU, for 4D (NCHW-style) and 2D inputs.
@testset "CUDNN BatchNorm" begin
  @testset "4D Input" begin
    x = Float64.(collect(reshape(1:12, 2, 2, 3, 1)))
    m = BatchNorm(3)
    cx = gpu(x)
    cm = gpu(m)
    y, back = pullback((m, x) -> m(x), m, x)
    cy, cback = pullback((m, x) -> m(x), cm, cx)
    @test cpu(cy) ≈ y
    # seed the pullbacks with the same cotangent on both devices
    Δ = randn(size(y))
    dm, dx = back(Δ)
    cdm, cdx = cback(gpu(Δ))
    @test dm[].γ ≈ cpu(cdm[].γ)
    @test dm[].β ≈ cpu(cdm[].β)
    @test dx ≈ cpu(cdx)
  end
  @testset "2D Input" begin
    x = Float64.(collect(reshape(1:12, 3, 4)))
    m = BatchNorm(3)
    cx = gpu(x)
    cm = gpu(m)
    y, back = pullback((m, x) -> m(x), m, x)
    cy, cback = pullback((m, x) -> m(x), cm, cx)
    @test cpu(cy) ≈ y
    Δ = randn(size(y))
    dm, dx = back(Δ)
    cdm, cdx = cback(gpu(Δ))
    @test dm[].γ ≈ cpu(cdm[].γ)
    @test dm[].β ≈ cpu(cdm[].β)
    @test dx ≈ cpu(cdx)
  end
end
| [
3500,
1610,
2821,
11,
29369,
5631,
11,
6208,
198,
3500,
1610,
2821,
25,
2834,
1891,
198,
198,
31,
9288,
2617,
366,
34,
8322,
6144,
347,
963,
35393,
1,
2221,
198,
220,
220,
220,
2488,
9288,
2617,
366,
19,
35,
23412,
1,
2221,
198,
2... | 1.679153 | 614 |
# Parquet files start and end with the 4-byte magic marker "PAR1"; the footer
# length (Int32) sits just before the trailing magic.
const PAR_MAGIC = "PAR1"
const SZ_PAR_MAGIC = length(PAR_MAGIC)
const SZ_FOOTER = 4
const SZ_VALID_PAR = 2*SZ_PAR_MAGIC + SZ_FOOTER
# page is the unit of compression
# A single parquet page: the raw (possibly compressed) bytes together with
# the thrift page header and the column chunk the page belongs to.
mutable struct Page
    colchunk::ColumnChunk   # owning column chunk
    hdr::PageHeader         # thrift page header
    pos::Int                # byte position of the page data within the file
    data::Vector{UInt8}     # raw page bytes as stored on disk
end
# Cache of pages already read, keyed by column chunk and stored as MemPool
# `DRef`s. NOTE(review): despite the name there is no eviction policy visible
# here; see `cacheget` for the fill-on-miss behavior.
mutable struct PageLRU
    refs::Dict{ColumnChunk,DRef}
    function PageLRU()
        new(Dict{ColumnChunk,DRef}())
    end
end
"""
    cacheget(lru::PageLRU, chunk::ColumnChunk, nf)

Return the cached pages for `chunk`. On a cache miss the pages are produced
by `nf(chunk)`, registered in the pool, and returned directly.
"""
function cacheget(lru::PageLRU, chunk::ColumnChunk, nf)
    if haskey(lru.refs, chunk)
        return poolget(lru.refs[chunk])
    end
    fresh = nf(chunk)
    lru.refs[chunk] = poolset(fresh)
    return fresh
end
# parquet file.
# Keeps a handle to the open file and the file metadata.
# Holds a LRU cache of raw bytes of the pages read.
mutable struct ParFile
    path::AbstractString    # path the file was opened from
    handle::IOStream        # open handle, kept for the lifetime of this object
    meta::FileMetaData      # thrift file-level metadata (footer)
    schema::Schema          # parsed schema derived from `meta`
    page_cache::PageLRU     # cache of pages already read from disk
end
"""
    ParFile(path)

Open the parquet file at `path`. The freshly opened handle is closed again
if validation or metadata parsing fails, and the exception is rethrown.
"""
function ParFile(path::AbstractString)
    io = open(path)
    try
        return ParFile(path, io)
    catch err
        close(io)
        rethrow(err)
    end
end
# Construct a ParFile from an already-open handle: validates the magic bytes,
# then reads the footer metadata and derives the schema.
# NOTE(review): `maxcache` is currently unused (see the TODO below).
function ParFile(path::AbstractString, handle::IOStream; maxcache::Integer=10)
    # TODO: maxcache should become a parameter to MemPool
    is_par_file(handle) || error("Not a parquet format file: $path")
    meta_len = metadata_length(handle)
    meta = metadata(handle, path, meta_len)
    ParFile(path, handle, meta, Schema(meta.schema), PageLRU())
end
##
# layer 1 access
# can access raw (uncompressed) bytes from pages

# schema accessors; the converter form delegates to a SchemaConverter
schema(par::ParFile) = par.schema
schema(conv::T, par::ParFile, schema_name::Symbol) where {T<:SchemaConverter} = schema(conv, par.schema, schema_name)
# column name helpers: nested field paths are joined with '.'
colname(col::ColumnChunk) = colname(col.meta_data)
colname(col::ColumnMetaData) = join(col.path_in_schema, '.')
colnames(rowgroup::RowGroup) = [colname(col) for col in rowgroup.columns]
"""
    colnames(par::ParFile)

All distinct column names appearing in any row group of the file.
"""
function colnames(par::ParFile)
    seen = Set{AbstractString}()
    for group in rowgroups(par)
        union!(seen, colnames(group))
    end
    return collect(seen)
end
# number of distinct columns / total rows as recorded in the footer
ncols(par::ParFile) = length(colnames(par))
nrows(par::ParFile) = par.meta.num_rows
# physical (thrift) storage type of a column
coltype(col::ColumnChunk) = coltype(col.meta_data)
coltype(col::ColumnMetaData) = col._type
# return all rowgroups in the par file
rowgroups(par::ParFile) = par.meta.row_groups
# Return rowgroups that stores all the columns mentioned in `cnames`.
# Returned row groups can be further queried to get the range of rows.
rowgroups(par::ParFile, colname::AbstractString, rowrange::UnitRange=1:typemax(Int64)) = rowgroups(par, [colname], rowrange)
"""
    rowgroups(par::ParFile, cnames, rowrange::UnitRange=1:typemax(Int64))

Return the row groups of `par` that contain *all* columns named in `cnames`
and whose absolute row span overlaps `rowrange`. Row groups are laid out
consecutively, so each group's row span is tracked while iterating.
"""
function rowgroups(par::ParFile, cnames, rowrange::UnitRange=1:typemax(Int64))
    R = RowGroup[]
    L = length(cnames)
    beginrow = 1
    for rowgrp in rowgroups(par)
        cnamesrg = colnames(rowgrp)
        found = length(intersect(cnames, cnamesrg))
        endrow = beginrow + rowgrp.num_rows - 1
        # BUG FIX: the original called `intersect(beginrow:endrow)` with a
        # single argument, so `rowrange` was never consulted and every row
        # group passed the row-window filter.
        (found == L) && !isempty(intersect(rowrange, beginrow:endrow)) && push!(R, rowgrp)
        beginrow = endrow + 1
    end
    R
end
# column chunks of a row group, addressed by index, RowGroup, or column name
columns(par::ParFile, rowgroupidx::Integer) = columns(par, rowgroups(par)[rowgroupidx])
columns(par::ParFile, rowgroup::RowGroup) = rowgroup.columns
columns(par::ParFile, rowgroup::RowGroup, colname::AbstractString) = columns(par, rowgroup, [colname])
"""
    columns(par::ParFile, rowgroup::RowGroup, cnames)

Column chunks of `rowgroup` whose names are listed in `cnames`, in the
order they appear in the row group.
"""
function columns(par::ParFile, rowgroup::RowGroup, cnames)
    return ColumnChunk[col for col in columns(par, rowgroup) if colname(col) in cnames]
end
# Read every page of a column chunk sequentially, from the chunk's first
# page offset to its end offset.
function _pagevec(par::ParFile, col::ColumnChunk)
    # read pages from the file
    pos = page_offset(col)
    endpos = end_offset(col)
    io = par.handle
    pagevec = Page[]
    while pos < endpos
        seek(io, pos)
        pagehdr = read_thrift(io, PageHeader)
        buff = Array{UInt8}(undef, page_size(pagehdr))
        page_data_pos = position(io)
        # read! fills `buff` in place and returns it
        data = read!(io, buff)
        page = Page(col, pagehdr, page_data_pos, data)
        push!(pagevec, page)
        pos = position(io)
    end
    pagevec
end
# pages of a column chunk; cached in the ParFile's page cache on first read
pages(par::ParFile, rowgroupidx::Integer, colidx::Integer) = pages(par, columns(par, rowgroupidx), colidx)
pages(par::ParFile, cols::Vector{ColumnChunk}, colidx::Integer) = pages(par, cols[colidx])
pages(par::ParFile, col::ColumnChunk) = cacheget(par.page_cache, col, col->_pagevec(par,col))
# Raw bytes of a page. With `uncompressed=true` (default) the bytes are
# decompressed according to the column chunk's codec (SNAPPY or GZIP).
function bytes(page::Page, uncompressed::Bool=true)
    data = page.data
    codec = page.colchunk.meta_data.codec
    if uncompressed && (codec != CompressionCodec.UNCOMPRESSED)
        uncompressed_sz = page.hdr.uncompressed_page_size
        if codec == CompressionCodec.SNAPPY
            data = Snappy.uncompress(data)
        elseif codec == CompressionCodec.GZIP
            data = transcode(GzipDecompressor, data)
        else
            error("Unknown compression codec for column chunk: $codec")
        end
        # sanity check against the size recorded in the page header
        (length(data) == uncompressed_sz) || error("failed to uncompress page. expected $(uncompressed_sz), got $(length(data)) bytes")
    end
    data
end
##
# layer 2 access
# can access decoded values from pages
map_dict_vals(valdict::Vector{T1}, vals::Vector{T2}) where {T1, T2} = isempty(valdict) ? vals : [valdict[v+1] for v in vals]
# decoded values of a column chunk, addressed by row group / column index
values(par::ParFile, rowgroupidx::Integer, colidx::Integer) = values(par, columns(par, rowgroupidx), colidx)
values(par::ParFile, cols::Vector{ColumnChunk}, colidx::Integer) = values(par, cols[colidx])
# Decode all values of a column chunk: iterates its pages, remembering any
# dictionary page so later dictionary-encoded data pages can be translated.
# Returns `(values, definition_levels, repetition_levels)`.
function values(par::ParFile, col::ColumnChunk)
    ctype = coltype(col)
    pgs = pages(par, col)
    valdict = Int[]
    jtype = PLAIN_JTYPES[ctype+1]
    if (ctype == _Type.BYTE_ARRAY) || (ctype == _Type.FIXED_LEN_BYTE_ARRAY)
        # byte-array types decode to vectors of bytes per value
        jtype = Vector{jtype}
    end
    vals = Array{jtype}(undef, 0)
    defn_levels = Int[]
    repn_levels = Int[]
    for pg in pgs
        typ = pg.hdr._type
        valtup = values(par, pg)
        if (typ == PageType.DATA_PAGE) || (typ == PageType.DATA_PAGE_V2)
            @debug("reading a data page for columnchunk")
            _vals, _defn_levels, _repn_levels = valtup
            enc, defn_enc, rep_enc = page_encodings(pg)
            if enc == Encoding.PLAIN_DICTIONARY || enc == Encoding.RLE_DICTIONARY
                # dictionary-encoded: translate indices via the dictionary page
                append!(vals, map_dict_vals(valdict, _vals))
            else
                append!(vals, _vals)
            end
            append!(defn_levels, _defn_levels)
            append!(repn_levels, _repn_levels)
        elseif typ == PageType.DICTIONARY_PAGE
            _vals = valtup[1]
            @debug("read a dictionary page for columnchunk with $(length(_vals)) values")
            valdict = isempty(valdict) ? _vals : append!(valdict, _vals)
        end
    end
    vals, defn_levels, repn_levels
end
# Read `num_values` definition/repetition levels encoded with `enc`.
# Levels fit in `bitwidth(max_val)` bits; a width of 0 means no levels stored.
function read_levels(io::IO, max_val::Integer, enc::Int32, num_values::Integer)
    bw = bitwidth(max_val)
    (bw == 0) && (return Int[])
    @debug("reading levels. enc:$enc ($(Thrift.enumstr(Encoding,enc))), max_val:$max_val, num_values:$num_values")
    if enc == Encoding.RLE
        read_hybrid(io, num_values, bw)
    elseif enc == Encoding.BIT_PACKED
        read_bitpacked_run_old(io, num_values, bw)
    elseif enc == Encoding.PLAIN
        # levels should never be of this type though
        read_plain_values(io, _Type.INT32, num_values)
    else
        error("unsupported encoding $enc ($(Thrift.enumstr(Encoding,enc))) for levels")
    end
end
# Read `num_values` data values of thrift type `typ` encoded with `enc`
# (plain or dictionary-index encodings are supported).
function read_values(io::IO, enc::Int32, typ::Int32, num_values::Integer)
    @debug("reading values. enc:$enc ($(Thrift.enumstr(Encoding,enc))), num_values:$num_values")
    if enc == Encoding.PLAIN
        read_plain_values(io, num_values, typ)
    elseif enc == Encoding.PLAIN_DICTIONARY || enc == Encoding.RLE_DICTIONARY
        read_rle_dict(io, num_values)
    else
        error("unsupported encoding $enc for pages")
        #@debug("unsupported encoding $enc ($(Thrift.enumstr(Encoding,enc))) for pages")
        #return Int[]
    end
end
# Decode a single page: data pages yield `(values, defn_levels, repn_levels)`,
# dictionary pages yield a 1-tuple of dictionary values, other pages `()`.
function values(par::ParFile, page::Page)
    ctype = coltype(page.colchunk)
    rawbytes = bytes(page)
    io = IOBuffer(rawbytes)
    encs = page_encodings(page)
    num_values = page_num_values(page)
    typ = page.hdr._type
    if (typ == PageType.DATA_PAGE) || (typ == PageType.DATA_PAGE_V2)
        read_levels_and_values(io, encs, ctype, num_values, par, page)
    elseif typ == PageType.DICTIONARY_PAGE
        (read_plain_values(io, num_values, ctype),)
    else
        ()
    end
end
# Read the three sections of a data page in order: optional definition
# levels, optional repetition levels, then the values themselves.
function read_levels_and_values(io::IO, encs::Tuple, ctype::Int32, num_values::Integer, par::ParFile, page::Page)
    cname = colname(page.colchunk)
    enc, defn_enc, rep_enc = encs
    #@debug("before reading defn levels bytesavailable in page: $(bytesavailable(io))")
    # read definition levels. skipped if column is required
    defn_levels = isrequired(par.schema, cname) ? Int[] : read_levels(io, max_definition_level(par.schema, cname), defn_enc, num_values)
    #@debug("before reading repn levels bytesavailable in page: $(bytesavailable(io))")
    # read repetition levels. skipped if all columns are at 1st level
    repn_levels = ('.' in cname) ? read_levels(io, max_repetition_level(par.schema, cname), rep_enc, num_values) : Int[]
    #@debug("before reading values bytesavailable in page: $(bytesavailable(io))")
    # read values
    vals = read_values(io, enc, ctype, num_values)
    vals, defn_levels, repn_levels
end
# column and page metadata
# Open (or reuse) a handle positioned for reading a column chunk's metadata;
# `close` below only closes handles that are not the ParFile's own.
open(par::ParFile, col::ColumnChunk) = open(par.handle, par.path, col)
close(par::ParFile, col::ColumnChunk, io) = (par.handle == io) || close(io)
"""
    open(io, path, col::ColumnChunk)

Return an IO from which `col`'s metadata can be read: a newly opened handle
when the chunk lives in a separate file (or when `io` is `nothing`),
otherwise the given `io` itself. Pair with `close(par, col, io)` so only
newly created handles get closed.
"""
function open(io, path::AbstractString, col::ColumnChunk)
    if isfilled(col, :file_path)
        @debug("opening file $(col.file_path) to read column metadata at $(col.file_offset)")
        open(col.file_path)
    elseif io === nothing
        # `===` (not `==`) is the correct test against `nothing`
        @debug("opening file $path to read column metadata at $(col.file_offset)")
        open(path)
    else
        @debug("reading column metadata at $(col.file_offset)")
        io
    end
end
# Read a column chunk's ColumnMetaData from its recorded file offset,
# closing the handle only if `open` created a fresh one.
function metadata(io, path::AbstractString, col::ColumnChunk)
    fio = open(io, path, col)
    seek(fio, col.file_offset)
    meta = read_thrift(fio, ColumnMetaData)
    (fio !== io) && close(fio)
    meta
end
# First page offset of a column chunk: the minimum of the data page offset
# and the (optional) index and dictionary page offsets.
function page_offset(col::ColumnChunk)
    colmeta = col.meta_data
    offset = colmeta.data_page_offset
    Thrift.isfilled(colmeta, :index_page_offset) && (offset = min(offset, colmeta.index_page_offset))
    Thrift.isfilled(colmeta, :dictionary_page_offset) && (offset = min(offset, colmeta.dictionary_page_offset))
    offset
end
# Byte position just past a column chunk's last page.
end_offset(col::ColumnChunk) = page_offset(col) + col.meta_data.total_compressed_size
# On-disk size of a page (compressed size when the header records one).
page_size(page::PageHeader) = Thrift.isfilled(page, :compressed_page_size) ? page.compressed_page_size : page.uncompressed_page_size
# Encodings of a page as a tuple: (values, definition levels, repetition levels).
page_encodings(page::Page) = page_encodings(page.hdr)
function page_encodings(page::PageHeader)
    Thrift.isfilled(page, :data_page_header) ? page_encodings(page.data_page_header) :
    Thrift.isfilled(page, :data_page_header_v2) ? page_encodings(page.data_page_header_v2) :
    Thrift.isfilled(page, :dictionary_page_header) ? page_encodings(page.dictionary_page_header) : ()
end
page_encodings(page::DictionaryPageHeader) = (page.encoding,)
page_encodings(page::DataPageHeader) = (page.encoding, page.definition_level_encoding, page.repetition_level_encoding)
page_encodings(page::DataPageHeaderV2) = (page.encoding, Encoding.RLE, Encoding.RLE)
# Number of values stored in a page, from whichever header variant is filled.
page_num_values(page::Page) = page_num_values(page.hdr)
function page_num_values(page::PageHeader)
    Thrift.isfilled(page, :data_page_header) ? page_num_values(page.data_page_header) :
    Thrift.isfilled(page, :data_page_header_v2) ? page_num_values(page.data_page_header_v2) :
    Thrift.isfilled(page, :dictionary_page_header) ? page_num_values(page.dictionary_page_header) : 0
end
page_num_values(page::Union{DataPageHeader,DataPageHeaderV2,DictionaryPageHeader}) = page.num_values
# file metadata
# Deserialize a thrift structure of type `T` from bytes / a stream / a transport.
read_thrift(buff::Array{UInt8}, ::Type{T}) where {T} = read(TCompactProtocol(TMemoryTransport(buff)), T)
read_thrift(io::IO, ::Type{T}) where {T} = read(TCompactProtocol(TFileTransport(io)), T)
read_thrift(t::TR, ::Type{T}) where {TR<:TTransport,T} = read(TCompactProtocol(t), T)
# Length of the footer metadata block, stored as a little-endian Int32
# immediately before the trailing magic bytes.
function metadata_length(io)
    sz = filesize(io)
    seek(io, sz - SZ_PAR_MAGIC - SZ_FOOTER)
    # read footer size as little endian signed Int32
    ProtoBuf.read_fixed(io, Int32)
end
# Read the file-level FileMetaData from the footer, filling in any column
# chunk metadata that is stored out-of-line at a file offset.
function metadata(io, path::AbstractString, len::Integer=metadata_length(io))
    @debug("metadata len = $len")
    sz = filesize(io)
    seek(io, sz - SZ_PAR_MAGIC - SZ_FOOTER - len)
    meta = read_thrift(io, FileMetaData)
    # go through all column chunks and read metadata from file offsets if required
    for grp in meta.row_groups
        for col in grp.columns
            if !isfilled(col, :meta_data)
                # need to read metadata from an offset
                col.meta_data = metadata(io, path, col)
            end
        end
    end
    meta
end
metadata(par::ParFile) = par.meta
# file format verification
"""
    is_par_file(fname::AbstractString) -> Bool

Open `fname`, check for the parquet magic marker at both ends (see the IO
method below), and close the file again.
"""
is_par_file(fname::AbstractString) = open(is_par_file, fname)
"""
    is_par_file(io) -> Bool

Check that the seekable stream `io` is large enough to be a parquet file
and carries the `"PAR1"` magic marker at both its start and its end.
"""
function is_par_file(io)
    sz = filesize(io)
    sz > SZ_VALID_PAR || return false
    seekstart(io)
    head = Array{UInt8}(undef, SZ_PAR_MAGIC)
    read!(io, head)
    String(head) == PAR_MAGIC || return false
    seek(io, sz - SZ_PAR_MAGIC)
    tail = Array{UInt8}(undef, SZ_PAR_MAGIC)
    read!(io, tail)
    String(tail) == PAR_MAGIC || return false
    return true
end
| [
198,
9979,
29463,
62,
45820,
2149,
796,
366,
27082,
16,
1,
198,
9979,
311,
57,
62,
27082,
62,
45820,
2149,
796,
4129,
7,
27082,
62,
45820,
2149,
8,
198,
9979,
311,
57,
62,
6080,
2394,
1137,
796,
604,
198,
9979,
311,
57,
62,
23428,... | 2.408476 | 5,545 |
# Author: <NAME>, <EMAIL>
# Date: 12/11/2014
# Defines the root abstract type for generative models.
module AbstractGenerativeModelImpl

export AbstractGenerativeModel

# Fixed removed syntax: `abstract X` was dropped in Julia 1.0;
# `abstract type X end` is the required form.
abstract type AbstractGenerativeModel end

end #module

# Interface stubs that concrete generative models extend with methods.
module AbstractGenerativeModelInterfaces

export get,
       isterminal

function get() end
function isterminal() end

end #module
| [
2,
6434,
25,
1279,
20608,
22330,
1279,
27630,
4146,
29,
198,
2,
7536,
25,
1105,
14,
1157,
14,
4967,
628,
198,
21412,
27741,
8645,
876,
17633,
29710,
198,
198,
39344,
27741,
8645,
876,
17633,
198,
198,
397,
8709,
27741,
8645,
876,
1763... | 3.217391 | 92 |
# Tests for ModelKit straight-line-program (SLP) evaluation: compiled vs
# interpreted homotopies/systems, generated taylor!/jacobian code against
# symbolic derivatives, and evaluation with Arblib ball arithmetic.
@testset "ModelKit - SLP" begin
    # Both homotopy backends must agree with direct symbolic evaluation.
    @testset "CompiledHomotopy/InterpretedHomotopy" begin
        @var x y a b c
        f = [(2 * x^2 + b^2 * y^3 + 2 * a * x * y)^3, (a + c)^4 * x + y^2]
        @var s sp[1:3] sq[1:3]
        g = subs(f, [a, b, c] => s .* sp .+ (1 .- s) .* sq)
        h = Homotopy(g, [x, y], s, [sp; sq])
        p = [5.2, -1.3, 9.3]
        q = [2.6, 3.3, 2.3]
        for H in [InterpretedHomotopy(h), CompiledHomotopy(h)]
            @test size(H) == (2, 2)
            v = [0.192, 2.21]
            t = 0.232
            u = zeros(ComplexF64, 2)
            U = zeros(ComplexF64, 2, 2)
            evaluate!(u, H, v, t, [p; q])
            @test u ≈ f([x, y] => v, [a, b, c] => t * p + (1 - t) * q)
            taylor!(u, Val(1), H, TaylorVector{1}(Matrix(v')), t, [p; q])
            # first t-derivative must match the symbolic d/ds of the homotopy
            @test u ≈ let
                @var s sp[1:3] sq[1:3]
                pt = s .* sp .+ (1 .- s) .* sq
                differentiate(subs(f, [a, b, c] => pt), s)(
                    [x, y] => v,
                    sp => p,
                    sq => q,
                    s => t,
                )
            end
            evaluate_and_jacobian!(u, U, H, v, t, [p; q])
            @test U ≈
                  differentiate(f, [x, y])([x, y] => v, [a, b, c] => t * p + (1 - t) * q)
        end
    end
    @testset "Codegen helpers" begin
        @test ModelKit.sqr(3 + 2im) == (3 + 2im)^2
        @test ModelKit.sqr(3) == 3^2
    end
    # Generated homotopy code must match symbolic Taylor coefficients to order 4.
    @testset "Homotopy codegen (Katsura(3))" begin
        n = 3
        @var x[0:n] ẋ[0:n] ẍ[0:n] x3[0:n] t γ
        K = [
            (
                sum(x[abs(l)+1] * x[abs(m - l)+1] for l = -n:n if abs(m - l) <= n) - x[m+1] for m = 0:n-1
            )...,
            x[1] + 2 * sum(x[i+1] for i = 1:n) - 1,
        ]
        h = γ .* t .* [x[1:n] .^ 2 .- 1; x[n+1] - 1] + (1 - t) .* K
        H = ModelKit.Homotopy(h, x, t, [γ])
        TH = CompiledHomotopy(H)
        tx3 = TaylorVector{4}(zeros(Expression, 4, 4))
        y, y1, y2, y3 = vectors(tx3)
        y .= x
        y1 .= ẋ
        y2 .= ẍ
        y3 .= x3
        dt1 =
            ModelKit.taylor!(zeros(Expression, 4), Val(1), TH, TaylorVector{1}(tx3), t, [γ])
        dt2 =
            ModelKit.taylor!(zeros(Expression, 4), Val(2), TH, TaylorVector{2}(tx3), t, [γ])
        dt3 =
            ModelKit.taylor!(zeros(Expression, 4), Val(3), TH, TaylorVector{3}(tx3), t, [γ])
        dt4 =
            ModelKit.taylor!(zeros(Expression, 4), Val(4), TH, TaylorVector{4}(tx3), t, [γ])
        # reference coefficients: expand H(x + λẋ + λ²ẍ + λ³x3, t + λ) in λ
        @var λ
        Hd1 = subs(H.expressions, t => t + λ)
        true_dt1 = subs(differentiate(Hd1, λ, 1), λ => 0)
        Hd2 = subs(H.expressions, x => x .+ λ .* ẋ, t => t + λ)
        true_dt2 = subs(differentiate(Hd2, λ, 2), λ => 0) / 2
        Hd3 = subs(H.expressions, x => x .+ λ .* ẋ .+ λ^2 .* ẍ, t => t + λ)
        true_dt3 = subs(differentiate(Hd3, λ, 3), λ => 0) / 6
        Hd4 = subs(H.expressions, x => x .+ λ .* ẋ .+ λ^2 .* ẍ .+ λ^3 .* x3, t => t + λ)
        true_dt4 = subs(differentiate(Hd4, λ, 4), λ => 0) / 24
        @test expand.(TH(x, t, [γ])) == expand.(h)
        @test expand.(ModelKit.jacobian(TH, x, t, [γ])) == expand.(differentiate(h, x))
        @test expand.(dt1) == expand.(true_dt1)
        @test expand.(dt2) == expand.(true_dt2)
        @test expand.(dt3) == expand.(true_dt3)
        @test expand.(dt4) == expand.(true_dt4)
    end
    # taylor! on systems at orders 2 and 3, against symbolic expansions.
    @testset "taylor! - system" begin
        @var x[1:2] ẋ[1:2] ẍ[1:2] x3[1:2] p[1:2] ṗ[1:2]
        f = [(x[1] + x[2])^3 + x[1]^2 + x[1] + 5 * x[2] + 3 * p[1], 2 * x[1]^2 + p[2]]
        F = System(f, x, p)
        TF = CompiledSystem(F)
        tx = TaylorVector{4}([x'; ẋ'; ẍ'; x3'])
        tv = TaylorVector{5}(Expression, 2)
        v, v1, v2, v3, v4 = vectors(tv)
        @var λ
        ModelKit.taylor!(TaylorVector{3}(tv), Val(2), TF, TaylorVector{2}(tx), p)
        Fd2 = subs(F.expressions, x => x .+ λ .* ẋ)
        true_v1 = subs(differentiate(Fd2, λ, 1), λ => 0)
        true_v2 = subs(differentiate(Fd2, λ, 2), λ => 0) / 2
        @test expand.(v) == expand.(f)
        @test expand.(v1) == expand.(true_v1)
        @test expand.(v2) == expand.(true_v2)
        ModelKit.taylor!(TaylorVector{4}(tv), Val(3), TF, TaylorVector{3}(tx), p)
        Fd3 = subs(F.expressions, x => x .+ λ .* ẋ .+ λ^2 .* ẍ)
        true_v1 = subs(differentiate(Fd3, λ, 1), λ => 0)
        true_v2 = subs(differentiate(Fd3, λ, 2), λ => 0) / 2
        true_v3 = subs(differentiate(Fd3, λ, 3), λ => 0) / 6
        @test expand.(v) == expand.(f)
        @test expand.(v1) == expand.(true_v1)
        # compare higher orders under a random integer substitution
        sub = variables(v2) => rand(1:10_000, 6)
        @test v2(sub) == true_v2(sub)
        @test v3(sub) == true_v3(sub)
    end
    @testset "Monomials" begin
        @var x y
        F = System([x^2 - 1, y])
        u = zeros(ComplexF64, 2)
        U = zeros(ComplexF64, 2, 2)
        w = randn(ComplexF64, 2)
        evaluate_and_jacobian!(u, U, CompiledSystem(F), w)
        @test sum(abs, U) > 0
        U .= 0
        evaluate_and_jacobian!(u, U, InterpretedSystem(F), w)
        @test sum(abs, U) > 0
    end
    # Evaluation/jacobian with Arblib ball-arithmetic vectors and matrices.
    @testset "Evaluation of Arb" begin
        @var x y
        # define the polynomials
        f₁ = (x^4 + y^4 - 1) * (x^2 + y^2 - 2) + x^5 * y
        f₂ = x^2 + 2x * y^2 - 2 * y^2 - 1 / 2
        F = System([f₁, f₂])
        for I in [
            Arblib.AcbMatrix(randn(ComplexF64, 2, 1)),
            Arblib.AcbRefVector(randn(ComplexF64, 2)),
        ]
            @test all(!iszero, F(I))
            @test F(real.(I)) isa Arblib.ArbVector
            @test all(!iszero, System(F)(real.(I)))
            u = Arblib.AcbVector(2)
            evaluate!(u, InterpretedSystem(System(F)), I)
            @test all(!iszero, u)
            v = Arblib.ArbVector(2)
            evaluate!(v, InterpretedSystem(System(F)), real.(I))
            @test all(!iszero, v)
            U = Arblib.AcbMatrix(2, 2)
            jacobian!(U, InterpretedSystem(System(F)), I)
            @test all(!iszero, U)
            V = Arblib.ArbMatrix(2, 2)
            jacobian!(V, InterpretedSystem(System(F)), real.(I))
            @test all(!iszero, V)
        end
    end
end
| [
31,
9288,
2617,
366,
17633,
20827,
532,
12419,
47,
1,
2221,
628,
220,
220,
220,
2488,
9288,
2617,
366,
7293,
3902,
28718,
313,
11081,
14,
9492,
5310,
276,
28718,
313,
11081,
1,
2221,
198,
220,
220,
220,
220,
220,
220,
220,
2488,
778... | 1.675808 | 3,683 |
<filename>src/parsing.jl
"""
    addkey!(membernames, nam)

Return the placeholder `Symbol` associated with the column reference `nam`,
creating and recording a fresh `gensym` on first use.
"""
function addkey!(membernames, nam)
    return get!(gensym, membernames, nam)
end
# true if `e` is a call of the 1-argument function `f`, e.g. `cols(:x)`
onearg(e::Expr, f) = e.head == :call && length(e.args) == 2 && e.args[1] == f
onearg(e, f) = false
# apply `f` to every sub-expression of `e`, keeping the expression head
mapexpr(f, e) = Expr(e.head, map(f, e.args)...)
# Rewrite an expression, replacing column references (QuoteNodes, `cols(...)`,
# and the deprecated `_I_(...)`) with gensym placeholders recorded in
# `membernames`; `^(...)` escapes its argument from replacement.
replace_syms!(x, membernames) = x
replace_syms!(q::QuoteNode, membernames) =
    replace_syms!(Meta.quot(q.value), membernames)
replace_syms!(e::Expr, membernames) =
    if onearg(e, :^)
        # ^(x) escapes x: pass the inner expression through untouched
        e.args[2]
    elseif onearg(e, :_I_)
        @warn "_I_() for escaping variables is deprecated, use cols() instead"
        addkey!(membernames, :($(e.args[2])))
    elseif onearg(e, :cols)
        addkey!(membernames, :($(e.args[2])))
    elseif e.head == :quote
        addkey!(membernames, Meta.quot(e.args[1]) )
    elseif e.head == :.
        # `a.b` chains need care so the field part is not treated as a column
        replace_dotted!(e, membernames)
    else
        mapexpr(x -> replace_syms!(x, membernames), e)
    end
# A "simple call" has the shape `f(:x, cols(y), ...)`: a named function
# applied directly to column references only.
is_simple_non_broadcast_call(x) = false
function is_simple_non_broadcast_call(expr::Expr)
    expr.head == :call || return false
    length(expr.args) >= 2 || return false
    expr.args[1] isa Symbol || return false
    return all(a -> a isa QuoteNode || onearg(a, :cols), expr.args[2:end])
end
# A "simple broadcast call" has the shape `f.(:x, cols(y), ...)`, i.e. a
# `:.` expression whose second argument is a tuple of column references.
is_simple_broadcast_call(x) = false
function is_simple_broadcast_call(expr::Expr)
    expr.head == :. || return false
    length(expr.args) == 2 || return false
    expr.args[1] isa Symbol || return false
    tup = expr.args[2]
    (tup isa Expr && tup.head == :tuple) || return false
    return all(a -> a isa QuoteNode || onearg(a, :cols), tup.args)
end
# Convert a vector of column references (QuoteNodes or `cols(...)` calls)
# into an expression that builds the concrete source-selector vector.
function args_to_selectors(v)
    t = map(v) do arg
        if arg isa QuoteNode
            arg
        elseif onearg(arg, :cols)
            arg.args[2]
        else
            throw(ArgumentError("This path should not be reached, arg: $(arg)"))
        end
    end
    :(DataFramesMeta.make_source_concrete($(Expr(:vect, t...))))
end
# true if `ex` is a macrocall of the macro named `name` (e.g. "@byrow")
is_macro_head(ex, name) = false
is_macro_head(ex::Expr, name) = ex.head == :macrocall && ex.args[1] == Symbol(name)
# Peel recognized macro flags (currently only `@byrow`) off the front of an
# expression, recording which were seen. Returns `(stripped_expr, flags)`;
# repeating the same flag is an error.
extract_macro_flags(ex, exprflags = (;Symbol("@byrow") => Ref(false),)) = (ex, exprflags)
function extract_macro_flags(ex::Expr, exprflags = (;Symbol("@byrow") => Ref(false),))
    if ex.head == :macrocall
        macroname = ex.args[1]
        if macroname in keys(exprflags)
            exprflag = exprflags[macroname]
            if exprflag[] == true
                throw(ArgumentError("Redundant flag $macroname used."))
            end
            exprflag[] = true
            # flags may be stacked; keep stripping recursively
            return extract_macro_flags(MacroTools.unblock(ex.args[3]), exprflags)
        else
            return (ex, exprflags)
        end
    end
    return (ex, exprflags)
end
"""
    get_source_fun(function_expr; wrap_byrow::Bool=false)

Given an expression that may contain `QuoteNode`s (`:x`)
and items wrapped in `cols`, return a function
that is equivalent to that expression where the
`QuoteNode`s and `cols` items are the inputs
to the function.

For fast compilation `get_source_fun` returns
the name of a called function where possible.

* `f(:x, :y)` will return `f`
* `f.(:x, :y)` will return `ByRow(f)`
* `:x .+ :y` will return `.+`

`get_source_fun` also returns an expression
representing the vector of inputs that will be
used as the `src` in the `src => fun => dest`
call later on.

If `wrap_byrow=true` then the function gets wrapped
in `ByRow`. If the expression begins with `@byrow`,
then `get_source_fun` is recurively called on the
expression that `@byrow` acts on, with `wrap_byrow=true`.

### Examples

```julia
julia> using MacroTools

julia> ex = :(:x + :y);

julia> DataFramesMeta.get_source_fun(ex)
(:(DataFramesMeta.make_source_concrete([:x, :y])), :+)

julia> ex = quote
           :x .+ 1 .* :y
       end |> MacroTools.prettify

julia> src, fun = DataFramesMeta.get_source_fun(ex);

julia> MacroTools.prettify(fun)
:((mammoth, goat)->mammoth .+ 1 .* goat)

julia> ex = :(@byrow :x * :y);

julia> src, fun = DataFramesMeta.get_source_fun(ex);

julia> MacroTools.prettify(fun)
:(ByRow(*))
```
"""
function get_source_fun(function_expr; wrap_byrow::Bool=false)
    function_expr = MacroTools.unblock(function_expr)
    if is_simple_non_broadcast_call(function_expr)
        source = args_to_selectors(function_expr.args[2:end])
        fun_t = function_expr.args[1]
        # .+ to +
        if startswith(string(fun_t), '.')
            f_sym_without_dot = Symbol(chop(string(fun_t), head = 1, tail = 0))
            fun = :(DataFrames.ByRow($f_sym_without_dot))
        else
            fun = fun_t
        end
    elseif is_simple_broadcast_call(function_expr)
        # extract source symbols from quotenodes
        source = args_to_selectors(function_expr.args[2].args)
        fun_t = function_expr.args[1]
        fun = :(DataFrames.ByRow($fun_t))
    else
        # general case: replace column references with gensyms and build an
        # anonymous function taking those gensyms as inputs
        membernames = Dict{Any, Symbol}()
        body = replace_syms!(function_expr, membernames)
        source = :(DataFramesMeta.make_source_concrete($(Expr(:vect, keys(membernames)...))))
        inputargs = Expr(:tuple, values(membernames)...)
        fun = quote
            $inputargs -> begin
                $body
            end
        end
    end
    if wrap_byrow
        fun = :(ByRow($fun))
    end
    return source, fun
end
# `nolhs` needs to be `true` when we have syntax of the form
# `@combine(gd, fun(:x, :y))` where `fun` returns a `table` object.
# We don't create the "new name" pair because new names are
# given by the table.
# We need wrap_byrow as a keyword argument here in case someone
# uses `@transform df @byrow begin ... end`, which we
# deal with outside of this function.
"""
    fun_to_vec(ex::Expr; gensym_names=false, outer_flags=nothing, no_dest=false)

Lower one user expression (e.g. `:y = f(:x)`) to a `src => fun => dest`
expression (or `src => fun` when `no_dest=true`) for the DataFrames
transformation mini-language, handling `cols(...)` on either side and the
`@byrow` flag (inner or outer, but not both).
"""
function fun_to_vec(ex::Expr;
                    gensym_names::Bool=false,
                    outer_flags::Union{NamedTuple, Nothing}=nothing,
                    no_dest::Bool=false)
    # classify the type of expression
    # :x # handled via dispatch
    # cols(:x) # handled as though above
    # f(:x) # requires pass_as_is, for `@with` and `@subset` in future
    # y = :x # :x is a QuoteNode
    # y = cols(:x) # use cols on RHS
    # cols(:y) = :x # RHS in :block
    # cols(:y) = cols(:x)
    # y = f(:x) # re-write as simple call
    # y = f(cols(:x)) # re-write as simple call, use cols
    # y = :x + 1 # re-write as complicated call
    # y = cols(:x) + 1 # re-write as complicated call, with cols
    # cols(:y) = f(:x) # re-write as simple call, but RHS is :block
    # cols(:y) = f(cols(:x)) # re-write as simple call, RHS is block, use cols
    # cols(y) = :x + 1 # re-write as complicated col, but RHS is :block
    # cols(:y) = cols(:x) + 1 # re-write as complicated call, RHS is block, use cols
    # `@byrow` before any of the above
    ex, inner_flags = extract_macro_flags(MacroTools.unblock(ex))
    # Use tuple syntax in future when we add more flags
    inner_wrap_byrow = inner_flags[Symbol("@byrow")][]
    outer_wrap_byrow = outer_flags === nothing ? false : outer_flags[Symbol("@byrow")][]
    if inner_wrap_byrow && outer_wrap_byrow
        throw(ArgumentError("Redundant @byrow calls."))
    else
        wrap_byrow = inner_wrap_byrow || outer_wrap_byrow
    end
    if gensym_names
        ex = Expr(:kw, gensym(), ex)
    end
    # :x
    # handled below via dispatch on ::QuoteNode
    # cols(:x)
    if onearg(ex, :cols)
        return ex.args[2]
    end
    if no_dest
        source, fun = get_source_fun(ex, wrap_byrow = wrap_byrow)
        return quote
            $source => $fun
        end
    end
    @assert ex.head == :kw || ex.head == :(=)
    lhs = ex.args[1]
    rhs = MacroTools.unblock(ex.args[2])
    if is_macro_head(rhs, "@byrow")
        s = "In keyword argument inputs, `@byrow` must be on the left hand side. " *
            "Did you write `y = @byrow f(:x)` instead of `@byrow y = f(:x)`?"
        throw(ArgumentError(s))
    end
    # y = ...
    if lhs isa Symbol
        msg = "Using an un-quoted Symbol on the LHS is deprecated. " *
              "Write $(QuoteNode(lhs)) = ... instead."
        Base.depwarn(msg, "")
        lhs = QuoteNode(lhs)
    end
    # :y = :x
    if lhs isa QuoteNode && rhs isa QuoteNode
        source = rhs
        dest = lhs
        return quote
            $source => $dest
        end
    end
    # :y = cols(:x)
    if lhs isa QuoteNode && onearg(rhs, :cols)
        source = rhs.args[2]
        dest = lhs
        return quote
            $source => $dest
        end
    end
    # cols(:y) = :x
    if onearg(lhs, :cols) && rhs isa QuoteNode
        source = rhs
        dest = lhs.args[2]
        return quote
            $source => $dest
        end
    end
    # cols(:y) = cols(:x)
    if onearg(lhs, :cols) && onearg(rhs, :cols)
        source = rhs.args[2]
        dest = lhs.args[2]
        return quote
            $source => $dest
        end
    end
    # :y = f(:x)
    # :y = f(cols(:x))
    # :y = :x + 1
    # :y = cols(:x) + 1
    source, fun = get_source_fun(rhs; wrap_byrow = wrap_byrow)
    if lhs isa QuoteNode
        dest = lhs
        return quote
            $source => $fun => $dest
        end
    end
    # cols(:y) = f(:x)
    if onearg(lhs, :cols)
        dest = lhs.args[2]
        return quote
            $source => $fun => $dest
        end
    end
    throw(ArgumentError("This path should not be reached"))
end
# A bare column reference (`:x`) lowers to itself; no src/fun/dest pair needed.
fun_to_vec(ex::QuoteNode;
           no_dest::Bool=false,
           gensym_names::Bool=false,
           outer_flags::Union{NamedTuple, Nothing}=nothing) = ex
"""
    make_source_concrete(x::AbstractVector)

Normalize a vector of column references: empty or concretely typed vectors
pass through unchanged; mixtures of `Symbol`s and strings are unified to
`Symbol`s. Anything else raises an `ArgumentError`.
"""
function make_source_concrete(x::AbstractVector)
    if isempty(x) || isconcretetype(eltype(x))
        return x
    elseif all(t -> t isa Union{AbstractString, Symbol}, x)
        return Symbol.(x)
    else
        # fixed typo in the message: "a a combination" -> "a combination"
        throw(ArgumentError("Column references must be either all the same " *
                            "type or a combination of `Symbol`s and strings"))
    end
end
# Like `replace_syms!` but leaves quoted sub-expressions untouched; used for
# the field part of `a.b` chains so it is not treated as a column reference.
protect_replace_syms!(e, membernames) = e
function protect_replace_syms!(e::Expr, membernames)
    if e.head == :quote
        e
    else
        replace_syms!(e, membernames)
    end
end
# Handle `a.b` chains: replace references in the object part, protect the
# field part, and reassemble the `getproperty` expression.
function replace_dotted!(e, membernames)
    x_new = replace_syms!(e.args[1], membernames)
    y_new = protect_replace_syms!(e.args[2], membernames)
    Expr(:., x_new, y_new)
end
# Varargs convenience: wrap multiple expressions into a block and delegate.
function create_args_vector(args...)
    create_args_vector(Expr(:block, args...))
end
"""
    create_args_vector(arg) -> vec, outer_flags

Given an expression return a vector of operations
and a `NamedTuple` of the macro-flags that appear
in the expression.

If a `:block` expression, return the `args` of
the block as an array. If a simple expression,
wrap the expression in a one-element vector.
"""
function create_args_vector(arg)
    arg, outer_flags = extract_macro_flags(MacroTools.unblock(arg))
    if arg isa Expr && arg.head == :block
        # drop LineNumberNodes so each statement is a clean operation
        x = MacroTools.rmlines(arg).args
    else
        x = Any[arg]
    end
    return x, outer_flags
end
| [
27,
34345,
29,
10677,
14,
79,
945,
278,
13,
20362,
198,
8818,
751,
2539,
0,
7,
19522,
14933,
11,
299,
321,
8,
198,
220,
220,
220,
611,
5145,
10134,
2539,
7,
19522,
14933,
11,
299,
321,
8,
198,
220,
220,
220,
220,
220,
220,
220,
... | 2.201792 | 4,911 |
<reponame>mattwigway/ArchGDAL.jl<gh_stars>100-1000
using Downloads
using SHA
# this file downloads files which are used during testing the package
# if they are already present and their checksum matches, they are not downloaded again
REPO_URL = "https://github.com/yeesian/ArchGDALDatasets/blob/master/"
# remote files with SHA-2 256 hash
"""
To add more files, follow the below steps to generate the SHA
```
julia> using SHA
julia> open(filepath/filename) do f
bytes2hex(sha256(f))
end
```
"""
remotefiles = [
(
"data/road.zip",
"058bdc549d0fc5bfb6deaef138e48758ca79ae20df79c2fb4c40cb878f48bfd8",
),
]
function verify(path::AbstractString, hash::AbstractString)
@assert occursin(r"^[0-9a-f]{64}$", hash)
hash = lowercase(hash)
if isfile(path)
calc_hash = open(path) do file
return bytes2hex(sha256(file))
end
@assert occursin(r"^[0-9a-f]{64}$", calc_hash)
if calc_hash != hash
@error "Hash Mismatch! Expected: $hash, Calculated: $calc_hash\n"
return false
else
return true
end
else
error("File read error: $path")
end
end
function download_verify(
url::AbstractString,
hash::Union{AbstractString,Nothing},
dest::AbstractString,
)
file_existed = false
# verify if file exists
if isfile(dest)
file_existed = true
if hash !== nothing && verify(dest, hash)
# hash verified
return true
else
# either hash is nothing or couldn't pass the SHA test
@error(
"Failed to verify file: $dest with hash: $hash. Re-downloading file..."
)
end
end
# if the file exists but some problem exists, we delete it to start from scratch
file_existed && Base.rm(dest; force = true)
# Make sure the containing folder exists
mkpath(dirname(dest))
# downloads the file at dest
Downloads.download(url, dest)
# hash exists and verification fails
if hash !== nothing && !verify(dest, hash)
if file_existed
# the file might be corrupted so we start from scracth
Base.rm(dest; force = true)
Downloads.download(url, dest)
if hash !== nothing && !verify(dest, hash)
error("Verification failed")
end
else
error("Verification failed. File not created after download.")
end
end
return !file_existed
end
for (f, sha) in remotefiles
# create the directories if they don't exist
currdir = dirname(f)
isdir(currdir) || mkpath(currdir)
# download the file if it is not there or if it has a different checksum
currfile = normpath(joinpath(@__DIR__, f))
url = REPO_URL * f * "?raw=true"
download_verify(url, sha, currfile)
end
| [
27,
7856,
261,
480,
29,
76,
1078,
28033,
1014,
14,
19895,
45113,
1847,
13,
20362,
27,
456,
62,
30783,
29,
3064,
12,
12825,
198,
3500,
50093,
198,
3500,
25630,
198,
198,
2,
428,
2393,
21333,
3696,
543,
389,
973,
1141,
4856,
262,
5301... | 2.341762 | 1,226 |
@testset "eigenvalues/eigenvectors: $MatT" for MatT in (AcbMatrix, AcbRefMatrix)
A = [
0.6873474041954415 0.7282180564881044 0.07360652513458521
0.000835810121029068 0.9256166870757694 0.5363310989411239
0.07387174694790022 0.4050436025621329 0.20226010388885896
]
B = [
0.8982563031334123 0.3029712969740874 0.8585014523679579
0.7583002736998279 0.8854763478184455 0.3031103325817668
0.2319572749472405 0.5769840251057949 0.5119507333628952
]
M = MatT(A + B * im, prec = 64)
# M = MatT(rand(3, 3) + im * rand(3, 3), prec = 64)
VecT = typeof(similar(M, 3))
@testset "approx_eig_qr" begin
λs_a_r, revs_a = Arblib.approx_eig_qr(M, side = :right)
@test λs_a_r isa VecT
@test revs_a isa MatT
ε = 1e-10
λs_a_l, revs_a = Arblib.approx_eig_qr(M, tol = Arblib.Mag(ε), side = :left)
@test λs_a_l isa VecT
@test revs_a isa MatT
@test all(abs.(λs_a_r - λs_a_l) .< ε)
λs_r = similar(M, size(M, 1))
Arblib.approx_eig_qr!(λs_r, M)
@test Arblib.is_zero(λs_r - λs_a_r, length(λs_r))
end
@testset "eig_simple" begin
λs1, _ = Arblib.eig_simple_vdhoeven_mourrain(M, side = :right)
λs2, _ = Arblib.eig_simple_vdhoeven_mourrain(M, side = :left)
@test all(Arblib.containszero, λs1 - λs2)
# λs1, _ = Arblib.eig_simple_rump(M, side = :right)
# segfaults in acb_mat_solve at /workspace/srcdir/arb-2.18.1/acb_mat/solve.c:17
# Issue #321 in Arblib (fixed by #330)
# λs2, _ = Arblib.eig_simple_rump(M, side=:left)
# @test all(Arblib.containszero, λs1 - λs2)
λs1, _ = Arblib.eig_simple(M, side = :right)
λs2, _ = Arblib.eig_simple(M, side = :left)
@test all(Arblib.containszero, λs1 - λs2)
λs = similar(M, size(M, 1))
Arblib.eig_simple_vdhoeven_mourrain!(λs, M)
@test all(Arblib.containszero, λs - λs1)
λs = similar(M, size(M, 1))
Arblib.eig_simple_rump!(λs, M)
@test all(Arblib.containszero, λs - λs1)
λs = similar(M, size(M, 1))
Arblib.eig_simple!(λs, M)
@test all(Arblib.containszero, λs - λs1)
end
N = similar(M)
evs = [Arb(2.0), Arb(2.0), Arb(rand())]
N[1, 1], N[2, 2], N[3, 3] = evs
N = M * N * M^-1
@testset "enclosures" begin
ε = Arblib.Mag()
tol = 1e-12
λ_approx, R_approx = Arblib.approx_eig_qr(M, tol = tol)
@test Arblib.eig_global_enclosure!(
ε,
M,
λ_approx,
R_approx;
prec = precision(M),
) isa Arblib.Mag
@test ε <= tol
λs = similar(M, size(M, 1))
Arblib.eig_simple!(λs, M, λ_approx, R_approx)
for λa in λ_approx
a_real = let x = real(λa)
m = Arblib.midref(x)
r = Arblib.radref(x)
Arblib.set_interval!(x, m - (r + ε), m + (r + ε))
end
a_imag = let x = imag(λa)
m = Arblib.midref(x)
r = Arblib.radref(x)
Arblib.set_interval!(x, m - (r + ε), m + (r + ε))
end
a = Acb(a_real, a_imag)
@test any(Arblib.containszero(a - λ) for λ in λs)
end
@test_throws Arblib.EigenvalueComputationError Arblib.eig_simple(N)
λ_approx, R_approx = Arblib.approx_eig_qr(N)
v = sortperm(λ_approx, by = abs, rev = true)
λ = Acb(prec = precision(N))
R = similar(N, (3, 1))
Arblib.eig_enclosure_rump!(λ, R, N, λ_approx[v[1]], R_approx[:, v[1:1]])
@test !isfinite(λ)
λ = Acb(prec = precision(N))
R = similar(N, (3, 1))
Arblib.eig_enclosure_rump!(λ, R, N, λ_approx[v[3]], R_approx[:, v[3:3]])
@test isfinite(λ)
@test Arblib.contains_zero(λ - evs[3])
λ = Acb(prec = precision(N))
R = similar(N, (3, 2))
Arblib.eig_enclosure_rump!(λ, R, N, λ_approx[v[1]], R_approx[:, v[1:2]])
@test isfinite(λ)
@test Arblib.contains_zero(λ - 2)
@test all(Arblib.contains_zero.(N * R - R * λ))
λ = Acb(prec = precision(N))
R = similar(N, (3, 2))
J = similar(N, (2, 2))
Arblib.eig_enclosure_rump!(λ, J, R, N, λ_approx[v[2]], R_approx[:, v[1:2]])
@test Arblib.contains_zero(λ - 2)
@test all(Arblib.contains_zero.(N * R - R * J))
end
@testset "eig_multiple" begin
λs = similar(N, 3)
@test Arblib.eig_multiple_rump(N) isa VecT
Arblib.eig_multiple_rump!(λs, N)
v = sortperm(λs, by = abs, rev = true)
@test all(Arblib.contains_zero.(λs[v] - evs))
@test Arblib.eig_multiple(N) isa VecT
Arblib.eig_multiple!(λs, N)
v = sortperm(λs, by = abs, rev = true)
@test all(Arblib.contains_zero.(λs[v] - evs))
end
end
| [
31,
9288,
2617,
366,
68,
9324,
27160,
14,
68,
9324,
303,
5217,
25,
720,
19044,
51,
1,
329,
6550,
51,
287,
357,
12832,
65,
46912,
11,
4013,
65,
8134,
46912,
8,
198,
220,
220,
220,
317,
796,
685,
198,
220,
220,
220,
220,
220,
220,... | 1.683111 | 2,919 |
<gh_stars>0
module VortexHelperBowlPuffer
using ..Ahorn, Maple
@mapdef Entity "VortexHelper/BowlPuffer" BowlPuffer(x::Integer, y::Integer, noRespawn::Bool = false, explodeTimer::Number = 1.0)
const placements = Ahorn.PlacementDict(
"Pufferfish Bowl (Vortex Helper)" => Ahorn.EntityPlacement(
BowlPuffer,
"point"
),
"Pufferfish Bowl (No Respawn) (Vortex Helper)" => Ahorn.EntityPlacement(
BowlPuffer,
"point",
Dict{String, Any}(
"noRespawn" => true
)
)
)
pufferBowlSprite = "objects/VortexHelper/pufferBowl/idle00"
function Ahorn.selection(entity::BowlPuffer)
x, y = Ahorn.position(entity)
return Ahorn.Rectangle(x - 11, y - 11, 21, 19)
end
function Ahorn.render(ctx::Ahorn.Cairo.CairoContext, entity::BowlPuffer, room::Maple.Room)
Ahorn.drawSprite(ctx, pufferBowlSprite, 0, -3)
end
end | [
27,
456,
62,
30783,
29,
15,
198,
21412,
49790,
47429,
33,
4883,
47,
13712,
198,
3500,
11485,
10910,
1211,
11,
21249,
198,
198,
31,
8899,
4299,
20885,
366,
53,
26158,
47429,
14,
33,
4883,
47,
13712,
1,
8693,
47,
13712,
7,
87,
3712,
... | 2.422414 | 348 |
# Testing:
#
# - computation of sufficient statistics
# - distribution fitting (i.e. estimation)
#
using Distributions
using Base.Test
n0 = 100
N = 10^5
w = rand(n0)
# DiscreteUniform
x = rand(DiscreteUniform(10, 15), n0)
d = fit(DiscreteUniform, x)
@test isa(d, DiscreteUniform)
@test minimum(d) == minimum(x)
@test maximum(d) == maximum(x)
d = fit(DiscreteUniform, rand(DiscreteUniform(10, 15), N))
@test minimum(d) == 10
@test maximum(d) == 15
# Bernoulli
x = rand(Bernoulli(0.7), n0)
ss = suffstats(Bernoulli, x)
@test isa(ss, Distributions.BernoulliStats)
@test ss.cnt0 == n0 - countnz(x)
@test ss.cnt1 == countnz(x)
ss = suffstats(Bernoulli, x, w)
@test isa(ss, Distributions.BernoulliStats)
@test_approx_eq ss.cnt0 sum(w[x .== 0])
@test_approx_eq ss.cnt1 sum(w[x .== 1])
d = fit(Bernoulli, x)
p = countnz(x) / n0
@test isa(d, Bernoulli)
@test_approx_eq mean(d) p
d = fit(Bernoulli, x, w)
p = sum(w[x .== 1]) / sum(w)
@test isa(d, Bernoulli)
@test_approx_eq mean(d) p
d = fit(Bernoulli, rand(Bernoulli(0.7), N))
@test isa(d, Bernoulli)
@test_approx_eq_eps mean(d) 0.7 0.01
# Beta
d = fit(Beta, rand(Beta(1.3, 3.7), N))
@test isa(d, Beta)
@test_approx_eq_eps d.α 1.3 0.1
@test_approx_eq_eps d.β 3.7 0.1
# Binomial
x = rand(Binomial(100, 0.3), n0)
ss = suffstats(Binomial, (100, x))
@test isa(ss, Distributions.BinomialStats)
@test_approx_eq ss.ns sum(x)
@test ss.ne == n0
@test ss.n == 100
ss = suffstats(Binomial, (100, x), w)
@test isa(ss, Distributions.BinomialStats)
@test_approx_eq ss.ns dot(Float64[xx for xx in x], w)
@test_approx_eq ss.ne sum(w)
@test ss.n == 100
d = fit(Binomial, (100, x))
@test isa(d, Binomial)
@test ntrials(d) == 100
@test_approx_eq succprob(d) sum(x) / (n0 * 100)
d = fit(Binomial, (100, x), w)
@test isa(d, Binomial)
@test ntrials(d) == 100
@test_approx_eq succprob(d) dot(x, w) / (sum(w) * 100)
d = fit(Binomial, 100, rand(Binomial(100, 0.3), N))
@test isa(d, Binomial)
@test ntrials(d) == 100
@test_approx_eq_eps succprob(d) 0.3 0.01
# Categorical
p = [0.2, 0.5, 0.3]
x = rand(Categorical(p), n0)
ss = suffstats(Categorical, (3, x))
h = Float64[countnz(x .== i) for i = 1 : 3]
@test isa(ss, Distributions.CategoricalStats)
@test_approx_eq ss.h h
d = fit(Categorical, (3, x))
@test isa(d, Categorical)
@test d.K == 3
@test_approx_eq probs(d) h / sum(h)
d2 = fit(Categorical, x)
@test isa(d2, Categorical)
@test probs(d2) == probs(d)
ss = suffstats(Categorical, (3, x), w)
h = Float64[sum(w[x .== i]) for i = 1 : 3]
@test isa(ss, Distributions.CategoricalStats)
@test_approx_eq ss.h h
d = fit(Categorical, (3, x), w)
@test isa(d, Categorical)
@test_approx_eq probs(d) h / sum(h)
d = fit(Categorical, suffstats(Categorical, 3, x, w))
@test isa(d, Categorical)
@test_approx_eq probs(d) (h / sum(h))
d = fit(Categorical, rand(Categorical(p), N))
@test isa(d, Categorical)
@test_approx_eq_eps probs(d) p 0.01
# Exponential
x = rand(Exponential(0.5), n0)
ss = suffstats(Exponential, x)
@test isa(ss, Distributions.ExponentialStats)
@test_approx_eq ss.sx sum(x)
@test ss.sw == n0
ss = suffstats(Exponential, x, w)
@test isa(ss, Distributions.ExponentialStats)
@test_approx_eq ss.sx dot(x, w)
@test ss.sw == sum(w)
d = fit(Exponential, x)
@test isa(d, Exponential)
@test_approx_eq scale(d) mean(x)
d = fit(Exponential, x, w)
@test isa(d, Exponential)
@test_approx_eq scale(d) dot(x, w) / sum(w)
d = fit(Exponential, rand(Exponential(0.5), N))
@test isa(d, Exponential)
@test_approx_eq_eps scale(d) 0.5 0.01
# Normal
μ = 11.3
σ = 3.2
x = rand(Normal(μ, σ), n0)
ss = suffstats(Normal, x)
@test isa(ss, Distributions.NormalStats)
@test_approx_eq ss.s sum(x)
@test_approx_eq ss.m mean(x)
@test_approx_eq ss.s2 sum((x .- ss.m).^2)
@test_approx_eq ss.tw n0
ss = suffstats(Normal, x, w)
@test isa(ss, Distributions.NormalStats)
@test_approx_eq ss.s dot(x, w)
@test_approx_eq ss.m dot(x, w) / sum(w)
@test_approx_eq ss.s2 dot((x .- ss.m).^2, w)
@test_approx_eq ss.tw sum(w)
d = fit(Normal, x)
@test isa(d, Normal)
@test_approx_eq d.μ mean(x)
@test_approx_eq d.σ sqrt(mean((x .- d.μ).^2))
d = fit(Normal, x, w)
@test isa(d, Normal)
@test_approx_eq d.μ dot(x, w) / sum(w)
@test_approx_eq d.σ sqrt(dot((x .- d.μ).^2, w) / sum(w))
d = fit(Normal, rand(Normal(μ, σ), N))
@test isa(d, Normal)
@test_approx_eq_eps d.μ μ 0.1
@test_approx_eq_eps d.σ σ 0.1
import Distributions.NormalKnownMu, Distributions.NormalKnownSigma
ss = suffstats(NormalKnownMu(μ), x)
@test isa(ss, Distributions.NormalKnownMuStats)
@test ss.μ == μ
@test_approx_eq ss.s2 sum((x .- μ).^2)
@test_approx_eq ss.tw n0
ss = suffstats(NormalKnownMu(μ), x, w)
@test isa(ss, Distributions.NormalKnownMuStats)
@test ss.μ == μ
@test_approx_eq ss.s2 dot((x .- μ).^2, w)
@test_approx_eq ss.tw sum(w)
d = fit_mle(Normal, x; mu=μ)
@test isa(d, Normal)
@test d.μ == μ
@test_approx_eq d.σ sqrt(mean((x .- d.μ).^2))
d = fit_mle(Normal, x, w; mu=μ)
@test isa(d, Normal)
@test d.μ == μ
@test_approx_eq d.σ sqrt(dot((x .- d.μ).^2, w) / sum(w))
ss = suffstats(NormalKnownSigma(σ), x)
@test isa(ss, Distributions.NormalKnownSigmaStats)
@test ss.σ == σ
@test_approx_eq ss.sx sum(x)
@test_approx_eq ss.tw n0
ss = suffstats(NormalKnownSigma(σ), x, w)
@test isa(ss, Distributions.NormalKnownSigmaStats)
@test ss.σ == σ
@test_approx_eq ss.sx dot(x, w)
@test_approx_eq ss.tw sum(w)
d = fit_mle(Normal, x; sigma=σ)
@test isa(d, Normal)
@test d.σ == σ
@test_approx_eq d.μ mean(x)
d = fit_mle(Normal, x, w; sigma=σ)
@test isa(d, Normal)
@test d.σ == σ
@test_approx_eq d.μ dot(x, w) / sum(w)
# Uniform
x = rand(Uniform(1.2, 5.8), n0)
d = fit(Uniform, x)
@test isa(d, Uniform)
@test 1.2 <= minimum(d) <= maximum(d) <= 5.8
@test minimum(d) == minimum(x)
@test maximum(d) == maximum(x)
d = fit(Uniform, rand(Uniform(1.2, 5.8), N))
@test 1.2 <= minimum(d) <= maximum(d) <= 5.8
@test_approx_eq_eps minimum(d) 1.2 0.02
@test_approx_eq_eps maximum(d) 5.8 0.02
# Gamma
x = rand(Gamma(3.9, 2.1), n0)
ss = suffstats(Gamma, x)
@test isa(ss, Distributions.GammaStats)
@test_approx_eq ss.sx sum(x)
@test_approx_eq ss.slogx sum(log(x))
@test_approx_eq ss.tw n0
ss = suffstats(Gamma, x, w)
@test isa(ss, Distributions.GammaStats)
@test_approx_eq ss.sx dot(x, w)
@test_approx_eq ss.slogx dot(log(x), w)
@test_approx_eq ss.tw sum(w)
d = fit(Gamma, rand(Gamma(3.9, 2.1), N))
@test isa(d, Gamma)
@test_approx_eq_eps shape(d) 3.9 0.1
@test_approx_eq_eps scale(d) 2.1 0.2
# Geometric
x = rand(Geometric(0.3), n0)
ss = suffstats(Geometric, x)
@test isa(ss, Distributions.GeometricStats)
@test_approx_eq ss.sx sum(x)
@test_approx_eq ss.tw n0
ss = suffstats(Geometric, x, w)
@test isa(ss, Distributions.GeometricStats)
@test_approx_eq ss.sx dot(x, w)
@test_approx_eq ss.tw sum(w)
d = fit(Geometric, x)
@test isa(d, Geometric)
@test_approx_eq succprob(d) inv(1. + mean(x))
d = fit(Geometric, x, w)
@test isa(d, Geometric)
@test_approx_eq succprob(d) inv(1. + dot(x, w) / sum(w))
d = fit(Geometric, rand(Geometric(0.3), N))
@test isa(d, Geometric)
@test_approx_eq_eps succprob(d) 0.3 0.01
# Laplace
d = fit(Laplace, rand(Laplace(5.0, 3.0), N))
@test isa(d, Laplace)
@test_approx_eq_eps location(d) 5.0 0.1
@test_approx_eq_eps scale(d) 3.0 0.2
# Poisson
x = rand(Poisson(8.2), n0)
ss = suffstats(Poisson, x)
@test isa(ss, Distributions.PoissonStats)
@test_approx_eq ss.sx sum(x)
@test_approx_eq ss.tw n0
ss = suffstats(Poisson, x, w)
@test isa(ss, Distributions.PoissonStats)
@test_approx_eq ss.sx dot(x, w)
@test_approx_eq ss.tw sum(w)
d = fit(Poisson, x)
@test isa(d, Poisson)
@test_approx_eq mean(d) mean(x)
d = fit(Poisson, x, w)
@test isa(d, Poisson)
@test_approx_eq mean(d) dot(Float64[xx for xx in x], w) / sum(w)
d = fit(Poisson, rand(Poisson(8.2), N))
@test isa(d, Poisson)
@test_approx_eq_eps mean(d) 8.2 0.2
| [
2,
23983,
25,
198,
2,
198,
2,
220,
532,
29964,
286,
6751,
7869,
198,
2,
220,
532,
6082,
15830,
357,
72,
13,
68,
13,
31850,
8,
198,
2,
198,
198,
3500,
46567,
507,
198,
3500,
7308,
13,
14402,
198,
198,
77,
15,
796,
1802,
198,
45... | 2.092162 | 3,700 |
using DiffEqFlux, Flux
using LinearAlgebra, Distributions
using Optim, GalacticOptim
using Test
function run_test(f, layer, atol)
data_train_vals = [rand(length(layer.model)) for k in 1:500]
data_train_fn = f.(data_train_vals)
function loss_function(component)
data_pred = [layer(x,component) for x in data_train_vals]
loss = sum(norm.(data_pred.-data_train_fn))/length(data_train_fn)
return loss
end
function cb(p,l)
@show l
return false
end
optfunc = GalacticOptim.OptimizationFunction((x, p) -> loss_function(x), GalacticOptim.AutoZygote())
optprob = GalacticOptim.OptimizationProblem(optfunc, layer.p)
res = GalacticOptim.solve(optprob, ADAM(0.1), cb=cb, maxiters = 100)
optprob = GalacticOptim.OptimizationProblem(optfunc, res.minimizer)
res = GalacticOptim.solve(optprob, ADAM(0.01), cb=cb, maxiters = 100)
optprob = GalacticOptim.OptimizationProblem(optfunc, res.minimizer)
res = GalacticOptim.solve(optprob, BFGS(), cb=cb, maxiters = 200)
opt = res.minimizer
data_validate_vals = [rand(length(layer.model)) for k in 1:100]
data_validate_fn = f.(data_validate_vals)
data_validate_pred = [layer(x,opt) for x in data_validate_vals]
return sum(norm.(data_validate_pred.-data_validate_fn))/length(data_validate_fn) < atol
end
##test 01: affine function, Chebyshev and Polynomial basis
A = rand(2,2)
b = rand(2)
f = x -> A*x + b
layer = TensorLayer([ChebyshevBasis(10), PolynomialBasis(10)], 2)
@test run_test(f, layer, 0.05)
##test 02: non-linear function, Chebyshev and Legendre basis
A = rand(2,2)
b = rand(2)
f = x -> A*x*norm(x)+ b*sin(norm(x))
layer = TensorLayer([ChebyshevBasis(7), FourierBasis(7)], 2)
@test run_test(f, layer, 0.10)
| [
3500,
10631,
36,
80,
37,
22564,
11,
1610,
2821,
198,
3500,
44800,
2348,
29230,
11,
46567,
507,
198,
3500,
30011,
11,
23509,
27871,
320,
198,
3500,
6208,
198,
198,
8818,
1057,
62,
9288,
7,
69,
11,
7679,
11,
379,
349,
8,
628,
220,
2... | 2.412568 | 732 |
<reponame>grahamstark/ScottishTaxBenefitModel
#
# This is the benefit/tax credit/IT/MinWage/NI rates from April 2021
#
sys.it.savings_rates = [0.0, 20.0, 40.0, 45.0]
sys.it.savings_thresholds = [5_000.0, 37_700.0, 150_000.0]
sys.it.savings_basic_rate = 2 # above this counts as higher rate
sys.it.non_savings_rates = [19.0,20.0,21.0,41.0,46.0]
sys.it.non_savings_thresholds = [2_097, 12_726, 31_092, 150_000.0]
sys.it.non_savings_basic_rate = 2 # above this counts as higher rate
sys.it.dividend_rates = [0.0, 7.5,32.5,38.1]
sys.it.dividend_thresholds = [2_000.0, 37_700.0, 150_000.0]
sys.it.dividend_basic_rate = 2 # above this counts as higher rate
sys.it.personal_allowance = 12_570.00
sys.it.personal_allowance_income_limit = 100_000.00
sys.it.personal_allowance_withdrawal_rate = 50.0
sys.it.blind_persons_allowance = 2_520.00
sys.it.married_couples_allowance = 9_125.00
sys.it.mca_minimum = 3_530.00
sys.it.mca_income_maximum = 29_600.00
sys.it.mca_credit_rate = 10.0
sys.it.mca_withdrawal_rate = 50.0
sys.it.marriage_allowance = 1_260.00
sys.it.personal_savings_allowance = 1_000.00
# FIXME better to have it straight from
# the book with charges per CO2 range
# and the data being an estimate of CO2 per type
merge( sys.it.company_car_charge_by_CO2_emissions,
Dict([
Missing_Fuel_Type=>0.1,
No_Fuel=>0.1,
Other=>0.1,
Dont_know=>0.1,
Petrol=>0.25,
Diesel=>0.37,
Hybrid_use_a_combination_of_petrol_and_electricity=>0.16,
Electric=>0.02,
LPG=>0.02,
Biofuel_eg_E85_fuel=>0.02 ]))
sys.it.fuel_imputation = 24_500.00 # 20/21
#
# pensions
#
sys.it.pension_contrib_basic_amount = 3_600.00
sys.it.pension_contrib_annual_allowance = 40_000.00
sys.it.pension_contrib_annual_minimum = 4_000.00
sys.it.pension_contrib_threshold_income = 240_000.00
sys.it.pension_contrib_withdrawal_rate = 50.0
# sys.it.non_savings_income = NON_SAVINGS_INCOME
# sys.it.all_taxable = ALL_TAXABLE_INCOME
# sys.it.savings_income = SAVINGS_INCOME
# sys.it.dividend_income = DIVIDEND_INCOME
# sys.it.mca_date = MCA_DATE
sys.ni.abolished = false
sys.ni.primary_class_1_rates = [0.0, 0.0, 12.0, 2.0 ]
sys.ni.primary_class_1_bands = [120.0, 184.0, 967.0, 9999999999999.9] # the '-1' here is because json can't write inf
sys.ni.secondary_class_1_rates = [0.0, 13.8, 13.8 ] # keep 2 so
sys.ni.secondary_class_1_bands = [170.0, 967.0, 99999999999999.9 ]
sys.ni.state_pension_age = 66; # fixme move
sys.ni.class_2_threshold = 6_515.0;
sys.ni.class_2_rate = 3.05;
sys.ni.class_4_rates = [0.0, 9.0, 2.0 ]
sys.ni.class_4_bands = [9_569.0, 50_270.0, 99999999999999.9 ]
# sys.ni.class_1_income = IncludedItems([WAGES],[PENSION_CONTRIBUTIONS_EMPLOYER])
# sys.ni.class_4_income = [SELF_EMPLOYMENT_INCOME]
# sys.uc.
sys.lmt.isa_jsa_esa_abolished = false
sys.lmt.pen_credit_abolished = false
## FIXME we can't turn off pension credit individually here..
sys.lmt.premia.family = 17.65
sys.lmt.premia.family_lone_parent = 22.20
sys.lmt.premia.carer_single = 37.70
sys.lmt.premia.carer_couple = 2*37.70
sys.lmt.premia.disabled_child = 65.94
sys.lmt.premia.disability_single = 35.10
sys.lmt.premia.disability_couple = 50.05
sys.lmt.premia.enhanced_disability_child = 26.67
sys.lmt.premia.enhanced_disability_single = 17.20
sys.lmt.premia.enhanced_disability_couple = 24.60
sys.lmt.premia.severe_disability_single = 67.30
sys.lmt.premia.severe_disability_couple = 134.60
sys.lmt.premia.pensioner_is = 152.90
sys.lmt.allowances.age_18_24 = 59.20
sys.lmt.allowances.age_25_and_over = 74.70
sys.lmt.allowances.age_18_and_in_work_activity = 74.70
sys.lmt.allowances.over_pension_age = 191.15 #
sys.lmt.allowances.lone_parent = 74.70
sys.lmt.allowances.lone_parent_over_pension_age = 191.15
sys.lmt.allowances.couple_both_under_18 = 59.20
sys.lmt.allowances.couple_both_over_18 = 117.40
sys.lmt.allowances.couple_over_pension_age = 286.05
sys.lmt.allowances.couple_one_over_18_high = 117.40
sys.lmt.allowances.couple_one_over_18_med = 74.70
sys.lmt.allowances.pa_couple_one_over_18_low = 59.20
sys.lmt.allowances.child = 68.60
sys.lmt.allowances.pc_mig_single = 177.10
sys.lmt.allowances.pc_mig_couple = 270.30
sys.lmt.allowances.pc_child = 54.60
# sys.lmt.income_rules.
sys.lmt.income_rules.permitted_work =143.00
sys.lmt.income_rules.lone_parent_hb = 25.0
sys.lmt.income_rules.high = 20.0
sys.lmt.income_rules.low_couple = 10.0
sys.lmt.income_rules.low_single = 5.0
sys.lmt.income_rules.hb_additional = 17.10
sys.lmt.income_rules.childcare_max_1 = 175.0
sys.lmt.income_rules.childcare_max_2 = 300.0
sys.lmt.income_rules.incomes = LEGACY_MT_INCOME
sys.lmt.income_rules.hb_incomes = LEGACY_HB_INCOME
sys.lmt.income_rules.pc_incomes = LEGACY_PC_INCOME
sys.lmt.income_rules.sc_incomes = LEGACY_SAVINGS_CREDIT_INCOME
sys.lmt.income_rules.capital_min = 6_000.0
sys.lmt.income_rules.capital_max = 16_000.0
sys.lmt.income_rules.pc_capital_min = 10_000.0
sys.lmt.income_rules.pc_capital_max = 99999999999999.9
sys.lmt.income_rules.pensioner_capital_min = 10_000.0
sys.lmt.income_rules.pensioner_capital_max = 16_000.0
sys.lmt.income_rules.capital_tariff = 250
sys.lmt.income_rules.pensioner_tariff = 500
# FIXME why do we need a seperate copy of HoursLimits here?
sys.lmt.hours_limits.lower = 16
sys.lmt.hours_limits.med = 24
sys.lmt.hours_limits.higher = 30
sys.lmt.savings_credit.abolished = false
sys.lmt.savings_credit.withdrawal_rate = 60.0
sys.lmt.savings_credit.threshold_single = 153.70
sys.lmt.savings_credit.threshold_couple =244.12
sys.lmt.savings_credit.max_single = 14.04
sys.lmt.savings_credit.max_couple = 15.71
sys.lmt.savings_credit.available_till = Date( 2016, 04, 06 )
sys.lmt.child_tax_credit.abolished = false
sys.lmt.child_tax_credit.family = 545.0
sys.lmt.child_tax_credit.child = 2_845.0
sys.lmt.child_tax_credit.disability = 3_435
sys.lmt.child_tax_credit.severe_disability = 4825
sys.lmt.child_tax_credit.threshold = 16_480.0
sys.lmt.working_tax_credit.basic = 2_005+1_045 # CAREFUL!! assuming 20 uplift still in place
sys.lmt.working_tax_credit.lone_parent = 2_060
sys.lmt.working_tax_credit.couple = 2_060
sys.lmt.working_tax_credit.hours_ge_30 = 830
sys.lmt.working_tax_credit.disability = 3_240
sys.lmt.working_tax_credit.severe_disability = 1_400
sys.lmt.working_tax_credit.age_50_plus = 1_365.00
sys.lmt.working_tax_credit.age_50_plus_30_hrs = 2_030.00
sys.lmt.working_tax_credit.childcare_max_2_plus_children = 300.0 # pw
sys.lmt.working_tax_credit.childcare_max_1_child = 175.0
sys.lmt.working_tax_credit.childcare_proportion = 70.0
sys.lmt.working_tax_credit.taper = 41.0
sys.lmt.working_tax_credit.threshold = 6_565.0
sys.lmt.working_tax_credit.non_earnings_minima = 300.0 # FIXME check
sys.lmt.hb.taper = 65.0
sys.lmt.hb.ndd_deductions = [15.95,36.65,50.30,82.30,93.70,102.85]
sys.lmt.hb.ndd_incomes = [149.0,217.0,283.0,377.0,469.0,99999999999999.9]
sys.lmt.ctr.taper = 20.0
sys.lmt.ctr.ndd_deductions = []
sys.lmt.ctr.ndd_incomes = []
sys.uc.abolished = false
sys.uc.threshold = 2_500.0 ## NOT USED
sys.uc.age_18_24 = 344.00
sys.uc.age_25_and_over = 411.51
sys.uc.couple_both_under_25 = 490.60
sys.uc.couple_oldest_25_plus = 596.58
sys.uc.first_child = 282.50
sys.uc.subsequent_child = 237.08
sys.uc.disabled_child_lower = 128.89
sys.uc.disabled_child_higher = 402.41
sys.uc.limited_capcacity_for_work_activity = 343.63
sys.uc.carer = 163.73
sys.uc.ndd = 75.53
sys.uc.childcare_max_2_plus_children = 1_108.04 # pm
sys.uc.childcare_max_1_child = 646.35
sys.uc.childcare_proportion = 85.0 # pct
sys.uc.minimum_income_floor_hours = 35*WEEKS_PER_MONTH
sys.uc.work_allowance_w_housing = 293.0
sys.uc.work_allowance_no_housing = 515.0
sys.uc.other_income = UC_OTHER_INCOME
# sys.uc.earned_income :: IncludedItems = UC_EARNED_INCOME
sys.uc.capital_min = 6_000.0
sys.uc.capital_max = 16_000.0
# £1 *per week* ≆ 4.35 pm FIXME make 4.35 WEEKS_PER_MONTH?
sys.uc.capital_tariff = 250.0/4.35
sys.uc.taper = 63.0
sys.uc.ctr_taper = 20.0
sys.age_limits.state_pension_ages = pension_ages()
sys.age_limits.savings_credit_to_new_state_pension = Date( 2016, 04, 06 )
sys.hours_limits.lower = 16
sys.hours_limits.med = 24
sys.hours_limits.higher = 30
sys.child_limits.max_children = 2
# THESE ARE TILL APRIL 2022
sys.minwage.ages = [16,18,21,23]
sys.minwage.wage_per_hour = [4.62, 6.56, 8.36, 8.91]
sys.minwage.apprentice_rate = 4.30
sys.hr.maximum_rooms = 4
sys.hr.rooms_rent_reduction = [14.0,25.0]
sys.hr.single_room_age = 35
#
# These are unchanged in 3 years; see:
# https://www.gov.scot/publications/local-housing-allowance-rates-2021-2022/
#
sys.hr.brmas = loadBRMAs( 4, T, DEFAULT_BRMA_2021 )
sys.nmt_bens.attendance_allowance.abolished = false
sys.nmt_bens.attendance_allowance.higher = 89.60
sys.nmt_bens.attendance_allowance.lower = 60.00
sys.nmt_bens.child_benefit.abolished = false
sys.nmt_bens.child_benefit.first_child = 21.15
sys.nmt_bens.child_benefit.other_children = 14.00
sys.nmt_bens.child_benefit.high_income_thresh = 50_000.0
sys.nmt_bens.child_benefit.withdrawal = 1/100
sys.nmt_bens.child_benefit.guardians_allowance = 18.00
sys.nmt_bens.dla.abolished = false
sys.nmt_bens.dla.care_high = 89.60
sys.nmt_bens.dla.care_middle = 60.00
sys.nmt_bens.dla.care_low = 23.70
sys.nmt_bens.dla.mob_high = 62.55
sys.nmt_bens.dla.mob_low = 23.70
sys.nmt_bens.carers.abolished = false
sys.nmt_bens.carers.allowance = 67.60
sys.nmt_bens.carers.scottish_supplement = 231.40 # FROM APRIL 2021
sys.nmt_bens.carers.hours :: Int = 35
sys.nmt_bens.carers.gainful_employment_min = 128.00
sys.nmt_bens.pip.abolished = false
sys.nmt_bens.pip.dl_standard = 60.0
sys.nmt_bens.pip.dl_enhanced = 89.60
sys.nmt_bens.pip.mobility_standard = 23.70
sys.nmt_bens.pip.mobility_enhanced = 62.55
sys.nmt_bens.esa.abolished = false
sys.nmt_bens.esa.assessment_u25 = 59.20
sys.nmt_bens.esa.assessment_25p = 74.70
sys.nmt_bens.esa.main = 74.70
sys.nmt_bens.esa.work = 29.70
sys.nmt_bens.esa.support = 39.40
sys.nmt_bens.jsa.abolished = false
sys.nmt_bens.jsa.u25 = 59.20
sys.nmt_bens.jsa.o24 = 74.70
sys.nmt_bens.pensions.abolished = false
sys.nmt_bens.pensions.new_state_pension = 179.60
# pension_start_date = Date( 2016, 04, 06 )
sys.nmt_bens.pensions.cat_a = 137.60
sys.nmt_bens.pensions.cat_b = 137.60
sys.nmt_bens.pensions.cat_b_survivor = 82.45
sys.nmt_bens.pensions.cat_d = 82.45
sys.nmt_bens.bereavement.abolished = false
# higher effectively just means 'with children';
sys.nmt_bens.bereavement.lump_sum_higher = 3_500
sys.nmt_bens.bereavement.lump_sum_lower = 2_500
sys.nmt_bens.bereavement.higher = 350
sys.nmt_bens.bereavement.lower = 100
sys.nmt_bens.widows_pension.abolished = false
sys.nmt_bens.widows_pension.industrial_higher = 137.60
sys.nmt_bens.widows_pension.industrial_lower = 41.28
sys.nmt_bens.widows_pension.standard_rate = 122.55
sys.nmt_bens.widows_pension.parent = 122.55
sys.nmt_bens.widows_pension.ages = collect(54:-1:45)
sys.nmt_bens.widows_pension.age_amounts = [113.97,105.39,96.81,88.24,79.66,71.08,62.50,53.92,45.34,36.77]
#
# young carer grant
sys.nmt_bens.maternity.abolished = false
sys.nmt_bens.maternity.rate = 151.97
sys.nmt_bens.smp = 151.97 ## 90% of earn cpag 21/2 812
# = XX
sys.bencap.abolished = false
sys.bencap.outside_london_single = 257.69
sys.bencap.outside_london_couple = 384.62
# not really needed, but anyway ..
sys.bencap.inside_london_single = 296.35
sys.bencap.inside_london_couple = 442.31
sys.bencap.uc_incomes_limit = 617
sys.scottish_child_payment.amount = 10.0
sys.scottish_child_payment.maximum_age = 5
sys.ubi.abolished = true
sys.ubi.adult_amount = 4_800.0
sys.ubi.child_amount= 3_000.0
sys.ubi.universal_pension = 8_780.0
sys.ubi.adult_age = 17
sys.ubi.retirement_age = 66
| [
27,
7856,
261,
480,
29,
70,
13220,
301,
668,
14,
19040,
680,
27017,
42166,
270,
17633,
198,
2,
198,
2,
770,
318,
262,
4414,
14,
19290,
3884,
14,
2043,
14,
9452,
54,
496,
14,
22125,
3965,
422,
3035,
33448,
198,
2,
198,
17597,
13,
... | 2.209059 | 5,343 |
<reponame>mipals/SymEGRSSMatrices
struct SymEGRQSMatrix{T,UT<:AbstractArray,VT<:AbstractArray,dT<:AbstractArray} <: AbstractMatrix{T}
Ut::UT
Vt::VT
d::dT
n::Int
p::Int
function SymEGRQSMatrix{T,UT,VT,dT}(Ut,Vt,d,n,p) where
{T,UT<:AbstractArray,VT<:AbstractArray,dT<:AbstractArray}
Up, Un = size(Ut)
Vp, Vn = size(Vt)
(Un == Vn && Up == Vp && Vn == length(d)) || throw(DimensionMismatch())
new(Ut,Vt,d,n,p)
end
end
# Constuctor
SymEGRQSMatrix(Ut::AbstractArray{T,N}, Vt::AbstractArray{T,N}, d::AbstractArray{T,M}) where {T,N,M}=
SymEGRQSMatrix{T,typeof(Ut),typeof(Vt),typeof(d)}(Ut,Vt,d,size(Vt,2),size(Vt,1));
########################################################################
#### Helpful properties. Not nessecarily computionally efficient ####
########################################################################
Matrix(K::SymEGRQSMatrix) = tril(K.Ut'*K.Vt) + triu(K.Vt'*K.Ut,1) + Diagonal(K.d)
size(K::SymEGRQSMatrix) = (K.n, K.n)
function getindex(K::SymEGRQSMatrix{T}, i::Int, j::Int) where T
i > j && return dot(K.Ut[:,i],K.Vt[:,j])
j == i && return dot(K.Vt[:,i],K.Ut[:,j]) + K.d[i]
return dot(K.Vt[:,i],K.Ut[:,j])
end
Base.propertynames(F::SymEGRQSMatrix, private::Bool=false) =
(private ? fieldnames(typeof(F)) : ())
########################################################################
#### Linear Algebra routines ####
########################################################################
#### Matrix-matrix product ####
function dss_mul_mat!(Y::Array, K::SymEGRQSMatrix, X::Array)
Ut = K.Ut
Vt = K.Vt
d = getfield(K,:d)
p, n = size(Ut);
mx = size(X,2);
Vbar = zeros(p,mx);
Ubar = Ut*X;
@inbounds for i = 1:n
tmpV = Vt[:,i];
tmpU = Ut[:,i];
tmpX = X[i:i,:];
Ubar -= tmpU .* tmpX;
Vbar += tmpV .* tmpX;
Y[i,:] = tmpU'*Vbar + tmpV'*Ubar + d[i]*tmpX;
end
return Y
end
#### Log-determinant ####
logdet(K::SymEGRQSMatrix) = 2.0*logdet(cholesky(K))
#### Determinant ####
det(K::SymEGRQSMatrix) = det(cholesky(K))^2
mul!(y::AbstractVecOrMat, K::SymEGRQSMatrix, x::AbstractVecOrMat) =
dss_mul_mat!(y,K,x)
mul!(y::AbstractVecOrMat, K::Adjoint{<:Any,<:SymEGRQSMatrix}, x::AbstractVecOrMat) =
dss_mul_mat!(y,K.parent,x)
function (\)(K::SymEGRQSMatrix, x::AbstractVecOrMat)
L = cholesky(K);
return L'\(L\x)
end
| [
27,
7856,
261,
480,
29,
76,
541,
874,
14,
43094,
7156,
49,
5432,
19044,
45977,
198,
7249,
15845,
7156,
49,
48,
12310,
265,
8609,
90,
51,
11,
3843,
27,
25,
23839,
19182,
11,
36392,
27,
25,
23839,
19182,
11,
67,
51,
27,
25,
23839,
... | 2.132092 | 1,128 |
using DataFrames
using DuckDB
using Test
using Dates
using UUIDs
test_files = [
"test_appender.jl",
"test_basic_queries.jl",
"test_config.jl",
"test_connection.jl",
"test_df_scan.jl",
"test_prepare.jl",
"test_transaction.jl",
"test_sqlite.jl",
"test_replacement_scan.jl",
"test_table_function.jl",
"test_old_interface.jl",
"test_all_types.jl"
]
if size(ARGS)[1] > 0
filtered_test_files = []
for test_file in test_files
if test_file == ARGS[1]
push!(filtered_test_files, test_file)
end
end
test_files = filtered_test_files
end
for fname in test_files
println(fname)
include(fname)
end
| [
3500,
6060,
35439,
198,
3500,
21867,
11012,
198,
3500,
6208,
198,
3500,
44712,
198,
3500,
471,
27586,
82,
198,
198,
9288,
62,
16624,
796,
685,
198,
220,
220,
220,
366,
9288,
62,
1324,
2194,
13,
20362,
1600,
198,
220,
220,
220,
366,
... | 2.180952 | 315 |
<reponame>RalphAS/SLICOTMath.jl
# Portions translated from SLICOT-Reference distribution
# Copyright (c) 2002-2020 NICONET e.V.
function run_mb03bd(datfile, io=stdout)
NIN = 5
NOUT = 6
KMAX = 6
NMAX = 50
LDA1 = NMAX
LDA2 = NMAX
LDQ1 = NMAX
LDQ2 = NMAX
LDWORK = KMAX + max( 2*NMAX, 8*KMAX )
LIWORK = 2*KMAX + NMAX
QIND = Array{BlasInt,1}(undef, KMAX)
S = Array{BlasInt,1}(undef, KMAX)
A = Array{Float64,3}(undef, LDA1,LDA2,KMAX)
Q = Array{Float64,3}(undef, LDQ1,LDQ2,KMAX)
ALPHAR = Array{Float64,1}(undef, NMAX)
ALPHAI = Array{Float64,1}(undef, NMAX)
BETA = Array{Float64,1}(undef, NMAX)
SCAL = Array{BlasInt,1}(undef, NMAX)
IWORK = Array{BlasInt,1}(undef, LIWORK)
DWORK = Array{Float64,1}(undef, LDWORK)
f = open(datfile,"r")
readline(f)
vs = split(readline(f))
JOB = vs[1][1]
DEFL = vs[2][1]
COMPQ = vs[3][1]
K = parse(BlasInt, vs[4])
N = parse(BlasInt, vs[5])
H = parse(BlasInt, vs[6])
ILO = parse(BlasInt, vs[7])
IHI = parse(BlasInt, vs[8])
if ( N<0 || N>NMAX )
@error "Illegal N=$N"
end
vs = String[]
_isz = K
while length(vs) < _isz
append!(vs, replace.(split(readline(f)),'D'=>'E'))
end
S[1:_isz] .= parsex.(BlasInt, vs)
vs = String[]
_isz,_jsz,_ksz = (N,N,K)
while length(vs) < _isz*_jsz*_ksz
append!(vs, replace.(split(readline(f)),'D'=>'E'))
end
for k in 1:_ksz
for i in 1:_isz
_i0 = (i-1)*_jsz + (k-1)*_jsz*_isz
A[i,1:_jsz,k] .= parsex.(Float64, vs[_i0+1:_i0+_jsz])
end
end
if ( LSAME( COMPQ, 'P' ) )
vs = String[]
_isz = K
while length(vs) < _isz
append!(vs, replace.(split(readline(f)),'D'=>'E'))
end
QIND[1:_isz] .= parsex.(BlasInt, vs)
end # if
close(f)
# interp call 1
INFO, IWARN = SLICOT.mb03bd!(JOB, DEFL, COMPQ, QIND, K, N, H, ILO, IHI, S, A, Q, ALPHAR, ALPHAI, BETA, SCAL, LIWORK, LDWORK)
@test INFO == 0
println(io, "IWARN = $IWARN")
if ( LSAME( JOB, 'S' ) || LSAME( JOB, 'T' ) )
# interp output 1
println(io, "A:")
_nc = N
_nr = N
_nk = K
show(io, "text/plain", A[1:_nr,1:_nc,1:_nk])
println(io)
end # if
if ( LSAME( COMPQ, 'U' ) || LSAME( COMPQ, 'I' ) )
# interp output 2
println(io, "Q:")
_nc = N
_nr = N
_nk = K
show(io, "text/plain", Q[1:_nr,1:_nc,1:_nk])
println(io)
elseif ( LSAME( COMPQ, 'P' ) )
for L in 1:K
if ( QIND[L]>0 )
println(io, "factor ",QIND[L])
# write QIND( L )
# unable to translate write loop:
# write ( I, J, QIND( L ) ), J = 1, N
# interp output 3
show(io, "text/plain", Q[1:N,1:N,QIND[L]])
println(io)
end # if
end # for
end # if
# interp output 4
println(io, "ALPHAR:")
_nr = N
show(io, "text/plain", ALPHAR[1:_nr])
println(io)
# interp output 5
println(io, "ALPHAI:")
_nr = N
show(io, "text/plain", ALPHAI[1:_nr])
println(io)
# interp output 6
println(io, "BETA:")
_nr = N
show(io, "text/plain", BETA[1:_nr])
println(io)
# interp output 7
println(io, "SCAL:")
_nr = N
show(io, "text/plain", SCAL[1:_nr])
println(io)
end # run_mb03bd()
| [
27,
7856,
261,
480,
29,
49,
17307,
1921,
14,
8634,
2149,
2394,
37372,
13,
20362,
198,
2,
4347,
507,
14251,
422,
12419,
2149,
2394,
12,
26687,
6082,
198,
2,
15069,
357,
66,
8,
6244,
12,
42334,
45593,
1340,
2767,
304,
13,
53,
13,
19... | 1.843279 | 1,793 |
# Do not share a stream between processes
# The token would be shared so putting would give InvalidSequenceTokenException a lot
struct CloudWatchLogHandler{F<:Formatter} <: Handler{F, Union{}}
stream::CloudWatchLogStream
channel::Channel{LogEvent} # only one task should read from this channel
fmt::F
end
"""
CloudWatchLogHandler(
config::AWSConfig,
log_group_name,
log_stream_name,
formatter::Memento.Formatter,
)
Construct a Memento Handler for logging to a CloudWatch Log Stream.
This constructor creates a task which asynchronously submits logs to the stream.
A CloudWatch Log Event has only two properties: `timestamp` and `message`.
If a `Record` has a `date` property it will be used as the `timestamp`, otherwise the
current time will be captured when `Memento.emit` is called.
All `DateTime`s will be assumed to be in UTC.
The `message` will be generated by calling `Memento.format` on the `Record` with this
handler's `formatter`.
"""
function CloudWatchLogHandler(
config::AWSConfig,
log_group_name::AbstractString,
log_stream_name::AbstractString,
formatter::F=DefaultFormatter(),
) where F<:Formatter
ch = Channel{LogEvent}(Inf)
handler = CloudWatchLogHandler(
CloudWatchLogStream(config, log_group_name, log_stream_name),
ch,
formatter,
)
tsk = @async process_logs!(handler)
# channel will be closed if task fails, to avoid unknowingly discarding logs
bind(ch, tsk)
return handler
end
function process_available_logs!(handler::CloudWatchLogHandler)
events = Vector{LogEvent}()
batch_size = 0
while isready(handler.channel) && length(events) < MAX_BATCH_LENGTH
event = fetch(handler.channel)
batch_size += aws_size(event)
if batch_size <= MAX_BATCH_SIZE
take!(handler.channel)
push!(events, event)
else
break
end
end
if isempty(events)
warn(LOGGER, string(
"Channel was ready but no events were found. ",
"Is there another task pulling logs from this handler?",
))
end
try
@mock submit_logs(handler.stream, events)
catch e
warn(LOGGER, CapturedException(e, catch_backtrace()))
end
end
"""
process_logs!(handler::CloudWatchLogHandler)
Continually pulls logs from the handler's channel and submits them to AWS.
This function should terminate silently when the channel is closed.
"""
function process_logs!(handler::CloudWatchLogHandler)
group = handler.stream.log_group_name
stream = handler.stream.log_stream_name
debug(LOGGER, "Handler for group '$group' stream '$stream' initiated")
try
while isopen(handler.channel) # might be able to avoid the error in this case
wait(handler.channel)
process_available_logs!(handler)
sleep(PUTLOGEVENTS_RATE_LIMIT) # wait at least this long due to AWS rate limits
end
catch err
if !(err isa InvalidStateException && err.state === :closed)
log(
LOGGER,
:error,
"Handler for group '$group' stream '$stream' terminated unexpectedly",
)
error(LOGGER, CapturedException(err, catch_backtrace()))
end
end
debug(LOGGER, "Handler for group '$group' stream '$stream' terminated normally")
return nothing
end
function Memento.emit(handler::CloudWatchLogHandler, record::Record)
dt = isdefined(record, :date) ? record.date : Dates.now(tz"UTC")
message = format(handler.fmt, record)
event = LogEvent(message, dt)
put!(handler.channel, event)
end
| [
2,
2141,
407,
2648,
257,
4269,
1022,
7767,
198,
2,
383,
11241,
561,
307,
4888,
523,
5137,
561,
1577,
17665,
44015,
594,
30642,
16922,
257,
1256,
198,
7249,
10130,
10723,
11187,
25060,
90,
37,
27,
25,
8479,
1436,
92,
1279,
25,
32412,
... | 2.657081 | 1,391 |
##### multi dimensional advection
##### For incompressible model only
##### calculate tendencies in x direction
@kernel function calc_Gcˣ_kernel!(Gc, c, u, g::AbstractGrid, ΔT)
i, j, k = @index(Global, NTuple)
### offset index for halo points
ii = i + g.Hx
jj = j + g.Hy
kk = k + g.Hz
@inbounds Gc[ii, jj, kk] = adv_flux_x(ii, jj, kk, g, u, c, ΔT)
end
function calc_Gcsˣ!(Gcs, nut, u, g::AbstractGrid, ΔT, arch::Architecture)
kernel! = calc_Gcˣ_kernel!(device(arch), (16,16), (g.Nx, g.Ny, g.Nz))
barrier = Event(device(arch))
events = []
for name in nut_names
event = kernel!(Gcs[name].data, nut[name].data, u, g, ΔT, dependencies=barrier)
push!(events,event)
end
wait(device(arch), MultiEvent(Tuple(events)))
return nothing
end
##### calculate tendencies in y direction
@kernel function calc_Gcʸ_kernel!(Gc, c, v, g::AbstractGrid, ΔT)
i, j, k = @index(Global, NTuple)
### offset index for halo points
ii = i + g.Hx
jj = j + g.Hy
kk = k + g.Hz
@inbounds Gc[ii, jj, kk] = adv_flux_y(ii, jj, kk, g, v, c, ΔT)
end
function calc_Gcsʸ!(Gcs, nut, v, g::AbstractGrid, ΔT, arch::Architecture)
kernel! = calc_Gcʸ_kernel!(device(arch), (16,16), (g.Nx, g.Ny, g.Nz))
barrier = Event(device(arch))
events = []
for name in nut_names
event = kernel!(Gcs[name].data, nut[name].data, v, g, ΔT, dependencies=barrier)
push!(events,event)
end
wait(device(arch), MultiEvent(Tuple(events)))
return nothing
end
##### calculate tendencies in z direction
@kernel function calc_Gcᶻ_kernel!(Gc, c, w, g::AbstractGrid, ΔT)
i, j, k = @index(Global, NTuple)
### offset index for halo points
ii = i + g.Hx
jj = j + g.Hy
kk = k + g.Hz
@inbounds Gc[ii, jj, kk] = adv_flux_z(ii, jj, kk, g, w, c, ΔT)
end
function calc_Gcsᶻ!(Gcs, nut, w, g::AbstractGrid, ΔT, arch::Architecture)
kernel! = calc_Gcᶻ_kernel!(device(arch), (16,16), (g.Nx, g.Ny, g.Nz))
barrier = Event(device(arch))
events = []
for name in nut_names
event = kernel!(Gcs[name].data, nut[name].data, w, g, ΔT, dependencies=barrier)
push!(events,event)
end
wait(device(arch), MultiEvent(Tuple(events)))
return nothing
end
##### apply the tendency in x direction to tracer c
@kernel function multi_dim_x_kernel!(ctemp, Gc, c, u, g::AbstractGrid, ΔT)
i, j, k = @index(Global, NTuple)
### offset index for halo points
ii = i + g.Hx
jj = j + g.Hy
kk = k + g.Hz
@inbounds ctemp[ii, jj, kk] -= ΔT / volume(ii, jj, kk, g) * (δx⁺(ii, jj, kk, Gc, g) - c[ii, jj, kk] * δx⁺(ii, jj, kk, g, Trans_x, u))
end
function multi_dim_x!(nut_temp, Gcs, nut, u, g::AbstractGrid, ΔT, arch::Architecture)
kernel! = multi_dim_x_kernel!(device(arch), (16,16), (g.Nx, g.Ny, g.Nz))
barrier = Event(device(arch))
events = []
for name in nut_names
event = kernel!(nut_temp[name].data, Gcs[name].data, nut[name].data, u, g, ΔT, dependencies=barrier)
push!(events,event)
end
wait(device(arch), MultiEvent(Tuple(events)))
return nothing
end
##### apply the tendency in y direction to tracer c
@kernel function multi_dim_y_kernel!(ctemp, Gc, c, v, g::AbstractGrid, ΔT)
i, j, k = @index(Global, NTuple)
### offset index for halo points
ii = i + g.Hx
jj = j + g.Hy
kk = k + g.Hz
@inbounds ctemp[ii, jj, kk] -= ΔT / volume(ii, jj, kk, g) * (δy⁺(ii, jj, kk, Gc, g) - c[ii, jj, kk] * δy⁺(ii, jj, kk, g, Trans_y, v ))
end
function multi_dim_y!(nut_temp, Gcs, nut, v, g::AbstractGrid, ΔT, arch::Architecture)
kernel! = multi_dim_y_kernel!(device(arch), (16,16), (g.Nx, g.Ny, g.Nz))
barrier = Event(device(arch))
events = []
for name in nut_names
event = kernel!(nut_temp[name].data, Gcs[name].data, nut[name].data, v, g, ΔT, dependencies=barrier)
push!(events,event)
end
wait(device(arch), MultiEvent(Tuple(events)))
return nothing
end
##### apply the tendency in z direction to tracer c
@kernel function multi_dim_z_kernel!(ctemp, Gc, c, w, g::AbstractGrid, ΔT)
i, j, k = @index(Global, NTuple)
### offset index for halo points
ii = i + g.Hx
jj = j + g.Hy
kk = k + g.Hz
@inbounds ctemp[ii, jj, kk] -= ΔT / volume(ii, jj, kk, g) * (δz⁺(ii, jj, kk, Gc, g) - c[ii, jj, kk] * δz⁺(ii, jj, kk, g, Trans_z, w ))
end
function multi_dim_z!(nut_temp, Gcs, nut, w, g::AbstractGrid, ΔT, arch::Architecture)
kernel! = multi_dim_z_kernel!(device(arch), (16,16), (g.Nx, g.Ny, g.Nz))
barrier = Event(device(arch))
events = []
for name in nut_names
event = kernel!(nut_temp[name].data, Gcs[name].data, nut[name].data, w, g, ΔT, dependencies=barrier)
push!(events,event)
end
wait(device(arch), MultiEvent(Tuple(events)))
return nothing
end
function calc_nut_tendency!(a, b, c, ΔT)
for name in nut_names
@inbounds a[name].data .= b[name].data .- c[name].data
end
end
function nut_advection!(nut, nut_temp, Gcs, vel, g::AbstractGrid, ΔT, arch::Architecture)
for name in nut_names
@inbounds nut_temp[name].data .= nut[name].data
end
##### x direction
calc_Gcsˣ!(Gcs, nut_temp, vel.u.data, g, ΔT, arch)
fill_halo_Gcs!(Gcs, g)
multi_dim_x!(nut_temp, Gcs, nut, vel.u.data, g, ΔT, arch)
fill_halo_nut!(nut_temp, g)
##### y direction
calc_Gcsʸ!(Gcs, nut_temp, vel.v.data, g, ΔT, arch)
fill_halo_Gcs!(Gcs, g)
multi_dim_y!(nut_temp, Gcs, nut, vel.v.data, g, ΔT, arch)
fill_halo_nut!(nut_temp, g)
##### z direction
calc_Gcsᶻ!(Gcs, nut_temp, vel.w.data, g, ΔT, arch)
fill_halo_Gcs!(Gcs, g)
multi_dim_z!(nut_temp, Gcs, nut, vel.w.data, g, ΔT, arch)
fill_halo_nut!(nut_temp, g)
calc_nut_tendency!(Gcs, nut_temp, nut, ΔT)
end | [
4242,
2,
5021,
38517,
512,
303,
596,
198,
4242,
2,
1114,
13352,
601,
856,
2746,
691,
198,
4242,
2,
15284,
25671,
287,
2124,
4571,
198,
31,
33885,
2163,
42302,
62,
38,
66,
135,
96,
62,
33885,
0,
7,
38,
66,
11,
269,
11,
334,
11,
... | 2.133333 | 2,715 |
<gh_stars>10-100
# Using the ZigZagBoomerang with Turing with the BouncyParticle sampler
# (The approach taken here is retrieving the likelihood function from Turing and sampling
# directly with ZigZagBoomerang and not using Turings `AbstractMCMC` )
using Turing
using ZigZagBoomerang
const ZZB = ZigZagBoomerang
using LinearAlgebra
const ∅ = nothing
using DelimitedFiles
include("plot_chain.jl") # simple visualization of chains with GLMakie
# define Turing Logit regression model
# following https://github.com/TuringLang/Turing.jl/blob/master/benchmarks/nuts/lr.jl
@model lr_nuts(x, y, σ) = begin
N,D = size(x)
α ~ Normal(0, σ)
β ~ MvNormal(zeros(D), ones(D)*σ)
for n = 1:N
y[n] ~ BinomialLogit(1, dot(x[n,:], β) + α)
end
end
# read data
function readlrdata()
fname = joinpath(dirname(@__FILE__), "lr_nuts.data")
z = readdlm(fname)
x = z[:,1:end-1]
y = z[:,end] .- 1
return x, y
end
x, y = readlrdata()
# define problem
model = lr_nuts(x, y, 100.0)
# sample First with Turing and Nuts
n_samples = 1_000 # Sampling parameter settings
nuts_chain = @time sample(model, NUTS(0.65), n_samples) # (a bit frickle, sometimes adapts wrong)
# sampling took 383 s
# plot NUTS
fig2 = plot_chain(1:n_samples, collect(eachrow(dropdims(nuts_chain[nuts_chain.name_map.parameters].value.data, dims=3)) ))
save("lrnuts.png", fig2)
# sample with ZigZagBoomerang
using ForwardDiff
using ForwardDiff: Dual, value
"""
make_gradient_and_dhessian_logp(turingmodel) -> ∇nlogp!
Gradient of negative log-likelihood and second derivative in direction of movement
Following https://github.com/TuringLang/Turing.jl/blob/master/src/core/ad.jl
"""
function make_gradient_and_dhessian_neglogp(
model::Turing.Model,
sampler=Turing.SampleFromPrior(),
ctx::Turing.DynamicPPL.AbstractContext = DynamicPPL.DefaultContext()
)
vi = Turing.VarInfo(model)
# define function to compute log joint.
function ℓ(θ)
new_vi = Turing.VarInfo(vi, sampler, θ)
model(new_vi, sampler, ctx)
logp = Turing.getlogp(new_vi)
return logp
end
return function (y, t, x, θ, args...)
x_ = x + Dual{:hSrkahPmmC}(0.0, 1.0)*θ
y_ = ForwardDiff.gradient(x->-ℓ(x), x_)
y .= value.(y_)
y, dot(θ, y_).partials[]
end
end
∇neglogp! = make_gradient_and_dhessian_neglogp(model)
d = 1 + 24 # number of parameters
t0 = 0.0
x0 = zeros(d) # starting point sampler
θ0 = randn(d) # starting direction sampler
T = 200. # end time (similar to number of samples in MCMC)
c = 50.0 # initial guess for the bound
# define BouncyParticle sampler (has two relevant parameters)
Z = BouncyParticle(∅, ∅, # ignored
10.0, # momentum refreshment rate
0.95, # momentum correlation / only gradually change momentum in refreshment/momentum update
0.0 # ignored
)
trace, final, (acc, num), cs = @time pdmp(∇neglogp!, # problem
t0, x0, θ0, T, # initial state and duration
ZZB.LocalBound(c), # use Hessian information
Z; # sampler
adapt=true, # adapt bound c
progress=true, # show progress bar
subsample=true # keep only samples at refreshment times
)
# took 272 s
# obtain direction change times and points of piecewise linear trace
t, x = ZigZagBoomerang.sep(trace)
# plot bouncy particle sampler
fig3 = plot_chain(t, x, false)
save("lrbouncy.png", fig3)
# check visually
# lines(mean(trace))
# lines!(mean(nuts_chain).nt[:mean])
# show both in one plot
fig4 = plot_chain(1:n_samples, collect(eachrow(dropdims(nuts_chain[nuts_chain.name_map.parameters].value.data, dims=3)) ),
color=:red, title="Green: Bouncy Particle. Red: NUTS.")
fig4 = plot_chain!(fig4, t*n_samples/T, x, false, color=:green)
save("lrboth.png", fig4)
| [
27,
456,
62,
30783,
29,
940,
12,
3064,
198,
2,
8554,
262,
24992,
57,
363,
33,
4207,
263,
648,
351,
39141,
351,
262,
347,
977,
948,
7841,
1548,
6072,
20053,
198,
2,
357,
464,
3164,
2077,
994,
318,
50122,
262,
14955,
2163,
422,
3914... | 2.48294 | 1,524 |
######################################################################
# Additional errors used in the library.
# -----
# Licensed under MIT License
export CancellationError
struct CancellationError <: Exception
what
end
CancellationError() = CancellationError(nothing)
function Base.showerror(io::IO, err::CancellationError)
print(io, "Cancelled")
if err.what !== nothing
print(io, ": ")
print(io, err.what)
end
end
export TimeoutError
struct TimeoutError <: Exception
message::String
end
TimeoutError() = TimeoutError("")
function Base.showerror(io::IO, err::TimeoutError)
if isempty(err.message)
print(io, "Timeout")
else
print(io, "Timeout: $(err.message)")
end
end
| [
29113,
29113,
4242,
2235,
198,
2,
15891,
8563,
973,
287,
262,
5888,
13,
198,
2,
37404,
198,
2,
49962,
739,
17168,
13789,
198,
198,
39344,
43780,
297,
341,
12331,
198,
7249,
43780,
297,
341,
12331,
1279,
25,
35528,
198,
220,
220,
220,
... | 2.882813 | 256 |
@inline function initialize!(integrator,cache::ExplicitRKConstantCache,f=integrator.f)
integrator.kshortsize = 2
integrator.k = eltype(integrator.sol.k)(integrator.kshortsize)
integrator.fsalfirst = f(integrator.t,integrator.uprev)
end
@inline function perform_step!(integrator,cache::ExplicitRKConstantCache,f=integrator.f)
@unpack t,dt,uprev,u = integrator
@unpack A,c,α,αEEst,stages = cache
@unpack kk = cache
# Calc First
kk[1] = integrator.fsalfirst
# Calc Middle
for i = 2:stages-1
utilde = zero(kk[1])
for j = 1:i-1
utilde = @muladd utilde + A[j,i]*kk[j]
end
kk[i] = f(@muladd(t+c[i]*dt),@muladd(uprev+dt*utilde));
end
#Calc Last
utilde = zero(kk[1])
for j = 1:stages-1
utilde = @muladd utilde + A[j,end]*kk[j]
end
kk[end] = f(@muladd(t+c[end]*dt),@muladd(uprev+dt*utilde)); integrator.fsallast = kk[end] # Uses fsallast as temp even if not fsal
# Accumulate Result
utilde = α[1]*kk[1]
for i = 2:stages
utilde = @muladd utilde + α[i]*kk[i]
end
u = @muladd uprev + dt*utilde
if integrator.opts.adaptive
uEEst = αEEst[1]*kk[1]
for i = 2:stages
uEEst = @muladd uEEst + αEEst[i]*kk[i]
end
integrator.EEst = integrator.opts.internalnorm( dt*(utilde-uEEst)./@muladd(integrator.opts.abstol+max(abs.(uprev),abs.(u))*integrator.opts.reltol))
end
if isfsal(integrator.alg.tableau)
integrator.fsallast = kk[end]
else
integrator.fsallast = f(t+dt,u)
end
integrator.k[1] = integrator.fsalfirst
integrator.k[2] = integrator.fsallast
@pack integrator = t,dt,u
end
@inline function initialize!(integrator,cache::ExplicitRKCache,f=integrator.f)
integrator.kshortsize = 2
integrator.fsallast = cache.fsallast
integrator.fsalfirst = cache.kk[1]
integrator.k = eltype(integrator.sol.k)(integrator.kshortsize)
integrator.k[1] = integrator.fsalfirst
integrator.k[2] = integrator.fsallast
f(integrator.t,integrator.uprev,integrator.fsalfirst) # Pre-start fsal
end
@inline function perform_step!(integrator,cache::ExplicitRKCache,f=integrator.f)
@unpack t,dt,uprev,u,k = integrator
uidx = eachindex(integrator.uprev)
@unpack A,c,α,αEEst,stages = cache.tab
@unpack kk,utilde,tmp,atmp,uEEst = cache
# Middle
for i = 2:stages-1
for l in uidx
utilde[l] = zero(kk[1][1])
end
for j = 1:i-1
for l in uidx
utilde[l] = @muladd utilde[l] + A[j,i]*kk[j][l]
end
end
for l in uidx
tmp[l] = @muladd uprev[l]+dt*utilde[l]
end
f(@muladd(t+c[i]*dt),tmp,kk[i])
end
#Last
for l in uidx
utilde[l] = zero(kk[1][1])
end
for j = 1:stages-1
for l in uidx
utilde[l] = @muladd utilde[l] + A[j,end]*kk[j][l]
end
end
for l in uidx
u[l] = @muladd uprev[l]+dt*utilde[l]
end
f(@muladd(t+c[end]*dt),u,kk[end]) #fsallast is tmp even if not fsal
#Accumulate
if !isfsal(integrator.alg.tableau)
for i in uidx
utilde[i] = α[1]*kk[1][i]
end
for i = 2:stages
for l in uidx
utilde[l] = @muladd utilde[l] + α[i]*kk[i][l]
end
end
for i in uidx
u[i] = @muladd uprev[i] + dt*utilde[i]
end
end
if integrator.opts.adaptive
for i in uidx
uEEst[i] = αEEst[1]*kk[1][i]
end
for i = 2:stages
for j in uidx
uEEst[j] = @muladd uEEst[j] + αEEst[i]*kk[i][j]
end
end
for i in uidx
atmp[i] = (dt*(utilde[i]-uEEst[i])./@muladd(integrator.opts.abstol+max(abs(uprev[i]),abs(u[i]))*integrator.opts.reltol))
end
integrator.EEst = integrator.opts.internalnorm(atmp)
end
if !isfsal(integrator.alg.tableau)
f(t+dt,u,integrator.fsallast)
end
@pack integrator = t,dt,u
end
| [
31,
45145,
2163,
41216,
0,
7,
18908,
12392,
11,
23870,
3712,
18438,
3628,
49,
42,
3103,
18797,
30562,
11,
69,
28,
18908,
12392,
13,
69,
8,
198,
220,
4132,
12392,
13,
50133,
2096,
1096,
796,
362,
198,
220,
4132,
12392,
13,
74,
796,
... | 2.027609 | 1,811 |
module FluspectMod
#using GSL
using Polynomials
using Statistics
# Matlab reading
using MAT
# Numerical integration package (Simson rule)
using QuadGK
# Is this OK?
file_Opti = joinpath(dirname(pathof(FluspectMod)), "Optipar2017_ProspectD.mat")
const minwle = 400.; # PAR range
const maxwle = 700.;
const minwlf = 650.; # SIF range
const maxwlf = 850.;
# Doubling Adding layers
const ndub = 10
# Canopy Layers:
const nl = 60
const xl = collect(0:-1/nl:-1)
const lazitab = collect(5:10:355)
const dx = 1.0/nl
# Read in all optical data:
opti = matread(file_Opti)["optipar"]
nr_ = convert(Array{Float32}, opti["nr"]);nr = nr_
Km_ = convert(Array{Float32}, opti["Kdm"]);Km = Km_
Kab_ = convert(Array{Float32}, opti["Kab"]);Kab = Kab_
Kant_ = convert(Array{Float32}, opti["Kant"]);Kant =Kant_
Kcar_ = convert(Array{Float32}, opti["Kca"]);Kcar= Kcar_
Kw_ = convert(Array{Float32}, opti["Kw"]);Kw=Kw_
KBrown_ = convert(Array{Float32}, opti["Ks"]);KBrown=KBrown_
phi_ = convert(Array{Float32}, opti["phi"]);phi=phi_
KcaV_ = convert(Array{Float32}, opti["KcaV"]);KcaV=KcaV_
KcaZ_ = convert(Array{Float32}, opti["KcaZ"]);KcaZ =KcaZ_
lambda_ = convert(Array{Float32}, opti["wl"]);lambda = lambda_
# Enable downsampling spectral resolution to arbitrary grid (specify array with boundaries here)
# Don't like global arrays yet, need to make sure this is not creating performance issues.
function init(swl)
global WL = swl
# Stupid...
global nr = zeros(length(swl)-1)
global Km = zeros(length(swl)-1)
global Kab = zeros(length(swl)-1)
global Kant = zeros(length(swl)-1)
global Kcar = zeros(length(swl)-1)
global Kw = zeros(length(swl)-1)
global KBrown = zeros(length(swl)-1)
global phi = zeros(length(swl)-1)
global KcaV = zeros(length(swl)-1)
global KcaZ = zeros(length(swl)-1)
global lambda = zeros(length(swl)-1)
for i in 1:length(swl)-1
wo = findall((lambda_.>=swl[i]).&(lambda_.<swl[i+1]) )
#println(mean(nr_[wo]))
nr[i] = mean(nr_[wo])
Km[i] = mean(Km_[wo])
Kab[i] = mean(Kab_[wo])
Kant[i] = mean(Kant_[wo])
Kcar[i] = mean(Kcar_[wo])
Kw[i] = mean(Kw_[wo])
KBrown[i] = mean(KBrown_[wo])
phi[i] = mean(phi_[wo])
KcaV[i] = mean(KcaV_[wo])
KcaZ[i] = mean(KcaZ_[wo])
lambda[i] = mean(lambda_[wo])
end
end
function fluspect(x::Vector; fqe::Number=0.0, Kab__=Kab, Kant__=Kant, KBrown__=KBrown, Kw__=Kw, Km__=Km, KcaV__=KcaV, KcaZ__=KcaZ)
#println(fqe)
# , Kab__=Kab, Kant__=Kant, KBrown__=KBrown, Kw__=Kw, Km__=Km, KcaV__=KcaV, KcaZ__=KcaZ)
# ***********************************************************************
# <NAME>., <NAME>. (1990), PROSPECT: a model of leaf optical
# properties spectra; Remote Sens. Environ.; 34:75-91.
# Reference:
# Féret, <NAME> & Jacquemoud [2017]. PROSPECT-D: Towards modeling
# leaf optical properties through a complete lifecycle
# Remote Sensing of Environment; 193:204215
# DOI: http://doi.org/10.1016/j.rse.2017.03.004
# The specific absorption coefficient corresponding to brown pigment is()
# provided by <NAME> [EMMAH, INRA Avignon, <EMAIL>]
# & used with his autorization.
# ***********************************************************************
N = x[1]
Cab = x[2]
Car = x[3]
Ant = x[4]
Brown = x[5]
Cw = x[6]
Cm = x[7]
Cx = x[8]
Kcaro = (1.0-Cx).* KcaV__ + Cx .* KcaZ__;
Kall = (Cab*Kab__.+Car*Kcaro.+Ant*Kant__.+Brown*KBrown__.+Cw*Kw__.+Cm*Km__)./N
# Relative absorption by Chlorophyll only (drives SIF and GPP eventually)
kChlrel = Cab*Kab./(Kall.*N.+eps());
# Adding eps() here to keep it stable and NOT set to 1 manually when Kall=0 (ForwardDiff won't work otherwise)
tau = real.((1.0.-Kall).*exp.(-Kall) .+ Kall.^2.0.*expint.(Kall.+eps()))
# ***********************************************************************
# reflectance & transmittance of one layer
# ***********************************************************************
# <NAME>., <NAME>., <NAME>., <NAME>. (1969)
# Interaction of isotropic ligth with a compact plant leaf; J. Opt.
# Soc. Am., 59[10]:1376-1379.
# ***********************************************************************
# reflectivity & transmissivity at the interface
#-------------------------------------------------
talf = calctav.(40.,nr)
ralf = 1.0.-talf
t12 = calctav.(90.,nr)
r12 = 1.0.-t12
t21 = t12./(nr.^2)
r21 = 1.0.-t21
# top surface side
denom = 1.0.-r21.*r21.*tau.^2
Ta = talf.*tau.*t21./denom
Ra = ralf.+r21.*tau.*Ta
# bottom surface side
t = t12.*tau.*t21./denom
r = r12.+r21.*tau.*t
# ***********************************************************************
# reflectance & transmittance of N layers
# Stokes equations to compute properties of next N-1 layers [N real]
# Normal case()
# ***********************************************************************
# Stokes G.G. (1862), On the intensity of the light reflected from
# | transmitted through a pile of plates; Proc. Roy. Soc. Lond.
# 11:545-556.
# ***********************************************************************
D = sqrt.((1.0.+r.+t).*(1.0.+r.-t).*(1.0.-r.+t).*(1.0.-r.-t))
rq = r.^2
tq = t.^2
a = (1.0.+rq.-tq.+D)./(2r)
b = (1.0.-rq.+tq.+D)./(2t)
bNm1 = b.^(N.-1); #
bN2 = bNm1.^2
a2 = a.^2
denom = a2.*bN2.-1
Rsub = a.*(bN2.-1)./denom
Tsub = bNm1.*(a2.-1)./denom
# Case of zero absorption
j = findall(r.+t .>= 1)
Tsub[j] = t[j]./(t[j]+(1.0.-t[j])*(N-1))
Rsub[j] = 1.0.-Tsub[j]
# Reflectance & transmittance of the leaf: combine top layer with next N-1 layers
denom = 1.0.-Rsub.*r
tran = Ta.*Tsub./denom
refl = Ra.+Ta.*Rsub.*t./denom
RT = [refl tran]
if fqe ==0.0
return RT
end
# FROM SCOPE notes:
# From here a new path is taken: The doubling method used to calculate
# fluoresence is now only applied to the part of the leaf where absorption
# takes place, that is, the part exclusive of the leaf-air interfaces. The
# reflectance (rho) and transmittance (tau) of this part of the leaf are
# now determined by "subtracting" the interfaces
# CF Note: All of the below takes about 10 times more time than the RT above. Need to rething speed and accuracy. (10nm is bringing it down a lot!)
Rb = (refl.-ralf)./(talf.*t21+(refl.-ralf).*r21); # Remove the top interface
Z = tran.*(1.0.-Rb.*r21)./(talf.*t21); # Derive Z from the transmittance
rho = (Rb.-r21.*Z.^2)./(1.0.-(r21.*Z).^2); # Reflectance and transmittance
tau = (1.0.-Rb.*r21)./(1.0.-(r21.*Z).^2).*Z; # of the leaf mesophyll layer
t = tau;
r = max.(rho,0.0); # Avoid negative r
# Derive Kubelka-Munk s and k
I_rt = findall((r.+t).<1);
D[I_rt] = sqrt.((1 .+ r[I_rt] .+ t[I_rt]) .* (1 .+ r[I_rt] .- t[I_rt]) .* (1 .- r[I_rt] .+ t[I_rt]) .* (1 .- r[I_rt] .- t[I_rt]));
a[I_rt] = (1 .+ r[I_rt].^2 .- t[I_rt].^2 .+ D[I_rt]) ./ (2r[I_rt]);
b[I_rt] = (1 .- r[I_rt].^2 + t[I_rt].^2 .+ D[I_rt]) ./ (2t[I_rt]);
a[(r.+t).>=1] .= 1.0;
b[(r.+t).>=1] .= 1.0;
s = r./t;
I_a = findall((a.>1).&(a.!=Inf));
s[I_a] = 2 .*a[I_a] ./ (a[I_a].^2 .- 1) .* log.(b[I_a]);
k = log.(b);
k[I_a] = (a[I_a].-1) ./ (a[I_a].+1) .* log.(b[I_a]);
kChl = kChlrel .* k;
# indices of wle and wlf within wlp
Iwle = findall((lambda.>=minwle) .& (lambda.<=maxwle));
Iwlf = findall((lambda.>=minwlf) .& (lambda.<=maxwlf));
wle = lambda[Iwle]; # excitation wavelengths, column
wlf = lambda[Iwlf]; # fluorescence wavelengths, column
epsi = 2.0^(-ndub);
# initialisations
te = 1 .-(k[Iwle].+s[Iwle]) .* epsi;
tf = 1 .-(k[Iwlf].+s[Iwlf]) .* epsi;
re = s[Iwle] .* epsi;
rf = s[Iwlf] .* epsi;
sigmoid = 1 ./(1 .+exp.(-wlf./10).*exp.(wle'./10)); # matrix computed as an outproduct
#println(size(sigmoid)," ", size(phi), " ", size(kChl)," ", size(Iwle), " ", size(Iwlf), " ", size(kChl[Iwle]))
Mf = Mb = fqe .* ((0.5*phi[Iwlf]).*epsi) .* kChl[Iwle]'.*sigmoid
Ih = ones(1,length(te)); # row of ones
Iv = ones(length(tf),1); # column of ones
# Doubling routine
for i = 1:ndub
xe = te./(1 .-re.*re); ten = te.*xe; ren = re.*(1 .+ten);
xf = tf./(1 .-rf.*rf); tfn = tf.*xf; rfn = rf.*(1 .+tfn);
A11 = xf*Ih + Iv*xe'; A12 = (xf*xe').*(rf*Ih .+ Iv*re');
A21 = 1 .+(xf*xe').*(1 .+rf*re'); A22 = (xf.*rf)*Ih+Iv*(xe.*re)';
#println(size(A11)," ", size(A12), " ", size(Mf)," ", size(Mb), " ")
Mfn = Mf .* A11 .+ Mb .* A12;
Mbn = Mb .* A21 .+ Mf .* A22;
te = ten; re = ren; tf = tfn; rf = rfn;
Mf = Mfn; Mb = Mbn;
end
# Here we add the leaf-air interfaces again for obtaining the final
# leaf level fluorescences.
g = Mb; f = Mf;
Rb = rho .+ tau.^2 .*r21./(1 .-rho.*r21);
Xe = Iv * (talf[Iwle]./(1 .-r21[Iwle].*Rb[Iwle]))';
Xf = t21[Iwlf]./(1 .-r21[Iwlf].*Rb[Iwlf]) * Ih;
Ye = Iv * (tau[Iwle].*r21[Iwle]./(1 .-rho[Iwle].*r21[Iwle]))';
Yf = tau[Iwlf].*r21[Iwlf]./(1 .-rho[Iwlf].*r21[Iwlf]) * Ih;
A = Xe .* (1 .+ Ye.*Yf) .* Xf;
B = Xe .* (Ye .+ Yf) .* Xf;
gn = A .* g + B .* f;
fn = A .* f + B .* g;
Mb = gn;
Mf = fn;
return RT,Mf,Mb
end
function RTM_sail(x::Vector; LIDFa=-0.35, LIDFb=-0.15,q=0.05, tts=30, tto=0, psi=90, wl=lambda, TypeLidf=1)
# State Vector X includes (in that order):
#N,Cab,Car,Ant,Cbrown,Cw,Cm,Cx,lai,rsoil
LAI = x[9]
# Define soil as polynomial (depends on state vector size):
pSoil = Polynomials.Poly(x[10:end])
rsoil = Polynomials.polyval(pSoil,wl.-mean(wl));
iLAI = LAI/nl; # [1] LAI of elementary layer (guess we can change that)
# Size of wavelength array:
nwl = length(wl)
# Call Fluspect for Leaf optical properties (can be done outside later if needed)
LRT = fluspect(x[1:8], fqe=0.0)
ρ=LRT[:,1]
τ=LRT[:,2]
# Geometric quantities (need to check allocation cost!)
cts = cos(deg2rad(tts))
cto = cos(deg2rad(tto))
sin_tts = sin(deg2rad(tts)); # sin solar angle
ctscto = cts*cto;
tants = tan(deg2rad(tts));
tanto = tan(deg2rad(tto));
cospsi = cos(deg2rad(psi));
dso = sqrt(tants*tants+tanto*tanto-2.0*tants*tanto*cospsi);
# Generate leaf angle distribution:
if TypeLidf==1
lidf,litab = dladgen(LIDFa,LIDFb);
elseif TypeLidf==2
lidf,litab = campbell(LIDFa);
end
#println(lidf)
cos_ttlo = cos.(deg2rad.(lazitab)); # cos leaf azimuth angles
cos_ttli = cos.(deg2rad.(litab)); # cosine of normal of upperside of leaf
sin_ttli = sin.(deg2rad.(litab)); # sine of normal of upperside of leaf
# angular distance, compensation of shadow length
# Calculate geometric factors associated with extinction and scattering
#Initialise sums
ks = 0.0;
ko = 0.0;
bf = 0.0;
sob = 0.0;
sof = 0.0;
# Weighted sums over LIDF
@simd for i=1:length(litab)
# ttl = litab[i]; % leaf inclination discrete values
ctl = cos(deg2rad(litab[i]));
# SAIL volume scattering phase function gives interception and portions to be
# multiplied by rho and tau
chi_s,chi_o,frho,ftau=volscatt(tts,tto,psi,litab[i]);
#********************************************************************************
#* SUITS SYSTEM COEFFICIENTS
#*
#* ks : Extinction coefficient for direct solar flux
#* ko : Extinction coefficient for direct observed flux
#* att : Attenuation coefficient for diffuse flux
#* sigb : Backscattering coefficient of the diffuse downward flux
#* sigf : Forwardscattering coefficient of the diffuse upward flux
#* sf : Scattering coefficient of the direct solar flux for downward diffuse flux
#* sb : Scattering coefficient of the direct solar flux for upward diffuse flux
#* vf : Scattering coefficient of upward diffuse flux in the observed direction
#* vb : Scattering coefficient of downward diffuse flux in the observed direction
#* w : Bidirectional scattering coefficient
#********************************************************************************
# Extinction coefficients
ksli = chi_s./cts;
koli = chi_o./cto;
# Area scattering coefficient fractions
sobli = frho*pi/ctscto;
sofli = ftau*pi/ctscto;
bfli = ctl*ctl;
ks = ks+ksli*lidf[i];
ko = ko+koli*lidf[i];
bf = bf+bfli*lidf[i];
sob = sob+sobli*lidf[i];
sof = sof+sofli*lidf[i];
end
#println(sob, " ", sof)
# Geometric factors to be used later with rho and tau
sdb = 0.5*(ks+bf);
sdf = 0.5*(ks-bf);
dob = 0.5*(ko+bf);
dof = 0.5*(ko-bf);
ddb = 0.5*(1 .+bf);
ddf = 0.5*(1 .-bf);
# Skipped SCOPE lines 186-213 here (catch up later)
# 1.4 solar irradiance factor for all leaf orientations
# See eq 19 in vdT 2009
Cs = cos_ttli.*cts; # [nli] pag 305 modified by Joris
Ss = sin_ttli.*sin_tts; # [nli] pag 305 modified by Joris
cos_deltas = Cs*ones(1,length(lazitab)) .+ Ss*cos_ttlo'; # [nli,nlazi]
fs = abs.(cos_deltas./cts); # [nli,nlazi] pag 305
# 1.5 probabilities Ps, Po, Pso
Ps = exp.(ks*xl*LAI); # [nl+1] p154{1} probability of viewing a leaf in solar dir
Po = exp.(ko*xl*LAI); # [nl+1] p154{1} probability of viewing a leaf in observation dir
Ps[1:nl] = Ps[1:nl] *(1 .-exp.(-ks*LAI*dx))/(ks*LAI*dx); # Correct Ps/Po for finite dx
Po[1:nl] = Po[1:nl] *(1 .-exp.(-ko*LAI*dx))/(ko*LAI*dx); # Correct Ps/Po for finite dx
#Pso: Probability of observing a sunlit leaf at depth x, see eq 31 in vdT 2009
Pso = similar(Po);
for j=1:length(xl)
#println(size(a), " ", size(Pso), " ", size(Po))
Pso[j] = quadgk(x -> Psofunction(ko,ks,LAI,q,dso,x), xl[j]-dx,xl[j], rtol=1e-2)[1]/dx
#Pso[j,:]= quad(@(y)Psofunction(K,k,LAI,q,dso,y),xl(j)-dx,xl(j))/dx; %#ok<FREMO>
end
Pso[Pso.>Po]= minimum([Po[Pso.>Po] Ps[Pso.>Po]],dims=2); #takes care of rounding error
Pso[Pso.>Ps]= minimum([Po[Pso.>Ps] Ps[Pso.>Ps]],dims=2); #takes care of rounding error
# All with length of wavelengths:
sigb = ddb.*ρ.+ddf.*τ;
sigf = ddf.*ρ.+ddb.*τ;
sb = sdb*ρ .+ sdf*τ; # [nwl] sb, p305{1} diffuse backscatter scattering coefficient for specular incidence
sf = sdf*ρ .+ sdb*τ; # [nwl] sf, p305{1} diffuse forward scattering coefficient for specular incidence
vb = dob*ρ .+ dof*τ; # [nwl] vb, p305{1} directional backscatter scattering coefficient for diffuse incidence
vf = dof*ρ .+ dob*τ; # [nwl] vf, p305{1} directional forward scattering coefficient for diffuse incidence
w = sob*ρ .+ sof*τ; # [nwl] w, p309{1} bidirectional scattering coefficent (directional-directional)
a = 1 .-sigf; # [nwl] attenuation
m = sqrt.(a.^2 .-sigb.^2); # [nwl]
rinf = (a.-m)./sigb; # [nwl]
rinf2= rinf.*rinf; # [nwl]
#println(minimum(m), " ", min(ks))
# direct solar radiation
J1k = calcJ1.(-1, m,ks,LAI); # [nwl]
J2k = calcJ2.( 0, m,ks,LAI); # [nwl]
J1K = calcJ1.(-1, m,ko,LAI); # [nwl] % added for calculation of rdo
J2K = calcJ2.( 0, m,ko,LAI); # [nwl] % added for calculation of rdo
e1 = exp.(-m.*LAI); # [nwl]
e2 = e1.^2; # [nwl]
re = rinf.*e1; # [nwl]
denom = 1 .-rinf2.*e2;
s1 = sf .+rinf.*sb;
s2 = sf.*rinf+sb;
v1 = vf.+rinf.*vb;
v2 = vf.*rinf.+vb;
Pss = s1.*J1k; # [nwl]
Qss = s2.*J2k; # [nwl]
Poo = v1.*J1K; # (nwl) % added for calculation of rdo
Qoo = v2.*J2K; # [nwl] % added for calculation of rdo
tau_ss = exp(-ks*LAI); # [1]
tau_oo = exp(-ko*LAI);
Z = (1 - tau_ss * tau_oo)/(ks + ko); # needed for analytic rso
tau_dd = (1 .-rinf2).*e1 ./denom; # [nwl]
rho_dd = rinf.*(1 .-e2) ./denom; # [nwl]
tau_sd = (Pss.-re.*Qss) ./denom; # [nwl]
tau_do = (Poo.-re.*Qoo) ./denom; # [nwl]
rho_sd = (Qss.-re.*Pss) ./denom; # [nwl]
rho_do = (Qoo.-re.*Poo) ./denom; # (nwl)
T1 = v2.*s1.*(Z.-J1k*tau_oo)./(ko.+m).+v1.*s2.*(Z.-J1K*tau_ss)./(ks.+m);
T2 = -(Qoo.*rho_sd+Poo.*tau_sd).*rinf;
rho_sod = (T1+T2)./(1 .-rinf2);
# Bidirectional reflectance
# Single scattering contribution
rho_sos = w.*iLAI.*sum(Pso[1:nl]);
# Total canopy contribution
rho_so=rho_sos.+rho_sod;
#println(rho_so[100:120])
dn=1 .-rsoil.*rho_dd;
# Total canopy contribution
rso = rho_so .+ rsoil .* Pso[nl+1] .+ ((tau_sd.+tau_ss*rsoil.*rho_dd).*tau_oo.+(tau_sd.+tau_ss).*tau_do).*rsoil./denom;
# SAIL analytical reflectances
# rsd: directional-hemispherical reflectance factor for solar incident flux
rsd = rho_sd .+ (tau_ss .+ tau_sd).*rsoil.*tau_dd./denom;
# rdd: bi-hemispherical reflectance factor
rdd = rho_dd .+ tau_dd.*rsoil.*tau_dd./denom;
# rdo: bi-directional reflectance factor
rdo = rho_do .+ (tau_oo .+ tau_do).*rsoil.*tau_dd./denom;
#return [rso rsd rdd rdo
# Dummy code here to track direct and diffuse light first, need to separate this into another function later
Esun_ = zeros(nwl).+100
Esky_ = zeros(nwl).+100
Emin_ = zeros(nl+1,nwl)
Eplu_ = zeros(nl+1,nwl)
Eplu_1 = rsoil.*((tau_ss.+tau_sd).*Esun_.+tau_dd.*Esky_)./denom;
Eplu0 = rho_sd.*Esun_ .+ rho_dd.*Esky_ .+ tau_dd.*Eplu_1;
Emin_1 = tau_sd.*Esun_ .+ tau_dd.*Esky_ .+ rho_dd.*Eplu_1;
delta1 = Esky_ .- rinf.*Eplu0;
delta2 = Eplu_1 .- rinf.*Emin_1;
# calculation of the fluxes in the canopy (this seems slow!)
t1 = sf.+rinf.*sb
t2 = sb+rinf.*sf
# The order here mattered, now doing the loop over nl, not nwl! (faster)
# This loop is time consuming, probably don't need high spectral resolution for the NIR part here, so it can be shortened a lot (say 100nm steps from 700-2500nm?)
# We just need it for net energy balance and PAR here.
return [rso rsd rdd rdo]
for i = 1:nl+1
J1kx = calcJ1.(xl[i],m,ks,LAI); # [nl]
J2kx = calcJ2.(xl[i],m,ks,LAI); # [nl]
F1 = Esun_.*J1kx.*t1 .+ delta1.*exp.( m.*LAI.*xl[i]); #[nl]
F2 = Esun_.*J2kx.*t2 .+ delta2.*exp.(-m.*LAI.*(xl[i].+1)); #[nl]
Emin_[i,:] = (F1.+rinf.*F2)./(1 .-rinf2);# [nl,nwl]
Eplu_[i,:] = (F2.+rinf.*F1)./(1 .-rinf2);# [nl,nwl]
end
end
# Had to make this like the Prosail F90 function, forwardDiff didn't work otherwise.
function calcJ1(x,m,k,LAI)
del=(k-m)*LAI
if abs(del)>1E-3;
J1 = (exp(m*LAI*x)-exp(k*LAI*x))/(k-m);
else
J1 = -0.5*LAI*x*(exp(m*LAI*x)+exp(k*LAI*x))*(1.0-del^2/12.0);
end
return J1
end
function calcJ2(x,m,k,LAI)
return (exp(k*LAI*x)-exp(-k*LAI)*exp(-m*LAI*(1+x)))/(k+m);
end
# APPENDIX IV function Pso from SCOPE v1.73
function Psofunction(K,k,LAI,q,dso,xl)
if dso!=0.0
alf = (dso/q) *2/(k+K);
pso = exp((K+k)*LAI*xl + sqrt(K*k)*LAI/(alf )*(1-exp(xl*(alf ))));# [nl+1] factor for correlation of Ps and Po
else
pso = exp((K+k)*LAI*xl - sqrt(K*k)*LAI*xl);# [nl+1] factor for correlation of Ps and Po
end
return pso
end
# FROM SCOPE v1.73 APPENDIX II function volscat
"""********************************************************************************
!* tts = solar zenith
!* tto = viewing zenith
!* psi = azimuth
!* ttl = leaf inclination angle
!* chi_s = interception functions
!* chi_o = interception functions
!* frho = function to be multiplied by leaf reflectance rho
!* ftau = functions to be multiplied by leaf transmittance tau
!********************************************************************************
! Compute volume scattering functions and interception coefficients
! for given solar zenith, viewing zenith, azimuth and leaf inclination angle.
! chi_s and chi_o are the interception functions.
! frho and ftau are the functions to be multiplied by leaf reflectance rho and
! leaf transmittance tau, respectively, in order to obtain the volume scattering
! function.
"""
function volscatt(tts,tto,psi,ttli)
#Volscatt version 2.
#created by <NAME>
#edited by <NAME> to matlab nomenclature.
# date: 11 February 2008
#tts [1] Sun zenith angle in degrees
#tto [1] Observation zenith angle in degrees
#psi [1] Difference of azimuth angle between solar and viewing position
#ttli [ttli] leaf inclination array
nli = length(ttli);
psi_rad = deg2rad(psi);
cos_psi = cos(deg2rad(psi)); # cosine of relative azimuth angle
cos_ttli = cos(deg2rad(ttli)); # cosine of normal of upperside of leaf
sin_ttli = sin(deg2rad(ttli)); # sine of normal of upperside of leaf
cos_tts = cos(deg2rad(tts)); # cosine of sun zenith angle
sin_tts = sin(deg2rad(tts)); # sine of sun zenith angle
cos_tto = cos(deg2rad(tto)); # cosine of observer zenith angle
sin_tto = sin(deg2rad(tto)); # sine of observer zenith angle
Cs = cos_ttli*cos_tts; # p305{1}
Ss = sin_ttli*sin_tts; # p305{1}
Co = cos_ttli*cos_tto; # p305{1}
So = sin_ttli*sin_tto; # p305{1}
As = maximum([Ss,Cs]);
Ao = maximum([So,Co]);
#println(-Cs./As, " ", Ss, " ", Cs)
bts = acos.(-Cs./As); # p305{1}
bto = acos.(-Co./Ao); # p305{2}
chi_o = 2/pi*((bto-pi/2).*Co + sin(bto).*So);
chi_s = 2/pi*((bts-pi/2).*Cs + sin(bts).*Ss);
delta1 = abs(bts-bto); # p308{1}
delta2 = pi-abs(bts + bto - pi); # p308{1}
Tot = psi_rad + delta1 + delta2; # pag 130{1}
bt1 = minimum([psi_rad,delta1]);
bt3 = maximum([psi_rad,delta2]);
bt2 = Tot - bt1 - bt3;
T1 = 2Cs.*Co + Ss.*So.*cos_psi;
T2 = sin(bt2).*(2As.*Ao + Ss.*So.*cos(bt1).*cos(bt3));
Jmin = ( bt2).*T1 - T2;
Jplus = (pi-bt2).*T1 + T2;
frho = Jplus/(2pi^2);
ftau = -Jmin /(2pi^2);
# pag.309 wl-> pag 135{1}
frho = maximum([0.0,frho]);
ftau = maximum([0.0,ftau]);
#println(tts, " ",tto, " ",psi, " ",ttli)
#println(chi_s, " ", chi_o, " ",frho, " ",ftau)
return chi_s,chi_o,frho,ftau
end
function dladgen(a::Number,b::Number)
litab=[5.,15.,25.,35.,45.,55.,65.,75.,81.,83.,85.,87.,89.];
freq = similar(litab)
for i1=1:8
t = i1*10;
freq[i1]=dcum(a,b,t);
end
for i2=9:12
t = 80.0+(i2-8)*2.;
freq[i2]=dcum(a,b,t);
end
freq[13]=1;
for i = 13:-1:2
freq[i]=freq[i]-freq[i-1];
end
return freq,litab
end
function dcum(a::Number,b::Number,t::Number)
y = 0.0
if a>=1
f = 1-cos(deg2rad(t));
else
epsi=1e-8;
delx=1;
x=2*deg2rad(t);
p=x;
while (delx >= epsi)
#println(delx)
y = a*sin(x)+0.5*b*sin(2.0*x);
dx=0.5*(y-x+p);
x=x+dx;
delx=abs(dx);
end
f = (2.0*y+p)/pi;
end
return f
end
"""
From SCOPE v1.73:
********************************************************************************
* Campbell.f
*
* Computation of the leaf angle distribution function value (freq)
* Ellipsoidal distribution function caracterised by the average leaf
* inclination angle in degree (ala)
* Campbell 1986
*
********************************************************************************
edit 2017 12 28: change sampling of angles to match with dladgen.m
"""
function campbell(ala)
tx1=[10.,20.,30.,40.,50.,60.,70.,80.,82.,84.,86.,88.,90.];
tx2=[0.,10.,20.,30.,40.,50.,60.,70.,80.,82.,84.,86.,88.];
litab = (tx2.+tx1)./2.0;
n=length(litab);
tl1 = deg2rad(tx1)
tl2 = deg2rad(tx2)
excent = exp(-1.6184e-5*ala^3+2.1145e-3*ala^2-1.2390e-1*ala+3.2491);
sum0 = 0;
freq=zeros(n);
for i=1:n
x1 = excent./(sqrt(1 .+excent^2 .*tan(tl1(i)).^2));
x2 = excent./(sqrt(1 .+excent^2 .*tan(tl2(i)).^2));
if (excent==1)
freq[i] = abs(cos(tl1(i))-cos(tl2(i)));
else
alpha = excent./sqrt(abs(1-excent.^2));
alpha2 = alpha.^2;
x12 = x1.^2;
x22 = x2.^2;
if (excent>1)
alpx1 = sqrt(alpha2(excent>1)+x12(excent>1));
alpx2[excent>1] = sqrt(alpha2(excent>1)+x22(excent>1));
dum = x1*alpx1+alpha2*log(x1+alpx1);
freq[i] = abs(dum-(x2.*alpx2+alpha2.*log(x2+alpx2)));
else
almx1 = sqrt(alpha2-x12);
almx2 = sqrt(alpha2-x22);
dum = x1.*almx1+alpha2.*asin(x1./alpha);
freq[i] = abs(dum-(x2.*almx2+alpha2.*asin(x2./alpha)));
end
end
end
sum0 = sum(freq,dims=2);
freq0=freq./sum0;
return freq0,litab
end
"""
calctav(alfa, nr)
***********************************************************************
From calctav.m in PROSPECT-D
***********************************************************************
<NAME>. (1964), Transmission of isotropic radiation across an
interface between two dielectrics, Appl. Opt., 3(1):111-113.
<NAME>. (1973), Transmission of isotropic light across a
dielectric surface in two and three dimensions, J. Opt. Soc. Am.,
63(6):664-666.
***********************************************************************
"""
function calctav(α,nr)
rd = pi/180
n2 = nr^2
np = n2+1
nm = n2-1
a = (nr+1)*(nr+1)/2
k = -(n2-1)*(n2-1)/4
sa = sin(α*rd)
if α!=90.0
b1 = sqrt((sa^2-np/2).*(sa^2-np/2)+k)
else
b1 = 0
end
b2 = sa^2-np/2
b = b1-b2
b3 = b^3
a3 = a^3
ts = (k^2.0/(6*b3)+k/b-b/2)-(k^2.0/(6*a3)+k/a-a/2)
tp1 = -2*n2*(b-a)/(np^2)
tp2 = -2*n2*np*log(b/a)/(nm^2)
tp3 = n2*(1.0/b-1.0/a)/2
tp4 = 16*n2^2.0*(n2^2+1)*log((2*np*b-nm^2)/(2*np*a-nm^2))/(np^3.0*nm^2)
tp5 = 16*n2^3.0*(1.0/(2*np*b-nm^2)-1.0/(2*np*a-nm^2))/(np^3)
tp = tp1+tp2+tp3+tp4+tp5
tav = (ts+tp)/(2*sa^2)
return tav
end
#function expint(x)
# p = Poly([8.267661952366478e+00, -7.773807325735529e-01, -3.012432892762715e-01, -7.811863559248197e-02, -1.019573529845792e-02,-6.973790859534190e-04,-2.569498322115933e-05, -4.819538452140960e-07, -3.602693626336023e-09])
# polyv = polyval(p,real(x));
# k = findall( abs(imag(x)) <= polyv );
# -GSL.sf_expint_Ei(-x)
#end
# From Matlab!
function expint(x)
p = Polynomials.Poly([8.267661952366478e+00, -7.773807325735529e-01, -3.012432892762715e-01, -7.811863559248197e-02, -1.019573529845792e-02,-6.973790859534190e-04,-2.569498322115933e-05, -4.819538452140960e-07, -3.602693626336023e-09])
polyv = Polynomials.polyval(p,real(x));
if abs(imag(x)) <= polyv
#initialization
egamma=0.57721566490153286061;
xk = x;
yk = -egamma - log(xk);
j = 1;
pterm = xk;
term = xk;
while abs(term) > (eps(yk))
yk = yk + term;
j = j + 1;
pterm = -xk.*pterm/j;
term = pterm/j;
end # end of the while loop
y = yk;
else
n = 1.0;
xk = x;
am2 = 0.0
bm2 = 1.0
am1 = 1.0
bm1 = xk
f = am1 / bm1;
oldf = Inf;
j = 2;
while abs(f-oldf) > (100*eps()*abs(f))
alpha = n-1+(j/2); # note: beta= 1
#calculate A(j), B(j), and f(j)
a = am1 + alpha * am2;
b = bm1 + alpha * bm2;
# save new normalized variables for next pass through the loop
# note: normalization to avoid overflow or underflow
am2 = am1 / b;
bm2 = bm1 / b;
am1 = a / b;
bm1 = 1.0;
f = am1;
j = j+1;
# calculate the coefficients for j odd
alpha = (j-1)/2;
beta = xk;
a = beta * am1 + alpha * am2;
b = beta * bm1 + alpha * bm2;
am2 = am1 / b;
bm2 = bm1 / b;
am1 = a / b;
bm1 = 1;
oldf = f;
f = am1;
j = j+1;
end
y= exp(-xk) * f - 1im*pi*((real(xk)<0)&(imag(xk)==0));
end
return y
end
end
| [
21412,
1610,
385,
806,
5841,
198,
2,
3500,
46326,
198,
3500,
12280,
26601,
8231,
198,
3500,
14370,
198,
2,
6550,
23912,
3555,
198,
3500,
36775,
198,
2,
399,
6975,
605,
11812,
5301,
357,
8890,
1559,
3896,
8,
198,
3500,
20648,
38,
42,
... | 1.923092 | 15,330 |
using MyPkgDemo
using Documenter
makedocs(;
modules=[MyPkgDemo],
authors="MegamindHenry",
repo="https://github.com/MegamindHenry/MyPkgDemo.jl/blob/{commit}{path}#L{line}",
sitename="MyPkgDemo.jl",
format=Documenter.HTML(;
prettyurls=get(ENV, "CI", "false") == "true",
canonical="https://MegamindHenry.github.io/MyPkgDemo.jl",
assets=String[],
),
pages=[
"Home" => "index.md",
"Add Nums" => "add_nums.md",
"Cat Str" => "cat_str.md"
],
)
deploydocs(;
repo="github.com/MegamindHenry/MyPkgDemo.jl",
)
| [
3500,
2011,
47,
10025,
11522,
78,
198,
3500,
16854,
263,
198,
198,
76,
4335,
420,
82,
7,
26,
198,
220,
220,
220,
13103,
41888,
3666,
47,
10025,
11522,
78,
4357,
198,
220,
220,
220,
7035,
2625,
42672,
321,
521,
32476,
1600,
198,
220,... | 2.038462 | 286 |
<reponame>sisl/GrammarExpts.jl
#*****************************************************************************
# Written by <NAME>, <EMAIL>
# *****************************************************************************
# Copyright ã 2015, United States Government, as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved. The Reinforcement Learning Encounter Simulator (RLES)
# platform is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You
# may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0. Unless required by applicable
# law or agreed to in writing, software distributed under the License is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
# _____________________________________________________________________________
# Reinforcement Learning Encounter Simulator (RLES) includes the following
# third party software. The SISLES.jl package is licensed under the MIT Expat
# License: Copyright (c) 2014: <NAME>.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED
# "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# *****************************************************************************
"""
Experiment 4
test combined time series and binary logical operations
"""
module NNGrammarExpt4
export circuit4, restarts
import Compat.ASCIIString
using TFTools
using Datasets
using TensorFlow
using TensorFlow.CoreTypes #DT_FLOAT32, DT_BOOL, PyVectorType
using TensorFlow.API: l2_loss, AdamOptimizer, GradientDescentOptimizer, cast,
round_, reshape_, l2_normalize, RMSPropOptimizer,
reduce_max, reduce_min, minimum_, maximum_, transpose_, less_, greater,
expand_dims, tile, shape, mul, reduce_sum, moments, conv2d, truncated_normal,
concat, sub_, matmul, clip_by_value
using TensorFlow.API.TfNn: sigmoid
using StatsBase
using RLESUtils, Confusion
using PyCall
@pyimport tensorflow as tf
function restarts(f::Function, N::Int64; kwargs...)
[f(; kwargs...) for i = 1:N]
end
function circuit4(;
dataname::AbstractString="vhdist3",
labelfield::AbstractString="nmac", #"F_x1_lt_100_and_x2_lt_500",
learning_rate::Float64=0.001,
max_training_epochs::Int64=500,
target_cost::Float64=0.001,
batch_size::Int64=4820,
mux_hidden_units::Vector{Int64}=Int64[50,30],
display_step::Int64=1,
b_debug::Bool=false,
nshow::Int64=1)
Dl = dataset(dataname, :nmac; transform=x->Float32(x)) #DFSetLabeled
val_inputs = constant(Float32[0, 50, 100, 250, 500, 1000, 3000, 5000])
data_set = TFDataset(Dl)
# Construct model
(n_examples, n_steps, n_feats) = size(Dl)
n_featsflat = n_steps * n_feats
n_vals = get_shape(val_inputs)[1]
# inputs
feats = Placeholder(DT_FLOAT32, [-1, n_steps, n_feats])
inputs = Tensor(feats)
normalizer = Normalizer(data_set; converttype=Float32)
normed_input = normalize01(normalizer, inputs)
feats_flat = reshape_(normed_input, constant(Int32[-1, n_featsflat]))
#softness parameter
softness = collect(linspace(0.1, 1.0, max_training_epochs))
softness_pl = Placeholder(DT_FLOAT32, [1])
#toggle hard or soft output
harden_pl = Placeholder(DT_BOOL, [1])
# common (embedding) layer
#embed_in = constant(rand(Float32, 1, 5))
#embed_hidden_units = [5]
#embed_blk = ReluStack(embed_in, embed_hidden_units)
#embed_out = out(embed_blk)
# mux select input
#muxselect = feats_flat #simple flatten
#muxselect = constant(ones(Float32, 1, 1)) #constant 1
#muxselect = embed_out #relustack embedding
overrides = zeros(Int64, 8)
#overrides = [1, 2, 3, 5, 1, 1, 1, 1]
#overrides = [0, 2, 0, 5, 0, 1, 1, 0]
#convolution stuff
HT = 3
WT = 2
STRIDES = PyVectorType([1, 1, 1, 1])
filt_weights = Variable(truncated_normal(constant([HT, WT, 1, 1]), constant(0.0), constant(5e-2)))
conv1 = conv2d(expand_dims(Tensor(normed_input), constant(3)), Tensor(filt_weights), STRIDES, "SAME")
muxselect = reshape_(conv1, constant(Int32[-1, n_featsflat]))
# f1 feat select
f1_in = inputs
f1_mux = SoftMux(n_feats, mux_hidden_units, f1_in, muxselect, Tensor(harden_pl), Tensor(softness_pl); override=overrides[1])
f1_out = out(f1_mux)
# f2 feat select
f2_in = inputs
f2_mux = SoftMux(n_feats, mux_hidden_units, f2_in, muxselect, Tensor(harden_pl), Tensor(softness_pl); override=overrides[2])
f2_out = hardout(f2_mux)
# v1 value select
#v1_in = val_inputs
n_vals=1
v1_in = constant(Float32[100.0])
v1_mux = SoftMux(n_vals, mux_hidden_units, v1_in, muxselect, Tensor(harden_pl); override=overrides[3])
v1_out = out(v1_mux)
# v2 value select
#v2_in = val_inputs
n_vals = 1
v2_in = constant(Float32[500.0])
v2_mux = SoftMux(n_vals, mux_hidden_units, v2_in, muxselect, Tensor(harden_pl); override=overrides[4])
v2_out = hardout(v2_mux)
compare_ops = [op_lt, op_gt]
logical_ops = [op_and, op_or]
temporal_ops = [op_F, op_G]
# a1 float op block
a1_in = (f1_out, v1_out)
a1_blk = SoftOpsMux(a1_in, compare_ops, mux_hidden_units, muxselect, Tensor(harden_pl), Tensor(softness_pl); opargs=Any[Tensor(softness_pl)], override=overrides[5])
a1_out = out(a1_blk)
# a2 float op block
a2_in = (f2_out, v2_out)
a2_blk = SoftOpsMux(a2_in, compare_ops, mux_hidden_units, muxselect, Tensor(harden_pl), Tensor(softness_pl); opargs=Any[Tensor(softness_pl)], override=overrides[6])
a2_out = hardout(a2_blk)
# l1 logical op block
l1_in = (a1_out, a2_out)
l1_blk = SoftOpsMux(l1_in, logical_ops, mux_hidden_units, muxselect, Tensor(harden_pl), Tensor(softness_pl); override=overrides[7])
l1_out = hardout(l1_blk)
# t1 temporal op block
t1_in = (l1_out,)
#t1_in = (constant(rand(Float32, shape(l1_out))), )
t1_blk = SoftOpsMux(t1_in, temporal_ops, mux_hidden_units, muxselect, Tensor(harden_pl), Tensor(softness_pl); override=overrides[8])
t1_out = out(t1_blk)
# outputs
pred = t1_out
labels = Placeholder(DT_FLOAT32, [-1])
#take nnout of batch, compute moments, take variance component and sum
f1_var = reduce_sum(moments(f1_mux.nnout, Tensor(Int32[0]))[2], Tensor(0))
v1_var = reduce_sum(moments(v1_mux.nnout, Tensor(Int32[0]))[2], Tensor(0))
f2_var = reduce_sum(moments(f2_mux.nnout, Tensor(Int32[0]))[2], Tensor(0))
v2_var = reduce_sum(moments(v2_mux.nnout, Tensor(Int32[0]))[2], Tensor(0))
a1_var = reduce_sum(moments(a1_blk.softmux.nnout, Tensor(Int32[0]))[2], Tensor(0))
a2_var = reduce_sum(moments(a2_blk.softmux.nnout, Tensor(Int32[0]))[2], Tensor(0))
l1_var = reduce_sum(moments(l1_blk.softmux.nnout, Tensor(Int32[0]))[2], Tensor(0))
t1_var = reduce_sum(moments(t1_blk.softmux.nnout, Tensor(Int32[0]))[2], Tensor(0))
sum_var = f1_var + v1_var + f2_var + v2_var + constant(300.0) .* a1_var + constant(300.0) .* a2_var + constant(300.0) .* l1_var + constant(300.0) .* t1_var
#packed_nnouts = concat(Tensor(1), Tensor([f1_mux.nnout, v1_mux.nnout, f2_mux.nnout, v2_mux.nnout, a1_blk.softmux.nnout, a2_blk.softmux.nnout, l1_blk.softmux.nnout, t1_blk.softmux.nnout]))
#m = matmul(packed_nnouts, packed_nnouts, false, true)
#allpairs_cossim = reduce_sum(sub_(constant(8.0), m))
# Define loss and optimizer
#cost = l2_loss(pred - labels) # Squared loss
cost = l2_loss(pred - labels) + constant(1.0) .* sum_var
#cost = l2_loss(pred - labels) + constant(1.0) .* allpairs_cossim
#optimizer
#optimizer = minimize(AdamOptimizer(learning_rate), cost)
opt = Optimizer(tf.train[:GradientDescentOptimizer](learning_rate))
gvs = opt.x[:compute_gradients](cost.x)
capped_gvs = [(tf.nn[:l2_normalize](tf.clip_by_value(grad, -1.0, 1.0), 0), var) for (grad, var) in gvs]
optimizer = Operation(opt.x[:apply_gradients](capped_gvs))
#compiled hardselect
ckt = Circuit([
f1_mux, v1_mux,
f2_mux, v2_mux,
a1_blk, a2_blk,
l1_blk,
t1_blk,
],
Vector{ASCIIString}[
["alt", "range"],
["0", "50", "100", "250", "500", "1000", "3000", "5000"],
["alt", "range"],
["0", "50", "100", "250", "500", "1000", "3000", "5000"],
["<", ">"],
["<", ">"],
["and", "or"],
["F", "G"]])
#debug
f1_grad = gradient_tensor(cost, f1_mux.weight)
v1_grad = gradient_tensor(cost, v1_mux.weight)
f2_grad = gradient_tensor(cost, f2_mux.weight)
v2_grad = gradient_tensor(cost, v2_mux.weight)
a1_grad = gradient_tensor(cost, a1_blk.softmux.weight)
a2_grad = gradient_tensor(cost, a2_blk.softmux.weight)
l1_grad = gradient_tensor(cost, l1_blk.softmux.weight)
t1_grad = gradient_tensor(cost, t1_blk.softmux.weight)
#/debug
# Iniuializing the variables
init = initialize_all_variables()
# Rock and roll
println("Optimization Start: $(now())")
sess = Session()
run(sess, init)
#debug
#fd = FeedDict(feats => data_set.X, labels => data_set.Y)
#run(sess, normed_input, fd)
#/debug
# Training cycle
for epoch in 1:max_training_epochs
avg_cost = 0.0
total_batch = div(num_examples(data_set), batch_size)
#Loop over all batches
for i in 1:total_batch
batch_xs, batch_ys = next_batch(data_set, batch_size)
fd = FeedDict(feats => batch_xs, labels => batch_ys, softness_pl => [softness[epoch]], harden_pl => [false])
# Fit training using batch data
run(sess, optimizer, fd)
#debug
#softsel = softselect_by_example(sess, ckt, fd)
#grads = run(sess, Tensor([f1_grad, v1_grad, f2_grad, v2_grad, a1_grad, a2_grad, l1_grad, t1_grad]), fd)
#conv1_out = run(sess, conv1, fd)
#@show run(sess, pred, fd)
#@show run(sess, pred - labels, fd)
#@show run(sess, muxselect, fd)
#@show run(sess, f1_grad, fd)
#@show run(sess, v1_grad, fd)
#/debug
# Compute average loss
batch_average_cost = run(sess, cost, fd)
avg_cost += batch_average_cost / (total_batch * batch_size)
end
# Display logs per epoch step
if epoch % display_step == 0
#softsel = softselect_by_example(sess, ckt, fd)
#grads = run(sess, Tensor([f1_grad, v1_grad, f2_grad, v2_grad, a1_grad, a2_grad, l1_grad, t1_grad]), fd)
#println(softsel)
#println(capped_gvs)
#grads = Tensor([grad for (grad, var) in capped_gvs])
println("Epoch $(epoch) cost=$(avg_cost)")
#println("Norm=$(norm(grads))")
if avg_cost < Float32(target_cost)
break;
end
end
end
println("Optimization Finished: $(now())")
# Test model
correct_prediction = (round_(pred) == labels)
# Calculate accuracy
accuracy = mean(cast(correct_prediction, DT_FLOAT32))
#reload data_set to recover original order (next_batch will internally shuffle)
data_set = TFDataset(Dl)
X = data_set.X
Y = data_set.Y
#soft metrics
fd = FeedDict(feats => data_set.X, labels => data_set.Y, softness_pl => [softness[end]], harden_pl => [false])
Ypred_soft = run(sess, round_(pred), fd)
acc_soft = run(sess, accuracy, fd)
stringout = simplestring(sess, ckt, fd; order=[8,1,5,2,7,3,6,4])
top5 = topstrings(stringout, 5)
softsel = softselect_by_example(sess, ckt, fd)
grads = run(sess, Tensor([f1_grad, v1_grad, f2_grad, v2_grad, a1_grad, a2_grad, l1_grad, t1_grad]), fd)
conf = confusion(Ypred_soft.==1.0, Y.==1.0)
conf_indices = confusion_indices(Ypred_soft.==1.0, Y.==1.0)
#hard metrics
fd = FeedDict(feats => data_set.X, labels => data_set.Y, softness_pl => [softness[end]], harden_pl => [true])
Ypred_hard = run(sess, round_(pred), fd)
acc_hard = run(sess, accuracy, fd)
println("Soft Accuracy:", acc_soft)
println("Hard Accuracy:", acc_hard)
println(top5)
d=Dict{ASCIIString,Any}()
top5, acc_hard
end
op_F(x::Tensor) = reduce_max(x, Tensor(1))
op_G(x::Tensor) = reduce_min(x, Tensor(1))
op_and(x::Tensor, y::Tensor) = minimum_(x, y) #element-wise
op_or(x::Tensor, y::Tensor) = maximum_(x, y) #element-wise
#TODO: move these to a central location
function op_gt(x::Tensor, y::Tensor, W::Tensor=constant(1.0/100.0))
tmp = expand_dims(y, Tensor(1))
ytiled = tile(tmp, Tensor([1, get_shape(x)[2]]))
result = sigmoid(mul(W, x - ytiled))
result
end
function op_lt(x::Tensor, y::Tensor, W::Tensor=constant(1.0/100.0))
tmp = expand_dims(y, Tensor(1))
ytiled = tile(tmp, Tensor([1, get_shape(x)[2]]))
result = sigmoid(mul(W, ytiled - x))
end
end #module
| [
27,
7856,
261,
480,
29,
82,
3044,
14,
38,
859,
3876,
3109,
457,
82,
13,
20362,
198,
2,
17174,
17174,
4557,
35625,
198,
2,
22503,
416,
1279,
20608,
22330,
1279,
27630,
4146,
29,
198,
2,
41906,
17174,
4557,
35625,
198,
2,
15069,
6184,... | 2.307604 | 6,076 |
<reponame>JuliaPackageMirrors/NearestNeighbors.jl
# Does not test leafsize
# Does not test different metrics
import Distances.evaluate
@testset "knn" begin
@testset "metric" for metric in metrics
@testset "tree type" for TreeType in trees_with_brute
# 8 node rectangle
data = [0.0 0.0 0.0 0.5 0.5 1.0 1.0 1.0;
0.0 0.5 1.0 0.0 1.0 0.0 0.5 1.0]
tree = TreeType(data, metric; leafsize=2)
idxs, dists = knn(tree, [0.8, 0.8], 1)
@test idxs[1] == 8 # Should be closest to top right corner
@test evaluate(metric, [0.2, 0.2], zeros(2)) ≈ dists[1]
idxs, dists = knn(tree, [0.1, 0.8], 3, true)
@test idxs == [3, 2, 5]
idxs, dists = knn(tree, [0.8 0.1; 0.8 0.8], 1, true)
@test idxs[1][1] == 8
@test idxs[2][1] == 3
idxs, dists = knn(tree, [SVector{2, Float64}(0.8,0.8), SVector{2, Float64}(0.1,0.8)], 1, true)
@test idxs[1][1] == 8
@test idxs[2][1] == 3
idxs, dists = knn(tree, [1//10, 8//10], 3, true)
@test idxs == [3, 2, 5]
@test_throws ArgumentError knn(tree, [0.1, 0.8], 10) # k > n_points
@test_throws ArgumentError knn(tree, [0.1], 10) # n_dim != trees dim
end
end
end
@testset "knn skip" begin
@testset "tree type" for TreeType in trees_with_brute
data = rand(2, 1000)
tree = TreeType(data)
idxs, dists = knn(tree, data[:, 10], 2, true)
first_idx = idxs[1]
second_idx = idxs[2]
idxs, dists = knn(tree, data[:, 10], 2, true, i -> i == first_idx)
@test idxs[1] == second_idx
end
end
| [
27,
7856,
261,
480,
29,
16980,
544,
27813,
27453,
5965,
14,
8199,
12423,
46445,
32289,
13,
20362,
198,
2,
8314,
407,
1332,
12835,
7857,
198,
2,
8314,
407,
1332,
1180,
20731,
198,
11748,
4307,
1817,
13,
49786,
198,
198,
31,
9288,
2617,... | 1.862106 | 921 |
<reponame>ArbitRandomUser/Javis.jl
"""
ObjectSetting
The current settings of an [`Object`](@ref) which are saved in `object.current_setting`.
# Fields
- `line_width::Float64`: the current line width
- `mul_line_width::Float64`: the current multiplier for line width.
The actual line width is then: `mul_line_width * line_width`
- `opacity::Float64`: the current opacity
- `mul_opacity::Float64`: the current multiplier for opacity.
The actual opacity is then: `mul_opacity * opacity`
- `fontsize::Float64` the current font size
- `show_object::Bool` is set to false if scale would be 0.0 which is forbidden by Cairo
- `current_scale::Tuple{Float64, Float64}`: the current scale
- `desired_scale::Tuple{Float64, Float64}`: the new desired scale
- `mul_scale::Float64`: the multiplier for the new desired scale.
The actual new scale is then: `mul_scale * desired_scale`
"""
mutable struct ObjectSetting
line_width::Float64
mul_line_width::Float64 # the multiplier of line width is between 0 and 1
opacity::Float64
mul_opacity::Float64 # the multiplier of opacity is between 0 and 1
fontsize::Float64
# scale has three fields instead of just the normal two
# current scale
# desired scale and scale multiplier => `desired_scale*mul_scale` is the new desired scale
# the scale change needs to be computed using `current_scale` and the desired scale
# current_scale should never be 0 as this breaks scaleto has various other bad effects
# see: https://github.com/JuliaGraphics/Luxor.jl/issues/114
# in this case show will be set to false and the object will not be called
show_object::Bool
current_scale::Scale
desired_scale::Scale
mul_scale::Float64 # the multiplier of scale is between 0 and 1
ObjectSetting() =
new(2.0, 1.0, 1.0, 1.0, 10.0, true, Scale(1.0, 1.0), Scale(1.0, 1.0), 1.0)
end
"""
update_ObjectSetting!(as::ObjectSetting, by::ObjectSetting)
Set the fields of `as` to the same as `by`. Basically copying them over.
"""
function update_ObjectSetting!(as::ObjectSetting, by::ObjectSetting)
as.line_width = by.line_width
as.mul_line_width = by.mul_line_width
as.opacity = by.opacity
as.mul_opacity = by.mul_opacity
as.fontsize = by.fontsize
as.show_object = by.show_object
as.current_scale = by.current_scale
as.desired_scale = by.desired_scale
as.mul_scale = by.mul_scale
end
function update_background_settings!(setting::ObjectSetting, object::AbstractObject)
in_global_layer = get(object.opts, :in_global_layer, false)
if in_global_layer
update_ObjectSetting!(setting, object.current_setting)
end
end
function update_object_settings!(object::AbstractObject, setting::ObjectSetting)
update_ObjectSetting!(object.current_setting, setting)
end
| [
27,
7856,
261,
480,
29,
3163,
2545,
29531,
12982,
14,
41,
23401,
13,
20362,
198,
37811,
198,
220,
220,
220,
9515,
34149,
198,
198,
464,
1459,
6460,
286,
281,
685,
63,
10267,
63,
16151,
31,
5420,
8,
543,
389,
7448,
287,
4600,
15252,
... | 2.943515 | 956 |
#------------------------------------------------------------------------------
"""
    excise(x...)

Remove all rows where there is a NaN/missing in any of the `x` arrays.

# Examples
- `x1 = excise(x)`
- `(y1,x1) = excise(y,x)`
"""
function excise(x...)
    keep = FindNNPs(x...)                             # rows clean in every input
    clean = map(a -> copy(selectdim(a, 1, keep)), x)  # tuple of cropped copies
    # unwrap the tuple when a single array was passed
    return length(x) == 1 ? clean[1] : clean
end
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
"""
    FindNNPs(x...;Keepdim=1)

Find rows (if `Keepdim==1`) which have no NaN/missing in the other dimensions
(e.g. in no columns).

# Input
- `x...`: one or several numerical arrays
- `Keepdim::Int`: (keyword) 1 to check rows, 2 to check columns, etc.

# Output
- `vvb::BitVector`: element t is true if slice t along `Keepdim` has no NaN/missing

# Notice
- Set `Keepdim=2` to instead look for NaNs/missings along rows (and other dimensions).
- For heterogeneous arrays like `x=[x1,x1]`, use `FindNNPs(x...)`.
"""
function FindNNPs(x...; Keepdim=1)
    nArrays = length(x)
    nSlices = size(x[1], Keepdim)          # length of the output
    maxdims = maximum(ndims, x)            # highest dimensionality among inputs
    checkdims = setdiff(1:maxdims, Keepdim)  # dimensions reduced when checking
    isbad = z -> ismissing(z) || isnan(z)  # checked in this order so isnan never sees missing
    badM = falses(nSlices, nArrays)
    for (i, a) in enumerate(x)             # one column of flags per input array
        badM[:, i] = any(isbad, a, dims=checkdims)
    end
    return vec(.!any(badM, dims=2))        # true where no input had NaN/missing
end
#------------------------------------------------------------------------------
| [
2,
10097,
26171,
198,
37811,
198,
220,
220,
220,
47547,
7,
87,
23029,
198,
198,
27914,
477,
3951,
810,
262,
318,
257,
11013,
45,
14,
45688,
287,
597,
286,
262,
2124,
26515,
198,
198,
2,
21066,
198,
12,
4600,
87,
16,
796,
47547,
7,... | 2.644412 | 689 |
<reponame>UnofficialJuliaMirror/Nabla.jl-49c96f43-aa6d-5a04-a506-44c7070ebe78<filename>src/sensitivities/indexing.jl
# Implementation of reverse-mode sensitivities for `getindex`.
import Base.getindex
# Register `getindex` with the intercept machinery for call signatures of 1 to 7
# arguments: the first argument (the indexed array) is differentiable (`true`),
# the trailing index arguments are not (`false`).
for i = 1:7
    T = Expr(:curly, :Tuple, fill(:Any, i)...)          # Tuple{Any, …} with i entries
    is_node = Expr(:vect, true, fill(false, i - 1)...)  # [true, false, …] node mask
    @eval @explicit_intercepts getindex $T $is_node
end
# Accumulating reverse-mode sensitivity of `getindex` w.r.t. the indexed array:
# scatter the output cotangent `ȳ` back into position `inds` of `Ā`.
function ∇(Ā, ::typeof(getindex), ::Type{Arg{1}}, p, y, ȳ, A, inds...)
    Ā[inds...] = Ā[inds...] + ȳ
    return Ā
end
# Array-output case: accumulate into a view of `Ā` (no intermediate copy),
# reshaping `ȳ` to the shape of the primal output `y`.
function ∇(Ā, ::typeof(getindex), ::Type{Arg{1}}, p, y::AbstractArray, ȳ::AbstractArray, A, inds...)
    target = view(Ā, inds...)
    target .+= reshape(ȳ, size(y)...)
    return Ā
end
# Non-accumulating form: start from a zeroed container shaped like `A` and
# delegate to the accumulating method above.
∇(::typeof(getindex), ::Type{Arg{1}}, p, y, ȳ, A, inds...) =
    ∇(zerod_container(A), getindex, Arg{1}, p, y, ȳ, A, inds...)
# # Implementation of reverse-mode sensitivities for `view`. Not currently in use because
# `view` turns out to actually be a bit awkward.
# eval(DiffBase, add_intercept(:view, :(Base.view), :(Tuple{Any, Vararg})))
# @inline function ∇(Ā, ::typeof(view), ::Type{Arg{1}}, p, y, ȳ, A, inds...)
# return Base.setindex!(Ā, ȳ, inds...)
# end
# @inline function ∇(::typeof(view), ::Type{Arg{1}}, p, y, ȳ, A, inds...)
# return ∇(zeros(A), view, Arg{1}, p, y, ȳ, A, inds...)
# end
| [
27,
7856,
261,
480,
29,
3118,
16841,
16980,
544,
27453,
1472,
14,
45,
397,
5031,
13,
20362,
12,
2920,
66,
4846,
69,
3559,
12,
7252,
21,
67,
12,
20,
64,
3023,
12,
64,
35638,
12,
2598,
66,
2154,
2154,
68,
1350,
3695,
27,
34345,
29... | 2.253054 | 573 |
# WaveFD: finite-difference wave propagation kernels and supporting utilities.
module WaveFD

using Base.Threads, CvxCompress, DSP, Distributed, DistributedArrays, FFTW, LinearAlgebra, NearestNeighbors, Random, SpecialFunctions, StaticArrays, Statistics, WaveFD_jll

# Base functions extended with methods for WaveFD types.
import
    Base.convert,
    Base.copy,
    Base.get,
    Base.min,
    Base.max,
    Base.maximum,
    Base.show,
    Base.size

# Dispatch tags selecting which implementation language backs an operation.
abstract type Language end
struct LangC <: Language end      # C implementation
struct LangJulia <: Language end  # pure-Julia implementation

# Compact printing of the language tags.
show(io::IO, l::LangC) = write(io, "C")
show(io::IO, l::LangJulia) = write(io, "Julia")

# Dispatch tags selecting the imaging condition.
abstract type ImagingCondition end
struct ImagingConditionStandard <: ImagingCondition end
struct ImagingConditionWaveFieldSeparationFWI <: ImagingCondition end
struct ImagingConditionWaveFieldSeparationRTM <: ImagingCondition end

# Shared utilities.
include("stencil.jl")
include("spacetime.jl")
include("compressedio.jl")
include("wavelet.jl")
include("absorb.jl")
include("illumination.jl")
# 2D and 3D acoustic propagators (isotropic/TTI/VTI, DEO2 FDTD schemes).
include("prop2DAcoIsoDenQ_DEO2_FDTD.jl")
include("prop2DAcoTTIDenQ_DEO2_FDTD.jl")
include("prop2DAcoVTIDenQ_DEO2_FDTD.jl")
include("prop3DAcoIsoDenQ_DEO2_FDTD.jl")
include("prop3DAcoTTIDenQ_DEO2_FDTD.jl")
include("prop3DAcoVTIDenQ_DEO2_FDTD.jl")

export
    dtmod,
    Prop2DAcoIsoDenQ_DEO2_FDTD,
    Prop2DAcoTTIDenQ_DEO2_FDTD,
    Prop2DAcoVTIDenQ_DEO2_FDTD,
    Prop3DAcoIsoDenQ_DEO2_FDTD,
    Prop3DAcoTTIDenQ_DEO2_FDTD,
    Prop3DAcoVTIDenQ_DEO2_FDTD,
    fieldfile!,
    free,
    Ginsu,
    linearadjoint,
    linearforward,
    nonlinearforward,
    ntmod,
    reportfreq!,
    sourceillum!,
    sourceillum,
    traces,
    Wavelet,
    WaveletDerivRicker,
    WaveletMinPhaseRicker,
    WaveletCausalRicker,
    WaveletOrmsby,
    WaveletMinPhaseOrmsby,
    WaveletRicker,
    WaveletSine

end
| [
21412,
17084,
26009,
198,
198,
3500,
7308,
13,
16818,
82,
11,
327,
85,
87,
7293,
601,
11,
360,
4303,
11,
4307,
6169,
11,
4307,
6169,
3163,
20477,
11,
376,
9792,
54,
11,
44800,
2348,
29230,
11,
3169,
12423,
46445,
32289,
11,
14534,
1... | 2.509772 | 614 |
<filename>julia/emit_log_direct.jl
using AMQPClient
const VIRTUALHOST = "/"       # AMQP virtual host to connect to
const HOST = "127.0.0.1"      # broker address (local machine)
"""
    send()

Publish a single severity-tagged message to the `direct_logs` direct exchange.

When at least three command-line arguments are given, the first is used as the
routing key (severity) and the rest form the message body; otherwise the
severity defaults to "info" with a "Hello World" body.
"""
function send()
    # Connect to the local broker on virtual host "/".
    connection(; virtualhost=VIRTUALHOST, host=HOST) do conn
        # Open a channel for publishing.
        channel(conn, AMQPClient.UNUSED_CHANNEL, true) do chan
            # Make sure the direct exchange exists before publishing to it.
            exchange = "direct_logs"
            exchange_declare(chan, exchange, EXCHANGE_TYPE_DIRECT)
            # Routing key and body come from the command line when provided.
            if length(Base.ARGS) >= 3
                severity = Base.ARGS[1]
                received = join(Base.ARGS[2:end], ' ')
            else
                severity = "info"
                received = "Hello World"
            end
            # Encode the body as bytes and publish a persistent plain-text message.
            payload = convert(Vector{UInt8}, codeunits(received))
            message = Message(payload, content_type="text/plain", delivery_mode=PERSISTENT)
            basic_publish(chan, message; exchange=exchange, routing_key=severity)
            println("Message sent: $received, Severity: $severity")
        end
    end
end
# Run the publisher when the script is executed.
send()
| [
27,
34345,
29,
73,
43640,
14,
368,
270,
62,
6404,
62,
12942,
13,
20362,
198,
3500,
3001,
48,
47,
11792,
198,
9979,
569,
48771,
25620,
39,
10892,
796,
12813,
1,
198,
9979,
367,
10892,
796,
366,
16799,
13,
15,
13,
15,
13,
16,
1,
6... | 2.121547 | 543 |
<gh_stars>0
# Question
# What is the largest prime factor of the number 600851475143 ?
# Time
# O(n) any better solution??
"""
    sieve(a)

Return all prime numbers less than or equal to `a` as a `Vector{Int}`
(Sieve of Eratosthenes).

Fix: the original body ended in `while true ... end` with an empty body, so it
never terminated and never computed anything.
"""
function sieve(a)
    a < 2 && return Int[]
    # flag[i] is true while i is still presumed prime; 1 is not prime
    flag = ones(Bool, a)
    flag[1] = false
    # cross out the multiples of every prime up to sqrt(a)
    i = 2
    while i * i <= a
        if flag[i]
            for j = i*i:i:a
                flag[j] = false
            end
        end
        i += 1
    end
    return findall(flag)
end
"""
    main()

Print and return the largest prime factor of 600851475143 (the question stated
at the top of the file).

Fix: the original called the non-terminating `sieve` and then printed
`floor(sqrt(val))` — it never produced the answer. Trial division with factor
removal runs in O(sqrt(n)) time and needs no sieve at all.
"""
function main()
    val = 600851475143
    n = val
    largest = 1
    f = 2
    # divide out each factor as it is found; factors are found in increasing
    # order, and whatever remains above 1 at the end is itself prime
    while f * f <= n
        if n % f == 0
            largest = f
            n ÷= f
        else
            f += 1
        end
    end
    n > 1 && (largest = n)
    println(largest)
    return largest
end
# Run and report elapsed time (the first call includes compilation time).
@time main()
| [
27,
456,
62,
30783,
29,
15,
198,
2,
18233,
198,
2,
1867,
318,
262,
4387,
6994,
5766,
286,
262,
1271,
10053,
5332,
1415,
2425,
21139,
5633,
198,
198,
2,
3862,
198,
2,
440,
7,
77,
8,
597,
1365,
4610,
3548,
198,
8818,
264,
12311,
7... | 2.659193 | 223 |
<gh_stars>1-10
# Root type for containers bundling the wake/turbulence models used in a wind farm calculation.
abstract type AbstractModelSet end
# using CSV
# using DataFrames
"""
WindFarmModelSet(wakedeficitmodel, wake_deflection_model, wake_combination_model, local_ti_model)
Container for objects defining models to use in wind farm calculations
# Arguments
- `wake_deficit_model::AbstractWakeDeficitModel`: contains a struct defining the desired wake deficit model
- `wake_deflection_model::AbstractWakeDeflectionModel`: contains a struct defining the desired wake deflection model
- `wake_combination_model::AbstractWakeCombinationModel`: contains a struct defining the desired wake combination model
- `local_ti_model::AbstractTurbulenceIntensityModel`: contains a struct defining the desired turbulence intensity model
"""
struct WindFarmModelSet{DTM,DNM,CM,TIM} <: AbstractModelSet
    wake_deficit_model::DTM        # wake velocity-deficit model
    wake_deflection_model::DNM     # wake deflection model
    wake_combination_model::CM     # how overlapping wake deficits are combined
    local_ti_model::TIM            # local turbulence-intensity model
end
"""
point_velocity(locx, locy, locz, turbine_x, turbine_y, turbine_z, turbine_yaw, turbine_ct, turbine_ai,
rotor_diameter, hub_height, turbine_local_ti, sorted_turbine_index, wtvelocities,
wind_resource, model_set::AbstractModelSet;
wind_farm_state_id=1, downwind_turbine_id=0)
Calculates the wind speed at a given point for a given state
# Arguments
- `locx`, `locy`, `locz`: coordinates of the location of interest (in the rotated, downwind-aligned reference frame)
- `turbine_x::Array{TF,nTurbines}`: turbine east-west locations in the state
reference frame
- `turbine_y::Array{TF,nTurbines}`: turbine north-south locations in the state
reference frame
- `turbine_z::Array{TF,nTurbines}`: turbine base height in the state reference frame
- `turbine_yaw::Array{TF,nTurbines}`: turbine yaw for the given wind direction in
radians
- `turbine_ct::Array{TF,nTurbines}`: turbine thrust coefficients for the given state
- `turbine_ai::Array{TF,nTurbines}`: turbine axial induction for the given state
- `rotor_diameter::Array{TF,nTurbines}`: turbine rotor diameters
- `hub_height::Array{TF,nTurbines}`: turbine hub heights
- `turbine_local_ti::Array{TF,nTurbines}`: turbine local turbulence intensity for
the given state
- `sorted_turbine_index::Array{TF,nTurbines}`: array containing indices of wind turbines
from most upwind to most downwind turbine in the given state
- `wtvelocities::Array{TF,nTurbines}`: effective inflow wind speed for given state
- `wind_resource::DiscretizedWindResource`: contains wind resource discreption (directions,
speeds, frequencies, etc)
- `wind_farm_state_id::Int`: index to correct state to use from wind resource provided.
Defaults to 1
- `downwind_turbine_id::Int`: index of wind turbine of interest (if any). If not a point for
calculating effective wind speed of a turbine, then provide 0 (default)
"""
function point_velocity(locx, locy, locz, turbine_x, turbine_y, turbine_z, turbine_yaw, turbine_ct, turbine_ai,
                    rotor_diameter, hub_height, turbine_local_ti, sorted_turbine_index, wtvelocities,
                    wind_resource, model_set::AbstractModelSet;
                    wind_farm_state_id=1, downwind_turbine_id=0)

    # unpack the models bundled in the model set
    wakedeficitmodel = model_set.wake_deficit_model
    wakedeflectionmodel = model_set.wake_deflection_model
    wakecombinationmodel = model_set.wake_combination_model

    # extract flow information for the requested state
    wind_speed = wind_resource.wind_speeds[wind_farm_state_id]
    reference_height = wind_resource.measurement_heights[wind_farm_state_id]

    # set ground height
    ground_height = wind_resource.wind_shear_model.ground_height # TODO: allow topology to be given

    # find order for wind shear and deficit calculations
    shear_order = wind_resource.wind_shear_model.shear_order

    # adjust wind speed for wind shear: "first" applies shear before the deficit
    # summation; any other value defers it to the end of this function
    if shear_order == "nothing"
        wind_speed_internal = wind_speed
    elseif shear_order == "first"
        wind_speed_internal = adjust_for_wind_shear(locz, wind_speed, reference_height, ground_height, wind_resource.wind_shear_model)
    else
        wind_speed_internal = wind_speed
    end

    # get number of turbines
    nturbines = length(turbine_x)

    # initialize deficit summation term to zero
    deficit_sum = 0.0

    # loop through all turbines, most upwind first
    for u=1:nturbines

        # get index of upstream turbine
        upwind_turb_id = Int(sorted_turbine_index[u])

        # don't allow turbine to impact itself
        if upwind_turb_id == downwind_turbine_id; continue; end

        # downstream distance between upstream turbine and point
        x = locx - turbine_x[upwind_turb_id]

        # only turbines strictly upstream of the point (small tolerance) contribute
        if x > 1E-6
            # NOTE(review): this self-impact guard is redundant with the one above
            if upwind_turb_id==downwind_turbine_id; continue; end

            # calculate wake deflection of the current wake at the point of interest
            horizontal_deflection = wake_deflection_model(locx, locy, locz, turbine_x, turbine_yaw, turbine_ct,
                            upwind_turb_id, rotor_diameter, turbine_local_ti, wakedeflectionmodel)
            vertical_deflection = 0.0

            # velocity difference in the wake
            deltav = wake_deficit_model(locx, locy, locz, turbine_x, turbine_y, turbine_z, horizontal_deflection, vertical_deflection,
                            upwind_turb_id, downwind_turbine_id, hub_height, rotor_diameter, turbine_ai,
                            turbine_local_ti, turbine_ct, turbine_yaw, wakedeficitmodel)

            # combine deficits according to selected wake combination method
            deficit_sum = wake_combination_model(deltav, wind_speed_internal, wtvelocities[upwind_turb_id], deficit_sum, wakecombinationmodel)
        end
    end

    # find velocity at point without shear
    point_velocity = wind_speed_internal - deficit_sum

    # apply shear now unless it was already applied ("first") or disabled ("nothing")
    if shear_order == "nothing"
        point_velocity_out = point_velocity
    elseif shear_order == "first"
        point_velocity_out = point_velocity
    else
        point_velocity_out = adjust_for_wind_shear(locz, point_velocity, reference_height, ground_height, wind_resource.wind_shear_model)
    end

    return point_velocity_out

end
"""
turbine_velocities_one_direction(turbine_x, turbine_y, turbine_z, rotor_diameter, hub_height, turbine_yaw,
sorted_turbine_index, ct_model, rotor_sample_points_y, rotor_sample_points_z, wind_resource,
model_set::AbstractModelSet; wind_farm_state_id=1, velocity_only=true)

Calculates the effective inflow wind speed (and, when `velocity_only=false`, also the
thrust coefficient, axial induction, and local turbulence intensity) for each turbine for a given state
# Arguments
- `turbine_x::Array{TF,nTurbines}`: turbine east-west locations in the state
reference frame
- `turbine_y::Array{TF,nTurbines}`: turbine north-south locations in the state
reference frame
- `turbine_z::Array{TF,nTurbines}`: turbine base height in the state reference frame
- `rotor_diameter::Array{TF,nTurbines}`: turbine rotor diameters
- `hub_height::Array{TF,nTurbines}`: turbine hub heights
- `turbine_yaw::Array{TF,nTurbines}`: turbine yaw for the given wind direction in
radians
- `sorted_turbine_index::Array{TF,nTurbines}`: turbine sorted order upstream to downstream
for given state
- `ct_model::AbstractThrustCoefficientModel`: defines how the thrust coefficient changes
with state etc
- rotor_sample_points_y::Array{TF,N}`: horizontal wind location of points to sample across
the rotor swept area when calculating the effective wind speed for the wind turbine.
Points are centered at the hub (0,0) and scaled by the radius (1=tip of blades)
- rotor_sample_points_z::Array{TF,N}`: vertical wind location of points to sample across the
rotor swept area when calculating the effective wind speed for the wind turbine. Points
are centered at the hub (0,0) and scaled by the radius (1=tip of blades)
- `wind_resource::DiscretizedWindResource`: wind resource discreption (directions, speeds,
frequencies, etc)
- `model_set::AbstractModelSet`: defines wake-realated models to be used in analysis
- `wind_farm_state_id::Int`: index to correct state to use from wind resource provided.
Defaults to 1
"""
function turbine_velocities_one_direction(turbine_x, turbine_y, turbine_z, rotor_diameter, hub_height, turbine_yaw,
                    sorted_turbine_index, ct_model, rotor_sample_points_y, rotor_sample_points_z, wind_resource,
                    model_set::AbstractModelSet; wind_farm_state_id=1, velocity_only=true)

    # get number of turbines and rotor sample point
    n_turbines = length(turbine_x)
    n_rotor_sample_points = length(rotor_sample_points_y)

    # initialize correct array types (promotion keeps AD/dual number types intact)
    arr_type = promote_type(typeof(turbine_x[1]),typeof(turbine_y[1]),typeof(turbine_z[1]),typeof(rotor_diameter[1]),
                typeof(hub_height[1]),typeof(turbine_yaw[1]))

    # initialize arrays
    turbine_velocities = zeros(arr_type, n_turbines)
    turbine_ct = zeros(arr_type, n_turbines)
    turbine_ai = zeros(arr_type, n_turbines)
    turbine_local_ti = zeros(arr_type, n_turbines)

    # loop over all turbines, most upwind first, so each turbine's inflow only
    # depends on already-computed upstream turbines
    for d=1:n_turbines

        # get index of downstream turbine
        downwind_turbine_id = Int(sorted_turbine_index[d])

        # initialize downstream wind turbine velocity to zero
        wind_turbine_velocity = 0.0

        # loop over all rotor sample points to approximate the effective inflow velocity
        for p=1:n_rotor_sample_points

            # scale rotor sample point coordinate by rotor diameter (in rotor hub ref. frame)
            local_rotor_sample_point_y = rotor_sample_points_y[p]*0.5*rotor_diameter[downwind_turbine_id]
            local_rotor_sample_point_z = rotor_sample_points_z[p]*0.5*rotor_diameter[downwind_turbine_id]

            # put rotor sample points in wind direction coordinate system, and account for yaw
            locx = turbine_x[downwind_turbine_id] .+ local_rotor_sample_point_y*sin(turbine_yaw[downwind_turbine_id])
            locy = turbine_y[downwind_turbine_id] .+ local_rotor_sample_point_y*cos(turbine_yaw[downwind_turbine_id])
            locz = turbine_z[downwind_turbine_id] .+ hub_height[downwind_turbine_id] + local_rotor_sample_point_z

            # calculate the velocity at given point
            point_velocity_with_shear = point_velocity(locx, locy, locz, turbine_x, turbine_y, turbine_z, turbine_yaw, turbine_ct, turbine_ai,
                                rotor_diameter, hub_height, turbine_local_ti, sorted_turbine_index, turbine_velocities,
                                wind_resource, model_set,
                                wind_farm_state_id=wind_farm_state_id, downwind_turbine_id=downwind_turbine_id)

            # add sample point velocity to turbine velocity to be averaged later
            wind_turbine_velocity += point_velocity_with_shear

        end

        # final velocity calculation for downstream turbine (average equally across all points)
        wind_turbine_velocity /= n_rotor_sample_points

        # NOTE(review): deepcopy of a scalar is unnecessary but harmless
        turbine_velocities[downwind_turbine_id] = deepcopy(wind_turbine_velocity)

        # update thrust coefficient for downstream turbine
        turbine_ct[downwind_turbine_id] = calculate_ct(turbine_velocities[downwind_turbine_id], ct_model[downwind_turbine_id])

        # update axial induction for downstream turbine
        turbine_ai[downwind_turbine_id] = _ct_to_axial_ind_func(turbine_ct[downwind_turbine_id])

        # get local turbulence intensity for this wind state
        ambient_ti = wind_resource.ambient_tis[wind_farm_state_id]

        # update local turbulence intensity for downstream turbine
        turbine_local_ti[downwind_turbine_id] = calculate_local_ti(turbine_x, turbine_y, ambient_ti, rotor_diameter, hub_height, turbine_yaw, turbine_local_ti, sorted_turbine_index,
                            turbine_velocities, turbine_ct, model_set.local_ti_model; turbine_id=downwind_turbine_id, tol=1E-6)

    end

    if velocity_only
        return turbine_velocities
    else
        return turbine_velocities, turbine_ct, turbine_ai, turbine_local_ti
    end

end
# Variant taking the turbine coordinates as a single packed vector
# `x = [turbine_x; turbine_y]` (convenient for optimizers that work on one
# design vector); otherwise identical to the method above.
function turbine_velocities_one_direction(x, turbine_z, rotor_diameter, hub_height, turbine_yaw,
    sorted_turbine_index, ct_model, rotor_sample_points_y, rotor_sample_points_z, wind_resource,
    model_set::AbstractModelSet; wind_farm_state_id=1, velocity_only=true)

    n_turbines = Int(length(x)/2)
    # unpack the design vector: first half is x, second half is y
    turbine_x = x[1:n_turbines]
    turbine_y = x[n_turbines+1:end]

    # get number of rotor sample points
    n_rotor_sample_points = length(rotor_sample_points_y)

    # promotion keeps AD/dual number types intact
    arr_type = promote_type(typeof(turbine_x[1]),typeof(turbine_y[1]),typeof(turbine_z[1]),typeof(rotor_diameter[1]),
                typeof(hub_height[1]),typeof(turbine_yaw[1]))
    turbine_velocities = zeros(arr_type, n_turbines)
    turbine_ct = zeros(arr_type, n_turbines)
    turbine_ai = zeros(arr_type, n_turbines)
    turbine_local_ti = zeros(arr_type, n_turbines)

    # loop over all turbines, most upwind first
    for d=1:n_turbines

        # get index of downstream turbine
        downwind_turbine_id = Int(sorted_turbine_index[d])

        # initialize downstream wind turbine velocity to zero
        wind_turbine_velocity = 0.0

        for p=1:n_rotor_sample_points

            # scale rotor sample point coordinate by rotor diameter (in rotor hub ref. frame)
            local_rotor_sample_point_y = rotor_sample_points_y[p]*0.5*rotor_diameter[downwind_turbine_id]
            local_rotor_sample_point_z = rotor_sample_points_z[p]*0.5*rotor_diameter[downwind_turbine_id]

            # put rotor sample points in wind direction coordinate system, and account for yaw
            locx = turbine_x[downwind_turbine_id] .+ local_rotor_sample_point_y*sin(turbine_yaw[downwind_turbine_id])
            locy = turbine_y[downwind_turbine_id] .+ local_rotor_sample_point_y*cos(turbine_yaw[downwind_turbine_id])
            locz = turbine_z[downwind_turbine_id] .+ hub_height[downwind_turbine_id] + local_rotor_sample_point_z

            # calculate the velocity at given point
            point_velocity_with_shear = point_velocity(locx, locy, locz, turbine_x, turbine_y, turbine_z, turbine_yaw, turbine_ct, turbine_ai,
                                rotor_diameter, hub_height, turbine_local_ti, sorted_turbine_index, turbine_velocities,
                                wind_resource, model_set,
                                wind_farm_state_id=wind_farm_state_id, downwind_turbine_id=downwind_turbine_id)

            # add sample point velocity to turbine velocity to be averaged later
            wind_turbine_velocity += point_velocity_with_shear

        end

        # final velocity calculation for downstream turbine (average equally across all points)
        wind_turbine_velocity /= n_rotor_sample_points

        turbine_velocities[downwind_turbine_id] = deepcopy(wind_turbine_velocity)

        # update thrust coefficient for downstream turbine
        turbine_ct[downwind_turbine_id] = calculate_ct(turbine_velocities[downwind_turbine_id], ct_model[downwind_turbine_id])

        # update axial induction for downstream turbine
        turbine_ai[downwind_turbine_id] = _ct_to_axial_ind_func(turbine_ct[downwind_turbine_id])

        # update local turbulence intensity for downstream turbine
        ambient_ti = wind_resource.ambient_tis[wind_farm_state_id]
        turbine_local_ti[downwind_turbine_id] = calculate_local_ti(turbine_x, turbine_y, ambient_ti, rotor_diameter, hub_height, turbine_yaw, turbine_local_ti, sorted_turbine_index,
                            turbine_velocities, turbine_ct, model_set.local_ti_model; turbine_id=downwind_turbine_id, tol=1E-6)

    end

    if velocity_only
        return turbine_velocities
    else
        return turbine_velocities, turbine_ct, turbine_ai, turbine_local_ti
    end

end
# turbine_velocities_one_direction!(model_set::AbstractModelSet, problem_description::AbstractWindFarmProblem; wind_farm_state_id=1) = turbine_velocities_one_direction!([0.0], [0.0],
# model_set::AbstractModelSet, problem_description::AbstractWindFarmProblem; wind_farm_state_id=1)
"""
calculate_flow_field(xrange, yrange, zrange, model_set::AbstractModelSet, turbine_x,
turbine_y, turbine_z, turbine_yaw, turbine_ct, turbine_ai, rotor_diameter, hub_height,
turbine_local_ti, sorted_turbine_index, wtvelocities, wind_resource; wind_farm_state_id=1)
Generates a flow field for a given state and cross section
# Arguments
- `xrange::Range`: range defining east-west locations to sample in global reference frame
- `yrange::Range`: range defining north-west locations to sample in global reference frame
- `zrange::Range`: range defining vertical locations to sample in global reference frame
- `model_set::AbstractModelSet`: defines wake-realated models to be used in analysis
- `turbine_x::Array{TF,nTurbines}`: turbine east-west locations in the global
reference frame
- `turbine_y::Array{TF,nTurbines}`: turbine north-south locations in the global
reference frame
- `turbine_z::Array{TF,nTurbines}`: turbine base height in the global reference frame
- `turbine_yaw::Array{TF,nTurbines}`: turbine yaw for the given wind direction in
radians
- `turbine_ct::Array{TF,nTurbines}`: thrust coefficient of each turbine for the given state
- `turbine_ai::Array{TF,nTurbines}`: turbine axial induction for the given state
- `rotor_diameter::Array{TF,nTurbines}`: turbine rotor diameters
- `hub_height::Array{TF,nTurbines}`: turbine hub heights
- `turbine_local_ti::Array{TF,nTurbines}`: turbine local turbulence intensity for
the given state
- `sorted_turbine_index::Array{TF,nTurbines}`: array containing indices of wind turbines
    from most upwind to most downwind turbine in the given state
- `wtvelocities::Array{TF,nTurbines}`: effective inflow wind speed for given state
- `wind_resource::DiscretizedWindResource`: wind resource discreption (directions, speeds,
frequencies, etc)
- `wind_farm_state_id::Int`: index to correct state to use from wind resource provided.
Defaults to 1
"""
function calculate_flow_field(xrange, yrange, zrange,
    model_set::AbstractModelSet, turbine_x, turbine_y, turbine_z, turbine_yaw, turbine_ct, turbine_ai,
    rotor_diameter, hub_height, turbine_local_ti, sorted_turbine_index, wtvelocities,
    wind_resource; wind_farm_state_id=1)

    # size of the sampling grid
    xlen = length(xrange)
    ylen = length(yrange)
    zlen = length(zrange)
    npoints = xlen*ylen*zlen
    # results are stored as (z, y, x) so vertical slices are contiguous
    point_velocities = zeros(npoints)
    point_velocities = reshape(point_velocities, (zlen, ylen, xlen))

    # rotate to direction frame for velocity calculations
    rot_tx, rot_ty = rotate_to_wind_direction(turbine_x, turbine_y, wind_resource.wind_directions[wind_farm_state_id])

    # sort the turbines from most upwind to most downwind
    sorted_turbine_index = sortperm(rot_tx)

    for zi in 1:zlen
        for yi in 1:ylen
            for xi in 1:xlen
                locx = xrange[xi]
                locy = yrange[yi]
                locz = zrange[zi]
                # sample points are given in the global frame; rotate each one
                # into the same downwind-aligned frame as the turbines
                locx, locy = rotate_to_wind_direction(locx, locy, wind_resource.wind_directions[wind_farm_state_id])

                point_velocities[zi, yi, xi] = point_velocity(locx, locy, locz, rot_tx, rot_ty, turbine_z, turbine_yaw, turbine_ct, turbine_ai,
                    rotor_diameter, hub_height, turbine_local_ti, sorted_turbine_index, wtvelocities,
                    wind_resource, model_set,
                    wind_farm_state_id=wind_farm_state_id, downwind_turbine_id=0)

            end
        end
    end

    # the full (z, y, x) array is returned even for singleton dimensions
    return point_velocities[1:zlen,1:ylen,1:xlen]

end
# Convenience method: first solves for the turbine operating states (effective
# velocities, ct, axial induction, local TI) for the given wind state, then
# delegates to the full calculate_flow_field method above.
function calculate_flow_field(xrange, yrange, zrange,
    model_set::AbstractModelSet, turbine_x, turbine_y, turbine_z, turbine_yaw,
    rotor_diameter, hub_height, ct_models, rotor_sample_points_y, rotor_sample_points_z,
    wind_resource; wind_farm_state_id=1)

    # rotate to direction frame for velocity calculations
    rot_tx, rot_ty = rotate_to_wind_direction(turbine_x, turbine_y, wind_resource.wind_directions[wind_farm_state_id])

    # sort the turbines from most upwind to most downwind
    sorted_turbine_index = sortperm(rot_tx)

    turbine_velocities, turbine_ct, turbine_ai, turbine_local_ti = turbine_velocities_one_direction(rot_tx, rot_ty, turbine_z, rotor_diameter, hub_height, turbine_yaw,
    sorted_turbine_index, ct_models, rotor_sample_points_y, rotor_sample_points_z, wind_resource,
    model_set, wind_farm_state_id=wind_farm_state_id, velocity_only=false)

    return calculate_flow_field(xrange, yrange, zrange,
        model_set, turbine_x, turbine_y, turbine_z, turbine_yaw, turbine_ct, turbine_ai,
        rotor_diameter, hub_height, turbine_local_ti, sorted_turbine_index, turbine_velocities,
        wind_resource, wind_farm_state_id=wind_farm_state_id)
end | [
27,
456,
62,
30783,
29,
16,
12,
940,
198,
397,
8709,
2099,
27741,
17633,
7248,
886,
198,
2,
1262,
44189,
198,
2,
1262,
6060,
35439,
198,
198,
37811,
198,
220,
220,
220,
3086,
48412,
17633,
7248,
7,
86,
4335,
891,
3628,
19849,
11,
... | 2.519399 | 8,583 |
<gh_stars>10-100
"""
SeisSort(in, out;<keyword arguments>)
Sort a seis file using its header words
# Arguments
* `in`: input filename >> a text file with information about data extent, data and header file names; a binary file containing data and a binary file containing headers.
* `out`: output filename
# Keyword arguments
* `key=["imx","imy"]`
* `rev=false` : sort headers in decreasing order
* `ntrace=100000` : number of traces to read at a time
# Output
file `out` is created with data sorted.
*Credits: AS, 2015*
"""
# Fix: the `key` default contained the anonymization placeholder "<KEY>"; the
# docstring documents the intended default ["imx","imy"], restored here.
function SeisSort(in, out;key=["imx","imy"],rev=false,ntrace=100000)

    # read trace length and trace count from the header file
    filename_h = ParseHeaderName(in)
    stream_h = open(filename_h)
    seek(stream_h, header_count["n1"])
    nt = read(stream_h,Int32)
    nx = convert(Int64,filesize(stream_h)/(4*length(fieldnames(Header))))
    h = Header[]

    # find min for each key (used to shift key values so they are positive
    # before combining them into a single composite sort key)
    h1 = GrabHeader(stream_h,1)
    minval = Array(Float32,length(key))
    for ikey=1:length(key)
        minval[ikey] = getfield(h1,Symbol(key[ikey]))
    end
    for j=2:nx
        h1 = GrabHeader(stream_h,j)
        for ikey=1:length(key)
            key_val = abs(getfield(h1,Symbol(key[ikey])))
            if (key_val < minval[ikey])
                minval[ikey] = key_val
            end
        end
    end

    # build one composite numeric key per trace: each header word occupies its
    # own block of 6 decimal digits, primary key in the most significant block
    mykey = vec(zeros(Float64,nx))
    seekstart(stream_h)
    for j=1:nx
        h1 = GrabHeader(stream_h,j)
        for ikey=1:length(key)
            mykey[j] += ((getfield(h1,Symbol(key[ikey])) + minval[ikey] + 1)*
                         (10^(6*(length(key)-ikey))))
        end
    end
    close(stream_h)

    # permutation that puts traces in sorted key order
    p = convert(Array{Int32,1},sortperm(mykey,rev=rev))

    DATAPATH = get(ENV,"DATAPATH",join([pwd(),"/"]))
    filename_d_out = join([DATAPATH out "@data@"])
    filename_h_out = join([DATAPATH out "@headers@"])
    nhead = length(fieldnames(Header))
    # NOTE(review): the output header file is opened and sized here *before*
    # FetchHeaders below writes it — this appears to rely on a pre-existing
    # file from an earlier run; confirm intended behavior
    stream_h = open(filename_h_out)
    nx = Int(floor(filesize(stream_h)/(nhead*4)))
    close(stream_h)

    # write the text header describing the sorted output extent
    extent = ReadTextHeader(in)
    extent.n2 = nx
    extent.n3 = 1
    extent.n4 = 1
    extent.n5 = 1
    WriteTextHeader(out,extent,"native_float",4,filename_d_out,filename_h_out)

    # write headers in sorted order, then move the trace data to match
    FetchHeaders(filename_h,out,p,nx)
    Seismic.FetchTraces(in,out)

    # renumber/update headers via a temporary copy, then swap it back in
    tmp = join(["tmp_SeisSort_",string(Int(floor(rand()*100000)))])
    cp(out,tmp,remove_destination=true);
    @compat SeisProcessHeaders(out, tmp, [UpdateHeader],
                               [Dict(:itmin=>1,:itmax=>nt)])
    filename_h_tmp = join([DATAPATH tmp "@headers@"])
    filename_h_out = join([DATAPATH out "@headers@"])
    cp(filename_h_tmp,filename_h_out,remove_destination=true);
    rm(filename_h_tmp);
    rm(tmp);

end
"""
    FetchHeaders(filename_h_in, filename_out, p, nx)

Read `nx` headers from the binary header file `filename_h_in` in the order
given by the permutation `p` and write them as the headers of `filename_out`.

# Arguments
* `filename_h_in::AbstractString`: path of the binary header file to read
* `filename_out::AbstractString`: output seis filename whose headers are written
* `p::Array{Int32,1}`: 1-based trace indices defining the new order
* `nx`: number of headers to fetch
"""
function FetchHeaders(filename_h_in::AbstractString, filename_out::AbstractString,
                      p::Array{Int32,1}, nx)

    stream_h = open(filename_h_in)
    h = Header[]
    try
        for j = 1:nx
            append!(h,[GrabHeader(stream_h,p[j])])
        end
    finally
        # fix: the input stream was previously never closed (handle leak)
        close(stream_h)
    end
    SeisWriteHeaders(filename_out,h,update_tracenum=false)

end
| [
27,
456,
62,
30783,
29,
940,
12,
3064,
198,
37811,
198,
220,
220,
220,
1001,
271,
42758,
7,
259,
11,
503,
26,
27,
2539,
4775,
7159,
43734,
198,
198,
42758,
257,
384,
271,
2393,
1262,
663,
13639,
2456,
198,
198,
2,
20559,
2886,
198... | 2.239437 | 1,278 |
<reponame>ethansaxenian/RosettaDecode
using Printf, Distributions, IterTools
# Draw a random 0/1 vector of length n where each entry is 1 with probability p.
function newv(n::Int, p::Float64)
    return rand(Bernoulli(p), n)
end
# Count cyclic "runs": positions i where v[i] == 1 and the next element
# (wrapping around to v[1]) is 0, via bitwise a & ~b on the 0/1 entries.
function runs(v::Vector{Int})
    nxt = vcat(v[2:end], v[1])
    return sum(a & ~b for (a, b) in zip(v, nxt))
end
# Mean run density: cyclic runs in a fresh random 0/1 vector, per element.
function mrd(n::Int, p::Float64)
    v = newv(n, p)
    return runs(v) / n
end
nrep = 500  # Monte-Carlo repetitions per (n, p) combination

# Compare the simulated mean run density against the theoretical value p*(1-p)
# for several probabilities p and vector lengths n = 2^10, 2^12, 2^14.
for p in 0.1:0.2:1
    lim = p * (1 - p)   # theoretical run density
    println()
    for ex in 10:2:14
        n = 2 ^ ex
        sim = mean(mrd.(n, p) for _ in 1:nrep)   # average over nrep random vectors
        @printf("nrep = %3i\tp = %4.2f\tn = %5i\np · (1 - p) = %5.3f\tsim = %5.3f\tΔ = %3.1f%%\n",
                nrep, p, n, lim, sim, lim > 0 ? abs(sim - lim) / lim * 100 : sim * 100)
    end
end
| [
27,
7856,
261,
480,
29,
2788,
504,
897,
268,
666,
14,
35740,
15253,
10707,
1098,
198,
3500,
12578,
69,
11,
46567,
507,
11,
40806,
33637,
198,
198,
3605,
85,
7,
77,
3712,
5317,
11,
279,
3712,
43879,
2414,
8,
796,
43720,
7,
23927,
2... | 1.867868 | 333 |
export discreteApprox!, discreteApprox, discreteNormalApprox, discreteNormalApprox!
# ----------------------- objective functions for max entropy calcs --------------------------
# In place: tmpvec .= exp.(ΔT * x), without allocating. Returns `tmpvec`.
function expΔTx!(tmpvec::Vector, ΔT::AbstractMatrix, x::AbstractVector)
    mul!(tmpvec, ΔT, x)                  # tmpvec = ΔT * x
    return map!(exp, tmpvec, tmpvec)     # exponentiate element-wise, in place
end
# objective
# Max-entropy objective value: sum(q .* exp.(ΔT*x)). The exponentials are left
# in `tmpvec` as a side effect.
function entropyObjective_f!(tmpvec::Vector, x::Vector, q::Vector, ΔT::AbstractMatrix)
    expΔTx!(tmpvec, ΔT, x)
    return dot(tmpvec, q)
end
# gradient
# Gradient of the max-entropy objective: grad[l] = Σ_i q_i exp((ΔT x)_i) ΔT[i,l].
# Side effect: on exit `tmpvec` holds q .* exp.(ΔT*x) (fg!/h! rely on this).
function entropyObjective_g!(grad::Vector, tmpvec::Vector, x::Vector, q::Vector, ΔT::AbstractMatrix)
    nmoments = size(ΔT, 2)
    expΔTx!(tmpvec, ΔT, x)
    tmpvec .*= q
    for col in 1:nmoments
        grad[col] = dot(tmpvec, view(ΔT, :, col))
    end
end
# Combined value-and-gradient evaluation (Optim's fg! interface).
# `entropyObjective_g!` leaves q .* exp.(ΔT*x) in `tmpvec`, whose sum equals
# the objective value, so both quantities come from a single pass.
function entropyObjective_fg!(grad::Vector, tmpvec::Vector, x::Vector, q::Vector, ΔT::AbstractMatrix)
    entropyObjective_g!(grad, tmpvec, x, q, ΔT)
    return sum(tmpvec)
end
# Hessian of the max-entropy objective:
# hess[l,k] = Σ_i ΔT[i,l] * q_i * exp((ΔT x)_i) * ΔT[i,k], filled in place.
function entropyObjective_h!(hess::Matrix{T}, tmpvec::Vector, x::Vector, q::Vector, ΔT::AbstractMatrix) where {T}
    nmoments = size(ΔT, 2)
    expΔTx!(tmpvec, ΔT, x)
    tmpvec .*= q
    fill!(hess, zero(T))
    for k in 1:nmoments, l in 1:nmoments
        hess[l, k] = sum(view(ΔT, :, l) .* tmpvec .* view(ΔT, :, k))
    end
end
# ----------------------- wrappers --------------------------
# Solve one max-entropy moment-matching problem: find multipliers λ so the
# reweighted density p ∝ q0 .* exp.(ΔT * λ) satisfies the (centred) moment
# conditions encoded in the columns of ΔT. On success, writes the normalised
# weights into `p`, the multipliers into `λfinal`, the relative moment errors
# into `err`, and returns the normalising constant J; on any failure
# (non-finite values or poor convergence) returns Inf so the caller can back
# off to fewer moments. `tmp` is an n-vector of scratch storage.
function discreteApprox!(p::AbstractVector, λfinal::AbstractVector, err::AbstractVector, tmp::Vector, q0::Vector, ΔT::AbstractMatrix{T}) where {T}
    l = length(λfinal)
    n = length(q0)
    (n,l) == size(ΔT) || throw(DimensionMismatch())
    n == length(p) || throw(DimensionMismatch())
    l == length(err) || throw(DimensionMismatch())
    n == length(tmp) || throw(DimensionMismatch())

    # test that initial value is finite before handing it to the optimizer
    λ0 = zeros(T,l)
    grad = zeros(T,l)
    f0 = entropyObjective_fg!(grad, tmp, λ0, q0, ΔT)
    !isfinite(f0) && return Inf
    !all(isfinite.(grad)) && return Inf

    # Newton solve of the dual problem through Optim's TwiceDifferentiable API
    tdf = TwiceDifferentiable(
        (x::Vector) -> entropyObjective_f!( tmp, x, q0, ΔT),
        (grad::Vector, x::Vector) -> entropyObjective_g!( grad, tmp, x, q0, ΔT),
        (grad::Vector, x::Vector) -> entropyObjective_fg!(grad, tmp, x, q0, ΔT),
        (hess::Matrix, x::Vector) -> entropyObjective_h!( hess, tmp, x, q0, ΔT),
        λ0
    )
    opt = Optim.optimize(tdf, λ0, Newton())
    λ1 = opt.minimizer
    J = opt.minimum

    # update gradient at the solution (used for the convergence check below)
    entropyObjective_g!(grad, tmp, λ1, q0, ΔT)

    # accept only a finite, well-converged solution with small relative error
    if norm(grad, Inf) < 1e-9 && all(isfinite.(grad)) && all(isfinite.(λ1)) && 0.0 < J < Inf && maximum(abs.(grad ./ J)) < 1e-5
        λfinal .= λ1
        expΔTx!(tmp, ΔT, λ1)
        p .= q0 .* tmp ./ J   # normalised max-entropy weights
        err .= grad ./ J      # relative moment errors
        return J
    else
        return Inf
    end
end
# Fill the columns of `ΔT` with centred powers of `dev`: column j becomes
# dev.^j .- Tbar[j]. Powers are accumulated column-by-column (j-th column is
# the (j-1)-th times dev), so no extra buffers are allocated.
function ΔTmat!(ΔT::Matrix, dev::AbstractVector, Tbar::Vector)
    n, L = size(ΔT)
    n == length(dev) || throw(DimensionMismatch())
    L == length(Tbar) || throw(DimensionMismatch())
    @views begin
        ΔT[:, 1] .= dev
        for j in 2:L
            ΔT[:, j] .= ΔT[:, j - 1] .* dev
        end
        # centre every column only after all powers are built
        for j in 1:L
            ΔT[:, j] .-= Tbar[j]
        end
    end
    return nothing
end
# Build the nS×n probability matrix `P` row by row via max-entropy discrete
# approximation. For each state `st` in `S`, the grid `y` is mapped to
# deviations z = zval.(y, st), weighted by `pdffun` and floored at κ, and the
# moment-matching problem is solved with up to `maxMoments` moments, backing
# off one moment at a time when the solve fails. Rows that never converge
# fall back to the (normalised) raw density weights.
# Returns (JN, Λ, numMoments, approxErr).
function discreteApprox!(P::AbstractMatrix, y::AbstractVector{T}, S::Union{AbstractVector, Base.Iterators.ProductIterator}, zval::Function, pdffun::Function, scaled_moments::Vector, scale_factor::Real, maxMoments::Integer, κ::Real) where {T<:Real}
    nS = length(S)
    n = length(y)
    # `error` already throws an ErrorException, so the original
    # `throw(error(...))` never reached the outer `throw`; the message also
    # interpolated the literal "$n-1" (e.g. "5-1") instead of the value n-1.
    0 < maxMoments < n || error("Must use 1 to $(n - 1) moments or fewer")
    (nS, n) == size(P) || throw(DimensionMismatch())

    # Initialize elements that will be returned
    Λ = zeros(T, nS, maxMoments)           # dual multipliers per state
    JN = zeros(T, nS)                      # normalising constants
    approxErr = zeros(T, nS, maxMoments)   # relative moment errors
    numMoments = zeros(Int, nS)            # moments actually matched per state

    # preallocate these, which will be updated each iteration
    ΔT = Array{T}(undef, n, maxMoments)
    z = Array{T}(undef, n)
    q = Array{T}(undef, n)
    tmp = Array{T}(undef, n)

    for (i, st) in enumerate(S)
        z .= zval.(y, st)
        q .= max.(pdffun.(z), κ)   # floor the density at κ to avoid zero weights
        z ./= scale_factor
        ΔTmat!(ΔT, z, scaled_moments)
        updated = false
        # back off one moment at a time until the entropy solve succeeds
        for l in maxMoments:-1:2
            J = discreteApprox!(@view(P[i, :]), @view(Λ[i, 1:l]), @view(approxErr[i, 1:l]), tmp, q, @view(ΔT[:, 1:l]))
            if isfinite(J)
                JN[i], numMoments[i] = (J, l)
                updated = true
                break
            end
        end
        if !updated
            # fallback: raw density weights, normalised to sum to one
            sumq = sum(q)
            P[i, :] .= q ./ sumq
        end
    end
    return JN, Λ, numMoments, approxErr
end
# ----------------------- wrappers --------------------------
# Allocating wrapper: builds P and forwards to the in-place driver.
function discreteApprox(y::AbstractVector{T}, S::Union{AbstractVector, Base.Iterators.ProductIterator}, zval::Function, pdffun::Function, scaled_moments::Vector, scale_factor::Real, maxMoments::Integer, κ::Real) where {T<:Real}
    n = length(y)
    nS = length(S)
    # BUG FIX: `Array{T}(nS, n)` is the pre-0.7 constructor and raises a
    # MethodError on any modern Julia; the rest of this file already uses the
    # `undef` form (e.g. `Array{T}(undef, n)` and `Matrix{T}(undef, n, n)`).
    P = Matrix{T}(undef, nS, n)
    return (P, discreteApprox!(P, y, S, zval, pdffun, scaled_moments, scale_factor, maxMoments, κ)...)
end
# In-place normal-distribution specialisation: scales the grid by its largest
# magnitude, builds the matching scaled normal central moments, and forwards
# to the generic driver with the standard-normal pdf.
function discreteNormalApprox!(P::AbstractMatrix, y::AbstractVector, S::Union{AbstractVector, Base.Iterators.ProductIterator}, zval::Function, maxMoments::Integer, κ::Real)
    scale_factor = maximum(abs, y)
    scaled_moments = collect(NormCentralMoment(maxMoments, 1.0 / scale_factor))
    return discreteApprox!(P, y, S, zval, normpdf, scaled_moments, scale_factor, maxMoments, κ)
end
# Allocating wrapper around discreteNormalApprox!: builds the n×n transition
# matrix and returns (P, JN, Λ, numMoments, approxErr).
function discreteNormalApprox(y::AbstractVector{T}, S::Union{AbstractVector, Base.Iterators.ProductIterator}, zval::Function, maxMoments::Integer=2, κ::Real=1e-8) where {T}
    n = length(y)
    P = Matrix{T}(undef, n, n)
    return (P, discreteNormalApprox!(P, y, S, zval, maxMoments, κ)...)
end
| [
39344,
28810,
4677,
13907,
28265,
28810,
4677,
13907,
11,
28810,
26447,
4677,
13907,
11,
28810,
26447,
4677,
13907,
0,
198,
198,
2,
41436,
6329,
9432,
5499,
329,
3509,
40709,
2386,
6359,
220,
22369,
438,
198,
198,
8818,
1033,
138,
242,
... | 2.373108 | 2,246 |
# LCM message type holding the state of a ZMP centre-of-mass observer.
# NOTE: the field order defines the LCM wire encoding — do not reorder.
mutable struct zmp_com_observer_state_t <: LCMType
    utime::Int64                  # timestamp; presumably microseconds per LCM convention — confirm
    com::SVector{2, Float64}      # planar centre-of-mass position (x, y)
    comd::SVector{2, Float64}     # centre-of-mass velocity (x, y) — confirm units with publisher
    ground_plane_height::Float64
end

# Generates the encode/decode machinery for the type above.
@lcmtypesetup(zmp_com_observer_state_t)
| [
76,
18187,
2878,
1976,
3149,
62,
785,
62,
672,
15388,
62,
5219,
62,
83,
1279,
25,
22228,
44,
6030,
198,
220,
220,
220,
3384,
524,
3712,
5317,
2414,
198,
220,
220,
220,
401,
3712,
50,
38469,
90,
17,
11,
48436,
2414,
92,
198,
220,
... | 2.204301 | 93 |
# Write the in-memory elements `A[dest_ind]` into the hyperslab `dset[src_ind]`
# of an HDF5 dataset without allocating a copy (old low-level HDF5.jl API).
# NOTE(review): despite the names, `src_ind` selects the *file-side* region and
# `dest_ind` the *memory-side* region — h5d_write copies memory → file; confirm
# callers pass them that way.
function save_data!(A::Array{T,1}, dset::HDF5Dataset, src_ind::AbstractRange{Int}, dest_ind::AbstractRange{Int}) where T
    dsel_id = HDF5.hyperslab(dset, src_ind)   # file dataspace selection
    V = view(A, dest_ind)                     # memory buffer (no copy)
    memtype = HDF5.datatype(A)
    memspace = HDF5.dataspace(V)
    HDF5.h5d_write(dset.id, memtype.id, memspace.id, dsel_id, dset.xfer, V)
    # release the low-level handles created above
    HDF5.close(memtype)
    HDF5.close(memspace)
    HDF5.h5s_close(dsel_id)
    return nothing
end
| [
8818,
3613,
62,
7890,
0,
7,
32,
3712,
19182,
90,
51,
11,
16,
5512,
288,
2617,
3712,
39,
8068,
20,
27354,
292,
316,
11,
12351,
62,
521,
3712,
23839,
17257,
90,
5317,
5512,
2244,
62,
521,
3712,
23839,
17257,
90,
5317,
30072,
810,
30... | 2.128866 | 194 |
# One-shot flag: whether the OpenGL debug-output machinery has been enabled.
const DEBUG_ENABLED = Ref(false)
# User-supplied callback invoked with a GLDebugInfo for each driver message.
const DEBUG_CALLBACK = Ref{Function}()
# Human-readable description of one OpenGL debug message, with the raw GL
# enum values already translated to strings by debug_message_callback.
@export struct GLDebugInfo <: Iterable
    type::String       # message type (error, performance, …)
    source::String     # which GL subsystem emitted the message
    message::String    # the driver-provided message text
    severity::String   # severity level as reported by the driver
end
# C-callable trampoline registered with glDebugMessageCallback.
# The positional signature must match GL's GLDEBUGPROC typedef exactly; the
# unnamed parameters (message id, length, user pointer) are unused here.
function debug_message_callback(
    _source::GLenum,
    _type::GLenum,
    ::GLuint,
    _severity::GLenum,
    ::GLsizei,
    _msg::Ptr{GLchar},
    ::Ptr{Cvoid}
)::Cvoid
    # copy the C string out of driver memory before it is reused
    message = unsafe_string(_msg)
    # translate raw enums to strings; DebugSource/DebugType/DebugSeverity are
    # presumably enum→string lookup tables defined elsewhere — confirm
    source = get(DebugSource, _source, "Unknown")
    type = get(DebugType, _type, "Unknown")
    severity = get(DebugSeverity, _severity, "Unknown")
    # forward to the user callback only if one was registered
    if isassigned(DEBUG_CALLBACK)
        GLDebugInfo(
            type,
            source,
            message,
            severity,
        ) |> DEBUG_CALLBACK[]
    end
    return nothing
end
"""
ondebug_msg(callback::Function)
Enables debugging of OpenGL functions and sets the OpenGL debug message `callback` function.
`callback` only accepts one parameter of type [`GLDebugInfo`](@ref),
that contains the information about the debug message.
"""
@export function ondebug_msg(callback::Function)
@assert hasmethod(callback, Tuple{GLDebugInfo}) "
The `ondebug_msg` function expects a `callback` that accepts
a single parameter of type `GLDebugInfo` : `callback(info::GLDebugInfo)`
"
DEBUG_CALLBACK[] = callback
if DEBUG_ENABLED[] == false
DEBUG_ENABLED[] = true
ptr_debug_callback = @cfunction(debug_message_callback, Cvoid, (
GLenum,
GLenum,
GLuint,
GLenum,
GLsizei,
Ptr{GLchar},
Ptr{Cvoid}
))
glEnable(GL_DEBUG_OUTPUT)
glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS)
GC.@preserve ptr_debug_callback begin
glDebugMessageCallback(ptr_debug_callback, C_NULL)
ids = Cuint(0)
@c glDebugMessageControl(
GL_DONT_CARE,
GL_DONT_CARE,
GL_DONT_CARE,
0,
&ids,
GL_TRUE,
)
end
end
end | [
9979,
16959,
62,
1677,
6242,
30465,
796,
6524,
7,
9562,
8,
198,
9979,
16959,
62,
34,
7036,
31098,
796,
6524,
90,
22203,
92,
3419,
198,
198,
31,
39344,
2878,
10188,
27509,
12360,
1279,
25,
40806,
540,
198,
220,
220,
2099,
3712,
10100,
... | 2.23269 | 881 |
# Return a C string describing the most recent SQLite error.
# NOTE(review): the C API's sqlite3_errmsg takes a db handle; this
# zero-argument ccall passes none — confirm this variant is ever used.
function sqlite3_errmsg()
    return ccall( (:sqlite3_errmsg, sqlite3_lib),
                  Ptr{Uint8}, () )
end

# Per-connection error message for `db` (UTF-8, owned by SQLite).
function sqlite3_errmsg(db::Ptr{Void})
    @NULLCHECK db
    return ccall( (:sqlite3_errmsg, sqlite3_lib),
                  Ptr{Uint8}, (Ptr{Void},), db )
end
# Open (or create) the database at `file`; the new connection pointer is
# written into `handle[1]`. Returns an SQLite result code (Cint).
function sqlite3_open(file::AbstractString, handle::Vector{Ptr{Void}})
    return ccall( (:sqlite3_open, sqlite3_lib),
                  Cint, (Ptr{Uint8}, Ptr{Void}),
                  file, handle )
end

# UTF-16 variant of sqlite3_open.
function sqlite3_open16(file::UTF16String, handle::Array{Ptr{Void},1})
    return ccall( (:sqlite3_open16, sqlite3_lib),
                  Cint, (Ptr{Uint16}, Ptr{Void}),
                  file, handle )
end
# Close a database connection; fails (SQLITE_BUSY) if statements are pending.
function sqlite3_close(handle::Ptr{Void})
    @NULLCHECK handle
    return ccall( (:sqlite3_close, sqlite3_lib),
                  Cint, (Ptr{Void},), handle )
end

# Close variant that defers destruction until all statements are finalized;
# falls back to sqlite3_close on SQLite builds that predate close_v2.
function sqlite3_close_v2(handle::Ptr{Void})
    @NULLCHECK handle
    try
        return ccall( (:sqlite3_close_v2, sqlite3_lib),
                      Cint, (Ptr{Void},), handle )
    catch
        # Older versions of the library don't have this, abort to other close
        warn("sqlite3_close_v2 not available.")
        sqlite3_close(handle)
    end
end
# Iterate the prepared statements of `db`: returns the statement after `stmt`,
# or the first one when `stmt` is NULL (NULL return means no more statements).
function sqlite3_next_stmt(db::Ptr{Void}, stmt::Ptr{Void})
    @NULLCHECK db
    return ccall( (:sqlite3_next_stmt, sqlite3_lib),
                  Ptr{Void}, (Ptr{Void}, Ptr{Void}),
                  db, stmt )
end
# Compile `query` into a prepared statement; the statement pointer is written
# into `stmt[1]`, any uncompiled tail into `unused[1]`.
function sqlite3_prepare_v2(handle::Ptr{Void}, query::AbstractString,
        stmt::Array{Ptr{Void},1}, unused::Array{Ptr{Void},1})
    @NULLCHECK handle
    return ccall( (:sqlite3_prepare_v2, sqlite3_lib),
                  Cint, (Ptr{Void}, Ptr{Uint8}, Cint, Ptr{Void}, Ptr{Void}),
                  handle, query, sizeof(query), stmt, unused )
end

# UTF-16 variant of sqlite3_prepare_v2.
# NOTE(review): passes sizeof(query) as the byte length — confirm it reflects
# UTF-16 bytes for the string type used here.
function sqlite3_prepare16_v2(handle::Ptr{Void}, query::AbstractString,
        stmt::Array{Ptr{Void},1}, unused::Array{Ptr{Void},1})
    @NULLCHECK handle
    return ccall( (:sqlite3_prepare16_v2, sqlite3_lib),
                  Cint, (Ptr{Void}, Ptr{Uint16}, Cint, Ptr{Void}, Ptr{Void}),
                  handle, query, sizeof(query), stmt, unused )
end
# Destroy a prepared statement, releasing its resources.
function sqlite3_finalize(stmt::Ptr{Void})
    @NULLCHECK stmt
    return ccall( (:sqlite3_finalize, sqlite3_lib),
                  Cint, (Ptr{Void},), stmt )
end

# Advance the statement one row; returns SQLITE_ROW, SQLITE_DONE, or an error.
function sqlite3_step(stmt::Ptr{Void})
    @NULLCHECK stmt
    return ccall( (:sqlite3_step, sqlite3_lib),
                  Cint, (Ptr{Void},), stmt )
end

# Number of columns in the statement's result set.
function sqlite3_column_count(stmt::Ptr{Void})
    @NULLCHECK stmt
    return ccall( (:sqlite3_column_count, sqlite3_lib),
                  Cint, (Ptr{Void},), stmt )
end
# Fundamental SQLite type code (1–5) of column `col` in the current row;
# columns are 0-based, matching the C API (see sqlite3_types at file bottom).
function sqlite3_column_type(stmt::Ptr{Void},col::Int)
    @NULLCHECK stmt
    return ccall( (:sqlite3_column_type, sqlite3_lib),
                  Cint, (Ptr{Void}, Cint),
                  stmt, col )
end

# Name of result column `n` as a C string.
function sqlite3_column_name(stmt::Ptr{Void},n::Int)
    @NULLCHECK stmt
    return ccall( (:sqlite3_column_name, sqlite3_lib),
                  Ptr{Uint8}, (Ptr{Void}, Cint),
                  stmt, n )
end

# Reset the statement so it can be re-executed (bindings are kept).
function sqlite3_reset(stmt::Ptr{Void})
    @NULLCHECK stmt
    return ccall( (:sqlite3_reset, sqlite3_lib),
                  Cint, (Ptr{Void},), stmt )
end
# Pointer to the BLOB value of column `col` (valid until the next step/reset).
function sqlite3_column_blob(stmt::Ptr{Void},col::Int)
    @NULLCHECK stmt
    return ccall( (:sqlite3_column_blob, sqlite3_lib),
                  Ptr{Void}, (Ptr{Void}, Cint),
                  stmt, col )
end

# Size in bytes of the column value (UTF-8/BLOB form).
function sqlite3_column_bytes(stmt::Ptr{Void},col::Int)
    @NULLCHECK stmt
    return ccall( (:sqlite3_column_bytes, sqlite3_lib),
                  Cint, (Ptr{Void}, Cint),
                  stmt, col )
end

# Size in bytes of the column value in its UTF-16 representation.
function sqlite3_column_bytes16(stmt::Ptr{Void},col::Int)
    @NULLCHECK stmt
    return ccall( (:sqlite3_column_bytes16, sqlite3_lib),
                  Cint, (Ptr{Void}, Cint),
                  stmt, col )
end
# Column value coerced to a C double.
@inline function sqlite3_column_double(stmt::Ptr{Void},col::Int)
    @NULLCHECK stmt
    return ccall( (:sqlite3_column_double, sqlite3_lib),
                  Cdouble, (Ptr{Void}, Cint),
                  stmt, col )
end

# Column value coerced to a 32-bit integer.
function sqlite3_column_int(stmt::Ptr{Void},col::Int)
    @NULLCHECK stmt
    return ccall( (:sqlite3_column_int, sqlite3_lib),
                  Cint, (Ptr{Void}, Cint),
                  stmt, col )
end

# Column value coerced to a 64-bit integer.
@inline function sqlite3_column_int64(stmt::Ptr{Void},col::Int)
    @NULLCHECK stmt
    return ccall( (:sqlite3_column_int64, sqlite3_lib),
                  Clonglong, (Ptr{Void}, Cint),
                  stmt, col )
end
# Column value as a UTF-8 C string (pointer owned by SQLite).
function sqlite3_column_text(stmt::Ptr{Void},col::Int)
    @NULLCHECK stmt
    return ccall( (:sqlite3_column_text, sqlite3_lib),
                  Ptr{Uint8}, (Ptr{Void}, Cint),
                  stmt, col )
end

# Column value as a UTF-16 string pointer.
function sqlite3_column_text16(stmt::Ptr{Void},col::Int)
    @NULLCHECK stmt
    return ccall( (:sqlite3_column_text16, sqlite3_lib),
                  Ptr{Void}, (Ptr{Void}, Cint),
                  stmt, col )
end
# SQLITE_API void sqlite3_result_double(sqlite3_context*, double);
# Set a REAL result for a user-defined SQL function.
function sqlite3_result_double(context::Ptr{Void},value::Float64)
    return ccall( (:sqlite3_result_double, sqlite3_lib),
                  Void, (Ptr{Void}, Float64),
                  context, value )
end
# SQLITE_API void sqlite3_result_error(sqlite3_context*, const char*, int)
# Report a UTF-8 error message from a user-defined SQL function.
function sqlite3_result_error(context::Ptr{Void},msg::AbstractString)
    # BUG FIX: the argument list previously passed the undefined name `value`
    # instead of the `msg` parameter, so this wrapper raised UndefVarError.
    # NOTE(review): the length includes the NUL byte, matching the
    # sqlite3_result_text wrapper below — confirm against the C API.
    return ccall( (:sqlite3_result_error, sqlite3_lib),
                  Void, (Ptr{Void}, Ptr{Uint8}, Cint),
                  context, msg, sizeof(msg) + 1 )
end
# SQLITE_API void sqlite3_result_error16(sqlite3_context*, const void*, int)
# UTF-16 variant: report an error message from a user-defined SQL function.
function sqlite3_result_error(context::Ptr{Void},msg::UTF16String)
    # BUG FIX: the argument list previously passed the undefined name `value`
    # instead of the `msg` parameter, so this wrapper raised UndefVarError.
    return ccall( (:sqlite3_result_error16, sqlite3_lib),
                  Void, (Ptr{Void}, Ptr{Uint16}, Cint),
                  context, msg, sizeof(msg) + 1 )
end
# SQLITE_API void sqlite3_result_int(sqlite3_context*, int);
# Set a 32-bit INTEGER result for a user-defined SQL function.
function sqlite3_result_int(context::Ptr{Void},value::Int32)
    return ccall( (:sqlite3_result_int, sqlite3_lib),
                  Void, (Ptr{Void}, Int32),
                  context, value )
end

# SQLITE_API void sqlite3_result_int64(sqlite3_context*, sqlite3_int64);
# Set a 64-bit INTEGER result.
function sqlite3_result_int64(context::Ptr{Void},value::Int64)
    return ccall( (:sqlite3_result_int64, sqlite3_lib),
                  Void, (Ptr{Void}, Int64),
                  context, value )
end
# SQLITE_API void sqlite3_result_null(sqlite3_context*);
# Set a NULL result for a user-defined SQL function.
function sqlite3_result_null(context::Ptr{Void})
    return ccall( (:sqlite3_result_null, sqlite3_lib),
                  Void, (Ptr{Void},),
                  context )
end
# SQLITE_API void sqlite3_result_text(sqlite3_context*, const char*, int n, void(*)(void*));
# Set a UTF-8 TEXT result; SQLITE_TRANSIENT makes SQLite copy the buffer.
function sqlite3_result_text(context::Ptr{Void},value::AbstractString)
    return ccall( (:sqlite3_result_text, sqlite3_lib),
                  Void, (Ptr{Void}, Ptr{Uint8}, Cint, Ptr{Void}),
                  context, value, sizeof(value) + 1, SQLITE_TRANSIENT )
end
# SQLITE_API void sqlite3_result_text16(sqlite3_context*, const void*, int, void(*)(void*));
# Set a UTF-16 TEXT result; SQLITE_TRANSIENT makes SQLite copy the buffer.
function sqlite3_result_text16(context::Ptr{Void},value::UTF16String)
    # BUG FIX: the ccall previously named :sqlite3_result_text (the UTF-8
    # entry point) while passing UTF-16 data; use the *16 symbol declared in
    # the C prototype above.
    return ccall( (:sqlite3_result_text16, sqlite3_lib),
                  Void, (Ptr{Void}, Ptr{Uint16}, Cint, Ptr{Void}),
                  context, value, sizeof(value) + 1, SQLITE_TRANSIENT )
end
# SQLITE_API void sqlite3_result_blob(sqlite3_context*, const void*, int n, void(*)(void*));
# Set a BLOB result of sizeof(value) bytes; SQLITE_TRANSIENT forces a copy.
function sqlite3_result_blob(context::Ptr{Void},value)
    return ccall( (:sqlite3_result_blob, sqlite3_lib),
                  Void, (Ptr{Void}, Ptr{Uint8}, Cint,Ptr{Void}),
                  context, value, sizeof(value), SQLITE_TRANSIENT )
end
# Initialize the SQLite library (safe to call more than once).
function sqlite3_initialize()
    return ccall( (:sqlite3_initialize, sqlite3_lib),
                  Cint, (),
                  )
end

# Deallocate all library resources; call only after all connections close.
function sqlite3_shutdown()
    return ccall( (:sqlite3_shutdown, sqlite3_lib),
                  Cint, (),
                  )
end
# Map from sqlite3_column_type codes to the Julia types used when fetching:
# 1 = SQLITE_INTEGER, 2 = SQLITE_FLOAT, 3 = SQLITE_TEXT, 4 = SQLITE_BLOB,
# 5 = SQLITE_NULL.
sqlite3_types = Dict{Int, DataType}(
    1 => Int,
    2 => Float64,
    3 => UTF8String,
    4 => Any,
    5 => Nullable,
)
| [
198,
8818,
44161,
578,
18,
62,
8056,
19662,
3419,
198,
220,
220,
220,
1441,
269,
13345,
7,
357,
25,
25410,
578,
18,
62,
8056,
19662,
11,
44161,
578,
18,
62,
8019,
828,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 2.01475 | 4,000 |
# One shell command to execute on a worker (legacy Julia ≤0.6 `type` syntax).
# NOTE(review): `cmd :: AbstractString` is an abstractly typed field — fine
# here, but it prevents specialization; consider parametrizing if migrated.
type Job
    cmd :: AbstractString   # command line, executed via `sh -c`
    jobid :: Int            # sequential job number (used in log messages)
    pbs_id :: Int           # batch id; prefixes the .out/.err log file names
end
# Run `job.cmd` through `sh -c`, appending the command's stdout/stderr to
# per-process log files named "<pbs_id>-<worker>.out/.err", and log start /
# finish (with elapsed seconds via the legacy tic()/toq() timers) to the
# console and both log files.
function ex(job)
    output = "$(job.pbs_id)-$(myid()-1).out"
    err = "$(job.pbs_id)-$(myid()-1).err"
    # announce start on the console and in both log files (append mode)
    println("start $(job.jobid)-th job on process $(myid()-1).")
    open(io->println(io, "start $(job.jobid)-th job on process $(myid()-1)."), output, "a")
    open(io->println(io, "start $(job.jobid)-th job on process $(myid()-1)."), err, "a")
    tic()
    run(pipeline(`sh -c $(job.cmd)`, stdout = output, stderr = err, append=true))
    t = toq()
    # announce completion with the elapsed wall time
    println("finish $(job.jobid)-th job on process $(myid()-1) ($t sec)")
    open(io->println(io, "finish $(job.jobid)-th job on process $(myid()-1) ($t sec)"), output, "a")
    open(io->println(io, "finish $(job.jobid)-th job on process $(myid()-1) ($t sec)"), err, "a")
    return nothing
end
| [
4906,
15768,
198,
220,
220,
220,
23991,
7904,
27741,
10100,
198,
220,
220,
220,
1693,
312,
7904,
2558,
198,
220,
220,
220,
279,
1443,
62,
312,
7904,
2558,
198,
437,
198,
198,
8818,
409,
7,
21858,
8,
198,
220,
220,
220,
5072,
796,
... | 2.3 | 360 |
<reponame>johnnychen94/NiLang.jl
export rot, plshift, prshift, arshift
"""
rot(a, b, θ)
rotate variables `a` and `b` by an angle `θ`
"""
function rot(a, b, θ)
s, c = sincos(θ)
a*c-b*s, a*s+b*c
end
"""
plshift(x, n)
periodic left shift.
"""
plshift(x, n) = (x << n) | (x >> (sizeof(x)*8-n))
"""
plshift(x, n)
periodic right shift.
"""
prshift(x, n) = (x >> n) | (x << (sizeof(x)*8-n))
"""
arshift(x, n)
right shift, sign extending.
"""
arshift(x::T, n) where T = (x >> n) | (x & (T(1) << (sizeof(x)*8-1)))
| [
27,
7856,
261,
480,
29,
30686,
3281,
6607,
5824,
14,
34153,
43,
648,
13,
20362,
198,
39344,
5724,
11,
458,
30846,
11,
778,
30846,
11,
610,
30846,
198,
198,
37811,
198,
220,
220,
220,
5724,
7,
64,
11,
275,
11,
7377,
116,
8,
198,
... | 1.945652 | 276 |
# Parameter container for the iterative volume-parts estimator.
struct VolumePartsIter{
    TM<:AbstractMatrix,
    TV<:AbstractVector,
    Tg <: AbstractVector,
    Tts<:AbstractVector,
    Texpand<:NamedTuple,
    T<:Real} <: AbstractVolumePartsIter{TM, TV, T}

    #parameters
    A::TM
    Ã::TM #factored A, used in regressions
    G::TM
    g::Tg #vector version by reference of G; should stay equivalent to vec(G)
    ts::Tts      # time index of each observation
    tsM1::Tts    # lagged (t-1) time index of each observation
    expand::Texpand   # pre-built views/indices into A, Ã, G by ts/tsM1
    # NOTE(review): `dims` is never passed to `new` by the inner constructor,
    # so accessing Θ.dims would throw UndefRefError — confirm intended.
    dims::NamedTuple

    #inner constructor needed to identify elTΠ
    function VolumePartsIter(
        A::TM, Ã::TM, G::TM, g::Tg, ts::Tts, tsM1::Tts, expand::Texpand,
        ::Type{TV} = typeof(similar(A,1))) where {
        TM, TV, Tg, Tts, Texpand}

        return new{TM, TV, Tg, Tts, Texpand, eltype(TM)}(A,Ã,G,g,ts,tsM1, expand)
    end
end
# Pre-allocates the `expand` views and index tuples for the vpexpand step,
# then builds the struct. `g` aliases G (vec shares storage), so mutating g
# mutates G and vice versa.
function VolumePartsIter(A::TA, Ã::TA, G::TA, ts::Tts, tsM1::Tts=ts.-1,#=
    ::Type{TM}, ::Type{TV}, ::Type{T}=eltype(TM)=#) where {
    TA<:AbstractMatrix, TM, TV, T, Tts<:AbstractVector}

    g = vec(G)
    # column views of each parameter matrix selected by the (lagged) time ids
    expand = (
        A = view(A, :, ts),
        LA = view(A, :, tsM1),
        Ã = view(Ã, :, ts),
        LÃ = view(Ã, :, tsM1),
        G = view(G, :, tsM1),
        index = (
            A = to_indices(A, (:, ts)),
            LA = to_indices(A, (:, tsM1)),
            Ã = to_indices(Ã, (:, ts)),
            LÃ = to_indices(Ã, (:, tsM1)),
            G = to_indices(G, (:, tsM1)),
        ))

    Θ = VolumePartsIter(A, Ã, G, g, ts, tsM1, expand)
    return Θ
end
#basic constructor from dimension arguments
# NOTE: the first argument `T` is the number of periods (an Int), not an
# element type; element type Tvp and CPU/GPU array types come from PARAM.
function VolumePartsIter(T::Int, K::Int, ts::Vector{Int}, ::Type{Tvp}=PARAM[:itertype],
    ::Type{TM}=PARAM[:itergpu] ? CUDA.CuMatrix{Tvp} : Matrix{Tvp},
    ::Type{TV}=PARAM[:itergpu] ? CUDA.CuVector{Tvp} : Vector{Tvp},
    ) where {Tvp<:Real, TV<:AbstractVector{Tvp}, TM<:AbstractMatrix{Tvp}}

    # random positive starting values for asset levels; unit growth rates
    A = TM(abs.(randn(Tvp, K,T)))
    Ã = TM(abs.(randn(Tvp, K,T)))
    G = TM(ones(Tvp, K, T-1))

    return VolumePartsIter(A, Ã, G, ts, #=TM, TV, Tvp=#)
end
# Needed due to the segmented array structure: copies the parameter matrices
# and rebuilds the views/aliases through the preallocating constructor, so the
# copy's `g`/`expand` point at the copy's own storage.
function Base.deepcopy(Θ::VolumePartsIter)
    A = deepcopy(Θ.A)
    Ã = deepcopy(Θ.Ã)
    G = deepcopy(Θ.G)

    return VolumePartsIter(A,Ã,G,Θ.ts,Θ.tsM1)
end
# Individual absolute volume contribution for one asset:
# |A⋅ws - LA⋅RLws|, i.e. the absolute change in held value.
function genabsμₖ(Aᵢ, LAᵢ, wsᵢ, RLwsᵢ)
    return abs(Aᵢ * wsᵢ - LAᵢ * RLwsᵢ)
end
# 5-argument form: the Rtws argument is unused; defer to the 4-argument method.
genabsμₖ(Aᵢ, LAᵢ, wsᵢ, Rtwsᵢ, RLwsᵢ) = genabsμₖ(Aᵢ, LAᵢ, wsᵢ, RLwsᵢ)
# Derivative helper: sign of the (signed) flow A⋅ws - LA⋅Rtws.
# Note it uses Rtws, unlike genabsμₖ above which uses RLws.
signabsμₖ(Aᵢ, LAᵢ, wsᵢ, Rtwsᵢ, RLwsᵢ) = sign(Aᵢ*wsᵢ-LAᵢ*Rtwsᵢ) # derivative
#project the volume at a particular time t
#WARNING- the below only works when sum(Lwsₜ)==sum(wsₜ). If this is not true, need to carry
#an extra vector for the previous weights
# With an aggregation matrix Mₜ: aggregate the per-asset absolute flows.
function projectt(Aᵢ, LAᵢ, wsₜ, Rtwsₜ, RLwsₜ, Mₜ)
    return Mₜ * genabsμₖ.(Aᵢ', LAᵢ', wsₜ, Rtwsₜ, RLwsₜ,)
end
# Without an aggregation matrix: return the raw per-asset absolute flows.
function projectt(Aᵢ, LAᵢ, wsₜ, Rtwsₜ, RLwsₜ, ::Nothing)
    return genabsμₖ.(Aᵢ', LAᵢ', wsₜ, Rtwsₜ, RLwsₜ,)
end
#projectt(Aᵢ, LAᵢ, wsₜ, RLwsₜ, ::Nothing) = genabsμₖ.(Aᵢ', LAᵢ', wsₜ, RLwsₜ, 1.0 +sum(RLwsₜ)-sum(wsₜ))
#project the volume for each time t
# Projects per-period absolute volume using columns t and t+1 of `A`, then
# vertically concatenates the per-period results into one stacked vector.
function projectbyt(A, xsection)
    dims = (K = size(A,1), T = size(A,2))
    @unpack xws, xRtws, xRLws, xM = xsection
    #project the volume at each time t, then concatenate the results
    factored = reduce(vcat, broadcast(1:(dims.T-1), xws, xRtws,xRLws, xM) do t, xwsₜ, xRtwsₜ, xRLwsₜ, xMₜ
        projectt(A[:,t+1], A[:,t], xwsₜ, xRtwsₜ, xRLwsₜ, xMₜ)
    end)

    return factored
end
# Signed (active) per-asset volume contribution: A⋅ws - LA⋅RLws,
# i.e. genabsμₖ without the absolute value.
function genμₖ(Aᵢ, LAᵢ, wsᵢ, RLwsᵢ)
    return Aᵢ * wsᵢ - LAᵢ * RLwsᵢ
end
# 5-argument form ignores Rtws and defers to the 4-argument method.
genμₖ(Aᵢ, LAᵢ, wsᵢ, Rtwsᵢ, RLwsᵢ) = genμₖ(Aᵢ, LAᵢ, wsᵢ, RLwsᵢ)
# Signed (purchases) analogue of projectt, with optional aggregation by Mₜ.
function projectpurchasest(Aᵢ, LAᵢ, wsₜ, Rtwsₜ, RLwsₜ, Mₜ)
    return Mₜ * genμₖ.(Aᵢ', LAᵢ', wsₜ, Rtwsₜ, RLwsₜ,)
end
# No aggregation matrix: raw per-asset signed flows.
function projectpurchasest(Aᵢ, LAᵢ, wsₜ, Rtwsₜ, RLwsₜ, ::Nothing)
    return genμₖ.(Aᵢ', LAᵢ', wsₜ, Rtwsₜ, RLwsₜ,)
end
# Signed (purchases) analogue of projectbyt: stacks per-period signed flows.
function projectpurchasesbyt(A, xsection)
    dims = (K = size(A,1), T = size(A,2))
    @unpack xws, xRtws, xRLws, xM = xsection
    #project the volume at each time t, then concatenate the results
    factored = reduce(vcat, broadcast(1:(dims.T-1), xws, xRtws,xRLws, xM) do t, xwsₜ, xRtwsₜ, xRLwsₜ, xMₜ
        projectpurchasest(A[:,t+1], A[:,t], xwsₜ, xRtwsₜ, xRLwsₜ, xMₜ)
    end)

    return factored
end
#this just runs the regression for the purposes of extracting the coefficients
# (does not write the leading coefficients back into A, unlike the functor).
function coef(Θ::AbstractVolumePartsIter{TM,TV,T}, Xv::XYIterControl,
    RegressionType::Val) where {TM, TV, T}

    @unpack G,Ã,A = Θ
    dims = (K = size(A,1), T = size(A,2))
    initializeÃandG!(Θ, Xv)
    RHS::TM = hcat(projectbyt(Ã, Xv.xsection), Xv.W̃)
    β = reg(RHS, Xv.ṽ, RegressionType)
    return β
end
#version that performs cross-sectional regressions as needed
# Evaluates the model: rebuilds Ã from G, regresses ṽ on the projected volume
# plus controls W̃, stores the first K coefficients back into A[:, 1], and
# returns the fitted absolute volume RHS * β.
function (Θ::AbstractVolumePartsIter{TM,TV,T})(Xv::XYIterControl,
    RegressionType::Val) where {TM,TV,T}

    @unpack G,Ã,A = Θ
    dims = (K = size(A,1), T = size(A,2))

    #set Ã to the cumulative product of G
    initializeÃandG!(Θ, Xv)

    #@info "type of Xv: $(typeof(Xv))"
    RHS::TM = hcat(projectbyt(Ã, Xv.xsection), Xv.W̃)

    #run the regression for A₁
    β = reg(RHS, Xv.ṽ, RegressionType)

    #below should be equivalent to A[:,1] .= β[1:dims.K]
    #β = (b->ifelse(b>0, b, b-100.)).(β) |> TV
    copyto!(A, 1, β, 1, dims.K)
    #A[:,1] .= parent(β)[1:dims.K]

    #absμ .= vec(sum(absμₖ,dims=2))
    #@eval Main controlbeta = $(β |> Vector)
    absμ = RHS * β

    return absμ
end
# :none regression variant — the asset-level coefficients are pinned to
# `noregressionbase`; only the control coefficients βW̃ are estimated (or an
# empty vector when there are no controls).
function (Θ::AbstractVolumePartsIter{TM,TV,T})(Xv::XYIterControl,
    RegressionType::Val{:none},
    noregressionbase::T = T(PARAM[:iternoregressionbase])) where {TM,TV,T}

    @unpack G,Ã,A = Θ
    dims = (K = size(A,1), T = size(A,2))

    #set Ã to the cumulative product of G
    initializeÃandG!(Θ, Xv)

    #@info "type of Xv: $(typeof(Xv))"
    RHS::TM = hcat(projectbyt(Ã, Xv.xsection), Xv.W̃)

    #run the regression for A₁
    βW̃ = (size(Xv.W̃,2) > 0) ? reg(Xv.W̃, Xv.ṽ, Val{:cholesky}()) : TV()
    β = [TV(ones(T, dims.K) .* noregressionbase); βW̃]

    #below should be equivalent to A[:,1] .= β[1:dims.K]
    copyto!(A, 1, β, 1, dims.K)
    #A[:,1] .= parent(β)[1:dims.K]

    #absμ .= vec(sum(absμₖ,dims=2))
    #@eval Main controlbeta = $(β |> Vector)
    absμ = RHS * β

    return absμ
end
#we can probably dispatch all of this just from Xv
#NOTE: this is mainly for ZYGOTE
# Differentiable path: rebuilds Ã out-of-place from `G` (ones column followed
# by the row-wise cumulative product), so Zygote can trace the computation.
function (Θ::AbstractVolumePartsIter{TM,TV,T})(G::Matrix{T},
    Xv::XYIterControl,
    RegressionType::Val) where {TM,TV,T}

    dims = (K=size(Θ.A,1), T = size(Θ.A,2))
    someones = ones(T, dims.K, 1)
    prodmatG = cumprodbyrow(G)
    Ã = hcat(someones, prodmatG) |> TM
    RHS::TM = hcat(projectbyt(Ã, Xv.xsection), Xv.W̃)

    #run the regression for A₁
    β = reg(RHS, Xv.ṽ, RegressionType)
    #β = (b->ifelse(b>0, b, b-100.)).(β) |> TV
    #β = ones(size(RHS,2))
    absμ = RHS * β

    return absμ
end
#we can probably dispatch all of this just from Xv
#NOTE: this is mainly for ZYGOTE
# Differentiable :none variant: asset coefficients pinned to
# `noregressionbase`; only the controls are regressed (Zygote-safe cholesky).
function (Θ::AbstractVolumePartsIter{TM,TV,T})(G::Matrix{T},
    Xv::XYIterControl,
    RegressionType::Val{:none},
    noregressionbase::T = T(PARAM[:iternoregressionbase])) where {TM,TV,T}

    dims = (K=size(Θ.A,1), T = size(Θ.A,2))
    someones = ones(T, dims.K, 1)
    prodmatG = cumprodbyrow(G)
    Ã = hcat(someones, prodmatG) |> TM
    RHS::TM = hcat(projectbyt(Ã, Xv.xsection), Xv.W̃)

    #run the regression for A₁
    βW̃ = (size(Xv.W̃,2) > 0) ? reg(Xv.W̃, Xv.ṽ, Val{:choleskyzygote}()) : TV()
    β = [TV(ones(T, dims.K) .* noregressionbase); βW̃]
    absμ = RHS * β

    return absμ
end
#NOTE: this is mainly for ZYGOTE
# Fallback regression path used when the primary Zygote gradient fails;
# note cumprodbyrow is called with limitA=true here, unlike the other methods.
function (Θ::AbstractVolumePartsIter{TM,TV,T})(G::Matrix{T},
    Xv::XYIterControl,
    RegressionType::Val{:zygotefallbackreg}) where {TM,TV,T}

    dims = (K=size(Θ.A,1), T = size(Θ.A,2))
    someones = ones(T, dims.K, 1)
    prodmatG = cumprodbyrow(G, limitA=true)
    Ã = hcat(someones, prodmatG) |> TM
    RHS::TM = hcat(projectbyt(Ã, Xv.xsection), Xv.W̃)

    #run the regression for A₁
    β = reg(RHS, Xv.ṽ, RegressionType)
    #β = (b->ifelse(b>0, b, b-100.)).(β) |> TV
    absμ = RHS * β

    return absμ
end
# Sum-of-squared-errors loss of the differentiable (G-based) model evaluation;
# this is the scalar Zygote differentiates with respect to G.
function lossforzygote(G::Matrix{T}, Θ::AbstractVolumePartsIter{TM,TV,T}, Xv::XYIterControl,;
    RegressionType::Val=Val{PARAM[:iterregressiontypezygote]}()) where {TM,TV,T}
    #throw("hello")

    #ε = factored * Aₜ .- Xv.ṽ
    Σε2 = sum((Θ(G, Xv, RegressionType) .- Xv.ṽ).^2) #0.3ms

    return Σε2
end
# Build and return an in-place gradient closure ∇vec!(out, x) for the
# optimizer: x is the flat parameter vector (G in levels for :identity or in
# logs for :log, per GrowthType); the closure copies it into Θ.g (which
# aliases Θ.G), evaluates the Zygote/AutoGrad gradient of the loss w.r.t. G,
# applies the chain rule for the :log parameterisation, and writes into `out`.
function ∇lossforzygote(Θ, Xv::AbstractXYIter{TM,TV,T}, GrowthType::Val;
    RegressionType=Val(PARAM[:iterregressiontypezygote]),
    GradType=Val(PARAM[:itergradtype])) where {TM,TV,T}

    #the actual gradient function
    function ∇loss(G::Matrix, GradType::Val{:zygote})
        local grad
        try
            grad = gradient((G)->lossforzygote(G, Θ, Xv,
                RegressionType=RegressionType), G)[1]
        catch err
            # never swallow user interrupts
            (err == InterruptException()) && throw(err)
            errmessage = "$err"
            # truncate huge error strings before logging
            errmessage = length(errmessage) > 1000 ? errmessage[1:1000] : errmessage
            @warn "Gradient failed with error $errmessage\nAttempting fallback method"
            try
                grad = gradient((G)->lossforzygote(G, Θ, Xv,
                    RegressionType=Val{:zygotefallbackreg}()), G)[1]
            catch err2
                @error "Gradient failed with error $errmessage" exception=(err, catch_backtrace())
                errmessage2 = "$err2"
                errmessage2 = length(errmessage2) > 1000 ? errmessage2[1:1000] : errmessage2
                @error "Gradient fallback also failed with error $errmessage2" exception=(err2, catch_backtrace())
            end
        end
    end

    # AutoGrad backend
    function ∇loss(G::Matrix, GradType::Val{:ag})
        return ∇lossag(G, Θ, Xv)
    end

    ∇loss(G::Matrix) = ∇loss(G::Matrix, GradType)

    # level parameterisation on the CPU: x is G itself
    function ∇vec!(out, x::TV, ::Val{:identity}) where TV <: Vector
        if !(x===Θ.g) #if they are the same array, no need to update
            copyto!(Θ.g, x)
        end
        copyto!(out, ∇loss(Θ.G)) #converts to TV
        return nothing
    end

    # level parameterisation on the GPU: gradient computed on a CPU copy of G
    function ∇vec!(out, x::TV, ::Val{:identity}) where TV <: CuVector
        copyto!(Θ.g, x)
        G = Θ.G |> Matrix
        copyto!(out, ∇loss(G)) #converts to TV
        return nothing
    end

    # log parameterisation on the CPU: G = exp.(x)
    function ∇vec!(out, x::TV, ::Val{:log}) where TV <: Vector
        #@info "∇vec! log"
        (x ≡ Θ.g) && throw("x and g should not be the same vector if growthtype≠identity")
        copyto!(Θ.g, exp.(x))
        copyto!(out, ∇loss(Θ.G) .* Θ.G) #adjust by G since d(expy)/dx=expy*dy/dx
        return nothing
    end

    # log parameterisation on the GPU
    function ∇vec!(out, x::TV, ::Val{:log}) where TV <: CuVector
        (x ≡ Θ.g) && throw("x and g should not be the same vector if growthtype≠identity")
        copyto!(Θ.g, exp.(x))
        G = Θ.G |> Matrix
        copyto!(out, ∇loss(G) .* G)
        #@info "sumabsout: $(sum(abs.(out)))"
    end

    ∇vec!(out, x) = ∇vec!(out, x |> TV, GrowthType) #this version performs the conversion
    ∇vec!(out, x::TV) = ∇vec!(out, x, GrowthType)

    return ∇vec!
end
#Updates G based on values of A
#of course, this changes the RHS side of the regressions, so any
#factored cross-sections need to be recalculated
# Deliberately removed; kept as a stub so stale call sites fail loudly.
updateGfromA!(args...;kwargs...) = throw("I dropped updateGfromA!. If needed, just replace
    with Θ.A[:,2:end] ./ Θ.A[:,1:(end-1)] (though be careful about Ã vs A)")
#uses a base value of A and the G matrix to update all other values of A
# Anchors the level path at column t: A[:, s] = A₀ .* Π G up to s, where A₀
# is chosen so the path passes exactly through the current A[:, t].
function updateAfromGAₜ!(Θ::VolumePartsIter, Xv::AbstractXYIter{TM,TV,T},t::Int) where {TM,TV,T}
    local A₀::TV
    local Aₜ::TV = Θ.A[:,t]

    dims = (T=size(Θ.A,2),K=size(Θ.A,1))

    #compute the cumulative growth
    ΠG::TM = similar(Θ.A)
    ΠG[:,1] .= T(1.0)
    ΠG[:,2:end] .= cumprodbyrow(Θ.G)

    A₀ = Aₜ ./ ΠG[:,t] #identify the intial value of assets
    Θ.A .= A₀ .* ΠG #use the growth to compute A
    (vec(Θ.A[:,t]) ≈ Aₜ) || throw("vec(Θ.A[:,t]) ($(vec(Θ.A[:,t]))) ≠ Aₜ ($(Aₜ))! ") #quick integrity check
    Θ.A[:,t] .= Aₜ #preserve exact values to partly mitigate roundoff error
    return nothing
end
# CUDA specialisation of updateAfromGAₜ!: same anchoring logic, but the path
# is computed as one fused expression to stay on-device, and the integrity
# check is skipped (it would force a device→host transfer).
function updateAfromGAₜ!(Θ::VolumePartsIter, Xv::AbstractXYIter{TM,TV,T},
    t::Int) where {TM<:CuMatrix,TV<:CuVector,T}

    local Aₜ::TV = Θ.A[:,t]

    dims = (T=size(Θ.A,2),K=size(Θ.A,1))

    #compute the cumulative growth
    ΠG::TM = similar(Θ.A)
    ΠG[:,1] .= T(1.0)
    ΠG[:,2:end] .= cumprodbyrow(Θ.G)
    #ΠG .= hcat(CUDA.ones(T, dims.K, 1), cumprod(Θ.G,dims=2))

    #A₀ = Aₜ ./ ΠG[:,t] #identify the intial value of assets
    #Θ.A .= A₀ .* ΠG
    A = (Aₜ ./ ΠG[:,t]) .* ΠG
    copyto!(Θ.A, A) #use the growth to compute A
    #@assert vec(Θ.A[:,t]) ≈ Aₜ #quick integrity check
    Θ.A[:,t] .= Aₜ #preserve exact values to partly mitigate roundoff error
    return nothing
end
# Computes the loss, but does so given the current values of A irrespective
# of G (projects with A directly instead of the factored Ã).
function lossfromA(Θ::AbstractVolumePartsIter{TM,TV,T}, Xv::XYIterControl, RegressionType::Val
    ) where {TM,TV,T}

    @unpack G,Ã,A = Θ
    dims = (K = size(A,1), T = size(A,2))
    RHS::TM = hcat(projectbyt(A, Xv.xsection), Xv.W̃) #NOTE the use of A, not Ã
    β = reg(RHS, Xv.ṽ, RegressionType)
    absμ = RHS * β
    loss = sum((Xv.ṽ .- absμ) .^2 )
    return loss
end
#NOTE: could be used in some future GPU version
# per-element growth ratio and factored flow kernels
cudaGᵢ(Aᵢ,LAᵢ)= Aᵢ/LAᵢ
cudafactorAᵢ(wsᵢ, RLwsᵢ, Gᵢ) = CUDA.abs(wsᵢ - RLwsᵢ/Gᵢ)
cudafactorLAᵢ(wsᵢ, RLwsᵢ, Gᵢ) = CUDA.abs(Gᵢ*wsᵢ - RLwsᵢ)
#takes a volume part object and writes the results to a df
# Builds a per-date DataFrame containing the gross exposures (A scaled by the
# measure's gross leverages) and the per-period growth rates (zero row
# prepended for the first period).
function formresults(panel::AbstractDataFrame, ms::MeasureSpec, Θ::AbstractVolumeParts{TM,TV,T},
    Xv::XYIterControl, ::Val{:iter}) where {TM,TV,T}

    grossexposures = (Θ.A |> Matrix)' .* grossleverages(ms)'
    # BUG FIX: the original piped Θ.G into `Matrix!`, which is not defined in
    # Base or in this file; every other conversion here uses `|> Matrix`.
    growthrates = vcat(zeros(eltype(grossexposures), size(grossexposures,2))', (Θ.G |> Matrix)')
    Fcoefs::Vector{Symbol} = ms.Fξs
    Fgcoefs::Vector{Symbol} = (s->replace(string(s),r"^Z"=>"G_") |> Symbol).(Fcoefs)
    FWcoefs::Vector{Symbol} = [ms.FW...;]
    if length(Fcoefs) ≠ size(Θ.A,1)
        throw("Coeffcients not in expected form. Fcoefs: $Fcoefs")
    end
    resultsgrossexposures = DataFrame(grossexposures,Fcoefs)
    resultsgrowthrates = DataFrame(growthrates,Fgcoefs)

    # one row per date, in chronological order
    tidx::Dict = gettidx(panel, ms)
    tidxinv::Vector{Date} = sort(collect(keys(tidx)))
    results::DataFrame = DataFrame(date=tidxinv)
    results = [results resultsgrossexposures resultsgrowthrates]

    return results
end
| [
198,
198,
7249,
14701,
42670,
29993,
90,
198,
220,
220,
220,
21232,
27,
25,
23839,
46912,
11,
198,
220,
220,
220,
3195,
27,
25,
23839,
38469,
11,
198,
220,
220,
220,
309,
70,
1279,
25,
27741,
38469,
11,
198,
220,
220,
220,
309,
91... | 2.066796 | 6,677 |
# ===============================================================
# Discretize using the correction hull of the matrix exponential
# ===============================================================
"""
CorrectionHull{EM} <: AbstractApproximationModel
Discretization using the correction hull of the matrix exponential.
### Fields
- `exp` -- exponentiation method
- `order` -- order of the Taylor series expansion of the matrix exponential
### Algorithm
For the homogeneous case, this method implements the transformation:
```math
Ω_0 = CH(X_0, e^{Aδ} X_0) ⊕ FX_0
```
where ``F`` is the correction (interval) matrix.
For the inhomogeneous case, ``x' = Ax + u``, ``x ∈ X, u ∈ U``, implements
``Ω_0 = CH(X_0, exp(Aδ) X0) ⊕ FX0`` where ``F`` is the correction (interval) matrix.
In both cases, if ``A`` is an interval matrix, the exponential is overapproximated
using methods from `IntervalMatrices.jl`.
"""
struct CorrectionHull{EM} <: AbstractApproximationModel
order::Int
exp::EM
end
# convenience constructor using symbols; `exp` defaults to the interval
# exponential algorithm at the requested order, and _alias normalises
# symbol shortcuts into concrete method objects
function CorrectionHull(; order::Int=10, exp=IntervalExpAlg(order))
    return CorrectionHull(order, _alias(exp))
end
# Multi-line human-readable description of the approximation model.
function Base.show(io::IO, alg::CorrectionHull)
    print(io, "`CorrectionHull` approximation model with: \n")
    print(io, " - exponentiation method: $(alg.exp) \n")
    print(io, " - order: $(alg.order)\n")
end

# REPL/pretty-printing entry point delegates to the plain show above.
Base.show(io::IO, m::MIME"text/plain", alg::CorrectionHull) = print(io, alg)
# -----------------------------------------------------------------
# Correction hull: homogeneous case x' = Ax, x in X
# -----------------------------------------------------------------

# Discretize the continuous system x' = Ax with step δ: Φ = exp(Aδ) becomes
# the discrete state matrix, and Ω0 ⊇ CH(X0, ΦX0) ⊕ FX0 the initial set.
function discretize(ivp::IVP{<:CLCS, <:LazySet}, δ, alg::CorrectionHull)
    A = state_matrix(ivp)
    X0 = initial_state(ivp)
    X = stateset(ivp)

    # compute exp(A*δ) * X0
    Φ = _exp(A, δ, alg.exp)

    # compute Ω0 = CH(X0, ΦX0) ⊕ FX0
    Ω0 = _discretize_chull(A, Φ, X0, δ, alg)

    Sdis = ConstrainedLinearDiscreteSystem(Φ, X)
    return InitialValueProblem(Sdis, Ω0)
end
# Ω0 = CH(X0, Φ·X0) ⊕ F·X0 for an interval-matrix Φ; all intermediate sets
# are (over)approximated by zonotopes.
function _discretize_chull(A, Φ::IntervalMatrix, X0, δ, alg)
    X0z = _convert_or_overapproximate(X0, Zonotope)
    Y = _overapproximate(Φ * X0z, Zonotope)

    H = overapproximate(CH(X0z, Y), Zonotope)
    F = correction_hull(A, δ, alg.order)
    R = _overapproximate(F * X0z, Zonotope)
    Ω0 = minkowski_sum(H, R)
    return Ω0
end
# F(δ) without the E(δ) correction term
# Sums the interval time coefficients of the Taylor terms A^i/i! for
# i = 2..p; `timeint` gives the interval hull of t^i - t·δ^(i-1) over [0, δ].
function _correction_hull_without_E(A, δ, p)
    timeint(δ, i) = interval((i^(-i / (i-1)) - i^(-1 / (i-1))) * δ^i, 0)
    F = sum(map(x -> timeint(δ, i) * x, A^i / factorial(i)) for i in 2:p)
    return IntervalMatrix(F)
end
# Same construction for a concrete (non-interval) Φ: the linear map is exact,
# and the correction matrix omits the E(δ) remainder term.
function _discretize_chull(A, Φ::AbstractMatrix, X0, δ, alg)
    X0z = _convert_or_overapproximate(X0, Zonotope)
    Y = linear_map(Φ, X0z)

    H = overapproximate(CH(X0z, Y), Zonotope)
    F = _correction_hull_without_E(A, δ, alg.order)
    R = _overapproximate(F * X0z, Zonotope)
    Ω0 = minkowski_sum(H, R)
    return Ω0
end
# -----------------------------------------------------------------
# Correction hull: inhomogeneous case x' = Ax + u, x in X, u ∈ U
# -----------------------------------------------------------------

# Discretize x' = Ax + u: the homogeneous Ω0 is computed as above and the
# input contribution C(δ)·U is added via Minkowski sum.
function discretize(ivp::IVP{<:CLCCS, <:LazySet}, δ, alg::CorrectionHull)
    A = state_matrix(ivp)
    X0 = initial_state(ivp)
    X = stateset(ivp)
    U = next_set(inputset(ivp), 1) # inputset(ivp)
    n = size(A, 1)

    # here U is an interval matrix map of a lazyset, TODO refactor / dispatch
    if isa(U, LinearMap)
        Uz = _convert_or_overapproximate(Zonotope, LazySets.set(U))
        B = matrix(U)
        Uz = isinterval(B) ? _overapproximate(B * Uz, Zonotope) : linear_map(B, Uz)
    else # LazySet
        Uz = _convert_or_overapproximate(Zonotope, U)
    end
    # the algorithm below requires the origin to belong to the input set
    if zeros(dim(U)) ∉ Uz
        error("this function is not implemented, see issue #253")
    end

    # TODO refactor Ω0_homog
    # TODO refactor / dispatch
    X0z = _convert_or_overapproximate(Zonotope, X0)
    Φ = _exp(A, δ, alg.exp)
    if isinterval(Φ)
        Y = _overapproximate(Φ * X0z, Zonotope)
    else
        Y = linear_map(Φ, X0z)
    end

    H = overapproximate(CH(X0z, Y), Zonotope)
    F = correction_hull(A, δ, alg.order)
    R = _overapproximate(F * X0z, Zonotope)
    Ω0_homog = minkowski_sum(H, R)

    # compute C(δ) * U
    Cδ = _Cδ(A, δ, alg.order)
    Ud = _overapproximate(Cδ * Uz, Zonotope)
    Ω0 = minkowski_sum(Ω0_homog, Ud)
    # Ω0 = _apply_setops(Ω0, alg.setops) # TODO requires to add `setops` field to the struct
    Idn = Φ # IntervalMatrix(one(A)) or IdentityMultiple(one(eltype(A)), n) # FIXME
    Sdis = ConstrainedLinearControlDiscreteSystem(Φ, Idn, X, Ud)
    return InitialValueProblem(Sdis, Ω0)
end
| [
2,
46111,
4770,
25609,
855,
198,
2,
8444,
1186,
1096,
1262,
262,
17137,
23644,
286,
262,
17593,
39682,
198,
2,
46111,
4770,
25609,
855,
198,
198,
37811,
198,
220,
220,
220,
35074,
39,
724,
90,
3620,
92,
1279,
25,
27741,
4677,
13907,
... | 2.388522 | 1,969 |
module PIPS_NLP
# package code goes here
end # module
include("ParPipsNlp.jl")
include("PipsNlp.jl") | [
21412,
30434,
3705,
62,
45,
19930,
198,
198,
2,
5301,
2438,
2925,
994,
628,
198,
437,
1303,
8265,
198,
198,
17256,
7203,
10044,
47,
2419,
45,
34431,
13,
20362,
4943,
198,
17256,
7203,
47,
2419,
45,
34431,
13,
20362,
4943
] | 2.6 | 40 |
<reponame>Datseris/FractalDimension
# %% Sensititivy to trajectory length
using DrWatson
@quickactivate :FractalDimension # uses DynamicalSystems, PyPlot
include(srcdir("style.jl"))
using DynamicalSystems, PyPlot
# %%
N = 1*10^5
systems = [:koch, :henon_chaotic]
slabels = ["Koch", "Hénon"]
qs = 2:4
Cmethod = "standard" # bueno or standard. Decides εmax for correlation sum.
eHs, eCs, Hs, Cs = [Vector{Float64}[] for i in 1:4]
for data in systems
for q in qs
qH = qC = q
@show (q, data)
# Here we simply pack all parameters into a dictionary
# (other parameters are (probably) globals)
params = @strdict N qH qC data
if data == :standardmap_chaotic
params["k"] = 1.0
# elseif data == :henon_chaotic
# params["z"] = -4
end
params["theiler"] = 0
if Cmethod ≠ "standard"
params["Cmethod"] = Cmethod
end
# This is the main call that calculates everything
output, s = produce_or_load(
datadir("main"), params, make_C_H;
prefix = string(data), suffix = "jld2", force = false,
ignores = ["data"], storepatch = false
)
@unpack eH, eC, H, C = output
push!(eHs, eH); push!(Hs, H); push!(eCs, eC); push!(Cs, C)
end
end
legendtitle = "impact of order \$q\$"
labels = [s*" \$q=$(q)\$" for s in slabels for q in qs]
fig, axs = mainplot(
Hs, Cs, eHs, eCs, labels, legendtitle;
qH = "q", qC = "q", tol = 0.25,
offsets = range(0; length = 6, step = 1.5),
dimension_fit_C = FractalDimension.linear_regression_fit_glm,
)
wsave(plotsdir("paper", "orderq"), fig)
| [
27,
7856,
261,
480,
29,
35,
1381,
263,
271,
14,
37,
974,
282,
29271,
3004,
198,
2,
43313,
14173,
270,
270,
452,
88,
284,
22942,
4129,
198,
3500,
1583,
54,
13506,
198,
31,
24209,
39022,
1058,
37,
974,
282,
29271,
3004,
1303,
3544,
... | 2.176623 | 770 |
<gh_stars>0
macro shared_fields_stanmodels()
return esc(:(
name::AbstractString; # Name of the Stan program
model::AbstractString; # Stan language model program
n_chains::Vector{Int64}; # Number of chains
seed::StanBase.RandomSeed; # Seed section of cmd to run cmdstan
init::StanBase.Init; # Init section of cmd to run cmdstan
output::StanBase.Output; # Output section of cmd to run cmdstan
tmpdir::AbstractString; # Holds all below copied/created files
output_base::AbstractString; # Used for naming failes to be created
exec_path::AbstractString; # Path to the cmdstan excutable
data_file::Vector{String}; # Array of data files input to cmdstan
init_file::Vector{String}; # Array of init files input to cmdstan
cmds::Vector{Cmd}; # Array of cmds to be spawned/pipelined
sample_file::Vector{String}; # Sample file array created by cmdstan
log_file::Vector{String}; # Log file array created by cmdstan
diagnostic_file::Vector{String}; # Diagnostic file array created by cmdstan
summary::Bool; # Store cmdstan's summary as a .csv file
printsummary::Bool; # Print the summary
cmdstan_home::AbstractString; # Directory where cmdstan can be found
))
end
| [
27,
456,
62,
30783,
29,
15,
198,
20285,
305,
4888,
62,
25747,
62,
14192,
27530,
3419,
198,
220,
1441,
3671,
7,
37498,
198,
220,
220,
220,
1438,
3712,
23839,
10100,
26,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
22... | 2.535135 | 555 |
@ms include("solvers/annealing.jl")
function loop_annealing(args)
instances = [
# "02",
# "03",
# "05",
# "08",
# "09",
# "10",
# "11",
"12",
# "13"
]
println("Instance n° & Cost & Time")
for instance_name in instances
instance_path = "data/$instance_name.alp"
inst = Instance(instance_path)
sol = Solution(inst)
initial_sort!(sol)
sv = AnnealingSolver(
inst;
temp_init_rate=0.8,
step_size=1,
startsol=sol,
temp_coef=0.985,
)
ms_start = ms() # nb secondes depuis démarrage avec précision à la ms
solve(sv, swap_close_planes!, durationmax=15*60)
ms_stop = ms()
bestsol = sv.bestsol
nb_calls = bestsol.solver.nb_calls
nb_infeasable = bestsol.solver.nb_infeasable
nb_sec = round(ms_stop - ms_start, digits=3)
nb_call_per_sec = round(nb_calls/nb_sec, digits=3)
println("Performance: ")
println(" nb_calls=$nb_calls")
println(" nb_infeasable=$nb_infeasable")
println(" nb_sec=$nb_sec")
println(" => nb_call_per_sec = $nb_call_per_sec call/sec")
println("$instance_name & $(bestsol.cost) & $nb_sec")
end
end
loop_annealing(Args.args)
| [
31,
907,
2291,
7203,
34453,
690,
14,
21952,
4272,
13,
20362,
4943,
198,
198,
8818,
9052,
62,
21952,
4272,
7,
22046,
8,
198,
220,
220,
220,
10245,
796,
685,
198,
220,
220,
220,
220,
220,
220,
220,
1303,
366,
2999,
1600,
198,
220,
2... | 1.893855 | 716 |
# Julia 0.6
function singleNumber(vector)
reduce(xor, vector)
end
a = [1,1,2,3,3]
println(singleNumber(a))
| [
2,
22300,
657,
13,
21,
198,
8818,
2060,
15057,
7,
31364,
8,
198,
197,
445,
7234,
7,
87,
273,
11,
15879,
8,
198,
437,
198,
198,
64,
796,
685,
16,
11,
16,
11,
17,
11,
18,
11,
18,
60,
198,
35235,
7,
29762,
15057,
7,
64,
4008,
... | 2.270833 | 48 |
<gh_stars>1-10
using TimeSeries, MarketData, Base.Dates
FactCheck.setstyle(:compact)
FactCheck.onlystats(true)
facts("collapse operations") do
context("collapse squishes correctly") do
@fact collapse(cl, week, first).values[2] --> 97.75
@fact collapse(cl, week, first).timestamp[2] --> Date(2000,1,10)
@fact collapse(cl, week, first, last).values[2] --> 100.44
@fact collapse(cl, week, first, last).timestamp[2] --> Date(2000,1,10)
@fact collapse(cl, month, first).values[2] --> 100.25
@fact collapse(cl, month, first).timestamp[2] --> Date(2000,2,1)
@fact collapse(ohlc, week, first).values[2, :] --> [102.0, 102.25, 94.75, 97.75]
@fact collapse(ohlc, week, first).timestamp[2] --> Date(2000,1,10)
@fact collapse(ohlc, week, first, last).values[2, :] --> [100.0, 102.25, 99.38, 100.44]
@fact collapse(ohlc, week, first, last).timestamp[2] --> Date(2000,1,10)
@fact collapse(ohlc, month, first).values[2, :] --> [104.0, 105.0, 100.0, 100.25]
@fact collapse(ohlc, month, first).timestamp[2] --> Date(2000,2,1)
end
end
facts("merge works correctly") do
cl1 = cl[1:3]
op1 = cl[2:4]
aapl = tail(AAPL)
ba = tail(BA)
context("takes colnames kwarg correctly") do
@fact merge(cl, ohlc["High", "Low"], colnames=["a","b","c"]).colnames --> ["a", "b", "c"]
@fact merge(cl, op, colnames=["a","b"]).colnames --> ["a", "b"]
@fact_throws merge(cl, op, colnames=["a"])
@fact_throws merge(cl, op, colnames=["a","b","c"])
@fact merge(cl, ohlc["High", "Low"], :inner, colnames=["a","b","c"]).colnames --> ["a", "b", "c"]
@fact merge(cl, op, :inner, colnames=["a","b"]).colnames --> ["a", "b"]
@fact_throws merge(cl, op, :inner, colnames=["a"])
@fact_throws merge(cl, op, :inner, colnames=["a","b","c"])
@fact merge(cl, ohlc["High", "Low"], :left, colnames=["a","b","c"]).colnames --> ["a", "b", "c"]
@fact merge(cl, op, :left, colnames=["a","b"]).colnames --> ["a", "b"]
@fact_throws merge(cl, op, :left, colnames=["a"])
@fact_throws merge(cl, op, :left, colnames=["a","b","c"])
@fact merge(cl, ohlc["High", "Low"], :right, colnames=["a","b","c"]).colnames --> ["a", "b", "c"]
@fact merge(cl, op, :right, colnames=["a","b"]).colnames --> ["a", "b"]
@fact_throws merge(cl, op, :right, colnames=["a"])
@fact_throws merge(cl, op, :right, colnames=["a","b","c"])
@fact merge(cl, ohlc["High", "Low"], :outer, colnames=["a","b","c"]).colnames --> ["a", "b", "c"]
@fact merge(cl, op, :outer, colnames=["a","b"]).colnames --> ["a", "b"]
@fact_throws merge(cl, op, :outer, colnames=["a"])
@fact_throws merge(cl, op, :outer, colnames=["a","b","c"])
end
context("returns correct alignment with Dates and values") do
@fact merge(cl,op).values --> merge(cl,op, :inner).values
@fact merge(cl,op).values[2,1] --> cl.values[2,1]
@fact merge(cl,op).values[2,2] --> op.values[2,1]
end
context("aligns with disparate sized objects") do
@fact merge(cl, op[2:5]).values[1,1] --> cl.values[2,1]
@fact merge(cl, op[2:5]).values[1,2] --> op.values[2,1]
@fact merge(cl, op[2:5]).timestamp[1] --> Date(2000,1,4)
@fact length(merge(cl, op[2:5])) --> 4
@fact length(merge(cl1, op1, :inner)) --> 2
@fact merge(cl1,op1, :inner).values[2,1] --> cl1.values[3,1]
@fact merge(cl1,op1, :inner).values[2,2] --> op1.values[2,1]
@fact length(merge(cl1, op1, :left)) --> 3
@fact merge(cl1,op1, :left).values[1,2] --> isnan
@fact merge(cl1,op1, :left).values[2,1] --> cl1.values[2,1]
@fact merge(cl1,op1, :left).values[2,2] --> op1.values[1,1]
@fact length(merge(cl1, op1, :right)) --> 3
@fact merge(cl1,op1, :right).values[2,1] --> cl1.values[3,1]
@fact merge(cl1,op1, :right).values[2,2] --> op1.values[2,1]
@fact merge(cl1,op1, :right).values[3,1] --> isnan
@fact length(merge(cl1, op1, :outer)) --> 4
@fact merge(cl1,op1, :outer).values[1,2] --> isnan
@fact merge(cl1,op1, :outer).values[2,1] --> cl1.values[2,1]
@fact merge(cl1,op1, :outer).values[2,2] --> op1.values[1,1]
@fact merge(cl1,op1, :outer).values[4,1] --> isnan
end
context("column names match the correct values") do
@fact merge(cl, op[2:5]).colnames --> ["Close", "Open"]
@fact merge(op[2:5], cl).colnames --> ["Open", "Close"]
@fact merge(cl, op[2:5], :inner).colnames --> ["Close", "Open"]
@fact merge(op[2:5], cl, :inner).colnames --> ["Open", "Close"]
@fact merge(cl, op[2:5], :left).colnames --> ["Close", "Open"]
@fact merge(op[2:5], cl, :left).colnames --> ["Open", "Close"]
@fact merge(cl, op[2:5], :right).colnames --> ["Close", "Open"]
@fact merge(op[2:5], cl, :right).colnames --> ["Open", "Close"]
@fact merge(cl, op[2:5], :outer).colnames --> ["Close", "Open"]
@fact merge(op[2:5], cl, :outer).colnames --> ["Open", "Close"]
end
end
facts("vcat works correctly") do
context("concatenates time series correctly in 1D") do
a = TimeArray([Date(2015, 10, 01), Date(2015, 11, 01)], [15, 16], ["Number"])
b = TimeArray([Date(2015, 12, 01)], [17], ["Number"])
c = vcat(a, b)
@fact length(c) --> length(a) + length(b)
@fact c.colnames --> a.colnames
@fact c.colnames --> b.colnames
@fact c.values --> [15, 16, 17]
end
context("concatenates time series correctly in 2D") do
a = TimeArray([Date(2015, 09, 01), Date(2015, 10, 01), Date(2015, 11, 01)], [[15 16]; [17 18]; [19 20]], ["Number 1", "Number 2"])
b = TimeArray([Date(2015, 12, 01)], [18 18], ["Number 1", "Number 2"])
c = vcat(a, b)
@fact length(c) --> length(a) + length(b)
@fact c.colnames --> a.colnames
@fact c.colnames --> b.colnames
@fact c.values --> [[15 16]; [17 18]; [19 20]; [18 18]]
end
context("rejects when column names do not match") do
a = TimeArray([Date(2015, 10, 01), Date(2015, 11, 01)], [15, 16], ["Number"])
b = TimeArray([Date(2015, 12, 01)], [17], ["Data does not match number"])
@fact_throws vcat(a, b)
end
context("rejects when metas do not match") do
a = TimeArray([Date(2015, 10, 01), Date(2015, 11, 01)], [15, 16], ["Number"], :FirstMeta)
b = TimeArray([Date(2015, 12, 01)], [17], ["Number"], :SecondMeta)
@fact_throws vcat(a, b)
end
context("rejects when dates overlap") do
a = TimeArray([Date(2015, 10, 01), Date(2015, 11, 01)], [15, 16], ["Number"])
b = TimeArray([Date(2015, 11, 01)], [17], ["Number"])
@fact_throws vcat(a, b)
end
context("still works when dates are mixed") do
a = TimeArray([Date(2015, 10, 01), Date(2015, 12, 01)], [15, 17], ["Number"])
b = TimeArray([Date(2015, 11, 01)], [16], ["Number"])
c = vcat(a, b)
@fact length(c) --> length(a) + length(b)
@fact c.colnames --> a.colnames
@fact c.colnames --> b.colnames
@fact c.values --> [15, 16, 17]
@fact c.timestamp --> issorted
end
end
facts("map works correctly") do
context("works on both time stamps and 1D values") do
a = TimeArray([Date(2015, 10, 01), Date(2015, 11, 01)], [15, 16], ["Number"], :Something)
b = map((timestamp, values) -> (timestamp + Dates.Year(1), values - 1), a)
@fact length(b) --> length(a)
@fact b.colnames --> a.colnames
@fact Dates.year(b.timestamp[1]) --> Dates.year(a.timestamp[1]) + 1
@fact b.values[1] --> a.values[1] - 1
@fact b.meta --> a.meta
end
context("works on both time stamps and 2D values") do
a = TimeArray([Date(2015, 09, 01), Date(2015, 10, 01), Date(2015, 11, 01)], [[15 16]; [17 18]; [19 20]], ["Number 1", "Number 2"])
b = map((timestamp, values) -> (timestamp + Dates.Year(1), [values[1] + 2, values[2] - 1]), a)
@fact length(b) --> length(a)
@fact b.colnames --> a.colnames
@fact Dates.year(b.timestamp[1]) --> Dates.year(a.timestamp[1]) + 1
@fact b.values[1, 1] --> a.values[1, 1] + 2
@fact b.values[1, 2] --> a.values[1, 2] - 1
end
context("works with order of elements that varies after modifications") do
a = TimeArray([Date(2015, 10, 01), Date(2015, 12, 01)], [15, 16], ["Number"])
b = map((timestamp, values) -> (timestamp + Dates.Year((timestamp >= Date(2015, 11, 01)) ? -1 : 1), values), a)
@fact length(b) --> length(a)
@fact b.timestamp --> issorted
end
end
| [
27,
456,
62,
30783,
29,
16,
12,
940,
198,
3500,
3862,
27996,
11,
220,
5991,
6601,
11,
7308,
13,
35,
689,
198,
29054,
9787,
13,
2617,
7635,
7,
25,
5589,
529,
8,
198,
29054,
9787,
13,
8807,
34242,
7,
7942,
8,
198,
198,
37473,
7203... | 2.091514 | 4,360 |
export
FoldSet,
FOLD_TRAIN,
FOLD_TEST,
foldset_match,
foldset_withhold,
check_fold_match
#########################################
const FOLD_TRAIN = 1
const FOLD_TEST = 2
immutable FoldSet
assignment::Vector{Int} # assignment[i] = j means the ith element is assigned to fold j
fold::Int # target fold we either want to match or withhold, based on `match_fold`
match_fold::Bool
end
foldset_match(assignment::Vector{Int}, fold::Int) = FoldSet(assignment, fold, true)
foldset_withhold(assignment::Vector{Int}, fold::Int) = FoldSet(assignment, fold, false)
function check_fold_match(fold::Integer, fold_assignment::Integer, match_fold::Bool)
(fold != 0) && (fold_assignment != 0) && # NOTE(tim): zero never matches
((match_fold && fold_assignment == fold) || (!match_fold && fold_assignment != fold))
end
check_fold_match(value::Integer, foldset::FoldSet) = check_fold_match(foldset.fold, value, foldset.match_fold)
#########################################
# Iterator
function _find_next_valid_fold_match(foldset::FoldSet, state::Int)
while state < length(foldset.assignment)
state += 1
if check_fold_match(foldset.assignment[state], foldset)
return state
end
end
state + 1 # returns length(foldset.assignment) + 1 on fail
end
Base.start(foldset::FoldSet) = _find_next_valid_fold_match(foldset, 0)
Base.done(foldset::FoldSet, state::Int) = state > length(foldset.assignment)
function Base.next(foldset::FoldSet, state::Int)
@assert(check_fold_match(foldset.assignment[state], foldset))
state, _find_next_valid_fold_match(foldset, state)
end
function Base.length(foldset::FoldSet)
len = 0
state = start(foldset)
while !done(foldset, state)
item, state = next(foldset, state)
len += 1
end
len
end
function Base.collect(foldset::FoldSet)
len = length(foldset)
retval = Array{Int}(len)
for (i,j) in enumerate(foldset)
retval[i] = j
end
retval
end | [
39344,
198,
220,
220,
220,
39957,
7248,
11,
628,
220,
220,
220,
376,
15173,
62,
51,
3861,
1268,
11,
198,
220,
220,
220,
376,
15173,
62,
51,
6465,
11,
628,
220,
220,
220,
5591,
2617,
62,
15699,
11,
198,
220,
220,
220,
5591,
2617,
... | 2.640052 | 764 |
<reponame>byuflowlab/AircraftSystems
#=##############################################################################################
Filename: solve_rotor.jl
Author: <NAME>
Contact: <EMAIL>
README: define an `Action` object to solve a CCBlade rotor
=###############################################################################################
"""
solve_rotor(aircraft, parameters, freestream, environment, step_range, stepi, step_symbol)
Action function.
# Arguments:
* `aircraft::Aircraft` : `Aircraft` system object
* `parameters<:Parameters` `Parameters` struct
* `freestream::Freestream` : `Freestream` object
* `environment::Environment` `Environment` object
* `step_range::AbstractArray` : array of times for which the simulation is run
* `stepi::Int` : index of the current step
* `step_symbol::String` : defines the step, e.g. `alpha` or `time`
NOTE: `omegas` is commanded while `Js` is calculated
`parameters <: Parameters` requires the following elements:
* `omegas::Vector{Float64}` : a vector of rotational speeds in rad/s at the current step (commanded)
* `Js::Array{Float64,2}` : each i,jth element is the advance ratio of the ith rotor at the jth step (calculated)
* `Ts::Array{Float64,2}` : each i,jth element is the thrust of the ith rotor at the jth step
* `Qs::Array{Float64,2}` : each i,jth element is the torque of the ith rotor at the jth step
* `Ps::Array{Float64,2}`: [i,j]th element is the power of the ith rotor at the jth step
* `us::Vector{Vector{Vector{Float64}}}` : each [i][j][k]th element is the axial induced velocity at ith step of the jth rotor at the kth radial section
* `vs::Vector{Vector{Vector{Float64}}}` : each [i][j][k]th element is the swirl induced velocity at ith step of the jth rotor at the kth radial section
"""
function solve_rotor(aircraft, parameters, freestream, environment, step_range, stepi, step_symbol)
# extract parameters
omegas = parameters.omegas[stepi]
Js = parameters.Js[:,stepi]
Ts = parameters.Ts[:,stepi]
Qs = parameters.Qs[:,stepi]
Ps = parameters.Ps[:,stepi]
us = parameters.us[stepi]
vs = parameters.vs[stepi]
solve_rotors!(Js, Ts, Qs, Ps, us, vs, aircraft.rotor_system, omegas, freestream, environment)
return false
end
"""
solve_rotor(aircraft, step_range)
Method returns initialized elements required for the `parameters <: Parameters` struct during simulation.
# Arguments:
* `aircraft::Aircraft`: system to be simulated
* `step_range::AbstractArray`: defines each step of the simulation
# Returns:
* `omegas::Array{Float64,2}`: [i,j]th elemnt is the rotational speeds in rad/s of the ith rotor at the jth step
* `Js::Array{Float64,2}`: [i,j]th element is the advance ratio of the ith rotor at the jth step
* `Ts::Array{Float64,2}`: [i,j]th element is the thrust of the ith rotor at the jth step
* `Qs::Array{Float64,2}`: [i,j]th element is the torque of the ith rotor at the jth step
* `Ps::Array{Float64,2}`: [i,j]th element is the power of the ith rotor at the jth step
* `us::Vector{Vector{Vector{Float64}}}`: each [i][j][k]th element is the axial induced velocity at ith step of the jth rotor at the kth radial section
* `vs::Vector{Vector{Vector{Float64}}}`: each [i][j][k]th element is the swirl induced velocity at ith step of the jth rotor at the kth radial section
"""
function solve_rotor(aircraft, step_range)
nrotors = length(aircraft.rotor_system.index) # number of rotors
omegas = ones(Float64, nrotors, length(step_range))
Js = zeros(nrotors, length(step_range))
Ts = zeros(nrotors, length(step_range))
Qs = zeros(nrotors, length(step_range))
Ps = zeros(nrotors, length(step_range))
us = [[zeros(length(aircraft.rotor_system.rlists[i])) for i in aircraft.rotor_system.index] for _ in 1:length(step_range)]
vs = deepcopy(us)
return omegas, Js, Ts, Qs, Ps, us, vs
end
# # prepare solutionkeys and solutioninits for this particular system and simulation
# solutionkeys = [
# "thrust",
# "torque",
# "efficiency",
# "u",
# "v"
# ]
| [
27,
7856,
261,
480,
29,
1525,
84,
2704,
4883,
397,
14,
32,
27002,
11964,
82,
198,
2,
28,
29113,
29113,
14468,
7804,
4242,
2235,
198,
35063,
25,
8494,
62,
10599,
273,
13,
20362,
198,
13838,
25,
1279,
20608,
29,
198,
17829,
25,
1279,
... | 2.941134 | 1,376 |
module SpecialMatrices
import Base: getindex, size, *
struct JordanBlock{T<:Number} <: AbstractMatrix{T}
blocks::Vector{Int}
diag::T
end
size(A::JordanBlock) = Tuple([1; 1] * sum(A.blocks))
function getindex(A::JordanBlock{T}, i::Int, j::Int) where T <: Number
if i == j
A.diag
elseif i > j || j > i + 1
zero(T)
else
if i in cumsum(A.blocks)
zero(T)
else
one(T)
end
end
end
function *(J::JordanBlock, A::AbstractMatrix)
n = size(J, 2)
na, ma = size(A)
n == na || error("dimension mismatch")
M = A * J.diag
for i = 1:n-1
if J[i,i+1] != 0
M[i,:] += A[i+1,:]
end
end
M
end
end #module
| [
198,
21412,
6093,
19044,
45977,
198,
198,
11748,
7308,
25,
220,
651,
9630,
11,
2546,
11,
1635,
198,
198,
7249,
8078,
12235,
90,
51,
27,
25,
15057,
92,
1279,
25,
27741,
46912,
90,
51,
92,
198,
220,
7021,
3712,
38469,
90,
5317,
92,
... | 2.148867 | 309 |
<gh_stars>0
#--------------------------------------------------------------------
# DNSS.jl
# Soham 03-2022
#--------------------------------------------------------------------
module DNSS
using NLsolve, Random, LinearAlgebra, Printf, Distributed
using PyPlot, LaTeXStrings
export Manifold, Space, ProductSpace, SingleSpaces,
Field, Operator, U, V, Parameters
export save
export extract, range, Field, rmse
export reshapeFromTuple, reshapeToTuple, enforcebc!, enforceregularity!
export setup, distribute, distribute_
export Cardinal, ChebyshevGL, Chebyshev
export value, space, linsolve, norm
export basistransform, project, prolongate, restrict, filtertophalf
export computeη, constraints, residuals, lineconstraints, R, M, expansion
include("AbstractTypes.jl")
include("Spectral/ChebyshevGL.jl")
include("Spectral/1Dspace.jl")
include("Spectral/2Dspace.jl")
include("Spectral/BasisTransform.jl")
include("AnySpace.jl")
include("ArraySpace.jl")
include("BoundaryUtils.jl")
include("SolverUtils.jl")
include("Distribute.jl")
include("Save.jl")
include("SphericalSymmetry.jl")
end
| [
27,
456,
62,
30783,
29,
15,
198,
2,
10097,
650,
198,
2,
45080,
5432,
13,
20362,
198,
2,
311,
1219,
321,
7643,
12,
1238,
1828,
198,
2,
10097,
650,
198,
198,
21412,
45080,
5432,
628,
220,
220,
220,
1262,
22879,
82,
6442,
11,
14534,
... | 3.01519 | 395 |
"""
```
plot_scenario(m, var, class, scen; title = "", kwargs...)
plot_scenario(m, vars, class, scen; untrans = false, fourquarter = false,
plotroot = figurespath(m, \"scenarios\"), titles = [], tick_size = 1,
kwargs...)
```
Plot `var` or `vars` *in deviations from baseline* for the alternative scenario
specified by `key` and `vint`.
### Inputs
- `var::Symbol` or `vars::Vector{Symbol}`: variable(s) to be plotted,
e.g. `:obs_gdp` or `[:obs_gdp, :obs_nominalrate]`
- `class::Symbol`
- `scen::AbstractScenario`: scenario
### Keyword Arguments
- `untrans::Bool`: whether to plot untransformed (model units) forecast
- `fourquarter::Bool`: whether to plot four-quarter forecast
- `plotroot::String`: if nonempty, plots will be saved in that directory
- `title::String` or `titles::Vector{String}`
- `tick_size::Int`: x-axis (time) tick size in units of years
- `legend`
See `?histforecast` for additional keyword arguments, all of which can be passed
into `plot_scenario`.
### Output
- `p::Plot` or `plots::OrderedDict{Symbol, Plot}`
"""
function plot_scenario(m::AbstractModel, var::Symbol, class::Symbol, scen::AbstractScenario;
title::String = "", kwargs...)
plots = plot_scenario(m, [var], class, scen;
titles = isempty(title) ? String[] : [title],
kwargs...)
return plots[var]
end
function plot_scenario(m::AbstractModel, vars::Vector{Symbol}, class::Symbol,
scen::AbstractScenario; untrans::Bool = false, fourquarter::Bool = false,
plotroot::String = figurespath(m, "scenarios"),
titles::Vector{String} = String[],
tick_size::Int = 1, legend = :none,
verbose::Symbol = :low,
kwargs...)
# Determine output_var
fcast_prod = if untrans && fourquarter
error("Only one of untrans or fourquarter can be true")
elseif untrans
:forecastut
elseif fourquarter
:forecast4q
else
:forecast
end
# Read in MeansBands
hist = MeansBands()
fcast = read_scenario_mb(m, scen, Symbol(fcast_prod, class))
# Get titles if not provided
if isempty(titles)
detexify_title = typeof(Plots.backend()) == Plots.GRBackend
titles = map(var -> describe_series(m, var, class, detexify = detexify_title), vars)
end
# Loop through variables
plots = OrderedDict{Symbol, Plots.Plot}()
for (var, title) in zip(vars, titles)
ylabel = series_ylabel(m, var, class, untrans = untrans, fourquarter = fourquarter)
ylabel = ylabel * " (deviations from baseline)"
plots[var] = histforecast(var, hist, fcast;
start_date = date_forecast_start(m),
title = title, legend = legend, kwargs...)
# Save if output_file provided
output_file = if isempty(plotroot)
""
else
get_scenario_filename(m, scen, Symbol(fcast_prod, "_", detexify(var)),
pathfcn = figurespath,
fileformat = plot_extension())
end
save_plot(plots[var], output_file, verbose = verbose)
end
return plots
end | [
37811,
198,
15506,
63,
198,
29487,
62,
1416,
39055,
7,
76,
11,
1401,
11,
1398,
11,
4408,
26,
3670,
796,
366,
1600,
479,
86,
22046,
23029,
198,
198,
29487,
62,
1416,
39055,
7,
76,
11,
410,
945,
11,
1398,
11,
4408,
26,
1418,
26084,
... | 2.249321 | 1,472 |
<filename>src/ICD10Utilities.jl
module ICD10Utilities
using CSV, TypedTables, Dates, CategoricalArrays, Missings
using FileIO
import Base: isless, show, (==)
export ICDOPTS
export AbstractICD10
export ICD10
export ICD10AM, ACHI
export ICD10CA, ICD10CM, ICD10GM
export ICD10CM
export ICD10AMAge
export icd3
export isvalidcode
export icd3, icd4
export seticdpunct
export ICDO3
export ICD10AMcodes, ACHIcodes
"""
ICDOPTS
Dictionary of options for ICD10Utilities.
`:punct`: if `true` (default), ICD-10 codes are displayed with `.` after third character,
and ACHI codes are displayed with `-` after fifth character.
"""
const ICDOPTS = Dict{Symbol,Any}(:punct => true)
# easily set options
"""
seticdpunct(v::Bool)
Set `ICDOPTS[:punct]` to new value.
"""
seticdpunct(v::Bool) = ICDOPTS[:punct] = v
"""
AbstractICD10
Abstract type for ICD-10 codes
"""
abstract type AbstractICD10 end
Broadcast.broadcastable(icdcode::T) where {T<:AbstractICD10} = Ref(icdcode)
include("icd10.jl")
include("icd10fns.jl")
include("othericd10.jl")
include("icd10cm.jl")
include("achi.jl")
include("icd10amfns.jl")
include("icdO3.jl")
end # module
| [
27,
34345,
29,
10677,
14,
2149,
35,
940,
18274,
2410,
13,
20362,
198,
21412,
314,
8610,
940,
18274,
2410,
198,
198,
3500,
44189,
11,
17134,
276,
51,
2977,
11,
44712,
11,
327,
2397,
12409,
3163,
20477,
11,
4544,
654,
198,
3500,
9220,
... | 2.518681 | 455 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.